diff options
Diffstat (limited to 'llvm/test/CodeGen')
101 files changed, 13462 insertions, 4332 deletions
diff --git a/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll b/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll index 1c216e7..e371748 100644 --- a/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll +++ b/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll @@ -11,6 +11,16 @@ entry: ret <4 x i16> %1 } +define <4 x half> @v4bf16_to_v4f16(float, <4 x bfloat> %a) nounwind { +; CHECK-LABEL: v4bf16_to_v4f16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret +entry: + %1 = bitcast <4 x bfloat> %a to <4 x half> + ret <4 x half> %1 +} + define <2 x i32> @v4bf16_to_v2i32(float, <4 x bfloat> %a) nounwind { ; CHECK-LABEL: v4bf16_to_v2i32: ; CHECK: // %bb.0: // %entry @@ -82,6 +92,16 @@ entry: ret <4 x bfloat> %1 } +define <4 x bfloat> @v4f16_to_v4bf16(float, <4 x half> %a) nounwind { +; CHECK-LABEL: v4f16_to_v4bf16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret +entry: + %1 = bitcast <4 x half> %a to <4 x bfloat> + ret <4 x bfloat> %1 +} + define <4 x bfloat> @v2i32_to_v4bf16(float, <2 x i32> %a) nounwind { ; CHECK-LABEL: v2i32_to_v4bf16: ; CHECK: // %bb.0: // %entry @@ -152,6 +172,16 @@ entry: ret <8 x i16> %1 } +define <8 x half> @v8bf16_to_v8f16(float, <8 x bfloat> %a) nounwind { +; CHECK-LABEL: v8bf16_to_v8f16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov v0.16b, v1.16b +; CHECK-NEXT: ret +entry: + %1 = bitcast <8 x bfloat> %a to <8 x half> + ret <8 x half> %1 +} + define <4 x i32> @v8bf16_to_v4i32(float, <8 x bfloat> %a) nounwind { ; CHECK-LABEL: v8bf16_to_v4i32: ; CHECK: // %bb.0: // %entry @@ -202,6 +232,16 @@ entry: ret <8 x bfloat> %1 } +define <8 x bfloat> @v8f16_to_v8bf16(float, <8 x half> %a) nounwind { +; CHECK-LABEL: v8f16_to_v8bf16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov v0.16b, v1.16b +; CHECK-NEXT: ret +entry: + %1 = bitcast <8 x half> %a to <8 x bfloat> + ret <8 x bfloat> %1 +} + define <8 x bfloat> @v4i32_to_v8bf16(float, <4 x i32> %a) nounwind { ; CHECK-LABEL: v4i32_to_v8bf16: ; CHECK: // %bb.0: // %entry diff --git a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll index 0960c4c..a56d5b1 100644 --- a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll +++ b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll @@ -78,9 +78,8 @@ B: define i32 @g_i8_sign_extend_inreg(i8 %in, i32 %a, i32 %b) nounwind { ; CHECK-LABEL: g_i8_sign_extend_inreg: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w8, w1, w2, mi +; CHECK-NEXT: tst w0, #0x80 +; CHECK-NEXT: csel w8, w1, w2, ne ; CHECK-NEXT: add w0, w8, w0, uxtb ; CHECK-NEXT: ret entry: @@ -100,9 +99,8 @@ B: define i32 @g_i16_sign_extend_inreg(i16 %in, i32 %a, i32 %b) nounwind { ; CHECK-LABEL: g_i16_sign_extend_inreg: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w8, w1, w2, mi +; CHECK-NEXT: tst w0, #0x8000 +; CHECK-NEXT: csel w8, w1, w2, ne ; CHECK-NEXT: add w0, w8, w0, uxth ; CHECK-NEXT: ret entry: @@ -167,10 +165,8 @@ B: define i64 @g_i32_sign_extend_i64(i32 %in, i64 %a, i64 %b) nounwind { ; CHECK-LABEL: g_i32_sign_extend_i64: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxtw x8, w0 -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: csel x8, x1, x2, mi +; CHECK-NEXT: tst w0, #0x80000000 +; CHECK-NEXT: csel x8, x1, x2, ne ; CHECK-NEXT: add x0, x8, w0, uxtw ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll 
b/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll index 8bc3497..6233ce7 100644 --- a/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll +++ b/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll @@ -1,20 +1,30 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK-COMMON,CHECK-CVT -; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK-COMMON,CHECK-FP16 +; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-SD +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-SD +; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-GI +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-GI define <4 x half> @add_h(<4 x half> %a, <4 x half> %b) { -; CHECK-CVT-LABEL: add_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fadd v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: add_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fadd v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: add_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fadd v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: add_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fadd v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fadd <4 x half> %a, %b @@ -22,28 +32,54 @@ entry: } define <4 x half> @build_h4(<4 x half> %a) { -; CHECK-COMMON-LABEL: build_h4: -; CHECK-COMMON: // %bb.0: // %entry -; CHECK-COMMON-NEXT: mov w8, #15565 // =0x3ccd -; CHECK-COMMON-NEXT: dup v0.4h, w8 -; CHECK-COMMON-NEXT: ret +; CHECK-CVT-SD-LABEL: build_h4: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: mov w8, #15565 // =0x3ccd +; CHECK-CVT-SD-NEXT: dup v0.4h, w8 +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: build_h4: +; CHECK-FP16-SD: // %bb.0: // %entry +; CHECK-FP16-SD-NEXT: mov w8, #15565 // =0x3ccd +; CHECK-FP16-SD-NEXT: dup v0.4h, w8 +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: build_h4: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: adrp x8, .LCPI1_0 +; CHECK-CVT-GI-NEXT: ldr d0, [x8, :lo12:.LCPI1_0] +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: build_h4: +; CHECK-FP16-GI: // %bb.0: // %entry +; CHECK-FP16-GI-NEXT: adrp x8, .LCPI1_0 +; CHECK-FP16-GI-NEXT: ldr d0, [x8, :lo12:.LCPI1_0] +; CHECK-FP16-GI-NEXT: ret entry: ret <4 x half> <half 0xH3CCD, half 0xH3CCD, half 0xH3CCD, half 0xH3CCD> } define <4 x half> @sub_h(<4 x half> %a, <4 x half> %b) { -; CHECK-CVT-LABEL: sub_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fsub v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: sub_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: 
fsub v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: sub_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fsub v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sub_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fsub v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fsub <4 x half> %a, %b @@ -51,18 +87,26 @@ entry: } define <4 x half> @mul_h(<4 x half> %a, <4 x half> %b) { -; CHECK-CVT-LABEL: mul_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fmul v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: mul_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fmul v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: mul_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fmul v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: mul_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fmul v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fmul <4 x half> %a, %b @@ -70,18 +114,26 @@ entry: } define <4 x half> @div_h(<4 x half> %a, <4 x half> %b) { -; CHECK-CVT-LABEL: div_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fdiv v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: div_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fdiv v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: div_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fdiv v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: div_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fdiv v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fdiv <4 x half> %a, %b @@ -89,92 +141,162 @@ entry: } define <4 x half> @load_h(ptr %a) { -; CHECK-COMMON-LABEL: load_h: -; CHECK-COMMON: // %bb.0: // %entry -; CHECK-COMMON-NEXT: ldr d0, [x0] -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: load_h: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ret entry: %0 = load <4 x half>, ptr %a, align 4 ret <4 x half> %0 } define void @store_h(ptr %a, <4 x half> %b) { -; CHECK-COMMON-LABEL: store_h: -; CHECK-COMMON: // %bb.0: // %entry -; CHECK-COMMON-NEXT: str d0, [x0] -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: store_h: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret entry: store <4 x half> %b, ptr %a, align 4 ret void } define <4 x half> @s_to_h(<4 x float> %a) { -; CHECK-COMMON-LABEL: s_to_h: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: s_to_h: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: ret %1 = fptrunc <4 x float> 
%a to <4 x half> ret <4 x half> %1 } define <4 x half> @d_to_h(<4 x double> %a) { -; CHECK-COMMON-LABEL: d_to_h: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: fcvtxn v0.2s, v0.2d -; CHECK-COMMON-NEXT: fcvtxn2 v0.4s, v1.2d -; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s -; CHECK-COMMON-NEXT: ret +; CHECK-CVT-SD-LABEL: d_to_h: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtxn v0.2s, v0.2d +; CHECK-CVT-SD-NEXT: fcvtxn2 v0.4s, v1.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: d_to_h: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: fcvtxn v0.2s, v0.2d +; CHECK-FP16-SD-NEXT: fcvtxn2 v0.4s, v1.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: d_to_h: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: mov d2, v0.d[1] +; CHECK-CVT-GI-NEXT: fcvt h0, d0 +; CHECK-CVT-GI-NEXT: mov d3, v1.d[1] +; CHECK-CVT-GI-NEXT: fcvt h1, d1 +; CHECK-CVT-GI-NEXT: fcvt h2, d2 +; CHECK-CVT-GI-NEXT: mov v0.h[1], v2.h[0] +; CHECK-CVT-GI-NEXT: fcvt h2, d3 +; CHECK-CVT-GI-NEXT: mov v0.h[2], v1.h[0] +; CHECK-CVT-GI-NEXT: mov v0.h[3], v2.h[0] +; CHECK-CVT-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: d_to_h: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: mov d2, v0.d[1] +; CHECK-FP16-GI-NEXT: fcvt h0, d0 +; CHECK-FP16-GI-NEXT: mov d3, v1.d[1] +; CHECK-FP16-GI-NEXT: fcvt h1, d1 +; CHECK-FP16-GI-NEXT: fcvt h2, d2 +; CHECK-FP16-GI-NEXT: mov v0.h[1], v2.h[0] +; CHECK-FP16-GI-NEXT: fcvt h2, d3 +; CHECK-FP16-GI-NEXT: mov v0.h[2], v1.h[0] +; CHECK-FP16-GI-NEXT: mov v0.h[3], v2.h[0] +; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-FP16-GI-NEXT: ret %1 = fptrunc <4 x double> %a to <4 x half> ret <4 x half> %1 } define <4 x float> @h_to_s(<4 x half> %a) { -; CHECK-COMMON-LABEL: h_to_s: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: fcvtl v0.4s, v0.4h -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: h_to_s: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtl v0.4s, v0.4h +; CHECK-NEXT: ret %1 = fpext <4 x half> %a to <4 x float> ret <4 x float> %1 } define <4 x double> @h_to_d(<4 x half> %a) { -; CHECK-COMMON-LABEL: h_to_d: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: fcvtl v0.4s, v0.4h -; CHECK-COMMON-NEXT: fcvtl2 v1.2d, v0.4s -; CHECK-COMMON-NEXT: fcvtl v0.2d, v0.2s -; CHECK-COMMON-NEXT: ret +; CHECK-CVT-SD-LABEL: h_to_d: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl2 v1.2d, v0.4s +; CHECK-CVT-SD-NEXT: fcvtl v0.2d, v0.2s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: h_to_d: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-FP16-SD-NEXT: fcvtl2 v1.2d, v0.4s +; CHECK-FP16-SD-NEXT: fcvtl v0.2d, v0.2s +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: h_to_d: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-CVT-GI-NEXT: mov h1, v0.h[1] +; CHECK-CVT-GI-NEXT: mov h2, v0.h[2] +; CHECK-CVT-GI-NEXT: mov h3, v0.h[3] +; CHECK-CVT-GI-NEXT: fcvt d0, h0 +; CHECK-CVT-GI-NEXT: fcvt d4, h1 +; CHECK-CVT-GI-NEXT: fcvt d1, h2 +; CHECK-CVT-GI-NEXT: fcvt d2, h3 +; CHECK-CVT-GI-NEXT: mov v0.d[1], v4.d[0] +; CHECK-CVT-GI-NEXT: mov v1.d[1], v2.d[0] +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: h_to_d: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-FP16-GI-NEXT: mov h1, v0.h[1] +; CHECK-FP16-GI-NEXT: mov h2, v0.h[2] +; CHECK-FP16-GI-NEXT: mov h3, v0.h[3] +; CHECK-FP16-GI-NEXT: fcvt d0, h0 +; 
CHECK-FP16-GI-NEXT: fcvt d4, h1 +; CHECK-FP16-GI-NEXT: fcvt d1, h2 +; CHECK-FP16-GI-NEXT: fcvt d2, h3 +; CHECK-FP16-GI-NEXT: mov v0.d[1], v4.d[0] +; CHECK-FP16-GI-NEXT: mov v1.d[1], v2.d[0] +; CHECK-FP16-GI-NEXT: ret %1 = fpext <4 x half> %a to <4 x double> ret <4 x double> %1 } define <4 x half> @bitcast_i_to_h(float, <4 x i16> %a) { -; CHECK-COMMON-LABEL: bitcast_i_to_h: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: fmov d0, d1 -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: bitcast_i_to_h: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret %2 = bitcast <4 x i16> %a to <4 x half> ret <4 x half> %2 } define <4 x i16> @bitcast_h_to_i(float, <4 x half> %a) { -; CHECK-COMMON-LABEL: bitcast_h_to_i: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: fmov d0, d1 -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: bitcast_h_to_i: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret %2 = bitcast <4 x half> %a to <4 x i16> ret <4 x i16> %2 } define <4 x half> @sitofp_i8(<4 x i8> %a) #0 { -; CHECK-CVT-LABEL: sitofp_i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: shl v0.4h, v0.4h, #8 -; CHECK-CVT-NEXT: sshr v0.4h, v0.4h, #8 -; CHECK-CVT-NEXT: sshll v0.4s, v0.4h, #0 -; CHECK-CVT-NEXT: scvtf v0.4s, v0.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: sitofp_i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: shl v0.4h, v0.4h, #8 +; CHECK-CVT-SD-NEXT: sshr v0.4h, v0.4h, #8 +; CHECK-CVT-SD-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: scvtf v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: sitofp_i8: ; CHECK-FP16: // %bb.0: @@ -182,6 +304,15 @@ define <4 x half> @sitofp_i8(<4 x i8> %a) #0 { ; CHECK-FP16-NEXT: sshr v0.4h, v0.4h, #8 ; CHECK-FP16-NEXT: scvtf v0.4h, v0.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sitofp_i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: shl v0.4s, v0.4s, #24 +; CHECK-CVT-GI-NEXT: sshr v0.4s, v0.4s, #24 +; CHECK-CVT-GI-NEXT: scvtf v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = sitofp <4 x i8> %a to <4 x half> ret <4 x half> %1 } @@ -204,43 +335,59 @@ define <4 x half> @sitofp_i16(<4 x i16> %a) #0 { define <4 x half> @sitofp_i32(<4 x i32> %a) #0 { -; CHECK-COMMON-LABEL: sitofp_i32: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: scvtf v0.4s, v0.4s -; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: sitofp_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: scvtf v0.4s, v0.4s +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: ret %1 = sitofp <4 x i32> %a to <4 x half> ret <4 x half> %1 } define <4 x half> @sitofp_i64(<4 x i64> %a) #0 { -; CHECK-COMMON-LABEL: sitofp_i64: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: scvtf v0.2d, v0.2d -; CHECK-COMMON-NEXT: scvtf v1.2d, v1.2d -; CHECK-COMMON-NEXT: fcvtn v0.2s, v0.2d -; CHECK-COMMON-NEXT: fcvtn2 v0.4s, v1.2d -; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: sitofp_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: scvtf v0.2d, v0.2d +; CHECK-NEXT: scvtf v1.2d, v1.2d +; CHECK-NEXT: fcvtn v0.2s, v0.2d +; CHECK-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: ret %1 = sitofp <4 x i64> %a to <4 x half> ret <4 x half> %1 } define <4 x half> @uitofp_i8(<4 x i8> %a) #0 { -; CHECK-CVT-LABEL: uitofp_i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: bic v0.4h, #255, lsl #8 -; CHECK-CVT-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-CVT-NEXT: ucvtf v0.4s, v0.4s -; CHECK-CVT-NEXT: fcvtn 
v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: bic v0.4h, #255, lsl #8 +; CHECK-CVT-SD-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; -; CHECK-FP16-LABEL: uitofp_i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: bic v0.4h, #255, lsl #8 -; CHECK-FP16-NEXT: ucvtf v0.4h, v0.4h -; CHECK-FP16-NEXT: ret +; CHECK-FP16-SD-LABEL: uitofp_i8: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: bic v0.4h, #255, lsl #8 +; CHECK-FP16-SD-NEXT: ucvtf v0.4h, v0.4h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: movi v1.2d, #0x0000ff000000ff +; CHECK-CVT-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-CVT-GI-NEXT: ucvtf v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: uitofp_i8: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: movi d1, #0xff00ff00ff00ff +; CHECK-FP16-GI-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-FP16-GI-NEXT: ucvtf v0.4h, v0.4h +; CHECK-FP16-GI-NEXT: ret %1 = uitofp <4 x i8> %a to <4 x half> ret <4 x half> %1 } @@ -264,35 +411,35 @@ define <4 x half> @uitofp_i16(<4 x i16> %a) #0 { define <4 x half> @uitofp_i32(<4 x i32> %a) #0 { -; CHECK-COMMON-LABEL: uitofp_i32: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: ucvtf v0.4s, v0.4s -; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: uitofp_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ucvtf v0.4s, v0.4s +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: ret %1 = uitofp <4 x i32> %a to <4 x half> ret <4 x half> %1 } define <4 x half> @uitofp_i64(<4 x i64> %a) #0 { -; CHECK-COMMON-LABEL: uitofp_i64: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: ucvtf v0.2d, v0.2d -; CHECK-COMMON-NEXT: ucvtf v1.2d, v1.2d -; CHECK-COMMON-NEXT: fcvtn v0.2s, v0.2d -; CHECK-COMMON-NEXT: fcvtn2 v0.4s, v1.2d -; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: uitofp_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ucvtf v0.2d, v0.2d +; CHECK-NEXT: ucvtf v1.2d, v1.2d +; CHECK-NEXT: fcvtn v0.2s, v0.2d +; CHECK-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: ret %1 = uitofp <4 x i64> %a to <4 x half> ret <4 x half> %1 } define void @test_insert_at_zero(half %a, ptr %b) #0 { -; CHECK-COMMON-LABEL: test_insert_at_zero: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: // kill: def $h0 killed $h0 def $d0 -; CHECK-COMMON-NEXT: str d0, [x0] -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: test_insert_at_zero: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0 +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret %1 = insertelement <4 x half> undef, half %a, i64 0 store <4 x half> %1, ptr %b, align 4 ret void @@ -331,17 +478,29 @@ define <4 x i16> @fptosi_i16(<4 x half> %a) #0 { } define <4 x i8> @fptoui_i8(<4 x half> %a) #0 { -; CHECK-CVT-LABEL: fptoui_i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptoui_i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; -; CHECK-FP16-LABEL: fptoui_i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs v0.4h, v0.4h -; CHECK-FP16-NEXT: ret +; CHECK-FP16-SD-LABEL: fptoui_i8: +; 
CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: fcvtzs v0.4h, v0.4h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptoui_i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: fptoui_i8: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: fcvtzu v0.4h, v0.4h +; CHECK-FP16-GI-NEXT: ret ; NOTE: fcvtzs selected here because the xtn shaves the sign bit %1 = fptoui<4 x half> %a to <4 x i8> ret <4 x i8> %1 @@ -364,36 +523,45 @@ define <4 x i16> @fptoui_i16(<4 x half> %a) #0 { } define <4 x i1> @test_fcmp_une(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_une: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_une: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_une: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmeq v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_une: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp une <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_ueq(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ueq: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v2.16b -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: mvn v0.8b, v0.8b -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ueq: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ueq: ; CHECK-FP16: // %bb.0: @@ -402,102 +570,149 @@ define <4 x i1> @test_fcmp_ueq(<4 x half> %a, <4 x half> %b) #0 { ; CHECK-FP16-NEXT: orr v0.8b, v0.8b, v2.8b ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ueq: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ueq <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_ugt(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ugt: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmge 
v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: mvn v0.8b, v0.8b -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ugt: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ugt: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmge v0.4h, v1.4h, v0.4h ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ugt: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ugt <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_uge(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_uge: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: mvn v0.8b, v0.8b -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_uge: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_uge: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmgt v0.4h, v1.4h, v0.4h ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_uge: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp uge <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_ult(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ult: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: mvn v0.8b, v0.8b -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ult: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ult: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmge v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ult: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ult <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_ule(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ule: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: 
mvn v0.8b, v0.8b -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ule: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ule: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmgt v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ule: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ule <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_uno(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_uno: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v2.16b -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: mvn v0.8b, v0.8b -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_uno: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_uno: ; CHECK-FP16: // %bb.0: @@ -506,21 +721,32 @@ define <4 x i1> @test_fcmp_uno(<4 x half> %a, <4 x half> %b) #0 { ; CHECK-FP16-NEXT: orr v0.8b, v0.8b, v2.8b ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_uno: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp uno <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_one(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_one: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v2.16b -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_one: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_one: ; CHECK-FP16: // %bb.0: @@ -528,60 +754,94 @@ define <4 x i1> @test_fcmp_one(<4 x half> %a, <4 x half> %b) #0 { ; CHECK-FP16-NEXT: fcmgt v0.4h, v1.4h, v0.4h ; CHECK-FP16-NEXT: orr v0.8b, v0.8b, v2.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_one: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; 
CHECK-CVT-GI-NEXT: fcmgt v2.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp one <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_oeq(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_oeq: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_oeq: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_oeq: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmeq v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_oeq: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp oeq <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_ogt(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ogt: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ogt: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ogt: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmgt v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ogt: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ogt <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_oge(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_oge: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_oge: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_oge: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmge v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_oge: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp oge <4 x half> %a, %b ret <4 x i1> %1 @@ -624,15 +884,15 @@ define <4 x i1> @test_fcmp_ole(<4 x half> %a, <4 x half> %b) #0 { } define <4 x i1> @test_fcmp_ord(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ord: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl 
v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v2.16b -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ord: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ord: ; CHECK-FP16: // %bb.0: @@ -640,6 +900,16 @@ define <4 x i1> @test_fcmp_ord(<4 x half> %a, <4 x half> %b) #0 { ; CHECK-FP16-NEXT: fcmgt v0.4h, v1.4h, v0.4h ; CHECK-FP16-NEXT: orr v0.8b, v0.8b, v2.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ord: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ord <4 x half> %a, %b ret <4 x i1> %1 diff --git a/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll b/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll index fcb42a7..86763eb 100644 --- a/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll +++ b/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll @@ -1,24 +1,38 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT -; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16 +; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-SD +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-SD +; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-GI +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-GI define <8 x half> @add_h(<8 x half> %a, <8 x half> %b) { -; CHECK-CVT-LABEL: add_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v2.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v3.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-CVT-NEXT: fadd v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fadd v1.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: add_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v2.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v3.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-SD-NEXT: fadd v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fadd v1.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: add_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fadd v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: add_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fadd 
v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fadd v1.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fadd <8 x half> %a, %b ret <8 x half> %0 @@ -26,22 +40,34 @@ entry: define <8 x half> @sub_h(<8 x half> %a, <8 x half> %b) { -; CHECK-CVT-LABEL: sub_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v2.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v3.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-CVT-NEXT: fsub v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fsub v1.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: sub_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v2.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v3.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-SD-NEXT: fsub v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fsub v1.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: sub_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fsub v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sub_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fsub v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fsub v1.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fsub <8 x half> %a, %b ret <8 x half> %0 @@ -49,22 +75,34 @@ entry: define <8 x half> @mul_h(<8 x half> %a, <8 x half> %b) { -; CHECK-CVT-LABEL: mul_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v2.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v3.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-CVT-NEXT: fmul v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fmul v1.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: mul_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v2.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v3.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-SD-NEXT: fmul v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fmul v1.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: mul_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fmul v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: mul_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fmul v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fmul v1.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fmul <8 x half> %a, %b ret <8 x half> %0 @@ -72,22 +110,34 @@ entry: define <8 x half> @div_h(<8 x half> %a, <8 x half> %b) { -; CHECK-CVT-LABEL: div_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v2.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v3.4s, v0.4h -; 
CHECK-CVT-NEXT: fcvtl2 v1.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-CVT-NEXT: fdiv v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fdiv v1.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: div_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v2.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v3.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-SD-NEXT: fdiv v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fdiv v1.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: div_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fdiv v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: div_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fdiv v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fdiv v1.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fdiv <8 x half> %a, %b ret <8 x half> %0 @@ -126,39 +176,171 @@ define <8 x half> @s_to_h(<8 x float> %a) { } define <8 x half> @d_to_h(<8 x double> %a) { -; CHECK-LABEL: d_to_h: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtxn v0.2s, v0.2d -; CHECK-NEXT: fcvtxn v2.2s, v2.2d -; CHECK-NEXT: fcvtxn2 v0.4s, v1.2d -; CHECK-NEXT: fcvtxn2 v2.4s, v3.2d -; CHECK-NEXT: fcvtn v0.4h, v0.4s -; CHECK-NEXT: fcvtn2 v0.8h, v2.4s -; CHECK-NEXT: ret +; CHECK-CVT-SD-LABEL: d_to_h: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtxn v0.2s, v0.2d +; CHECK-CVT-SD-NEXT: fcvtxn v2.2s, v2.2d +; CHECK-CVT-SD-NEXT: fcvtxn2 v0.4s, v1.2d +; CHECK-CVT-SD-NEXT: fcvtxn2 v2.4s, v3.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: d_to_h: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: fcvtxn v0.2s, v0.2d +; CHECK-FP16-SD-NEXT: fcvtxn v2.2s, v2.2d +; CHECK-FP16-SD-NEXT: fcvtxn2 v0.4s, v1.2d +; CHECK-FP16-SD-NEXT: fcvtxn2 v2.4s, v3.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: d_to_h: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: mov d4, v0.d[1] +; CHECK-CVT-GI-NEXT: fcvt h0, d0 +; CHECK-CVT-GI-NEXT: mov d5, v1.d[1] +; CHECK-CVT-GI-NEXT: fcvt h1, d1 +; CHECK-CVT-GI-NEXT: fcvt h4, d4 +; CHECK-CVT-GI-NEXT: mov v0.h[1], v4.h[0] +; CHECK-CVT-GI-NEXT: fcvt h4, d5 +; CHECK-CVT-GI-NEXT: mov v0.h[2], v1.h[0] +; CHECK-CVT-GI-NEXT: mov d1, v2.d[1] +; CHECK-CVT-GI-NEXT: fcvt h2, d2 +; CHECK-CVT-GI-NEXT: mov v0.h[3], v4.h[0] +; CHECK-CVT-GI-NEXT: fcvt h1, d1 +; CHECK-CVT-GI-NEXT: mov v0.h[4], v2.h[0] +; CHECK-CVT-GI-NEXT: mov d2, v3.d[1] +; CHECK-CVT-GI-NEXT: fcvt h3, d3 +; CHECK-CVT-GI-NEXT: mov v0.h[5], v1.h[0] +; CHECK-CVT-GI-NEXT: fcvt h1, d2 +; CHECK-CVT-GI-NEXT: mov v0.h[6], v3.h[0] +; CHECK-CVT-GI-NEXT: mov v0.h[7], v1.h[0] +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: d_to_h: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: mov d4, v0.d[1] +; CHECK-FP16-GI-NEXT: fcvt h0, d0 +; CHECK-FP16-GI-NEXT: mov d5, v1.d[1] +; CHECK-FP16-GI-NEXT: fcvt h1, d1 +; CHECK-FP16-GI-NEXT: fcvt h4, d4 +; CHECK-FP16-GI-NEXT: mov v0.h[1], v4.h[0] +; CHECK-FP16-GI-NEXT: fcvt h4, d5 +; CHECK-FP16-GI-NEXT: 
mov v0.h[2], v1.h[0] +; CHECK-FP16-GI-NEXT: mov d1, v2.d[1] +; CHECK-FP16-GI-NEXT: fcvt h2, d2 +; CHECK-FP16-GI-NEXT: mov v0.h[3], v4.h[0] +; CHECK-FP16-GI-NEXT: fcvt h1, d1 +; CHECK-FP16-GI-NEXT: mov v0.h[4], v2.h[0] +; CHECK-FP16-GI-NEXT: mov d2, v3.d[1] +; CHECK-FP16-GI-NEXT: fcvt h3, d3 +; CHECK-FP16-GI-NEXT: mov v0.h[5], v1.h[0] +; CHECK-FP16-GI-NEXT: fcvt h1, d2 +; CHECK-FP16-GI-NEXT: mov v0.h[6], v3.h[0] +; CHECK-FP16-GI-NEXT: mov v0.h[7], v1.h[0] +; CHECK-FP16-GI-NEXT: ret %1 = fptrunc <8 x double> %a to <8 x half> ret <8 x half> %1 } define <8 x float> @h_to_s(<8 x half> %a) { -; CHECK-LABEL: h_to_s: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-NEXT: fcvtl v0.4s, v0.4h -; CHECK-NEXT: ret +; CHECK-CVT-SD-LABEL: h_to_s: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: h_to_s: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-FP16-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: h_to_s: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-GI-NEXT: mov v0.16b, v2.16b +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: h_to_s: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-FP16-GI-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-FP16-GI-NEXT: mov v0.16b, v2.16b +; CHECK-FP16-GI-NEXT: ret %1 = fpext <8 x half> %a to <8 x float> ret <8 x float> %1 } define <8 x double> @h_to_d(<8 x half> %a) { -; CHECK-LABEL: h_to_d: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl v1.4s, v0.4h -; CHECK-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-NEXT: fcvtl v0.2d, v1.2s -; CHECK-NEXT: fcvtl2 v3.2d, v2.4s -; CHECK-NEXT: fcvtl2 v1.2d, v1.4s -; CHECK-NEXT: fcvtl v2.2d, v2.2s -; CHECK-NEXT: ret +; CHECK-CVT-SD-LABEL: h_to_d: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.2d, v1.2s +; CHECK-CVT-SD-NEXT: fcvtl2 v3.2d, v2.4s +; CHECK-CVT-SD-NEXT: fcvtl2 v1.2d, v1.4s +; CHECK-CVT-SD-NEXT: fcvtl v2.2d, v2.2s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: h_to_d: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: fcvtl v1.4s, v0.4h +; CHECK-FP16-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-FP16-SD-NEXT: fcvtl v0.2d, v1.2s +; CHECK-FP16-SD-NEXT: fcvtl2 v3.2d, v2.4s +; CHECK-FP16-SD-NEXT: fcvtl2 v1.2d, v1.4s +; CHECK-FP16-SD-NEXT: fcvtl v2.2d, v2.2s +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: h_to_d: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: mov h1, v0.h[1] +; CHECK-CVT-GI-NEXT: mov h2, v0.h[2] +; CHECK-CVT-GI-NEXT: mov h3, v0.h[3] +; CHECK-CVT-GI-NEXT: mov h4, v0.h[4] +; CHECK-CVT-GI-NEXT: mov h5, v0.h[5] +; CHECK-CVT-GI-NEXT: mov h6, v0.h[6] +; CHECK-CVT-GI-NEXT: mov h7, v0.h[7] +; CHECK-CVT-GI-NEXT: fcvt d0, h0 +; CHECK-CVT-GI-NEXT: fcvt d16, h1 +; CHECK-CVT-GI-NEXT: fcvt d1, h2 +; CHECK-CVT-GI-NEXT: fcvt d17, h3 +; CHECK-CVT-GI-NEXT: fcvt d2, h4 +; CHECK-CVT-GI-NEXT: fcvt d4, h5 +; CHECK-CVT-GI-NEXT: fcvt d3, h6 +; CHECK-CVT-GI-NEXT: fcvt d5, h7 +; CHECK-CVT-GI-NEXT: mov v0.d[1], v16.d[0] +; CHECK-CVT-GI-NEXT: mov v1.d[1], v17.d[0] +; CHECK-CVT-GI-NEXT: mov v2.d[1], v4.d[0] +; CHECK-CVT-GI-NEXT: mov v3.d[1], v5.d[0] +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: h_to_d: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: mov h1, v0.h[1] +; CHECK-FP16-GI-NEXT: mov h2, v0.h[2] +; CHECK-FP16-GI-NEXT: mov h3, v0.h[3] +; CHECK-FP16-GI-NEXT: 
mov h4, v0.h[4] +; CHECK-FP16-GI-NEXT: mov h5, v0.h[5] +; CHECK-FP16-GI-NEXT: mov h6, v0.h[6] +; CHECK-FP16-GI-NEXT: mov h7, v0.h[7] +; CHECK-FP16-GI-NEXT: fcvt d0, h0 +; CHECK-FP16-GI-NEXT: fcvt d16, h1 +; CHECK-FP16-GI-NEXT: fcvt d1, h2 +; CHECK-FP16-GI-NEXT: fcvt d17, h3 +; CHECK-FP16-GI-NEXT: fcvt d2, h4 +; CHECK-FP16-GI-NEXT: fcvt d4, h5 +; CHECK-FP16-GI-NEXT: fcvt d3, h6 +; CHECK-FP16-GI-NEXT: fcvt d5, h7 +; CHECK-FP16-GI-NEXT: mov v0.d[1], v16.d[0] +; CHECK-FP16-GI-NEXT: mov v1.d[1], v17.d[0] +; CHECK-FP16-GI-NEXT: mov v2.d[1], v4.d[0] +; CHECK-FP16-GI-NEXT: mov v3.d[1], v5.d[0] +; CHECK-FP16-GI-NEXT: ret %1 = fpext <8 x half> %a to <8 x double> ret <8 x double> %1 } @@ -183,14 +365,14 @@ define <8 x i16> @bitcast_h_to_i(float, <8 x half> %a) { } define <4 x half> @sitofp_v4i8(<4 x i8> %a) #0 { -; CHECK-CVT-LABEL: sitofp_v4i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: shl v0.4h, v0.4h, #8 -; CHECK-CVT-NEXT: sshr v0.4h, v0.4h, #8 -; CHECK-CVT-NEXT: sshll v0.4s, v0.4h, #0 -; CHECK-CVT-NEXT: scvtf v0.4s, v0.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: sitofp_v4i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: shl v0.4h, v0.4h, #8 +; CHECK-CVT-SD-NEXT: sshr v0.4h, v0.4h, #8 +; CHECK-CVT-SD-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: scvtf v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: sitofp_v4i8: ; CHECK-FP16: // %bb.0: @@ -198,76 +380,132 @@ define <4 x half> @sitofp_v4i8(<4 x i8> %a) #0 { ; CHECK-FP16-NEXT: sshr v0.4h, v0.4h, #8 ; CHECK-FP16-NEXT: scvtf v0.4h, v0.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sitofp_v4i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: shl v0.4s, v0.4s, #24 +; CHECK-CVT-GI-NEXT: sshr v0.4s, v0.4s, #24 +; CHECK-CVT-GI-NEXT: scvtf v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = sitofp <4 x i8> %a to <4 x half> ret <4 x half> %1 } define <8 x half> @sitofp_v8i8(<8 x i8> %a) #0 { -; CHECK-CVT-LABEL: sitofp_v8i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: sshll v0.8h, v0.8b, #0 -; CHECK-CVT-NEXT: sshll v1.4s, v0.4h, #0 -; CHECK-CVT-NEXT: sshll2 v2.4s, v0.8h, #0 -; CHECK-CVT-NEXT: scvtf v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s -; CHECK-CVT-NEXT: scvtf v1.4s, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: sitofp_v8i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-CVT-SD-NEXT: sshll v1.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: sshll2 v2.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: scvtf v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-SD-NEXT: scvtf v1.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: sitofp_v8i8: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: sshll v0.8h, v0.8b, #0 ; CHECK-FP16-NEXT: scvtf v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sitofp_v8i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-CVT-GI-NEXT: sshll v1.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: sshll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: scvtf v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: scvtf v2.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret %1 = sitofp <8 x i8> %a to <8 x half> ret <8 x half> %1 } define <16 x half> @sitofp_v16i8(<16 x i8> %a) #0 { -; CHECK-CVT-LABEL: sitofp_v16i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: sshll2 v1.8h, 
v0.16b, #0 -; CHECK-CVT-NEXT: sshll v0.8h, v0.8b, #0 -; CHECK-CVT-NEXT: sshll v2.4s, v1.4h, #0 -; CHECK-CVT-NEXT: sshll v3.4s, v0.4h, #0 -; CHECK-CVT-NEXT: sshll2 v4.4s, v1.8h, #0 -; CHECK-CVT-NEXT: sshll2 v5.4s, v0.8h, #0 -; CHECK-CVT-NEXT: scvtf v2.4s, v2.4s -; CHECK-CVT-NEXT: scvtf v3.4s, v3.4s -; CHECK-CVT-NEXT: fcvtn v1.4h, v2.4s -; CHECK-CVT-NEXT: scvtf v2.4s, v4.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v3.4s -; CHECK-CVT-NEXT: scvtf v3.4s, v5.4s -; CHECK-CVT-NEXT: fcvtn2 v1.8h, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v3.4s -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: sitofp_v16i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: sshll2 v1.8h, v0.16b, #0 -; CHECK-FP16-NEXT: sshll v0.8h, v0.8b, #0 -; CHECK-FP16-NEXT: scvtf v1.8h, v1.8h -; CHECK-FP16-NEXT: scvtf v0.8h, v0.8h -; CHECK-FP16-NEXT: ret +; CHECK-CVT-SD-LABEL: sitofp_v16i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: sshll2 v1.8h, v0.16b, #0 +; CHECK-CVT-SD-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-CVT-SD-NEXT: sshll v2.4s, v1.4h, #0 +; CHECK-CVT-SD-NEXT: sshll v3.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: sshll2 v4.4s, v1.8h, #0 +; CHECK-CVT-SD-NEXT: sshll2 v5.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: scvtf v2.4s, v2.4s +; CHECK-CVT-SD-NEXT: scvtf v3.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcvtn v1.4h, v2.4s +; CHECK-CVT-SD-NEXT: scvtf v2.4s, v4.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v3.4s +; CHECK-CVT-SD-NEXT: scvtf v3.4s, v5.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v1.8h, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v3.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: sitofp_v16i8: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: sshll2 v1.8h, v0.16b, #0 +; CHECK-FP16-SD-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-FP16-SD-NEXT: scvtf v1.8h, v1.8h +; CHECK-FP16-SD-NEXT: scvtf v0.8h, v0.8h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sitofp_v16i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: sshll v1.8h, v0.8b, #0 +; CHECK-CVT-GI-NEXT: sshll2 v0.8h, v0.16b, #0 +; CHECK-CVT-GI-NEXT: sshll v2.4s, v1.4h, #0 +; CHECK-CVT-GI-NEXT: sshll v3.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: sshll2 v1.4s, v1.8h, #0 +; CHECK-CVT-GI-NEXT: sshll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: scvtf v2.4s, v2.4s +; CHECK-CVT-GI-NEXT: scvtf v3.4s, v3.4s +; CHECK-CVT-GI-NEXT: scvtf v4.4s, v1.4s +; CHECK-CVT-GI-NEXT: scvtf v5.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-GI-NEXT: fcvtn v1.4h, v3.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v4.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v1.8h, v5.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: sitofp_v16i8: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: sshll v1.8h, v0.8b, #0 +; CHECK-FP16-GI-NEXT: sshll2 v2.8h, v0.16b, #0 +; CHECK-FP16-GI-NEXT: scvtf v0.8h, v1.8h +; CHECK-FP16-GI-NEXT: scvtf v1.8h, v2.8h +; CHECK-FP16-GI-NEXT: ret %1 = sitofp <16 x i8> %a to <16 x half> ret <16 x half> %1 } define <8 x half> @sitofp_i16(<8 x i16> %a) #0 { -; CHECK-CVT-LABEL: sitofp_i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: sshll v1.4s, v0.4h, #0 -; CHECK-CVT-NEXT: sshll2 v2.4s, v0.8h, #0 -; CHECK-CVT-NEXT: scvtf v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s -; CHECK-CVT-NEXT: scvtf v1.4s, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: sitofp_i16: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: sshll v1.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: sshll2 v2.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: scvtf v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-SD-NEXT: scvtf v1.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: sitofp_i16: ; 
CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: scvtf v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sitofp_i16: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: sshll v1.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: sshll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: scvtf v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: scvtf v2.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret %1 = sitofp <8 x i16> %a to <8 x half> ret <8 x half> %1 } @@ -286,108 +524,213 @@ define <8 x half> @sitofp_i32(<8 x i32> %a) #0 { define <8 x half> @sitofp_i64(<8 x i64> %a) #0 { -; CHECK-LABEL: sitofp_i64: -; CHECK: // %bb.0: -; CHECK-NEXT: scvtf v0.2d, v0.2d -; CHECK-NEXT: scvtf v2.2d, v2.2d -; CHECK-NEXT: scvtf v1.2d, v1.2d -; CHECK-NEXT: scvtf v3.2d, v3.2d -; CHECK-NEXT: fcvtn v0.2s, v0.2d -; CHECK-NEXT: fcvtn v2.2s, v2.2d -; CHECK-NEXT: fcvtn2 v0.4s, v1.2d -; CHECK-NEXT: fcvtn2 v2.4s, v3.2d -; CHECK-NEXT: fcvtn v0.4h, v0.4s -; CHECK-NEXT: fcvtn2 v0.8h, v2.4s -; CHECK-NEXT: ret +; CHECK-CVT-SD-LABEL: sitofp_i64: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: scvtf v0.2d, v0.2d +; CHECK-CVT-SD-NEXT: scvtf v2.2d, v2.2d +; CHECK-CVT-SD-NEXT: scvtf v1.2d, v1.2d +; CHECK-CVT-SD-NEXT: scvtf v3.2d, v3.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.2s, v0.2d +; CHECK-CVT-SD-NEXT: fcvtn v2.2s, v2.2d +; CHECK-CVT-SD-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-CVT-SD-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: sitofp_i64: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: scvtf v0.2d, v0.2d +; CHECK-FP16-SD-NEXT: scvtf v2.2d, v2.2d +; CHECK-FP16-SD-NEXT: scvtf v1.2d, v1.2d +; CHECK-FP16-SD-NEXT: scvtf v3.2d, v3.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.2s, v0.2d +; CHECK-FP16-SD-NEXT: fcvtn v2.2s, v2.2d +; CHECK-FP16-SD-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-FP16-SD-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sitofp_i64: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: scvtf v0.2d, v0.2d +; CHECK-CVT-GI-NEXT: scvtf v1.2d, v1.2d +; CHECK-CVT-GI-NEXT: scvtf v2.2d, v2.2d +; CHECK-CVT-GI-NEXT: scvtf v3.2d, v3.2d +; CHECK-CVT-GI-NEXT: fcvtn v0.2s, v0.2d +; CHECK-CVT-GI-NEXT: fcvtn v2.2s, v2.2d +; CHECK-CVT-GI-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-CVT-GI-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: sitofp_i64: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: scvtf v0.2d, v0.2d +; CHECK-FP16-GI-NEXT: scvtf v1.2d, v1.2d +; CHECK-FP16-GI-NEXT: scvtf v2.2d, v2.2d +; CHECK-FP16-GI-NEXT: scvtf v3.2d, v3.2d +; CHECK-FP16-GI-NEXT: fcvtn v0.2s, v0.2d +; CHECK-FP16-GI-NEXT: fcvtn v2.2s, v2.2d +; CHECK-FP16-GI-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-FP16-GI-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-FP16-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-FP16-GI-NEXT: ret %1 = sitofp <8 x i64> %a to <8 x half> ret <8 x half> %1 } define <4 x half> @uitofp_v4i8(<4 x i8> %a) #0 { -; CHECK-CVT-LABEL: uitofp_v4i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: bic v0.4h, #255, lsl #8 -; CHECK-CVT-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-CVT-NEXT: ucvtf v0.4s, v0.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: uitofp_v4i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: bic v0.4h, #255, lsl #8 -; CHECK-FP16-NEXT: ucvtf v0.4h, v0.4h 
-; CHECK-FP16-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_v4i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: bic v0.4h, #255, lsl #8 +; CHECK-CVT-SD-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: uitofp_v4i8: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: bic v0.4h, #255, lsl #8 +; CHECK-FP16-SD-NEXT: ucvtf v0.4h, v0.4h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_v4i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: movi v1.2d, #0x0000ff000000ff +; CHECK-CVT-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-CVT-GI-NEXT: ucvtf v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: uitofp_v4i8: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: movi d1, #0xff00ff00ff00ff +; CHECK-FP16-GI-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-FP16-GI-NEXT: ucvtf v0.4h, v0.4h +; CHECK-FP16-GI-NEXT: ret %1 = uitofp <4 x i8> %a to <4 x half> ret <4 x half> %1 } define <8 x half> @uitofp_v8i8(<8 x i8> %a) #0 { -; CHECK-CVT-LABEL: uitofp_v8i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-CVT-NEXT: ushll v1.4s, v0.4h, #0 -; CHECK-CVT-NEXT: ushll2 v2.4s, v0.8h, #0 -; CHECK-CVT-NEXT: ucvtf v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s -; CHECK-CVT-NEXT: ucvtf v1.4s, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_v8i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-CVT-SD-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ushll2 v2.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: uitofp_v8i8: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: ushll v0.8h, v0.8b, #0 ; CHECK-FP16-NEXT: ucvtf v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_v8i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-CVT-GI-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: ushll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: ucvtf v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: ucvtf v2.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret %1 = uitofp <8 x i8> %a to <8 x half> ret <8 x half> %1 } define <16 x half> @uitofp_v16i8(<16 x i8> %a) #0 { -; CHECK-CVT-LABEL: uitofp_v16i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: ushll2 v1.8h, v0.16b, #0 -; CHECK-CVT-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-CVT-NEXT: ushll v2.4s, v1.4h, #0 -; CHECK-CVT-NEXT: ushll v3.4s, v0.4h, #0 -; CHECK-CVT-NEXT: ushll2 v4.4s, v1.8h, #0 -; CHECK-CVT-NEXT: ushll2 v5.4s, v0.8h, #0 -; CHECK-CVT-NEXT: ucvtf v2.4s, v2.4s -; CHECK-CVT-NEXT: ucvtf v3.4s, v3.4s -; CHECK-CVT-NEXT: fcvtn v1.4h, v2.4s -; CHECK-CVT-NEXT: ucvtf v2.4s, v4.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v3.4s -; CHECK-CVT-NEXT: ucvtf v3.4s, v5.4s -; CHECK-CVT-NEXT: fcvtn2 v1.8h, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v3.4s -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: uitofp_v16i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: ushll2 v1.8h, v0.16b, #0 -; CHECK-FP16-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-FP16-NEXT: ucvtf v1.8h, v1.8h -; CHECK-FP16-NEXT: ucvtf v0.8h, v0.8h -; CHECK-FP16-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_v16i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: ushll2 v1.8h, v0.16b, #0 
+; CHECK-CVT-SD-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-CVT-SD-NEXT: ushll v2.4s, v1.4h, #0 +; CHECK-CVT-SD-NEXT: ushll v3.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ushll2 v4.4s, v1.8h, #0 +; CHECK-CVT-SD-NEXT: ushll2 v5.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v2.4s, v2.4s +; CHECK-CVT-SD-NEXT: ucvtf v3.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcvtn v1.4h, v2.4s +; CHECK-CVT-SD-NEXT: ucvtf v2.4s, v4.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v3.4s +; CHECK-CVT-SD-NEXT: ucvtf v3.4s, v5.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v1.8h, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v3.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: uitofp_v16i8: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: ushll2 v1.8h, v0.16b, #0 +; CHECK-FP16-SD-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-FP16-SD-NEXT: ucvtf v1.8h, v1.8h +; CHECK-FP16-SD-NEXT: ucvtf v0.8h, v0.8h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_v16i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ushll v1.8h, v0.8b, #0 +; CHECK-CVT-GI-NEXT: ushll2 v0.8h, v0.16b, #0 +; CHECK-CVT-GI-NEXT: ushll v2.4s, v1.4h, #0 +; CHECK-CVT-GI-NEXT: ushll v3.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: ushll2 v1.4s, v1.8h, #0 +; CHECK-CVT-GI-NEXT: ushll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: ucvtf v2.4s, v2.4s +; CHECK-CVT-GI-NEXT: ucvtf v3.4s, v3.4s +; CHECK-CVT-GI-NEXT: ucvtf v4.4s, v1.4s +; CHECK-CVT-GI-NEXT: ucvtf v5.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-GI-NEXT: fcvtn v1.4h, v3.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v4.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v1.8h, v5.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: uitofp_v16i8: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: ushll v1.8h, v0.8b, #0 +; CHECK-FP16-GI-NEXT: ushll2 v2.8h, v0.16b, #0 +; CHECK-FP16-GI-NEXT: ucvtf v0.8h, v1.8h +; CHECK-FP16-GI-NEXT: ucvtf v1.8h, v2.8h +; CHECK-FP16-GI-NEXT: ret %1 = uitofp <16 x i8> %a to <16 x half> ret <16 x half> %1 } define <8 x half> @uitofp_i16(<8 x i16> %a) #0 { -; CHECK-CVT-LABEL: uitofp_i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: ushll v1.4s, v0.4h, #0 -; CHECK-CVT-NEXT: ushll2 v2.4s, v0.8h, #0 -; CHECK-CVT-NEXT: ucvtf v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s -; CHECK-CVT-NEXT: ucvtf v1.4s, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_i16: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ushll2 v2.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: uitofp_i16: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: ucvtf v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_i16: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: ushll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: ucvtf v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: ucvtf v2.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret %1 = uitofp <8 x i16> %a to <8 x half> ret <8 x half> %1 } @@ -407,19 +750,61 @@ define <8 x half> @uitofp_i32(<8 x i32> %a) #0 { define <8 x half> @uitofp_i64(<8 x i64> %a) #0 { -; CHECK-LABEL: uitofp_i64: -; CHECK: // %bb.0: -; CHECK-NEXT: ucvtf v0.2d, v0.2d -; CHECK-NEXT: ucvtf v2.2d, v2.2d -; CHECK-NEXT: ucvtf v1.2d, v1.2d -; CHECK-NEXT: ucvtf v3.2d, v3.2d -; CHECK-NEXT: fcvtn v0.2s, v0.2d -; CHECK-NEXT: fcvtn v2.2s, v2.2d -; CHECK-NEXT: fcvtn2 v0.4s, v1.2d -; CHECK-NEXT: fcvtn2 
v2.4s, v3.2d -; CHECK-NEXT: fcvtn v0.4h, v0.4s -; CHECK-NEXT: fcvtn2 v0.8h, v2.4s -; CHECK-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_i64: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: ucvtf v0.2d, v0.2d +; CHECK-CVT-SD-NEXT: ucvtf v2.2d, v2.2d +; CHECK-CVT-SD-NEXT: ucvtf v1.2d, v1.2d +; CHECK-CVT-SD-NEXT: ucvtf v3.2d, v3.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.2s, v0.2d +; CHECK-CVT-SD-NEXT: fcvtn v2.2s, v2.2d +; CHECK-CVT-SD-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-CVT-SD-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: uitofp_i64: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: ucvtf v0.2d, v0.2d +; CHECK-FP16-SD-NEXT: ucvtf v2.2d, v2.2d +; CHECK-FP16-SD-NEXT: ucvtf v1.2d, v1.2d +; CHECK-FP16-SD-NEXT: ucvtf v3.2d, v3.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.2s, v0.2d +; CHECK-FP16-SD-NEXT: fcvtn v2.2s, v2.2d +; CHECK-FP16-SD-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-FP16-SD-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_i64: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ucvtf v0.2d, v0.2d +; CHECK-CVT-GI-NEXT: ucvtf v1.2d, v1.2d +; CHECK-CVT-GI-NEXT: ucvtf v2.2d, v2.2d +; CHECK-CVT-GI-NEXT: ucvtf v3.2d, v3.2d +; CHECK-CVT-GI-NEXT: fcvtn v0.2s, v0.2d +; CHECK-CVT-GI-NEXT: fcvtn v2.2s, v2.2d +; CHECK-CVT-GI-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-CVT-GI-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: uitofp_i64: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: ucvtf v0.2d, v0.2d +; CHECK-FP16-GI-NEXT: ucvtf v1.2d, v1.2d +; CHECK-FP16-GI-NEXT: ucvtf v2.2d, v2.2d +; CHECK-FP16-GI-NEXT: ucvtf v3.2d, v3.2d +; CHECK-FP16-GI-NEXT: fcvtn v0.2s, v0.2d +; CHECK-FP16-GI-NEXT: fcvtn v2.2s, v2.2d +; CHECK-FP16-GI-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-FP16-GI-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-FP16-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-FP16-GI-NEXT: ret %1 = uitofp <8 x i64> %a to <8 x half> ret <8 x half> %1 } @@ -436,94 +821,132 @@ define void @test_insert_at_zero(half %a, ptr %b) #0 { } define <8 x i8> @fptosi_i8(<8 x half> %a) #0 { -; CHECK-CVT-LABEL: fptosi_i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptosi_i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: fptosi_i8: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptosi_i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fptosi<8 x half> %a to <8 x i8> ret <8 x i8> %1 } define <8 x i16> 
@fptosi_i16(<8 x half> %a) #0 { -; CHECK-CVT-LABEL: fptosi_i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptosi_i16: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: fptosi_i16: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptosi_i16: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fptosi<8 x half> %a to <8 x i16> ret <8 x i16> %1 } define <8 x i8> @fptoui_i8(<8 x half> %a) #0 { -; CHECK-CVT-LABEL: fptoui_i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptoui_i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: fptoui_i8: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptoui_i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fptoui<8 x half> %a to <8 x i8> ret <8 x i8> %1 } define <8 x i16> @fptoui_i16(<8 x half> %a) #0 { -; CHECK-CVT-LABEL: fptoui_i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptoui_i16: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: fptoui_i16: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptoui_i16: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fptoui<8 x half> %a to <8 x i16> ret <8 x i16> %1 } define <8 x i1> @test_fcmp_une(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: 
test_fcmp_une: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmeq v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_une: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmeq v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_une: ; CHECK-FP16: // %bb.0: @@ -531,27 +954,41 @@ define <8 x i1> @test_fcmp_une(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_une: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmeq v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp une <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ueq(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ueq: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v4.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s -; CHECK-CVT-NEXT: fcmgt v3.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ueq: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v4.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcmgt v3.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ueq: ; CHECK-FP16: // %bb.0: @@ -561,23 +998,41 @@ define <8 x i1> @test_fcmp_ueq(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ueq: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: 
fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v4.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmgt v3.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-GI-NEXT: mvn v1.16b, v1.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ueq <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ugt(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ugt: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmge v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ugt: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ugt: ; CHECK-FP16: // %bb.0: @@ -585,23 +1040,37 @@ define <8 x i1> @test_fcmp_ugt(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ugt: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ugt <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_uge(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_uge: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_uge: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_uge: ; CHECK-FP16: // %bb.0: @@ -609,23 +1078,37 @@ define <8 x i1> @test_fcmp_uge(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; 
CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_uge: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp uge <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ult(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ult: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ult: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ult: ; CHECK-FP16: // %bb.0: @@ -633,23 +1116,37 @@ define <8 x i1> @test_fcmp_ult(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ult: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ult <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ule(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ule: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ule: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ule: ; CHECK-FP16: // %bb.0: @@ -657,27 +1154,41 @@ define <8 x i1> @test_fcmp_ule(<8 x 
half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ule: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ule <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_uno(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_uno: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v4.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s -; CHECK-CVT-NEXT: fcmge v3.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_uno: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v4.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcmge v3.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_uno: ; CHECK-FP16: // %bb.0: @@ -687,26 +1198,44 @@ define <8 x i1> @test_fcmp_uno(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_uno: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v4.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmge v3.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-GI-NEXT: mvn v1.16b, v1.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp uno <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_one(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_one: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v4.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s -; CHECK-CVT-NEXT: fcmgt v3.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; 
CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_one: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v4.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcmgt v3.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_one: ; CHECK-FP16: // %bb.0: @@ -715,136 +1244,212 @@ define <8 x i1> @test_fcmp_one(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: orr v0.16b, v0.16b, v2.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_one: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v4.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmgt v3.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp one <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_oeq(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_oeq: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmeq v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_oeq: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmeq v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_oeq: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmeq v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_oeq: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmeq v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp oeq <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ogt(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ogt: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; 
CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ogt: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ogt: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmgt v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ogt: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ogt <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_oge(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_oge: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_oge: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_oge: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmge v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_oge: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp oge <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_olt(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_olt: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_olt: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h +; 
CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_olt: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmgt v0.8h, v1.8h, v0.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_olt: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp olt <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ole(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ole: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmge v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ole: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ole: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmge v0.8h, v1.8h, v0.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ole: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ole <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ord(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ord: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v4.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s -; CHECK-CVT-NEXT: fcmge v3.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ord: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v4.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcmge v3.4s, v0.4s, v1.4s +; 
CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ord: ; CHECK-FP16: // %bb.0: @@ -853,8 +1458,27 @@ define <8 x i1> @test_fcmp_ord(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: orr v0.16b, v0.16b, v2.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ord: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v4.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmge v3.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ord <8 x half> %a, %b ret <8 x i1> %1 } attributes #0 = { nounwind } + +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK-CVT: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/icmp.ll b/llvm/test/CodeGen/AArch64/icmp.ll index 18665bc..7195e2b 100644 --- a/llvm/test/CodeGen/AArch64/icmp.ll +++ b/llvm/test/CodeGen/AArch64/icmp.ll @@ -2093,3 +2093,54 @@ define <2 x i1> @icmp_slt_v2i64_Zero_LHS(<2 x i64> %a) { %c = icmp slt <2 x i64> <i64 0, i64 0>, %a ret <2 x i1> %c } + +; Test TST optimization for i8 sign bit testing with cross-type select +; This tests the pattern: icmp slt i8 %val, 0; select i1 %cmp, i32 %a, i32 %b +; The optimization should convert sxtb+cmp to tst for sign bit testing. 
+ +define i32 @i8_signbit_tst_constants(i8 %x, i8 %y) { +; CHECK-SD-LABEL: i8_signbit_tst_constants: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: add w9, w0, w1 +; CHECK-SD-NEXT: mov w8, #42 // =0x2a +; CHECK-SD-NEXT: tst w9, #0x80 +; CHECK-SD-NEXT: mov w9, #20894 // =0x519e +; CHECK-SD-NEXT: csel w0, w9, w8, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: i8_signbit_tst_constants: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: add w8, w0, w1 +; CHECK-GI-NEXT: mov w9, #42 // =0x2a +; CHECK-GI-NEXT: mov w10, #20894 // =0x519e +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: cmp w8, #0 +; CHECK-GI-NEXT: csel w0, w10, w9, mi +; CHECK-GI-NEXT: ret + %add = add i8 %x, %y + %cmp = icmp slt i8 %add, 0 + %sel = select i1 %cmp, i32 20894, i32 42 + ret i32 %sel +} + +; Test i8 sign bit testing with variable select values (problematic case) +define i32 @i8_signbit_variables(i8 %x, i8 %y, i32 %a, i32 %b) { +; CHECK-SD-LABEL: i8_signbit_variables: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: add w8, w0, w1 +; CHECK-SD-NEXT: tst w8, #0x80 +; CHECK-SD-NEXT: csel w0, w2, w3, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: i8_signbit_variables: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: add w8, w0, w1 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: cmp w8, #0 +; CHECK-GI-NEXT: csel w0, w2, w3, mi +; CHECK-GI-NEXT: ret + %add = add i8 %x, %y + %cmp = icmp slt i8 %add, 0 + %sel = select i1 %cmp, i32 %a, i32 %b + ret i32 %sel +} diff --git a/llvm/test/CodeGen/AArch64/isinf.ll b/llvm/test/CodeGen/AArch64/isinf.ll index e68539b..e8bbaf9 100644 --- a/llvm/test/CodeGen/AArch64/isinf.ll +++ b/llvm/test/CodeGen/AArch64/isinf.ll @@ -27,9 +27,8 @@ define i32 @replace_isinf_call_f32(float %x) { ; CHECK-LABEL: replace_isinf_call_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000 -; CHECK-NEXT: and w9, w9, #0x7fffffff -; CHECK-NEXT: cmp w9, w8 +; CHECK-NEXT: mov w8, #-16777216 // =0xff000000 +; CHECK-NEXT: cmp w8, w9, lsl #1 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret %abs = tail call float @llvm.fabs.f32(float %x) @@ -43,9 +42,8 @@ define i32 @replace_isinf_call_f64(double %x) { ; CHECK-LABEL: replace_isinf_call_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fmov x9, d0 -; CHECK-NEXT: mov x8, #9218868437227405312 // =0x7ff0000000000000 -; CHECK-NEXT: and x9, x9, #0x7fffffffffffffff -; CHECK-NEXT: cmp x9, x8 +; CHECK-NEXT: mov x8, #-9007199254740992 // =0xffe0000000000000 +; CHECK-NEXT: cmp x8, x9, lsl #1 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret %abs = tail call double @llvm.fabs.f64(double %x) diff --git a/llvm/test/CodeGen/AArch64/masked-integer-compare.ll b/llvm/test/CodeGen/AArch64/masked-integer-compare.ll new file mode 100644 index 0000000..363cd10 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/masked-integer-compare.ll @@ -0,0 +1,178 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=aarch64-none-linux-gnu < %s -o -| FileCheck %s + +; Test code generation support for SUBS (shifted register) from masked integer +; compare sequences. These sequences appear in isinf tests, for example. 
+ +define i1 @combine_masked_i32(i32 %x) { +; CHECK-LABEL: combine_masked_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #-16777216 // =0xff000000 +; CHECK-NEXT: cmp w8, w0, lsl #1 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %sub = sub i32 %and, u0x7f800000 + %cmp = icmp eq i32 %sub, 0 + ret i1 %cmp +} + +define i1 @combine_masked_i64(i64 %x) { +; CHECK-LABEL: combine_masked_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov x8, #-9007199254740992 // =0xffe0000000000000 +; CHECK-NEXT: cmp x8, x0, lsl #1 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i64 %x, u0x7fffffffffffffff + %sub = sub i64 %and, u0x7ff0000000000000 + %cmp = icmp eq i64 %sub, 0 + ret i1 %cmp +} + +define i1 @combine_masked_ne(i32 %x) { +; CHECK-LABEL: combine_masked_ne: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #-16777216 // =0xff000000 +; CHECK-NEXT: cmp w8, w0, lsl #1 +; CHECK-NEXT: cset w0, ne +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %cmp = icmp ne i32 %and, u0x7f800000 + ret i1 %cmp +} + +define i1 @combine_masked_lsl4(i32 %x) { +; CHECK-LABEL: combine_masked_lsl4: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #-134217728 // =0xf8000000 +; CHECK-NEXT: cmp w8, w0, lsl #4 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x0fffffff + %cmp = icmp eq i32 %and, u0x0f800000 + ret i1 %cmp +} + +define i1 @dont_combine_not_mask(i32 %x) { +; CHECK-LABEL: dont_combine_not_mask: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000 +; CHECK-NEXT: and w9, w0, #0x7ffffffe +; CHECK-NEXT: cmp w9, w8 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x7ffffffe + %cmp = icmp eq i32 %and, u0x7f800000 + ret i1 %cmp +} + +define i1 @dont_combine_cmp_not_masked(i32 %x) { +; CHECK-LABEL: dont_combine_cmp_not_masked: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000 +; CHECK-NEXT: and w9, w0, #0x3fffffff +; CHECK-NEXT: cmp w9, w8 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x3fffffff + %cmp = icmp eq i32 %and, u0x7f800000 + ret i1 %cmp +} + +define i1 @dont_combine_not_constant_mask(i32 %x, i32 %m) { +; CHECK-LABEL: dont_combine_not_constant_mask: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000 +; CHECK-NEXT: and w9, w0, w1 +; CHECK-NEXT: cmp w9, w8 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, %m + %cmp = icmp eq i32 %and, u0x7f800000 + ret i1 %cmp +} + +define i1 @dont_combine_not_constant_cmp(i32 %x, i32 %c) { +; CHECK-LABEL: dont_combine_not_constant_cmp: +; CHECK: // %bb.0: +; CHECK-NEXT: and w8, w0, #0xfffffff +; CHECK-NEXT: cmp w8, w1 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x0fffffff + %cmp = icmp eq i32 %and, %c + ret i1 %cmp +} + +define i1 @dont_combine_subs_imm(i32 %x) { +; CHECK-LABEL: dont_combine_subs_imm: +; CHECK: // %bb.0: +; CHECK-NEXT: and w8, w0, #0x7fffffff +; CHECK-NEXT: cmp w8, #291 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %cmp = icmp eq i32 %and, u0x123 + ret i1 %cmp +} + +define i1 @dont_combine_subs_imm_lsl12(i32 %x) { +; CHECK-LABEL: dont_combine_subs_imm_lsl12: +; CHECK: // %bb.0: +; CHECK-NEXT: and w8, w0, #0x7fffffff +; CHECK-NEXT: cmp w8, #291, lsl #12 // =1191936 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %cmp = icmp eq i32 %and, u0x123000 + ret i1 %cmp +} + +define { i1, i1 } @dont_combine_multi_use_cmp(i32 %x) { +; CHECK-LABEL: dont_combine_multi_use_cmp: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #2139095040 
// =0x7f800000 +; CHECK-NEXT: and w9, w0, #0x7fffffff +; CHECK-NEXT: cmp w9, w8 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: cset w1, lt +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %eq = icmp eq i32 %and, u0x7f800000 + %lt = icmp slt i32 %and, u0x7f800000 + %r1 = insertvalue { i1, i1 } poison, i1 %eq, 0 + %r2 = insertvalue { i1, i1 } %r1, i1 %lt, 1 + ret { i1, i1 } %r2 +} + +define { i32, i1 } @dont_combine_multi_use_sub(i32 %x) { +; CHECK-LABEL: dont_combine_multi_use_sub: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #-2139095040 // =0x80800000 +; CHECK-NEXT: and w9, w0, #0x7fffffff +; CHECK-NEXT: adds w0, w9, w8 +; CHECK-NEXT: cset w1, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %sub = sub i32 %and, u0x7f800000 + %cmp = icmp eq i32 %sub, 0 + %r1 = insertvalue { i32, i1 } poison, i32 %sub, 0 + %r2 = insertvalue { i32, i1 } %r1, i1 %cmp, 1 + ret { i32, i1 } %r2 +} + +define { i32, i1 } @dont_combine_multi_use_and(i32 %x) { +; CHECK-LABEL: dont_combine_multi_use_and: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000 +; CHECK-NEXT: and w0, w0, #0x7fffffff +; CHECK-NEXT: cmp w0, w8 +; CHECK-NEXT: cset w1, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %cmp = icmp eq i32 %and, u0x7f800000 + %r1 = insertvalue { i32, i1 } poison, i32 %and, 0 + %r2 = insertvalue { i32, i1 } %r1, i1 %cmp, 1 + ret { i32, i1 } %r2 +} diff --git a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll index fc43c71..b6dee97e 100644 --- a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll +++ b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -aarch64-new-sme-abi -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -aarch64-new-sme-abi -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-SDAG ; A simple EH test case that corresponds to the following C++ source: ; @@ -87,6 +88,90 @@ define void @za_with_raii(i1 %fail) "aarch64_inout_za" personality ptr @__gxx_pe ; CHECK-NEXT: mov x0, x19 ; CHECK-NEXT: msr TPIDR2_EL0, x8 ; CHECK-NEXT: bl _Unwind_Resume +; +; CHECK-SDAG-LABEL: za_with_raii: +; CHECK-SDAG: .Lfunc_begin0: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception0 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! 
// 16-byte Folded Spill +; CHECK-SDAG-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: sub sp, sp, #16 +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -8 +; CHECK-SDAG-NEXT: .cfi_offset w20, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: rdsvl x8, #1 +; CHECK-SDAG-NEXT: mov x9, sp +; CHECK-SDAG-NEXT: msub x9, x8, x8, x9 +; CHECK-SDAG-NEXT: mov sp, x9 +; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16] +; CHECK-SDAG-NEXT: tbnz w0, #0, .LBB0_2 +; CHECK-SDAG-NEXT: // %bb.1: // %return_normally +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: b shared_za_call +; CHECK-SDAG-NEXT: .LBB0_2: // %throw_exception +; CHECK-SDAG-NEXT: sub x20, x29, #16 +; CHECK-SDAG-NEXT: mov w0, #8 // =0x8 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20 +; CHECK-SDAG-NEXT: bl __cxa_allocate_exception +; CHECK-SDAG-NEXT: mov x8, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x9, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x9, .LBB0_4 +; CHECK-SDAG-NEXT: // %bb.3: // %throw_exception +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_4: // %throw_exception +; CHECK-SDAG-NEXT: adrp x9, .L.str +; CHECK-SDAG-NEXT: add x9, x9, :lo12:.L.str +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: str x9, [x8] +; CHECK-SDAG-NEXT: .Ltmp0: // EH_LABEL +; CHECK-SDAG-NEXT: adrp x1, :got:typeinfo_for_char_const_ptr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20 +; CHECK-SDAG-NEXT: mov x0, x8 +; CHECK-SDAG-NEXT: ldr x1, [x1, :got_lo12:typeinfo_for_char_const_ptr] +; CHECK-SDAG-NEXT: mov x2, xzr +; CHECK-SDAG-NEXT: bl __cxa_throw +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB0_6 +; CHECK-SDAG-NEXT: // %bb.5: // %throw_exception +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_6: // %throw_exception +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: .Ltmp1: // EH_LABEL +; CHECK-SDAG-NEXT: // %bb.7: // %throw_fail +; CHECK-SDAG-NEXT: .LBB0_8: // %unwind_dtors +; CHECK-SDAG-NEXT: .Ltmp2: // EH_LABEL +; CHECK-SDAG-NEXT: mov x19, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB0_10 +; CHECK-SDAG-NEXT: // %bb.9: // %unwind_dtors +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_10: // %unwind_dtors +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: bl shared_za_call +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20 +; CHECK-SDAG-NEXT: bl _Unwind_Resume +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB0_12 +; CHECK-SDAG-NEXT: // %bb.11: // %unwind_dtors +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_12: // %unwind_dtors +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr br i1 %fail, label %throw_exception, label %return_normally throw_exception: @@ -124,7 +209,7 @@ throw_fail: ; } ; shared_za_call(); ; } -define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_personality_v0 { +define void @try_catch() "aarch64_inout_za" personality ptr @__gxx_personality_v0 { ; CHECK-LABEL: try_catch: ; CHECK: .Lfunc_begin1: ; CHECK-NEXT: 
.cfi_startproc @@ -142,11 +227,11 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per ; CHECK-NEXT: msub x9, x8, x8, x9 ; CHECK-NEXT: mov sp, x9 ; CHECK-NEXT: stp x9, x8, [x29, #-16] -; CHECK-NEXT: .Ltmp3: +; CHECK-NEXT: .Ltmp3: // EH_LABEL ; CHECK-NEXT: sub x8, x29, #16 ; CHECK-NEXT: msr TPIDR2_EL0, x8 ; CHECK-NEXT: bl may_throw -; CHECK-NEXT: .Ltmp4: +; CHECK-NEXT: .Ltmp4: // EH_LABEL ; CHECK-NEXT: .LBB1_1: // %after_catch ; CHECK-NEXT: smstart za ; CHECK-NEXT: mrs x8, TPIDR2_EL0 @@ -160,7 +245,7 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload ; CHECK-NEXT: b shared_za_call ; CHECK-NEXT: .LBB1_4: // %catch -; CHECK-NEXT: .Ltmp5: +; CHECK-NEXT: .Ltmp5: // EH_LABEL ; CHECK-NEXT: bl __cxa_begin_catch ; CHECK-NEXT: smstart za ; CHECK-NEXT: mrs x8, TPIDR2_EL0 @@ -175,6 +260,78 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per ; CHECK-NEXT: msr TPIDR2_EL0, x8 ; CHECK-NEXT: bl __cxa_end_catch ; CHECK-NEXT: b .LBB1_1 +; +; CHECK-SDAG-LABEL: try_catch: +; CHECK-SDAG: .Lfunc_begin1: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception1 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: sub sp, sp, #16 +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: rdsvl x8, #1 +; CHECK-SDAG-NEXT: mov x9, sp +; CHECK-SDAG-NEXT: msub x9, x8, x8, x9 +; CHECK-SDAG-NEXT: mov sp, x9 +; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16] +; CHECK-SDAG-NEXT: .Ltmp3: // EH_LABEL +; CHECK-SDAG-NEXT: sub x19, x29, #16 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl may_throw +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_2 +; CHECK-SDAG-NEXT: // %bb.1: +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_2: +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: .Ltmp4: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB1_3: // %after_catch +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: b shared_za_call +; CHECK-SDAG-NEXT: .LBB1_4: // %catch +; CHECK-SDAG-NEXT: .Ltmp5: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_6 +; CHECK-SDAG-NEXT: // %bb.5: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_6: // %catch +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_8 +; CHECK-SDAG-NEXT: // %bb.7: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_8: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: bl shared_za_call +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl 
__cxa_end_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_10 +; CHECK-SDAG-NEXT: // %bb.9: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_10: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: b .LBB1_3 invoke void @may_throw() to label %after_catch unwind label %catch @@ -235,16 +392,16 @@ define void @try_catch_shared_za_callee() "aarch64_new_za" personality ptr @__gx ; CHECK-NEXT: zero {za} ; CHECK-NEXT: .LBB2_2: ; CHECK-NEXT: smstart za -; CHECK-NEXT: .Ltmp6: +; CHECK-NEXT: .Ltmp6: // EH_LABEL ; CHECK-NEXT: bl shared_za_call -; CHECK-NEXT: .Ltmp7: +; CHECK-NEXT: .Ltmp7: // EH_LABEL ; CHECK-NEXT: .LBB2_3: // %exit ; CHECK-NEXT: smstop za ; CHECK-NEXT: mov sp, x29 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB2_4: // %catch -; CHECK-NEXT: .Ltmp8: +; CHECK-NEXT: .Ltmp8: // EH_LABEL ; CHECK-NEXT: bl __cxa_begin_catch ; CHECK-NEXT: smstart za ; CHECK-NEXT: mrs x8, TPIDR2_EL0 @@ -260,6 +417,78 @@ define void @try_catch_shared_za_callee() "aarch64_new_za" personality ptr @__gx ; CHECK-NEXT: bl __cxa_end_catch ; CHECK-NEXT: msr TPIDR2_EL0, xzr ; CHECK-NEXT: b .LBB2_3 +; +; CHECK-SDAG-LABEL: try_catch_shared_za_callee: +; CHECK-SDAG: .Lfunc_begin2: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception2 +; CHECK-SDAG-NEXT: // %bb.0: // %prelude +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: sub sp, sp, #16 +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: rdsvl x8, #1 +; CHECK-SDAG-NEXT: mov x9, sp +; CHECK-SDAG-NEXT: msub x9, x8, x8, x9 +; CHECK-SDAG-NEXT: mov sp, x9 +; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16] +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: cbz x8, .LBB2_2 +; CHECK-SDAG-NEXT: // %bb.1: // %save.za +; CHECK-SDAG-NEXT: bl __arm_tpidr2_save +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: .LBB2_2: +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: zero {za} +; CHECK-SDAG-NEXT: .Ltmp6: // EH_LABEL +; CHECK-SDAG-NEXT: bl shared_za_call +; CHECK-SDAG-NEXT: .Ltmp7: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB2_3: // %exit +; CHECK-SDAG-NEXT: smstop za +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB2_4: // %catch +; CHECK-SDAG-NEXT: .Ltmp8: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: sub x19, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB2_6 +; CHECK-SDAG-NEXT: // %bb.5: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB2_6: // %catch +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB2_8 +; CHECK-SDAG-NEXT: // %bb.7: // %catch +; CHECK-SDAG-NEXT: bl 
__arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB2_8: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: bl noexcept_shared_za_call +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_end_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB2_10 +; CHECK-SDAG-NEXT: // %bb.9: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB2_10: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: b .LBB2_3 invoke void @shared_za_call() #4 to label %exit unwind label %catch catch: @@ -275,6 +504,234 @@ exit: ret void } +; A simple ZT0 exception example that corresponds to: +; +; struct ZT0Resource { +; ~ZT0Resource() __arm_inout("zt0") { +; shared_zt0_call(); // simulate cleanup in destructor +; } +; }; +; +; void za_with_raii() __arm_inout("zt0") { +; ZT0Resource r; +; may_throw(); +; } +; +; This code may require reloading ZT0 in the cleanup for ~ZT0Resource(). +; +; FIXME: Codegen with `-aarch64-new-sme-abi` is broken with ZT0 (as it is not implemented). +define void @try_catch_shared_zt0_callee() "aarch64_inout_zt0" personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: try_catch_shared_zt0_callee: +; CHECK: .Lfunc_begin3: +; CHECK-NEXT: .cfi_startproc +; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-NEXT: .cfi_lsda 28, .Lexception3 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .cfi_def_cfa w29, 32 +; CHECK-NEXT: .cfi_offset w19, -8 +; CHECK-NEXT: .cfi_offset w20, -16 +; CHECK-NEXT: .cfi_offset w30, -24 +; CHECK-NEXT: .cfi_offset w29, -32 +; CHECK-NEXT: rdsvl x8, #1 +; CHECK-NEXT: mov x9, sp +; CHECK-NEXT: msub x9, x8, x8, x9 +; CHECK-NEXT: mov sp, x9 +; CHECK-NEXT: stp x9, x8, [x29, #-80] +; CHECK-NEXT: .Ltmp9: // EH_LABEL +; CHECK-NEXT: sub x19, x29, #64 +; CHECK-NEXT: str zt0, [x19] +; CHECK-NEXT: smstop za +; CHECK-NEXT: bl may_throw +; CHECK-NEXT: smstart za +; CHECK-NEXT: ldr zt0, [x19] +; CHECK-NEXT: .Ltmp10: // EH_LABEL +; CHECK-NEXT: // %bb.1: // %return_normally +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB3_2: // %unwind_dtors +; CHECK-NEXT: .Ltmp11: // EH_LABEL +; CHECK-NEXT: sub x20, x29, #64 +; CHECK-NEXT: mov x19, x0 +; CHECK-NEXT: smstart za +; CHECK-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-NEXT: sub x0, x29, #80 +; CHECK-NEXT: cbnz x8, .LBB3_4 +; CHECK-NEXT: // %bb.3: // %unwind_dtors +; CHECK-NEXT: bl __arm_tpidr2_restore +; CHECK-NEXT: .LBB3_4: // %unwind_dtors +; CHECK-NEXT: msr TPIDR2_EL0, xzr +; CHECK-NEXT: bl shared_zt0_call +; CHECK-NEXT: str zt0, [x20] +; CHECK-NEXT: smstop za +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl _Unwind_Resume +; CHECK-NEXT: smstart za +; CHECK-NEXT: ldr zt0, [x20] +; +; CHECK-SDAG-LABEL: try_catch_shared_zt0_callee: +; CHECK-SDAG: .Lfunc_begin3: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception3 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: sub sp, sp, #96 +; CHECK-SDAG-NEXT: str x30, [sp, #64] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill +; CHECK-SDAG-NEXT: .cfi_def_cfa_offset 96 +; 
CHECK-SDAG-NEXT: .cfi_offset w19, -8 +; CHECK-SDAG-NEXT: .cfi_offset w20, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -32 +; CHECK-SDAG-NEXT: .Ltmp9: // EH_LABEL +; CHECK-SDAG-NEXT: mov x19, sp +; CHECK-SDAG-NEXT: str zt0, [x19] +; CHECK-SDAG-NEXT: smstop za +; CHECK-SDAG-NEXT: bl may_throw +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: ldr zt0, [x19] +; CHECK-SDAG-NEXT: .Ltmp10: // EH_LABEL +; CHECK-SDAG-NEXT: // %bb.1: // %return_normally +; CHECK-SDAG-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: add sp, sp, #96 +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB3_2: // %unwind_dtors +; CHECK-SDAG-NEXT: .Ltmp11: // EH_LABEL +; CHECK-SDAG-NEXT: mov x20, sp +; CHECK-SDAG-NEXT: mov x19, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: ldr zt0, [x20] +; CHECK-SDAG-NEXT: bl shared_zt0_call +; CHECK-SDAG-NEXT: str zt0, [x20] +; CHECK-SDAG-NEXT: smstop za +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl _Unwind_Resume +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: ldr zt0, [x20] + invoke void @may_throw() + to label %return_normally unwind label %unwind_dtors + +unwind_dtors: + %5 = landingpad { ptr, i32 } + cleanup + tail call void @shared_zt0_call() + resume { ptr, i32 } %5 + +return_normally: + ret void +} + +; This example corresponds to: +; +; __arm_agnostic("sme_za_state") void try_catch_agnostic_za() +; { +; try { +; may_throw(); +; } catch(...) { +; } +; } +; +; In this example we must execute __arm_sme_restore once we enter the catch block +; (before executing __arm_sme_save again, which would invalidate the prior save). +define void @try_catch_agnostic_za() "aarch64_za_state_agnostic" personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: try_catch_agnostic_za: +; CHECK: .Lfunc_begin4: +; CHECK-NEXT: .cfi_startproc +; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-NEXT: .cfi_lsda 28, .Lexception4 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: .cfi_def_cfa w29, 32 +; CHECK-NEXT: .cfi_offset w19, -16 +; CHECK-NEXT: .cfi_offset w30, -24 +; CHECK-NEXT: .cfi_offset w29, -32 +; CHECK-NEXT: bl __arm_sme_state_size +; CHECK-NEXT: sub sp, sp, x0 +; CHECK-NEXT: mov x19, sp +; CHECK-NEXT: .Ltmp12: // EH_LABEL +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl __arm_sme_save +; CHECK-NEXT: bl may_throw +; CHECK-NEXT: .Ltmp13: // EH_LABEL +; CHECK-NEXT: .LBB4_1: // %exit +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl __arm_sme_restore +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB4_2: // %catch +; CHECK-NEXT: .Ltmp14: // EH_LABEL +; CHECK-NEXT: bl __cxa_begin_catch +; CHECK-NEXT: bl __cxa_end_catch +; CHECK-NEXT: b .LBB4_1 +; +; CHECK-SDAG-LABEL: try_catch_agnostic_za: +; CHECK-SDAG: .Lfunc_begin4: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception4 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! 
// 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: bl __arm_sme_state_size +; CHECK-SDAG-NEXT: sub sp, sp, x0 +; CHECK-SDAG-NEXT: mov x19, sp +; CHECK-SDAG-NEXT: .Ltmp12: // EH_LABEL +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: bl may_throw +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: .Ltmp13: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB4_1: // %exit +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB4_2: // %catch +; CHECK-SDAG-NEXT: .Ltmp14: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: bl __cxa_end_catch +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: b .LBB4_1 + invoke void @may_throw() + to label %exit unwind label %catch +catch: + %eh_info = landingpad { ptr, i32 } + catch ptr null + %exception_ptr = extractvalue { ptr, i32 } %eh_info, 0 + tail call ptr @__cxa_begin_catch(ptr %exception_ptr) + tail call void @__cxa_end_catch() + br label %exit + +exit: + ret void +} + declare ptr @__cxa_allocate_exception(i64) declare void @__cxa_throw(ptr, ptr, ptr) declare ptr @__cxa_begin_catch(ptr) @@ -284,3 +741,4 @@ declare i32 @__gxx_personality_v0(...) 
declare void @may_throw() declare void @shared_za_call() "aarch64_inout_za" declare void @noexcept_shared_za_call() "aarch64_inout_za" +declare void @shared_zt0_call() "aarch64_inout_zt0" diff --git a/llvm/test/CodeGen/AArch64/strict-fp-opt.ll b/llvm/test/CodeGen/AArch64/strict-fp-opt.ll index bb7cd22..c433291 100644 --- a/llvm/test/CodeGen/AArch64/strict-fp-opt.ll +++ b/llvm/test/CodeGen/AArch64/strict-fp-opt.ll @@ -1,31 +1,40 @@ -; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s -; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 %s -o - | FileCheck %s - +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI + +; CHECK-GI: warning: Instruction selection used fallback path for unused_div_fpexcept_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for unused_div_round_dynamic +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for add_twice_fpexcept_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for add_twice_round_dynamic +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for set_rounding +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for set_rounding_fpexcept_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for set_rounding_round_dynamic ; Div whose result is unused should be removed unless we have strict exceptions -; CHECK-LABEL: unused_div: -; CHECK-NOT: fdiv -; CHECK: ret define void @unused_div(float %x, float %y) { +; CHECK-LABEL: unused_div: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ret entry: %add = fdiv float %x, %y ret void } -; CHECK-LABEL: unused_div_fpexcept_strict: -; CHECK: fdiv s0, s0, s1 -; CHECK-NEXT: ret define void @unused_div_fpexcept_strict(float %x, float %y) #0 { +; CHECK-LABEL: unused_div_fpexcept_strict: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fdiv s0, s0, s1 +; CHECK-NEXT: ret entry: %add = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret void } -; CHECK-LABEL: unused_div_round_dynamic: -; CHECK-NOT: fdiv -; CHECK: ret define void @unused_div_round_dynamic(float %x, float %y) #0 { +; CHECK-LABEL: unused_div_round_dynamic: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ret entry: %add = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 ret void @@ -33,14 +42,14 @@ entry: ; Machine CSE should eliminate the second add unless we have strict exceptions - -; CHECK-LABEL: add_twice: -; CHECK: fadd [[ADD:s[0-9]+]], s0, s1 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: fmul [[MUL:s[0-9]+]], [[ADD]], [[ADD]] -; CHECK-NEXT: fcsel s0, [[ADD]], [[MUL]], eq -; CHECK-NEXT: ret define float @add_twice(float %x, float %y, i32 %n) { +; CHECK-LABEL: add_twice: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: cmp w0, #0 +; CHECK-NEXT: fmul s1, s0, s0 +; CHECK-NEXT: fcsel s0, s0, s1, eq +; CHECK-NEXT: ret entry: %add = fadd float %x, %y %tobool.not = icmp eq i32 %n, 0 @@ -56,15 +65,17 @@ if.end: ret float %a.0 } -; CHECK-LABEL: add_twice_fpexcept_strict: -; CHECK: fmov [[X:s[0-9]+]], s0 -; CHECK-NEXT: fadd s0, s0, s1 -; CHECK-NEXT: cbz w0, [[LABEL:.LBB[0-9_]+]] -; CHECK: fadd [[ADD:s[0-9]+]], 
[[X]], s1 -; CHECK-NEXT: fmul s0, s0, [[ADD]] -; CHECK: [[LABEL]]: -; CHECK-NEXT: ret define float @add_twice_fpexcept_strict(float %x, float %y, i32 %n) #0 { +; CHECK-LABEL: add_twice_fpexcept_strict: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fmov s2, s0 +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: cbz w0, .LBB4_2 +; CHECK-NEXT: // %bb.1: // %if.then +; CHECK-NEXT: fadd s1, s2, s1 +; CHECK-NEXT: fmul s0, s0, s1 +; CHECK-NEXT: .LBB4_2: // %if.end +; CHECK-NEXT: ret entry: %add = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 %tobool.not = icmp eq i32 %n, 0 @@ -80,14 +91,15 @@ if.end: ret float %a.0 } -; CHECK-LABEL: add_twice_round_dynamic: -; CHECK: fadd s0, s0, s1 -; CHECK-NEXT: cbz w0, [[LABEL:.LBB[0-9_]+]] -; CHECK-NOT: fadd -; CHECK: fmul s0, s0, s0 -; CHECK: [[LABEL]]: -; CHECK-NEXT: ret define float @add_twice_round_dynamic(float %x, float %y, i32 %n) #0 { +; CHECK-LABEL: add_twice_round_dynamic: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: cbz w0, .LBB5_2 +; CHECK-NEXT: // %bb.1: // %if.then +; CHECK-NEXT: fmul s0, s0, s0 +; CHECK-NEXT: .LBB5_2: // %if.end +; CHECK-NEXT: ret entry: %add = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 %tobool.not = icmp eq i32 %n, 0 @@ -108,17 +120,18 @@ if.end: ; dynamic (as they may give different results) or when we have strict exceptions ; (the llvm.set.rounding is irrelevant, but both could trap). -; CHECK-LABEL: set_rounding: -; CHECK-DAG: fadd [[SREG:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG1:x[0-9]+]], FPCR -; CHECK-DAG: orr [[XREG2:x[0-9]+]], [[XREG1]], #0xc00000 -; CHECK: msr FPCR, [[XREG2]] -; CHECK-NEXT: mrs [[XREG3:x[0-9]+]], FPCR -; CHECK-NEXT: and [[XREG4:x[0-9]+]], [[XREG3]], #0xffffffffff3fffff -; CHECK-NEXT: msr FPCR, [[XREG4]] -; CHECK-NEXT: fsub s0, [[SREG]], [[SREG]] -; CHECK-NEXT: ret define float @set_rounding(float %x, float %y) { +; CHECK-LABEL: set_rounding: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: orr x8, x8, #0xc00000 +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: and x8, x8, #0xffffffffff3fffff +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fsub s0, s0, s0 +; CHECK-NEXT: ret entry: %add1 = fadd float %x, %y call void @llvm.set.rounding(i32 0) @@ -128,18 +141,19 @@ entry: ret float %sub } -; CHECK-LABEL: set_rounding_fpexcept_strict: -; CHECK-DAG: fadd [[SREG1:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG1:x[0-9]+]], FPCR -; CHECK-DAG: orr [[XREG2:x[0-9]+]], [[XREG1]], #0xc00000 -; CHECK: msr FPCR, [[XREG2]] -; CHECK-DAG: fadd [[SREG2:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG3:x[0-9]+]], FPCR -; CHECK-DAG: and [[XREG4:x[0-9]+]], [[XREG3]], #0xffffffffff3fffff -; CHECK-NEXT: msr FPCR, [[XREG4]] -; CHECK-NEXT: fsub s0, [[SREG1]], [[SREG2]] -; CHECK-NEXT: ret define float @set_rounding_fpexcept_strict(float %x, float %y) #0 { +; CHECK-LABEL: set_rounding_fpexcept_strict: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fadd s2, s0, s1 +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: orr x8, x8, #0xc00000 +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: and x8, x8, #0xffffffffff3fffff +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fsub s0, s2, s0 +; CHECK-NEXT: ret entry: %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") 
#0 call void @llvm.set.rounding(i32 0) #0 @@ -149,18 +163,19 @@ entry: ret float %sub } -; CHECK-LABEL: set_rounding_round_dynamic: -; CHECK-DAG: fadd [[SREG1:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG1:x[0-9]+]], FPCR -; CHECK-DAG: orr [[XREG2:x[0-9]+]], [[XREG1]], #0xc00000 -; CHECK: msr FPCR, [[XREG2]] -; CHECK-DAG: fadd [[SREG2:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG3:x[0-9]+]], FPCR -; CHECK-DAG: and [[XREG4:x[0-9]+]], [[XREG3]], #0xffffffffff3fffff -; CHECK-NEXT: msr FPCR, [[XREG4]] -; CHECK-NEXT: fsub s0, [[SREG1]], [[SREG2]] -; CHECK-NEXT: ret define float @set_rounding_round_dynamic(float %x, float %y) #0 { +; CHECK-LABEL: set_rounding_round_dynamic: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: fadd s2, s0, s1 +; CHECK-NEXT: orr x8, x8, #0xc00000 +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: and x8, x8, #0xffffffffff3fffff +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fsub s0, s2, s0 +; CHECK-NEXT: ret entry: %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 call void @llvm.set.rounding(i32 0) #0 @@ -178,3 +193,6 @@ declare i32 @llvm.get.rounding() declare void @llvm.set.rounding(i32) attributes #0 = { strictfp } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK-GI: {{.*}} +; CHECK-SD: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll index 62d41fc..19e1aa5 100644 --- a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll +++ b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll @@ -26,9 +26,9 @@ define i32 @reduce_and_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_and_v1i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.b[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.b[0] +; CHECK-NEXT: tst w8, #0x80 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i8> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x) @@ -120,9 +120,9 @@ define i32 @reduce_and_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_and_v1i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.h[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.h[0] +; CHECK-NEXT: tst w8, #0x8000 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i16> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x) @@ -305,9 +305,9 @@ define i32 @reduce_or_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_or_v1i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.b[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.b[0] +; CHECK-NEXT: tst w8, #0x80 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i8> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x) @@ -399,9 +399,9 @@ define i32 @reduce_or_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_or_v1i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.h[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.h[0] +; CHECK-NEXT: tst w8, #0x8000 +; CHECK-NEXT: csel w0, 
w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i16> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x) @@ -584,9 +584,9 @@ define i32 @reduce_xor_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_xor_v1i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.b[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.b[0] +; CHECK-NEXT: tst w8, #0x80 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i8> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x) @@ -679,9 +679,9 @@ define i32 @reduce_xor_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_xor_v1i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.h[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.h[0] +; CHECK-NEXT: tst w8, #0x8000 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i16> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x) diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll index 0490e5a..94ba5cd 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16.ll @@ -10908,12 +10908,13 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) { ; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v1, 0x7060302 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; ; GFX1250-LABEL: v_fadd_v2bf16: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v1 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v1 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %op = fadd <2 x bfloat> %a, %b ret <2 x bfloat> %op } @@ -11446,13 +11447,14 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) { ; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11FAKE16-NEXT: v_perm_b32 v1, v1, v4, 0x7060302 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; ; GFX1250-LABEL: v_fadd_v4bf16: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v2 -; GFX1250-NEXT: v_pk_add_bf16 v1, v1, v3 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v2 +; GFX1250-NEXT: v_pk_add_bf16 v1, v1, v3 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %op = fadd <4 x bfloat> %a, %b ret <4 x bfloat> %op } @@ -49991,6 +49993,622 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> ret <4 x bfloat> %op } +define <8 x bfloat> @v_fma_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) { +; GCN-LABEL: v_fma_v8bf16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: 
v_mul_f32_e32 v3, 1.0, v3 +; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_fma_f32 v7, v7, v15, v23 +; GCN-NEXT: v_fma_f32 v6, v6, v14, v22 +; GCN-NEXT: v_fma_f32 v5, v5, v13, v21 +; GCN-NEXT: v_fma_f32 v4, v4, v12, v20 +; GCN-NEXT: v_fma_f32 v3, v3, v11, v19 +; GCN-NEXT: v_fma_f32 v2, v2, v10, v18 +; GCN-NEXT: v_fma_f32 v1, v1, v9, v17 +; GCN-NEXT: v_fma_f32 v0, v0, v8, v16 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_fma_v8bf16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: v_fma_f32 v7, v7, v15, v23 +; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_fma_f32 v6, v6, v14, v15 +; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_fma_f32 v5, v5, v13, v14 +; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_and_b32_e32 
v12, 0xffff0000, v12 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_fma_f32 v4, v4, v12, v13 +; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v19 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_fma_f32 v3, v3, v11, v12 +; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_fma_f32 v2, v2, v10, v11 +; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v17 +; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v16 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_fma_f32 v1, v1, v9, v11 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v10 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_fma_f32 v0, v0, v8, v9 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_fma_v8bf16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX8-NEXT: v_fma_f32 v12, v14, v13, v12 +; GFX8-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v13, vcc, v13, v12 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff +; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX8-NEXT: v_add_u32_e32 v13, vcc, s4, v13 +; GFX8-NEXT: v_fma_f32 v3, v3, v7, v11 +; GFX8-NEXT: v_or_b32_e32 v14, 0x400000, v12 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX8-NEXT: v_bfe_u32 v7, v3, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v12, v13, v14, vcc +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v3 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v11, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v6 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v2 +; GFX8-NEXT: v_fma_f32 v7, v13, v11, v7 +; GFX8-NEXT: v_bfe_u32 v11, v7, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v11, vcc, v11, v7 +; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX8-NEXT: v_add_u32_e32 v11, vcc, s4, v11 +; GFX8-NEXT: v_fma_f32 v2, v2, v6, v10 +; GFX8-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX8-NEXT: v_bfe_u32 v6, v2, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v7, v11, v13, vcc +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v2 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, s4, v6 +; GFX8-NEXT: 
v_or_b32_e32 v10, 0x400000, v2 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v10, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; GFX8-NEXT: v_fma_f32 v6, v11, v10, v6 +; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6 +; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, s4, v10 +; GFX8-NEXT: v_fma_f32 v1, v1, v5, v9 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_bfe_u32 v5, v1, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v1 +; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5 +; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v1 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v8 +; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v0 +; GFX8-NEXT: v_fma_f32 v5, v10, v9, v5 +; GFX8-NEXT: v_bfe_u32 v9, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v9, vcc, v9, v5 +; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9 +; GFX8-NEXT: v_fma_f32 v0, v0, v4, v8 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4 +; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v0 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX8-NEXT: v_alignbit_b32 v0, v0, v5, 16 +; GFX8-NEXT: v_alignbit_b32 v1, v1, v6, 16 +; GFX8-NEXT: v_alignbit_b32 v2, v2, v7, 16 +; GFX8-NEXT: v_alignbit_b32 v3, v3, v12, 16 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX900-LABEL: v_fma_v8bf16: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX900-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX900-NEXT: v_fma_f32 v12, v14, v13, v12 +; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX900-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: v_fma_f32 v3, v3, v7, v11 +; GFX900-NEXT: v_add3_u32 v13, v13, v12, s4 +; GFX900-NEXT: v_or_b32_e32 v14, 0x400000, v12 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX900-NEXT: v_bfe_u32 v7, v3, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v12, v13, v14, vcc +; GFX900-NEXT: v_add3_u32 v7, v7, v3, s4 +; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v3 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX900-NEXT: v_cndmask_b32_e32 v3, v7, v11, vcc +; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v6 +; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v2 +; GFX900-NEXT: v_fma_f32 v7, v13, v11, v7 +; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX900-NEXT: v_and_b32_e32 
v2, 0xffff0000, v2 +; GFX900-NEXT: v_bfe_u32 v11, v7, 16, 1 +; GFX900-NEXT: v_fma_f32 v2, v2, v6, v10 +; GFX900-NEXT: v_add3_u32 v11, v11, v7, s4 +; GFX900-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX900-NEXT: v_bfe_u32 v6, v2, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v7, v11, v13, vcc +; GFX900-NEXT: v_add3_u32 v6, v6, v2, s4 +; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v2 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX900-NEXT: v_cndmask_b32_e32 v2, v6, v10, vcc +; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v5 +; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; GFX900-NEXT: v_fma_f32 v6, v11, v10, v6 +; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX900-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX900-NEXT: v_fma_f32 v1, v1, v5, v9 +; GFX900-NEXT: v_add3_u32 v10, v10, v6, s4 +; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX900-NEXT: v_bfe_u32 v5, v1, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX900-NEXT: v_add3_u32 v5, v5, v1, s4 +; GFX900-NEXT: v_or_b32_e32 v9, 0x400000, v1 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX900-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc +; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v8 +; GFX900-NEXT: v_lshlrev_b32_e32 v9, 16, v4 +; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v0 +; GFX900-NEXT: v_fma_f32 v5, v10, v9, v5 +; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX900-NEXT: v_bfe_u32 v9, v5, 16, 1 +; GFX900-NEXT: v_fma_f32 v0, v0, v4, v8 +; GFX900-NEXT: v_add3_u32 v9, v9, v5, s4 +; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v5 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX900-NEXT: v_bfe_u32 v4, v0, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc +; GFX900-NEXT: v_add3_u32 v4, v4, v0, s4 +; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v0 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX900-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc +; GFX900-NEXT: s_mov_b32 s4, 0x7060302 +; GFX900-NEXT: v_perm_b32 v0, v0, v5, s4 +; GFX900-NEXT: v_perm_b32 v1, v1, v6, s4 +; GFX900-NEXT: v_perm_b32 v2, v2, v7, s4 +; GFX900-NEXT: v_perm_b32 v3, v3, v12, s4 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fma_v8bf16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v11 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v7 +; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v3 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX950-NEXT: v_fmac_f32_e32 v12, v14, v13 +; GFX950-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v10 +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v6 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v2 +; GFX950-NEXT: v_fmac_f32_e32 v3, v13, v7 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX950-NEXT: v_fmac_f32_e32 v7, v2, v6 +; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v9 +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 +; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v1 +; GFX950-NEXT: v_fmac_f32_e32 v2, v10, v6 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX950-NEXT: 
v_lshlrev_b32_e32 v1, 16, v1 +; GFX950-NEXT: v_fmac_f32_e32 v6, v1, v5 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v0 +; GFX950-NEXT: v_fmac_f32_e32 v1, v9, v5 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v8 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX950-NEXT: v_fmac_f32_e32 v5, v0, v4 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v5, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v6, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v7, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v11, v12 +; GFX950-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_fma_v8bf16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX10-NEXT: v_fmac_f32_e32 v12, v14, v13 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v2 +; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX10-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v10 +; GFX10-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v12 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX10-NEXT: v_add3_u32 v13, v13, v12, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v3, v14, v7 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v10 +; GFX10-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v1 +; GFX10-NEXT: v_cndmask_b32_e32 v10, v13, v15, vcc_lo +; GFX10-NEXT: v_bfe_u32 v13, v3, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v7, v2, v6 +; GFX10-NEXT: v_add3_u32 v12, v16, v11, 0x7fff +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v9 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v5 +; GFX10-NEXT: v_add3_u32 v13, v13, v3, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v3 +; GFX10-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_fmac_f32_e32 v2, v14, v6 +; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX10-NEXT: v_add3_u32 v6, v16, v7, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v3, v13, v15, vcc_lo +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v8 +; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v4 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX10-NEXT: v_bfe_u32 v14, v2, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX10-NEXT: v_fmac_f32_e32 v9, v1, v5 +; GFX10-NEXT: v_fmac_f32_e32 v15, v18, v16 +; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v2 +; GFX10-NEXT: v_fmac_f32_e32 v8, v0, v4 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v6, v13, vcc_lo +; GFX10-NEXT: v_add3_u32 v0, v14, v2, 0x7fff +; GFX10-NEXT: v_bfe_u32 v4, v9, 16, 1 +; GFX10-NEXT: v_bfe_u32 v5, v15, 16, 1 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX10-NEXT: v_bfe_u32 v7, v8, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v9 +; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX10-NEXT: v_add3_u32 v2, v5, v15, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo +; GFX10-NEXT: v_add3_u32 v0, v4, v9, 0x7fff +; GFX10-NEXT: 
v_or_b32_e32 v4, 0x400000, v15 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX10-NEXT: v_add3_u32 v5, v7, v8, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v8 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX10-NEXT: v_cndmask_b32_e32 v5, v0, v13, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX10-NEXT: v_perm_b32 v0, v4, v2, 0x7060302 +; GFX10-NEXT: v_perm_b32 v2, v6, v3, 0x7060302 +; GFX10-NEXT: v_perm_b32 v1, v5, v1, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v7, v12, v17, vcc_lo +; GFX10-NEXT: v_perm_b32 v3, v7, v10, 0x7060302 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11TRUE16-LABEL: v_fma_v8bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v11 +; GFX11TRUE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v7 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v7, v2, v6 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v12, v14, v13 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v11 +; GFX11TRUE16-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v12 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_add3_u32 v13, v13, v12, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v1 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v10 +; GFX11TRUE16-NEXT: v_bfe_u32 v10, v11, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX11TRUE16-NEXT: v_bfe_u32 v13, v7, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v14, v16, v15 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v10, v11, 0x7fff +; GFX11TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v5 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX11TRUE16-NEXT: v_bfe_u32 v15, v14, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v2, v6, vcc_lo +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_add3_u32 v10, v15, v14, 0x7fff +; GFX11TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v9 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX11TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v14 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v4 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v6.h +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v9, v1, v5 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v10, v12, vcc_lo +; 
GFX11TRUE16-NEXT: v_add3_u32 v10, v13, v7, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX11TRUE16-NEXT: v_bfe_u32 v7, v9, 16, 1 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v13, 16, v8 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v4, v10, v11 :: v_dual_and_b32 v5, 0xffff0000, v8 +; GFX11TRUE16-NEXT: v_add3_u32 v7, v7, v9, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc_lo +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v15, v17, v16 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_bfe_u32 v12, v15, 16, 1 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v13, v16, v14 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v5, v0, v1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v15 +; GFX11TRUE16-NEXT: v_add3_u32 v8, v12, v15, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v13, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v13 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13 +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v13, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v9, v11, v5, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v0, v12, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v7.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v9, v10, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.h +; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11FAKE16-LABEL: v_fma_v8bf16: +; GFX11FAKE16: ; %bb.0: +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v12, v14, v13 :: v_dual_and_b32 v3, 0xffff0000, v3 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v2 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX11FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v12 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11FAKE16-NEXT: v_add3_u32 v13, v13, v12, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v10 +; GFX11FAKE16-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX11FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | 
instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v3, v14, v7 +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v10, v13, v15 :: v_dual_and_b32 v7, 0xffff0000, v10 +; GFX11FAKE16-NEXT: v_add3_u32 v12, v16, v11, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v1 +; GFX11FAKE16-NEXT: v_bfe_u32 v13, v3, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v3 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_add3_u32 v13, v13, v3, 0x7fff +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v3, v13, v15 :: v_dual_and_b32 v2, 0xffff0000, v2 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v7, v2, v6 :: v_dual_lshlrev_b32 v6, 16, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v8 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v9 +; GFX11FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX11FAKE16-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v14, v6 :: v_dual_and_b32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX11FAKE16-NEXT: v_add3_u32 v6, v16, v7, 0x7fff +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v9, v1, v5 :: v_dual_and_b32 v8, 0xffff0000, v8 +; GFX11FAKE16-NEXT: v_bfe_u32 v14, v2, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v2 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v13, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v16, 16, v4 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v9 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v8, v0, v4 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v14, v2, 0x7fff +; GFX11FAKE16-NEXT: v_bfe_u32 v4, v9, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v15, v18, v16 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_add3_u32 v0, v4, v9, 0x7fff +; GFX11FAKE16-NEXT: v_bfe_u32 v5, v15, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v15 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_add3_u32 v2, v5, v15, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v5, v7, v8, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v5, v0, v13, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX11FAKE16-NEXT: v_perm_b32 v0, v4, v2, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v2, v6, v3, 0x7060302 +; GFX11FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_perm_b32 v1, v5, v1, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v7, v12, v17, vcc_lo +; GFX11FAKE16-NEXT: v_perm_b32 v3, v7, v10, 0x7060302 +; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; ; GFX1250-LABEL: v_fma_v8bf16: ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 @@ -50000,11 +50618,1239 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> ; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v6, v10 ; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v7, v11 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] -define <8 x bfloat> @v_fma_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) { %op = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) ret <8 x bfloat> %op } +define <16 x bfloat> @v_fma_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c) { +; GCN-LABEL: v_fma_v16bf16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: v_fma_f32 v15, v15, v31, v32 +; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60 +; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: v_fma_f32 v14, v14, v30, v31 +; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GCN-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:56 +; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: v_fma_f32 v13, v13, v29, v30 +; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GCN-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52 +; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: v_fma_f32 v12, v12, v28, v29 +; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GCN-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:48 +; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: v_fma_f32 v11, v11, v27, v28 +; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GCN-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:44 +; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: v_fma_f32 v10, v10, v26, v27 +; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; 
GCN-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:40 +; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: v_fma_f32 v9, v9, v25, v26 +; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GCN-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:36 +; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: v_fma_f32 v8, v8, v24, v25 +; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GCN-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:32 +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: v_fma_f32 v7, v7, v23, v24 +; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GCN-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28 +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_fma_f32 v6, v6, v22, v23 +; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GCN-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:24 +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_fma_f32 v5, v5, v21, v22 +; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GCN-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:20 +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_fma_f32 v4, v4, v20, v21 +; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:16 +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_fma_f32 v3, v3, v19, v20 +; GCN-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:12 +; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:4 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_fma_f32 v2, v2, v18, v19 +; GCN-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:8 +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v16, 
0xffff0000, v16 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v20 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_fma_f32 v1, v1, v17, v18 +; GCN-NEXT: v_fma_f32 v0, v0, v16, v19 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_fma_v16bf16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GFX7-NEXT: 
v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: v_fma_f32 v15, v15, v31, v32 +; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: v_fma_f32 v14, v14, v30, v31 +; GFX7-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:56 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: v_fma_f32 v13, v13, v29, v30 +; GFX7-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: v_fma_f32 v12, v12, v28, v29 +; GFX7-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:48 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: v_fma_f32 v11, v11, v27, v28 +; GFX7-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:44 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: v_fma_f32 v10, v10, v26, v27 +; GFX7-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:40 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: v_fma_f32 v9, v9, v25, v26 +; GFX7-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:36 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: v_fma_f32 v8, v8, v24, v25 +; GFX7-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:32 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: v_fma_f32 v7, v7, v23, v24 +; GFX7-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_fma_f32 v6, v6, v22, v23 +; GFX7-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:24 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: s_waitcnt 
vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: v_fma_f32 v5, v5, v21, v22 +; GFX7-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:20 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: v_fma_f32 v4, v4, v20, v21 +; GFX7-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:16 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: v_fma_f32 v3, v3, v19, v20 +; GFX7-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:12 +; GFX7-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:4 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: v_fma_f32 v2, v2, v18, v19 +; GFX7-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:8 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: v_fma_f32 v1, v1, v17, v18 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v19 +; GFX7-NEXT: v_fma_f32 v0, v0, v16, v17 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_fma_v16bf16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX8-NEXT: v_fma_f32 v24, v26, v25, v24 +; GFX8-NEXT: v_fma_f32 v7, v7, v15, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v15, 16, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v14 +; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v6 +; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX8-NEXT: v_fma_f32 v15, v25, v23, v15 +; GFX8-NEXT: v_fma_f32 v6, v6, v14, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v13 +; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v5 +; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX8-NEXT: v_fma_f32 v14, v23, v22, v14 +; GFX8-NEXT: v_fma_f32 v5, v5, v13, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v12 +; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v4 +; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX8-NEXT: v_fma_f32 v13, v22, v21, v13 +; GFX8-NEXT: v_fma_f32 v4, v4, v12, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v19 +; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v11 +; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v3 +; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX8-NEXT: v_fma_f32 v12, v21, v20, v12 +; GFX8-NEXT: v_fma_f32 v3, v3, v11, v19 +; GFX8-NEXT: v_lshlrev_b32_e32 
v11, 16, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v10 +; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v2 +; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX8-NEXT: v_fma_f32 v11, v20, v19, v11 +; GFX8-NEXT: v_fma_f32 v2, v2, v10, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v17 +; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX8-NEXT: v_fma_f32 v10, v19, v18, v10 +; GFX8-NEXT: v_fma_f32 v1, v1, v9, v17 +; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v16 +; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v8 +; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX8-NEXT: v_fma_f32 v0, v0, v8, v16 +; GFX8-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v24 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff +; GFX8-NEXT: v_add_u32_e32 v8, vcc, s4, v8 +; GFX8-NEXT: v_or_b32_e32 v16, 0x400000, v24 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX8-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc +; GFX8-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v7 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_fma_f32 v9, v18, v17, v9 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v7 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX8-NEXT: v_cndmask_b32_e32 v7, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v15, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v15 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v15 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX8-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v6 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v6 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v14, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v14 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v14 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX8-NEXT: v_cndmask_b32_e32 v14, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v5 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v5 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v13, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v13 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v13 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX8-NEXT: v_cndmask_b32_e32 v13, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v4, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v4 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v4 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX8-NEXT: v_cndmask_b32_e32 v4, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v12, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v12 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v12 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX8-NEXT: v_cndmask_b32_e32 v12, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v3, 16, 
1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v3 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v11 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX8-NEXT: v_cndmask_b32_e32 v11, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v2, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v2 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v2 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX8-NEXT: v_cndmask_b32_e32 v2, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v10, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v10 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v10 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX8-NEXT: v_cndmask_b32_e32 v10, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v1, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v1 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v9, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v9 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v9 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX8-NEXT: v_cndmask_b32_e32 v9, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v0, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v0 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v0 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX8-NEXT: v_alignbit_b32 v0, v0, v9, 16 +; GFX8-NEXT: v_alignbit_b32 v1, v1, v10, 16 +; GFX8-NEXT: v_alignbit_b32 v2, v2, v11, 16 +; GFX8-NEXT: v_alignbit_b32 v3, v3, v12, 16 +; GFX8-NEXT: v_alignbit_b32 v4, v4, v13, 16 +; GFX8-NEXT: v_alignbit_b32 v5, v5, v14, 16 +; GFX8-NEXT: v_alignbit_b32 v6, v6, v15, 16 +; GFX8-NEXT: v_alignbit_b32 v7, v7, v8, 16 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX900-LABEL: v_fma_v16bf16: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX900-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX900-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX900-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX900-NEXT: v_fma_f32 v24, v26, v25, v24 +; GFX900-NEXT: v_fma_f32 v7, v7, v15, v23 +; GFX900-NEXT: v_lshlrev_b32_e32 v15, 16, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v14 +; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v6 +; GFX900-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX900-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX900-NEXT: v_fma_f32 v15, v25, v23, v15 +; GFX900-NEXT: v_fma_f32 v6, v6, v14, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v14, 16, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v13 +; 
GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v5 +; GFX900-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX900-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX900-NEXT: v_fma_f32 v14, v23, v22, v14 +; GFX900-NEXT: v_fma_f32 v5, v5, v13, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v12 +; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v4 +; GFX900-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX900-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX900-NEXT: v_fma_f32 v13, v22, v21, v13 +; GFX900-NEXT: v_fma_f32 v4, v4, v12, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v12, 16, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v11 +; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v3 +; GFX900-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX900-NEXT: v_fma_f32 v12, v21, v20, v12 +; GFX900-NEXT: v_fma_f32 v3, v3, v11, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v10 +; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v2 +; GFX900-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX900-NEXT: v_fma_f32 v11, v20, v19, v11 +; GFX900-NEXT: v_fma_f32 v2, v2, v10, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v17 +; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v9 +; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; GFX900-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX900-NEXT: v_fma_f32 v10, v19, v18, v10 +; GFX900-NEXT: v_fma_f32 v1, v1, v9, v17 +; GFX900-NEXT: v_lshlrev_b32_e32 v9, 16, v16 +; GFX900-NEXT: v_lshlrev_b32_e32 v17, 16, v8 +; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX900-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX900-NEXT: v_fma_f32 v0, v0, v8, v16 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX900-NEXT: v_add3_u32 v8, v8, v24, s4 +; GFX900-NEXT: v_or_b32_e32 v16, 0x400000, v24 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX900-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc +; GFX900-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX900-NEXT: v_fma_f32 v9, v18, v17, v9 +; GFX900-NEXT: v_add3_u32 v16, v16, v7, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v7 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX900-NEXT: v_cndmask_b32_e32 v7, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v15, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v15, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v15 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX900-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v6, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v6, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v6 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX900-NEXT: v_cndmask_b32_e32 v6, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v14, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v14, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v14 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX900-NEXT: v_cndmask_b32_e32 v14, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v5, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v5, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v5 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX900-NEXT: 
v_cndmask_b32_e32 v5, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v13, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v13, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v13 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX900-NEXT: v_cndmask_b32_e32 v13, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v4, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v4, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX900-NEXT: v_cndmask_b32_e32 v4, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v12, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v12, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v12 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX900-NEXT: v_cndmask_b32_e32 v12, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v3, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v3, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v3 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX900-NEXT: v_cndmask_b32_e32 v3, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v11, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX900-NEXT: v_cndmask_b32_e32 v11, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v2, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v2, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v2 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX900-NEXT: v_cndmask_b32_e32 v2, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v10, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v10, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v10 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX900-NEXT: v_cndmask_b32_e32 v10, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v1, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v1, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v1 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX900-NEXT: v_cndmask_b32_e32 v1, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v9, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v9, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v9 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX900-NEXT: v_cndmask_b32_e32 v9, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v0, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v0, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v0 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX900-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc +; GFX900-NEXT: s_mov_b32 s4, 0x7060302 +; GFX900-NEXT: v_perm_b32 v0, v0, v9, s4 +; GFX900-NEXT: v_perm_b32 v1, v1, v10, s4 +; GFX900-NEXT: v_perm_b32 v2, v2, v11, s4 +; GFX900-NEXT: v_perm_b32 v3, v3, v12, s4 +; GFX900-NEXT: v_perm_b32 v4, v4, v13, s4 +; GFX900-NEXT: v_perm_b32 v5, v5, v14, s4 +; GFX900-NEXT: v_perm_b32 v6, v6, v15, s4 +; GFX900-NEXT: v_perm_b32 v7, v7, v8, s4 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fma_v16bf16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v24, 0xffff0000, v23 +; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v15 +; GFX950-NEXT: v_and_b32_e32 v26, 0xffff0000, v7 +; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX950-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX950-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX950-NEXT: v_fmac_f32_e32 v23, v7, v15 +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v22 +; GFX950-NEXT: v_and_b32_e32 v15, 0xffff0000, v14 +; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v6 +; GFX950-NEXT: v_fmac_f32_e32 v7, v25, v15 +; GFX950-NEXT: v_lshlrev_b32_e32 v15, 16, v22 +; GFX950-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; GFX950-NEXT: 
v_lshlrev_b32_e32 v6, 16, v6 +; GFX950-NEXT: v_fmac_f32_e32 v15, v6, v14 +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v21 +; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v13 +; GFX950-NEXT: v_and_b32_e32 v22, 0xffff0000, v5 +; GFX950-NEXT: v_fmac_f32_e32 v6, v22, v14 +; GFX950-NEXT: v_lshlrev_b32_e32 v14, 16, v21 +; GFX950-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX950-NEXT: v_fmac_f32_e32 v14, v5, v13 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v20 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v12 +; GFX950-NEXT: v_and_b32_e32 v21, 0xffff0000, v4 +; GFX950-NEXT: v_fmac_f32_e32 v5, v21, v13 +; GFX950-NEXT: v_lshlrev_b32_e32 v13, 16, v20 +; GFX950-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX950-NEXT: v_fmac_f32_e32 v13, v4, v12 +; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v19 +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v11 +; GFX950-NEXT: v_and_b32_e32 v20, 0xffff0000, v3 +; GFX950-NEXT: v_fmac_f32_e32 v4, v20, v12 +; GFX950-NEXT: v_lshlrev_b32_e32 v12, 16, v19 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX950-NEXT: v_fmac_f32_e32 v12, v3, v11 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v18 +; GFX950-NEXT: v_and_b32_e32 v11, 0xffff0000, v10 +; GFX950-NEXT: v_and_b32_e32 v19, 0xffff0000, v2 +; GFX950-NEXT: v_fmac_f32_e32 v3, v19, v11 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v18 +; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX950-NEXT: v_fmac_f32_e32 v11, v2, v10 +; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v17 +; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v9 +; GFX950-NEXT: v_and_b32_e32 v18, 0xffff0000, v1 +; GFX950-NEXT: v_fmac_f32_e32 v2, v18, v10 +; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v17 +; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX950-NEXT: v_fmac_f32_e32 v10, v1, v9 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v16 +; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v8 +; GFX950-NEXT: v_and_b32_e32 v17, 0xffff0000, v0 +; GFX950-NEXT: v_fmac_f32_e32 v1, v17, v9 +; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v16 +; GFX950-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX950-NEXT: v_fmac_f32_e32 v9, v0, v8 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v9, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v10, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v11, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v12, v4 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v13, v5 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v5, v14, v6 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v15, v7 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v7, v23, v24 +; GFX950-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_fma_v16bf16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX10-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v6 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX10-NEXT: v_fmac_f32_e32 v23, v7, v15 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v22 +; GFX10-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v14 +; GFX10-NEXT: v_or_b32_e32 v27, 0x400000, v24 +; GFX10-NEXT: v_bfe_u32 v28, v23, 16, 1 +; GFX10-NEXT: 
v_and_b32_e32 v14, 0xffff0000, v14 +; GFX10-NEXT: v_add3_u32 v25, v25, v24, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v7, v26, v15 +; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v22 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX10-NEXT: v_add3_u32 v24, v28, v23, 0x7fff +; GFX10-NEXT: v_bfe_u32 v26, v7, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v15, v6, v14 +; GFX10-NEXT: v_cndmask_b32_e32 v22, v25, v27, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v21 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v13 +; GFX10-NEXT: v_lshlrev_b32_e32 v27, 16, v5 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23 +; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX10-NEXT: v_fmac_f32_e32 v6, v27, v14 +; GFX10-NEXT: v_cndmask_b32_e32 v23, v24, v25, vcc_lo +; GFX10-NEXT: v_add3_u32 v24, v26, v7, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v7 +; GFX10-NEXT: v_bfe_u32 v26, v15, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v21 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX10-NEXT: v_add3_u32 v21, v26, v15, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v14, v5, v13 +; GFX10-NEXT: v_cndmask_b32_e32 v7, v24, v25, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v15 +; GFX10-NEXT: v_bfe_u32 v25, v6, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v20 +; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v12 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v4 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX10-NEXT: v_fmac_f32_e32 v5, v26, v13 +; GFX10-NEXT: v_cndmask_b32_e32 v15, v21, v24, vcc_lo +; GFX10-NEXT: v_add3_u32 v21, v25, v6, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v6 +; GFX10-NEXT: v_bfe_u32 v25, v14, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v20 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v2 +; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX10-NEXT: v_add3_u32 v20, v25, v14, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v13, v4, v12 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v21, v24, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v14 +; GFX10-NEXT: v_bfe_u32 v24, v5, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v19 +; GFX10-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v3 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX10-NEXT: v_fmac_f32_e32 v4, v25, v12 +; GFX10-NEXT: v_cndmask_b32_e32 v14, v20, v21, vcc_lo +; GFX10-NEXT: v_add3_u32 v20, v24, v5, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v5 +; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v19 +; GFX10-NEXT: v_lshlrev_b32_e32 v19, 16, v18 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v10 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX10-NEXT: v_bfe_u32 v24, v13, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v12, v3, v11 +; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX10-NEXT: v_fmac_f32_e32 v19, v26, v25 +; GFX10-NEXT: v_cndmask_b32_e32 v5, v20, v21, vcc_lo +; GFX10-NEXT: v_bfe_u32 v20, v4, 16, 1 +; GFX10-NEXT: v_add3_u32 v21, v24, v13, 0x7fff +; GFX10-NEXT: v_bfe_u32 v24, v12, 16, 1 +; GFX10-NEXT: v_bfe_u32 v25, v19, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX10-NEXT: v_add3_u32 v11, v20, v4, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v4 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX10-NEXT: v_or_b32_e32 v26, 0x400000, v19 +; GFX10-NEXT: 
v_fmac_f32_e32 v18, v2, v10 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v17 +; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v9 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v11, v20, vcc_lo +; GFX10-NEXT: v_add3_u32 v11, v24, v12, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v12 +; GFX10-NEXT: v_add3_u32 v24, v25, v19, 0x7fff +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX10-NEXT: v_fmac_f32_e32 v2, v25, v10 +; GFX10-NEXT: v_cndmask_b32_e32 v11, v11, v20, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v8 +; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX10-NEXT: v_bfe_u32 v20, v2, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v17, v1, v9 +; GFX10-NEXT: v_cndmask_b32_e32 v10, v24, v26, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v16 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v0 +; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX10-NEXT: v_add3_u32 v1, v20, v2, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v2 +; GFX10-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX10-NEXT: v_fmac_f32_e32 v16, v0, v8 +; GFX10-NEXT: v_bfe_u32 v0, v17, 16, 1 +; GFX10-NEXT: v_bfe_u32 v27, v18, 16, 1 +; GFX10-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v17 +; GFX10-NEXT: v_add3_u32 v0, v0, v17, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 +; GFX10-NEXT: v_bfe_u32 v2, v16, 16, 1 +; GFX10-NEXT: v_add3_u32 v8, v8, v24, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v24 +; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v16 +; GFX10-NEXT: v_cndmask_b32_e32 v9, v0, v9, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX10-NEXT: v_add3_u32 v2, v2, v16, 0x7fff +; GFX10-NEXT: v_add3_u32 v12, v27, v18, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v18 +; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v13 +; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v20, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16 +; GFX10-NEXT: v_perm_b32 v1, v9, v1, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v25, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 +; GFX10-NEXT: v_perm_b32 v0, v2, v0, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v8, v12, v19, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13 +; GFX10-NEXT: v_perm_b32 v2, v8, v10, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v12, v21, v3, vcc_lo +; GFX10-NEXT: v_perm_b32 v3, v11, v4, 0x7060302 +; GFX10-NEXT: v_perm_b32 v4, v12, v5, 0x7060302 +; GFX10-NEXT: v_perm_b32 v5, v14, v6, 0x7060302 +; GFX10-NEXT: v_perm_b32 v6, v15, v7, 0x7060302 +; GFX10-NEXT: v_perm_b32 v7, v23, v22, 0x7060302 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11TRUE16-LABEL: v_fma_v16bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v23 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v14 +; GFX11TRUE16-NEXT: v_and_b32_e32 v28, 0xffff0000, v6 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v15 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | 
instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v24, v26, v25 :: v_dual_lshlrev_b32 v7, 16, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v22 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX11TRUE16-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v26, v28, v27 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v22, v6, v14 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v21 +; GFX11TRUE16-NEXT: v_and_b32_e32 v28, 0xffff0000, v13 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX11TRUE16-NEXT: v_add3_u32 v25, v25, v24, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v24 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v23, v7, v15 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v25, v29, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v29, 0xffff0000, v5 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfe_u32 v15, v23, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v24, v26, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v23 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23 +; GFX11TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v26 +; GFX11TRUE16-NEXT: v_add3_u32 v15, v15, v23, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v24, v24, v26, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v22, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v14, v29, v28 :: v_dual_cndmask_b32 v15, v15, v25 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26 +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v12 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v4 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v24, v27, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v20 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX11TRUE16-NEXT: v_add3_u32 v23, v23, v22, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v22 +; GFX11TRUE16-NEXT: v_bfe_u32 v28, v14, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v20, v4, v12 +; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v19 +; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v11 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v14 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, v15.h +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v21, v5, v13 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v13, v23, v27, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v5, v28, v14, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX11TRUE16-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v21, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v27, v20, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v24 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v22, vcc_lo +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v21 +; GFX11TRUE16-NEXT: v_add3_u32 v14, v23, v21, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 +; GFX11TRUE16-NEXT: v_add3_u32 v23, v25, v24, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v21, v27, v20, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v6.l, v13.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v14, v14, v22, vcc_lo +; 
GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v20 +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v18 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, v14.h +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v12, v25, v4 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v4, v23, v26, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v10 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v12, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v20, v21, v22 :: v_dual_and_b32 v25, 0xffff0000, v1 +; GFX11TRUE16-NEXT: v_add3_u32 v21, v23, v12, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v24, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v12 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, v20.h +; GFX11TRUE16-NEXT: v_add3_u32 v12, v23, v24, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v23, 0xffff0000, v9 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v19, v3, v11 +; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v3, v21, v22 :: v_dual_and_b32 v22, 0xffff0000, v17 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v18 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX11TRUE16-NEXT: v_bfe_u32 v18, v19, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v22, v25, v23 :: v_dual_fmac_f32 v11, v2, v10 +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v19 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v18, v19, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v8 +; GFX11TRUE16-NEXT: v_bfe_u32 v21, v11, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v17, v1, v9 :: v_dual_cndmask_b32 v10, v2, v10 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v16 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v12, v18, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v12, v21, v11, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v16 +; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v11 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX11TRUE16-NEXT: v_bfe_u32 v11, v17, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v21, v24, v23 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v9, v0, v1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v8, v12, v18, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v11, v11, v17, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v17 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v21, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 +; GFX11TRUE16-NEXT: v_add3_u32 v12, v19, 
v22, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v18, v9, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v21 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v21, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v11, v16, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v22 +; GFX11TRUE16-NEXT: v_add3_u32 v16, v18, v9, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v9 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v8.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v18, v0, v19, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v12, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v11.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v18.h +; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11FAKE16-LABEL: v_fma_v16bf16: +; GFX11FAKE16: ; %bb.0: +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX11FAKE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v24, v26, v25 :: v_dual_and_b32 v23, 0xffff0000, v23 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v23, v7, v15 :: v_dual_lshlrev_b32 v26, 16, v6 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v14 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v24 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11FAKE16-NEXT: v_bfe_u32 v28, v23, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX11FAKE16-NEXT: v_add3_u32 v25, v25, v24, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v22 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_add3_u32 v24, v28, v23, 0x7fff +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v7, v26, v15 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v22, v25, v27 :: v_dual_and_b32 v15, 0xffff0000, v22 +; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v23 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23 +; GFX11FAKE16-NEXT: v_bfe_u32 v26, v7, 16, 1 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v27, 16, v5 +; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v23, v24, v25, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_add3_u32 v24, v26, v7, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v7 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v7, v24, v25 :: v_dual_and_b32 v6, 0xffff0000, v6 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v15, v6, v14 :: v_dual_lshlrev_b32 v14, 16, v13 +; GFX11FAKE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v15 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v21 +; GFX11FAKE16-NEXT: v_bfe_u32 v26, v15, 16, 1 
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v6, v27, v14 +; GFX11FAKE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v21 +; GFX11FAKE16-NEXT: v_add3_u32 v21, v26, v15, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v4 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v6, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v14, v5, v13 :: v_dual_lshlrev_b32 v5, 16, v20 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v13, 16, v12 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v15, v21, v24, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_add3_u32 v21, v25, v6, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v6 +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v14, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v5, v26, v13 :: v_dual_and_b32 v12, 0xffff0000, v12 +; GFX11FAKE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v20 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v2 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: v_add3_u32 v20, v25, v14, 0x7fff +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v6, v21, v24 :: v_dual_lshlrev_b32 v25, 16, v3 +; GFX11FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v14 +; GFX11FAKE16-NEXT: v_bfe_u32 v24, v5, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v13, v4, v12 :: v_dual_lshlrev_b32 v4, 16, v19 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v4, v25, v12 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v14, v20, v21, vcc_lo +; GFX11FAKE16-NEXT: v_add3_u32 v20, v24, v5, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v5 +; GFX11FAKE16-NEXT: v_bfe_u32 v24, v13, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v19 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v10 +; GFX11FAKE16-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v12, v3, v11 :: v_dual_cndmask_b32 v5, v20, v21 +; GFX11FAKE16-NEXT: v_add3_u32 v21, v24, v13, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v13 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v19, 16, v18 +; GFX11FAKE16-NEXT: v_bfe_u32 v20, v4, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v24, v12, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v19, v26, v25 +; GFX11FAKE16-NEXT: v_add3_u32 v11, v20, v4, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v4 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v18, v2, v10 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v17 +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v19, 16, 1 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v10, 16, v9 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v11, v20, vcc_lo +; GFX11FAKE16-NEXT: v_add3_u32 v11, v24, v12, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v12 +; GFX11FAKE16-NEXT: v_add3_u32 v24, v25, v19, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v19 +; GFX11FAKE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX11FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v25, v10 :: v_dual_and_b32 v9, 0xffff0000, v9 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v20, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v8 +; GFX11FAKE16-NEXT: v_bfe_u32 v20, v2, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v10, v24, v26, vcc_lo +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v24, 16, v16 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v0 +; GFX11FAKE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v17, v1, v9 :: v_dual_and_b32 v0, 0xffff0000, v0 +; GFX11FAKE16-NEXT: v_add3_u32 v1, v20, v2, 0x7fff +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v16, v0, v8 +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v17, 16, 1 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX11FAKE16-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v24 +; GFX11FAKE16-NEXT: v_bfe_u32 v2, v16, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v17, 0x7fff +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo +; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v17 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 +; GFX11FAKE16-NEXT: v_add3_u32 v8, v8, v24, 0x7fff +; GFX11FAKE16-NEXT: v_bfe_u32 v27, v18, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v16, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v16 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v9, v0, v9, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11FAKE16-NEXT: v_add3_u32 v12, v27, v18, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v18 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v1, v9, v1, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v8, v20, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v25, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 +; GFX11FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v8, v12, v19, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v2, v8, v10, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v12, v21, v3, vcc_lo +; GFX11FAKE16-NEXT: v_perm_b32 v3, v11, v4, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v4, v12, v5, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v5, v14, v6, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v6, v15, v7, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v7, v23, v22, 0x7060302 +; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; ; GFX1250-LABEL: v_fma_v16bf16: ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 @@ -50018,67 +51864,2797 @@ define <8 x bfloat> @v_fma_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> ; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v14, v22 ; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v15, v23 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] -define <16 x bfloat> @v_fma_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c) { %op = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c) ret <16 x bfloat> %op } +define <32 x bfloat> 
@v_fma_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c) { +; GCN-LABEL: v_fma_v32bf16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:256 +; GCN-NEXT: s_waitcnt vmcnt(2) +; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: v_fma_f32 v31, v31, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:252 +; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v30, v30, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:248 +; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v29, v29, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:244 +; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v28, v28, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240 +; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v27, v27, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:236 +; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v26, v26, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:232 +; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; 
GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v25, v25, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:228 +; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v24, v24, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:224 +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v23, v23, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:220 +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v22, v22, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:216 +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v21, v21, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:212 +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v20, v20, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:208 +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v19, v19, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204 +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: 
v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v18, v18, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:200 +; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v17, v17, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:196 +; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v16, v16, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:192 +; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v15, v15, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:188 +; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v14, v14, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:184 +; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v13, v13, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:180 +; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v12, v12, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176 +; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 
v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v11, v11, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172 +; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v10, v10, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:168 +; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v9, v9, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:164 +; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v8, v8, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160 +; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v7, v7, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:156 +; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v6, v6, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:152 +; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v5, v5, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:148 +; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: 
v_fma_f32 v4, v4, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:144 +; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v3, v3, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140 +; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v2, v2, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v1, v1, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:132 +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v0, v0, v32, v33 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: s_setpc_b64 s[30:31] +; 
+; GFX7-LABEL: v_fma_v32bf16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:256 +; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: s_waitcnt vmcnt(2) +; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: v_fma_f32 v31, v31, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124 +; 
GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:252 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v30, v30, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:248 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v29, v29, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:244 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v28, v28, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v27, v27, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:236 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v26, v26, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:232 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v25, v25, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:228 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v24, v24, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:224 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; 
GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v23, v23, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:220 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v22, v22, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:216 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v21, v21, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:212 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v20, v20, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:208 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v19, v19, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204 +; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v18, v18, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:200 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v17, v17, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:196 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v16, v16, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:192 +; 
GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v15, v15, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:188 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v14, v14, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:184 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v13, v13, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:180 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v12, v12, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v11, v11, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v10, v10, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:168 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v9, v9, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:164 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 
v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v8, v8, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v7, v7, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:156 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v6, v6, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:152 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v5, v5, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:148 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v4, v4, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:144 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v3, v3, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v2, v2, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v1, v1, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:132 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 
v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v0, v0, v32, v33 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_fma_v32bf16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 +; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v15 +; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff +; GFX8-NEXT: s_waitcnt vmcnt(1) +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v32 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v15, v15, v33, v32 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60 +; GFX8-NEXT: v_fma_f32 v31, v31, v35, v34 +; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v30 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v14 +; GFX8-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v14, v14, v30, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:56 +; GFX8-NEXT: v_fma_f32 v32, v34, v32, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v30, 16, v29 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v13 +; GFX8-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v13, v13, v29, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52 +; GFX8-NEXT: v_fma_f32 v30, v34, v30, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v29, 16, v28 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v12 +; GFX8-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v12, v12, v28, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48 +; GFX8-NEXT: v_fma_f32 v29, v34, v29, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v28, 16, v27 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v11 +; GFX8-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v11, v11, v27, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44 +; GFX8-NEXT: v_fma_f32 v28, v34, v28, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v27, 16, v26 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v10 +; GFX8-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v10, v10, v26, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40 +; GFX8-NEXT: v_fma_f32 v27, v34, v27, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v25 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v9 +; GFX8-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX8-NEXT: 
v_and_b32_e32 v9, 0xffff0000, v9 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v9, v9, v25, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36 +; GFX8-NEXT: v_fma_f32 v26, v35, v34, v26 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v24 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v8 +; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v8, v8, v24, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32 +; GFX8-NEXT: v_fma_f32 v25, v35, v34, v25 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v7 +; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v7, v7, v23, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28 +; GFX8-NEXT: v_fma_f32 v24, v35, v34, v24 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v6 +; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v6, v6, v22, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:24 +; GFX8-NEXT: v_fma_f32 v23, v35, v34, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v5 +; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v5, v5, v21, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20 +; GFX8-NEXT: v_fma_f32 v22, v35, v34, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v4 +; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v4, v4, v20, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16 +; GFX8-NEXT: v_fma_f32 v21, v35, v34, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v19 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v3 +; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v3, v3, v19, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; GFX8-NEXT: v_fma_f32 v20, v35, v34, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v2 +; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v2, v2, v18, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8 +; GFX8-NEXT: v_fma_f32 v19, v35, v34, v19 +; GFX8-NEXT: 
v_lshlrev_b32_e32 v34, 16, v17 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v1 +; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v1, v1, v17, v33 +; GFX8-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:4 +; GFX8-NEXT: v_fma_f32 v18, v35, v34, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v16 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v0 +; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v17 +; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX8-NEXT: v_fma_f32 v0, v0, v16, v17 +; GFX8-NEXT: v_bfe_u32 v16, v31, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v31 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v31, v31 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v31 +; GFX8-NEXT: v_cndmask_b32_e32 v16, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v17, v15, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v17, vcc, v17, v15 +; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX8-NEXT: v_or_b32_e32 v15, 0x400000, v15 +; GFX8-NEXT: v_cndmask_b32_e32 v15, v17, v15, vcc +; GFX8-NEXT: v_bfe_u32 v17, v32, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v17, vcc, v17, v32 +; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v32, v32 +; GFX8-NEXT: v_or_b32_e32 v31, 0x400000, v32 +; GFX8-NEXT: v_cndmask_b32_e32 v17, v17, v31, vcc +; GFX8-NEXT: v_bfe_u32 v31, v14, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v14 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX8-NEXT: v_or_b32_e32 v14, 0x400000, v14 +; GFX8-NEXT: v_cndmask_b32_e32 v14, v31, v14, vcc +; GFX8-NEXT: v_bfe_u32 v31, v30, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v30 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v30, v30 +; GFX8-NEXT: v_or_b32_e32 v30, 0x400000, v30 +; GFX8-NEXT: v_cndmask_b32_e32 v30, v31, v30, vcc +; GFX8-NEXT: v_bfe_u32 v31, v13, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v13 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX8-NEXT: v_or_b32_e32 v13, 0x400000, v13 +; GFX8-NEXT: v_cndmask_b32_e32 v13, v31, v13, vcc +; GFX8-NEXT: v_bfe_u32 v31, v29, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v29 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v29, v29 +; GFX8-NEXT: v_or_b32_e32 v29, 0x400000, v29 +; GFX8-NEXT: v_cndmask_b32_e32 v29, v31, v29, vcc +; GFX8-NEXT: v_bfe_u32 v31, v12, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v12 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX8-NEXT: v_or_b32_e32 v12, 0x400000, v12 +; GFX8-NEXT: v_cndmask_b32_e32 v12, v31, v12, vcc +; GFX8-NEXT: v_bfe_u32 v31, v28, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v28 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v28, v28 +; GFX8-NEXT: v_or_b32_e32 v28, 0x400000, v28 +; GFX8-NEXT: v_cndmask_b32_e32 v28, v31, v28, vcc +; GFX8-NEXT: v_bfe_u32 v31, v11, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v11 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v11 +; GFX8-NEXT: v_cndmask_b32_e32 v11, v31, v11, vcc +; GFX8-NEXT: 
v_bfe_u32 v31, v27, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v27 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v27, v27 +; GFX8-NEXT: v_or_b32_e32 v27, 0x400000, v27 +; GFX8-NEXT: v_cndmask_b32_e32 v27, v31, v27, vcc +; GFX8-NEXT: v_bfe_u32 v31, v10, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v10 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10 +; GFX8-NEXT: v_cndmask_b32_e32 v10, v31, v10, vcc +; GFX8-NEXT: v_bfe_u32 v31, v26, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v26 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v26, v26 +; GFX8-NEXT: v_or_b32_e32 v26, 0x400000, v26 +; GFX8-NEXT: v_cndmask_b32_e32 v26, v31, v26, vcc +; GFX8-NEXT: v_bfe_u32 v31, v9, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v9 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9 +; GFX8-NEXT: v_cndmask_b32_e32 v9, v31, v9, vcc +; GFX8-NEXT: v_bfe_u32 v31, v25, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v25 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v25, v25 +; GFX8-NEXT: v_or_b32_e32 v25, 0x400000, v25 +; GFX8-NEXT: v_cndmask_b32_e32 v25, v31, v25, vcc +; GFX8-NEXT: v_bfe_u32 v31, v8, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v8 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8 +; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v8 +; GFX8-NEXT: v_cndmask_b32_e32 v8, v31, v8, vcc +; GFX8-NEXT: v_bfe_u32 v31, v24, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v24 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX8-NEXT: v_or_b32_e32 v24, 0x400000, v24 +; GFX8-NEXT: v_cndmask_b32_e32 v24, v31, v24, vcc +; GFX8-NEXT: v_bfe_u32 v31, v7, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v7 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v7 +; GFX8-NEXT: v_cndmask_b32_e32 v7, v31, v7, vcc +; GFX8-NEXT: v_bfe_u32 v31, v23, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v23 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v23, v23 +; GFX8-NEXT: v_or_b32_e32 v23, 0x400000, v23 +; GFX8-NEXT: v_cndmask_b32_e32 v23, v31, v23, vcc +; GFX8-NEXT: v_bfe_u32 v31, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v6 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v31, v6, vcc +; GFX8-NEXT: v_bfe_u32 v31, v22, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v22 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v22, v22 +; GFX8-NEXT: v_or_b32_e32 v22, 0x400000, v22 +; GFX8-NEXT: v_cndmask_b32_e32 v22, v31, v22, vcc +; GFX8-NEXT: v_bfe_u32 v31, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v5 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v31, v5, vcc +; GFX8-NEXT: v_bfe_u32 v31, v21, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v21 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v21, v21 +; GFX8-NEXT: v_or_b32_e32 v21, 0x400000, v21 +; GFX8-NEXT: v_cndmask_b32_e32 v21, v31, v21, vcc +; GFX8-NEXT: v_bfe_u32 v31, v4, 16, 1 +; 
GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v4 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4 +; GFX8-NEXT: v_cndmask_b32_e32 v4, v31, v4, vcc +; GFX8-NEXT: v_bfe_u32 v31, v20, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v20 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v20, v20 +; GFX8-NEXT: v_or_b32_e32 v20, 0x400000, v20 +; GFX8-NEXT: v_cndmask_b32_e32 v20, v31, v20, vcc +; GFX8-NEXT: v_bfe_u32 v31, v3, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v3 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v31, v3, vcc +; GFX8-NEXT: v_bfe_u32 v31, v19, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v19 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v19, v19 +; GFX8-NEXT: v_or_b32_e32 v19, 0x400000, v19 +; GFX8-NEXT: v_cndmask_b32_e32 v19, v31, v19, vcc +; GFX8-NEXT: v_bfe_u32 v31, v2, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v2 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2 +; GFX8-NEXT: v_cndmask_b32_e32 v2, v31, v2, vcc +; GFX8-NEXT: v_bfe_u32 v31, v18, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v18 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v18, v18 +; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18 +; GFX8-NEXT: v_cndmask_b32_e32 v18, v31, v18, vcc +; GFX8-NEXT: v_bfe_u32 v31, v1, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_fma_f32 v33, v35, v34, v33 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX8-NEXT: v_or_b32_e32 v1, 0x400000, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v31, v1, vcc +; GFX8-NEXT: v_bfe_u32 v31, v33, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v33 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v33, v33 +; GFX8-NEXT: v_or_b32_e32 v32, 0x400000, v33 +; GFX8-NEXT: v_cndmask_b32_e32 v31, v31, v32, vcc +; GFX8-NEXT: v_bfe_u32 v32, v0, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v32, vcc, v32, v0 +; GFX8-NEXT: v_add_u32_e32 v32, vcc, s4, v32 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX8-NEXT: v_or_b32_e32 v0, 0x400000, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v32, v0, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8 +; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v9 +; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v10 +; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11 +; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v15 +; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v14 +; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13 +; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12 +; GFX8-NEXT: v_alignbit_b32 v0, v0, v31, 16 +; GFX8-NEXT: v_alignbit_b32 v1, v1, v18, 16 +; GFX8-NEXT: v_alignbit_b32 v2, v2, v19, 16 +; GFX8-NEXT: v_alignbit_b32 v3, v3, v20, 16 +; GFX8-NEXT: v_alignbit_b32 v4, v4, v21, 16 +; GFX8-NEXT: v_alignbit_b32 v5, v5, v22, 16 +; GFX8-NEXT: v_alignbit_b32 v6, v6, v23, 16 +; GFX8-NEXT: v_alignbit_b32 v7, v7, v24, 16 +; GFX8-NEXT: v_alignbit_b32 v8, v8, v25, 16 +; GFX8-NEXT: 
v_alignbit_b32 v9, v9, v26, 16 +; GFX8-NEXT: v_alignbit_b32 v10, v10, v27, 16 +; GFX8-NEXT: v_alignbit_b32 v11, v11, v28, 16 +; GFX8-NEXT: v_alignbit_b32 v12, v12, v29, 16 +; GFX8-NEXT: v_alignbit_b32 v13, v13, v30, 16 +; GFX8-NEXT: v_alignbit_b32 v14, v14, v17, 16 +; GFX8-NEXT: v_alignbit_b32 v15, v15, v16, 16 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX900-LABEL: v_fma_v32bf16: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 +; GFX900-NEXT: v_lshlrev_b32_e32 v31, 16, v15 +; GFX900-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: s_waitcnt vmcnt(1) +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v32 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v15, v15, v33, v32 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60 +; GFX900-NEXT: v_fma_f32 v31, v31, v35, v34 +; GFX900-NEXT: v_lshlrev_b32_e32 v32, 16, v30 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v14 +; GFX900-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX900-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v31, v31 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v14, v14, v30, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:56 +; GFX900-NEXT: v_fma_f32 v32, v34, v32, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v30, 16, v29 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v13 +; GFX900-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX900-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v13, v13, v29, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52 +; GFX900-NEXT: v_fma_f32 v30, v34, v30, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v29, 16, v28 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v12 +; GFX900-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX900-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v12, v12, v28, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48 +; GFX900-NEXT: v_fma_f32 v29, v34, v29, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v28, 16, v27 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v11 +; GFX900-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v11, v11, v27, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44 +; GFX900-NEXT: v_fma_f32 v28, v34, v28, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v27, 16, v26 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v10 +; GFX900-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v10, v10, v26, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 
offset:40 +; GFX900-NEXT: v_fma_f32 v27, v34, v27, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v25 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v9 +; GFX900-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v26, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v9, v9, v25, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36 +; GFX900-NEXT: v_fma_f32 v26, v35, v34, v26 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v24 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v8 +; GFX900-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v8, v8, v24, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32 +; GFX900-NEXT: v_fma_f32 v25, v35, v34, v25 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v23 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v7 +; GFX900-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v24, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v7, v7, v23, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28 +; GFX900-NEXT: v_fma_f32 v24, v35, v34, v24 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v6 +; GFX900-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v6, v6, v22, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:24 +; GFX900-NEXT: v_fma_f32 v23, v35, v34, v23 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v5 +; GFX900-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v5, v5, v21, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20 +; GFX900-NEXT: v_fma_f32 v22, v35, v34, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v4 +; GFX900-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v4, v4, v20, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16 +; GFX900-NEXT: v_fma_f32 v21, v35, v34, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v3 +; GFX900-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v3, v3, v19, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; GFX900-NEXT: v_fma_f32 v20, v35, v34, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v2 +; GFX900-NEXT: v_and_b32_e32 v18, 0xffff0000, 
v18 +; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v2, v2, v18, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8 +; GFX900-NEXT: v_fma_f32 v19, v35, v34, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v17 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v1 +; GFX900-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v1, v1, v17, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4 +; GFX900-NEXT: v_fma_f32 v18, v35, v34, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v16 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v0 +; GFX900-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v17, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v0, v0, v16, v33 +; GFX900-NEXT: v_bfe_u32 v16, v31, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v31, s4 +; GFX900-NEXT: v_or_b32_e32 v31, 0x400000, v31 +; GFX900-NEXT: v_cndmask_b32_e32 v16, v16, v31, vcc +; GFX900-NEXT: v_bfe_u32 v31, v15, 16, 1 +; GFX900-NEXT: v_add3_u32 v31, v31, v15, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX900-NEXT: v_or_b32_e32 v15, 0x400000, v15 +; GFX900-NEXT: v_cndmask_b32_e32 v15, v31, v15, vcc +; GFX900-NEXT: v_bfe_u32 v31, v32, 16, 1 +; GFX900-NEXT: v_add3_u32 v31, v31, v32, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v32, v32 +; GFX900-NEXT: v_or_b32_e32 v32, 0x400000, v32 +; GFX900-NEXT: v_cndmask_b32_e32 v31, v31, v32, vcc +; GFX900-NEXT: v_bfe_u32 v32, v14, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v14, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX900-NEXT: v_or_b32_e32 v14, 0x400000, v14 +; GFX900-NEXT: v_cndmask_b32_e32 v14, v32, v14, vcc +; GFX900-NEXT: v_bfe_u32 v32, v30, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v30, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v30, v30 +; GFX900-NEXT: v_or_b32_e32 v30, 0x400000, v30 +; GFX900-NEXT: v_cndmask_b32_e32 v30, v32, v30, vcc +; GFX900-NEXT: v_bfe_u32 v32, v13, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v13, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX900-NEXT: v_or_b32_e32 v13, 0x400000, v13 +; GFX900-NEXT: v_cndmask_b32_e32 v13, v32, v13, vcc +; GFX900-NEXT: v_bfe_u32 v32, v29, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v29, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v29, v29 +; GFX900-NEXT: v_or_b32_e32 v29, 0x400000, v29 +; GFX900-NEXT: v_cndmask_b32_e32 v29, v32, v29, vcc +; GFX900-NEXT: v_bfe_u32 v32, v12, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v12, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX900-NEXT: v_or_b32_e32 v12, 0x400000, v12 +; GFX900-NEXT: v_cndmask_b32_e32 v12, v32, v12, vcc +; GFX900-NEXT: v_bfe_u32 v32, v28, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v28, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v28, v28 +; GFX900-NEXT: v_or_b32_e32 v28, 0x400000, v28 +; GFX900-NEXT: v_cndmask_b32_e32 v28, v32, v28, vcc +; GFX900-NEXT: v_bfe_u32 v32, v11, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v11, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v11 +; GFX900-NEXT: v_cndmask_b32_e32 v11, v32, v11, vcc +; GFX900-NEXT: v_bfe_u32 v32, v27, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, 
v32, v27, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v27, v27 +; GFX900-NEXT: v_or_b32_e32 v27, 0x400000, v27 +; GFX900-NEXT: v_cndmask_b32_e32 v27, v32, v27, vcc +; GFX900-NEXT: v_bfe_u32 v32, v10, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v10, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v10 +; GFX900-NEXT: v_cndmask_b32_e32 v10, v32, v10, vcc +; GFX900-NEXT: v_bfe_u32 v32, v26, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v26, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v26, v26 +; GFX900-NEXT: v_or_b32_e32 v26, 0x400000, v26 +; GFX900-NEXT: v_cndmask_b32_e32 v26, v32, v26, vcc +; GFX900-NEXT: v_bfe_u32 v32, v9, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v9, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX900-NEXT: v_or_b32_e32 v9, 0x400000, v9 +; GFX900-NEXT: v_cndmask_b32_e32 v9, v32, v9, vcc +; GFX900-NEXT: v_bfe_u32 v32, v25, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v25, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v25, v25 +; GFX900-NEXT: v_or_b32_e32 v25, 0x400000, v25 +; GFX900-NEXT: v_cndmask_b32_e32 v25, v32, v25, vcc +; GFX900-NEXT: v_bfe_u32 v32, v8, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v8, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v8, v8 +; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v8 +; GFX900-NEXT: v_cndmask_b32_e32 v8, v32, v8, vcc +; GFX900-NEXT: v_bfe_u32 v32, v24, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v24, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX900-NEXT: v_or_b32_e32 v24, 0x400000, v24 +; GFX900-NEXT: v_cndmask_b32_e32 v24, v32, v24, vcc +; GFX900-NEXT: v_bfe_u32 v32, v7, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v7, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v7 +; GFX900-NEXT: v_cndmask_b32_e32 v7, v32, v7, vcc +; GFX900-NEXT: v_bfe_u32 v32, v23, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v23, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v23, v23 +; GFX900-NEXT: v_or_b32_e32 v23, 0x400000, v23 +; GFX900-NEXT: v_cndmask_b32_e32 v23, v32, v23, vcc +; GFX900-NEXT: v_bfe_u32 v32, v6, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v6, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v6 +; GFX900-NEXT: v_cndmask_b32_e32 v6, v32, v6, vcc +; GFX900-NEXT: v_bfe_u32 v32, v22, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v22, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v22, v22 +; GFX900-NEXT: v_or_b32_e32 v22, 0x400000, v22 +; GFX900-NEXT: v_cndmask_b32_e32 v22, v32, v22, vcc +; GFX900-NEXT: v_bfe_u32 v32, v5, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v5, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v5 +; GFX900-NEXT: v_cndmask_b32_e32 v5, v32, v5, vcc +; GFX900-NEXT: v_bfe_u32 v32, v21, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v21, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v21, v21 +; GFX900-NEXT: v_or_b32_e32 v21, 0x400000, v21 +; GFX900-NEXT: v_cndmask_b32_e32 v21, v32, v21, vcc +; GFX900-NEXT: v_bfe_u32 v32, v4, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v4, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v4 +; GFX900-NEXT: v_cndmask_b32_e32 v4, v32, v4, vcc +; GFX900-NEXT: v_bfe_u32 v32, v20, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v20, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v20, v20 +; GFX900-NEXT: v_or_b32_e32 v20, 0x400000, v20 +; GFX900-NEXT: v_cndmask_b32_e32 v20, v32, v20, vcc +; GFX900-NEXT: v_bfe_u32 v32, v3, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v3, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX900-NEXT: v_or_b32_e32 v3, 
0x400000, v3 +; GFX900-NEXT: v_cndmask_b32_e32 v3, v32, v3, vcc +; GFX900-NEXT: v_bfe_u32 v32, v19, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v19, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v19, v19 +; GFX900-NEXT: v_or_b32_e32 v19, 0x400000, v19 +; GFX900-NEXT: v_cndmask_b32_e32 v19, v32, v19, vcc +; GFX900-NEXT: v_bfe_u32 v32, v2, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v2, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX900-NEXT: v_or_b32_e32 v2, 0x400000, v2 +; GFX900-NEXT: v_cndmask_b32_e32 v2, v32, v2, vcc +; GFX900-NEXT: v_bfe_u32 v32, v18, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v18, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v18, v18 +; GFX900-NEXT: v_or_b32_e32 v18, 0x400000, v18 +; GFX900-NEXT: v_cndmask_b32_e32 v18, v32, v18, vcc +; GFX900-NEXT: v_bfe_u32 v32, v1, 16, 1 +; GFX900-NEXT: v_fma_f32 v17, v35, v34, v17 +; GFX900-NEXT: v_add3_u32 v32, v32, v1, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX900-NEXT: v_or_b32_e32 v1, 0x400000, v1 +; GFX900-NEXT: v_cndmask_b32_e32 v1, v32, v1, vcc +; GFX900-NEXT: v_bfe_u32 v32, v17, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v17, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v17 +; GFX900-NEXT: v_cndmask_b32_e32 v17, v32, v17, vcc +; GFX900-NEXT: v_bfe_u32 v32, v0, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v0, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX900-NEXT: v_or_b32_e32 v0, 0x400000, v0 +; GFX900-NEXT: v_cndmask_b32_e32 v0, v32, v0, vcc +; GFX900-NEXT: s_mov_b32 s4, 0x7060302 +; GFX900-NEXT: v_perm_b32 v0, v0, v17, s4 +; GFX900-NEXT: v_perm_b32 v1, v1, v18, s4 +; GFX900-NEXT: v_perm_b32 v2, v2, v19, s4 +; GFX900-NEXT: v_perm_b32 v3, v3, v20, s4 +; GFX900-NEXT: v_perm_b32 v4, v4, v21, s4 +; GFX900-NEXT: v_perm_b32 v5, v5, v22, s4 +; GFX900-NEXT: v_perm_b32 v6, v6, v23, s4 +; GFX900-NEXT: v_perm_b32 v7, v7, v24, s4 +; GFX900-NEXT: v_perm_b32 v8, v8, v25, s4 +; GFX900-NEXT: v_perm_b32 v9, v9, v26, s4 +; GFX900-NEXT: v_perm_b32 v10, v10, v27, s4 +; GFX900-NEXT: v_perm_b32 v11, v11, v28, s4 +; GFX900-NEXT: v_perm_b32 v12, v12, v29, s4 +; GFX900-NEXT: v_perm_b32 v13, v13, v30, s4 +; GFX900-NEXT: v_perm_b32 v14, v14, v31, s4 +; GFX900-NEXT: v_perm_b32 v15, v15, v16, s4 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fma_v32bf16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: scratch_load_dword v35, off, s32 offset:64 +; GFX950-NEXT: scratch_load_dword v36, off, s32 +; GFX950-NEXT: scratch_load_dword v38, off, s32 offset:60 +; GFX950-NEXT: scratch_load_dword v39, off, s32 offset:56 +; GFX950-NEXT: scratch_load_dword v48, off, s32 offset:52 +; GFX950-NEXT: scratch_load_dword v49, off, s32 offset:48 +; GFX950-NEXT: scratch_load_dword v50, off, s32 offset:44 +; GFX950-NEXT: scratch_load_dword v51, off, s32 offset:40 +; GFX950-NEXT: scratch_load_dword v52, off, s32 offset:36 +; GFX950-NEXT: scratch_load_dword v53, off, s32 offset:32 +; GFX950-NEXT: scratch_load_dword v54, off, s32 offset:28 +; GFX950-NEXT: scratch_load_dword v31, off, s32 offset:4 +; GFX950-NEXT: scratch_load_dword v32, off, s32 offset:8 +; GFX950-NEXT: scratch_load_dword v33, off, s32 offset:12 +; GFX950-NEXT: scratch_load_dword v34, off, s32 offset:16 +; GFX950-NEXT: scratch_load_dword v37, off, s32 offset:20 +; GFX950-NEXT: scratch_load_dword v55, off, s32 offset:24 +; GFX950-NEXT: v_accvgpr_write_b32 a3, v43 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a5, v45 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a6, v46 ; Reload Reuse +; 
GFX950-NEXT: v_accvgpr_write_b32 a8, v56 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a11, v59 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a13, v61 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a14, v62 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a15, v63 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v43, 0xffff0000, v14 +; GFX950-NEXT: v_lshlrev_b32_e32 v45, 16, v14 +; GFX950-NEXT: v_and_b32_e32 v46, 0xffff0000, v29 +; GFX950-NEXT: v_lshlrev_b32_e32 v56, 16, v29 +; GFX950-NEXT: v_and_b32_e32 v59, 0xffff0000, v12 +; GFX950-NEXT: v_lshlrev_b32_e32 v61, 16, v12 +; GFX950-NEXT: v_and_b32_e32 v62, 0xffff0000, v27 +; GFX950-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; GFX950-NEXT: v_accvgpr_write_b32 a2, v42 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a4, v44 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a7, v47 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a9, v57 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v42, 0xffff0000, v30 +; GFX950-NEXT: v_lshlrev_b32_e32 v44, 16, v30 +; GFX950-NEXT: v_and_b32_e32 v47, 0xffff0000, v13 +; GFX950-NEXT: v_lshlrev_b32_e32 v57, 16, v13 +; GFX950-NEXT: v_accvgpr_write_b32 a0, v40 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a1, v41 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v40, 0xffff0000, v15 +; GFX950-NEXT: v_lshlrev_b32_e32 v41, 16, v15 +; GFX950-NEXT: v_accvgpr_write_b32 a10, v58 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a12, v60 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v58, 0xffff0000, v28 +; GFX950-NEXT: v_lshlrev_b32_e32 v60, 16, v28 +; GFX950-NEXT: s_waitcnt vmcnt(16) +; GFX950-NEXT: v_and_b32_e32 v15, 0xffff0000, v35 +; GFX950-NEXT: s_waitcnt vmcnt(15) +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v36 +; GFX950-NEXT: v_lshlrev_b32_e32 v63, 16, v36 +; GFX950-NEXT: s_waitcnt vmcnt(14) +; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v38 +; GFX950-NEXT: v_lshlrev_b32_e32 v29, 16, v38 +; GFX950-NEXT: s_waitcnt vmcnt(11) +; GFX950-NEXT: v_and_b32_e32 v36, 0xffff0000, v49 +; GFX950-NEXT: v_and_b32_e32 v38, 0xffff0000, v11 +; GFX950-NEXT: v_fmac_f32_e32 v36, v38, v62 +; GFX950-NEXT: v_lshlrev_b32_e32 v38, 16, v49 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v39 +; GFX950-NEXT: v_lshlrev_b32_e32 v30, 16, v39 +; GFX950-NEXT: v_fmac_f32_e32 v38, v11, v27 +; GFX950-NEXT: s_waitcnt vmcnt(10) +; GFX950-NEXT: v_and_b32_e32 v11, 0xffff0000, v50 +; GFX950-NEXT: v_and_b32_e32 v27, 0xffff0000, v26 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v10 +; GFX950-NEXT: v_fmac_f32_e32 v11, v39, v27 +; GFX950-NEXT: v_lshlrev_b32_e32 v27, 16, v50 +; GFX950-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX950-NEXT: v_fmac_f32_e32 v27, v10, v26 +; GFX950-NEXT: s_waitcnt vmcnt(9) +; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v51 +; GFX950-NEXT: v_and_b32_e32 v26, 0xffff0000, v25 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v9 +; GFX950-NEXT: v_fmac_f32_e32 v10, v39, v26 +; GFX950-NEXT: v_lshlrev_b32_e32 v26, 16, v51 +; GFX950-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX950-NEXT: v_fmac_f32_e32 v26, v9, v25 +; GFX950-NEXT: s_waitcnt vmcnt(8) +; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v52 +; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v24 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v8 +; GFX950-NEXT: v_fmac_f32_e32 v9, v39, v25 +; GFX950-NEXT: v_lshlrev_b32_e32 v25, 16, v52 +; GFX950-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; GFX950-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX950-NEXT: 
v_fmac_f32_e32 v25, v8, v24 +; GFX950-NEXT: s_waitcnt vmcnt(7) +; GFX950-NEXT: v_and_b32_e32 v8, 0xffff0000, v53 +; GFX950-NEXT: v_and_b32_e32 v24, 0xffff0000, v23 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v7 +; GFX950-NEXT: v_fmac_f32_e32 v8, v39, v24 +; GFX950-NEXT: v_lshlrev_b32_e32 v24, 16, v53 +; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX950-NEXT: v_fmac_f32_e32 v24, v7, v23 +; GFX950-NEXT: s_waitcnt vmcnt(6) +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v54 +; GFX950-NEXT: v_and_b32_e32 v23, 0xffff0000, v22 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v6 +; GFX950-NEXT: v_fmac_f32_e32 v7, v39, v23 +; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v54 +; GFX950-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX950-NEXT: v_fmac_f32_e32 v23, v6, v22 +; GFX950-NEXT: s_waitcnt vmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v55 +; GFX950-NEXT: v_and_b32_e32 v22, 0xffff0000, v21 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v5 +; GFX950-NEXT: v_fmac_f32_e32 v6, v39, v22 +; GFX950-NEXT: v_lshlrev_b32_e32 v22, 16, v55 +; GFX950-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX950-NEXT: v_fmac_f32_e32 v22, v5, v21 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v37 +; GFX950-NEXT: v_and_b32_e32 v21, 0xffff0000, v20 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v4 +; GFX950-NEXT: v_fmac_f32_e32 v5, v39, v21 +; GFX950-NEXT: v_lshlrev_b32_e32 v21, 16, v37 +; GFX950-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX950-NEXT: v_fmac_f32_e32 v21, v4, v20 +; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v34 +; GFX950-NEXT: v_and_b32_e32 v20, 0xffff0000, v19 +; GFX950-NEXT: v_and_b32_e32 v37, 0xffff0000, v3 +; GFX950-NEXT: v_fmac_f32_e32 v4, v37, v20 +; GFX950-NEXT: v_lshlrev_b32_e32 v20, 16, v34 +; GFX950-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX950-NEXT: v_fmac_f32_e32 v20, v3, v19 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v33 +; GFX950-NEXT: v_and_b32_e32 v19, 0xffff0000, v18 +; GFX950-NEXT: v_and_b32_e32 v34, 0xffff0000, v2 +; GFX950-NEXT: v_fmac_f32_e32 v3, v34, v19 +; GFX950-NEXT: v_lshlrev_b32_e32 v19, 16, v33 +; GFX950-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX950-NEXT: v_fmac_f32_e32 v19, v2, v18 +; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v32 +; GFX950-NEXT: v_and_b32_e32 v18, 0xffff0000, v17 +; GFX950-NEXT: v_and_b32_e32 v33, 0xffff0000, v1 +; GFX950-NEXT: v_fmac_f32_e32 v2, v33, v18 +; GFX950-NEXT: v_lshlrev_b32_e32 v18, 16, v32 +; GFX950-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX950-NEXT: v_fmac_f32_e32 v18, v1, v17 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v31 +; GFX950-NEXT: v_and_b32_e32 v17, 0xffff0000, v16 +; GFX950-NEXT: v_and_b32_e32 v32, 0xffff0000, v0 +; GFX950-NEXT: v_lshlrev_b32_e32 v28, 16, v35 +; GFX950-NEXT: v_fmac_f32_e32 v15, v40, v12 +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v48 +; GFX950-NEXT: v_lshlrev_b32_e32 v35, 16, v48 +; GFX950-NEXT: v_fmac_f32_e32 v1, v32, v17 +; GFX950-NEXT: v_lshlrev_b32_e32 v17, 16, v31 +; GFX950-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX950-NEXT: v_fmac_f32_e32 v28, v41, v63 +; GFX950-NEXT: v_fmac_f32_e32 v14, v43, v42 +; GFX950-NEXT: v_fmac_f32_e32 v29, v45, v44 +; GFX950-NEXT: v_fmac_f32_e32 v13, v47, v46 +; GFX950-NEXT: v_fmac_f32_e32 v30, v57, v56 +; GFX950-NEXT: 
v_fmac_f32_e32 v12, v59, v58 +; GFX950-NEXT: v_fmac_f32_e32 v35, v61, v60 +; GFX950-NEXT: v_fmac_f32_e32 v17, v0, v16 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v17, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v18, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v19, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v20, v4 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v21, v5 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v5, v22, v6 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v23, v7 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v7, v24, v8 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v8, v25, v9 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v9, v26, v10 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v10, v27, v11 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v11, v38, v36 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v12, v35, v12 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v13, v30, v13 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v14, v29, v14 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v15, v28, v15 +; GFX950-NEXT: v_accvgpr_read_b32 v63, a15 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v62, a14 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v61, a13 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v60, a12 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v59, a11 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v58, a10 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v57, a9 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v56, a8 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v47, a7 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v46, a6 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v45, a5 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v44, a4 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v43, a3 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v42, a2 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v41, a1 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v40, a0 ; Reload Reuse +; GFX950-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_fma_v32bf16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_clause 0x8 +; GFX10-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s32 +; GFX10-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:60 +; GFX10-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:56 +; GFX10-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:52 +; GFX10-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48 +; GFX10-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:44 +; GFX10-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:40 +; GFX10-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:36 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v15 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v15 +; GFX10-NEXT: v_and_b32_e32 v52, 0xffff0000, v10 +; GFX10-NEXT: s_waitcnt vmcnt(8) +; GFX10-NEXT: v_lshlrev_b32_e32 v31, 16, v32 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v33 +; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v32 +; GFX10-NEXT: v_and_b32_e32 v32, 0xffff0000, v33 +; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32 +; GFX10-NEXT: v_fmac_f32_e32 v31, v49, v50 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v30 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v14 +; GFX10-NEXT: v_fmac_f32_e32 v15, v51, v32 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v34 +; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v14 +; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v34 +; GFX10-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:28 +; GFX10-NEXT: v_fmac_f32_e32 v32, v50, v49 +; GFX10-NEXT: 
v_lshlrev_b32_e32 v49, 16, v29 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v13 +; GFX10-NEXT: v_fmac_f32_e32 v14, v51, v30 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v30, 16, v35 +; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v13 +; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v35 +; GFX10-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:24 +; GFX10-NEXT: v_fmac_f32_e32 v30, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v28 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v12 +; GFX10-NEXT: v_fmac_f32_e32 v13, v51, v29 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v29, 16, v36 +; GFX10-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v12 +; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v36 +; GFX10-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:20 +; GFX10-NEXT: v_fmac_f32_e32 v29, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v27 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v11 +; GFX10-NEXT: v_fmac_f32_e32 v12, v51, v28 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v28, 16, v37 +; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v11 +; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v37 +; GFX10-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:16 +; GFX10-NEXT: v_fmac_f32_e32 v28, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v26 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v10 +; GFX10-NEXT: v_fmac_f32_e32 v11, v51, v27 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v27, 16, v38 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v26 +; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v38 +; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v25 +; GFX10-NEXT: s_waitcnt vmcnt(6) +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v39 +; GFX10-NEXT: v_fmac_f32_e32 v27, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v9 +; GFX10-NEXT: v_fmac_f32_e32 v10, v52, v51 +; GFX10-NEXT: s_clause 0x1 +; GFX10-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; GFX10-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:8 +; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v31 +; GFX10-NEXT: v_fmac_f32_e32 v26, v49, v38 +; GFX10-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:4 +; GFX10-NEXT: v_and_b32_e32 v49, 0xffff0000, v9 +; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v39 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v24 +; GFX10-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v31, v31 +; GFX10-NEXT: v_fmac_f32_e32 v9, v49, v25 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v8 +; GFX10-NEXT: s_waitcnt vmcnt(8) +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v48 +; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX10-NEXT: v_and_b32_e32 v48, 0xffff0000, v48 +; GFX10-NEXT: v_fmac_f32_e32 v25, v49, v39 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v7 +; GFX10-NEXT: v_fmac_f32_e32 v48, v8, v24 +; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v22 +; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v33 +; GFX10-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX10-NEXT: v_fmac_f32_e32 v8, v49, v39 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v6 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX10-NEXT: 
v_fmac_f32_e32 v33, v7, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v21 +; GFX10-NEXT: s_waitcnt vmcnt(6) +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v34 +; GFX10-NEXT: v_and_b32_e32 v34, 0xffff0000, v34 +; GFX10-NEXT: v_lshlrev_b32_e32 v23, 16, v5 +; GFX10-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX10-NEXT: v_fmac_f32_e32 v7, v39, v24 +; GFX10-NEXT: v_fmac_f32_e32 v34, v6, v22 +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v20 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v4 +; GFX10-NEXT: s_waitcnt vmcnt(5) +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v35 +; GFX10-NEXT: v_and_b32_e32 v35, 0xffff0000, v35 +; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v19 +; GFX10-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX10-NEXT: v_fmac_f32_e32 v6, v23, v49 +; GFX10-NEXT: v_fmac_f32_e32 v35, v5, v21 +; GFX10-NEXT: v_lshlrev_b32_e32 v23, 16, v3 +; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX10-NEXT: s_waitcnt vmcnt(4) +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v36 +; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX10-NEXT: v_and_b32_e32 v36, 0xffff0000, v36 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v18 +; GFX10-NEXT: v_lshlrev_b32_e32 v21, 16, v2 +; GFX10-NEXT: v_fmac_f32_e32 v5, v39, v24 +; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX10-NEXT: v_fmac_f32_e32 v36, v4, v20 +; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v16 +; GFX10-NEXT: s_waitcnt vmcnt(3) +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v37 +; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v17 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX10-NEXT: v_fmac_f32_e32 v39, v23, v22 +; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v37 +; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v0 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX10-NEXT: v_fmac_f32_e32 v23, v3, v19 +; GFX10-NEXT: s_waitcnt vmcnt(2) +; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v50 +; GFX10-NEXT: s_waitcnt vmcnt(1) +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v51 +; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v51 +; GFX10-NEXT: v_and_b32_e32 v50, 0xffff0000, v50 +; GFX10-NEXT: v_cmp_u_f32_e64 s5, v33, v33 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v51, 16, v38 +; GFX10-NEXT: v_and_b32_e32 v38, 0xffff0000, v38 +; GFX10-NEXT: v_fmac_f32_e32 v37, v21, v49 +; GFX10-NEXT: v_fmac_f32_e32 v50, v2, v18 +; GFX10-NEXT: v_fmac_f32_e32 v19, v1, v17 +; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v48 +; GFX10-NEXT: v_fmac_f32_e32 v38, v0, v16 +; GFX10-NEXT: v_bfe_u32 v0, v48, 16, 1 +; GFX10-NEXT: v_bfe_u32 v16, v33, 16, 1 +; GFX10-NEXT: v_bfe_u32 v2, v8, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v33 +; GFX10-NEXT: v_bfe_u32 v18, v7, 16, 1 +; GFX10-NEXT: v_bfe_u32 v21, v34, 16, 1 +; GFX10-NEXT: v_add3_u32 v0, v0, v48, 0x7fff +; GFX10-NEXT: v_bfe_u32 v48, v35, 16, 1 +; GFX10-NEXT: v_add3_u32 v16, v16, v33, 0x7fff +; GFX10-NEXT: v_bfe_u32 v33, v5, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v3, v4, v24 +; GFX10-NEXT: v_fmac_f32_e32 v51, v22, v20 +; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v8 +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v7 +; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v34 +; GFX10-NEXT: v_bfe_u32 v24, v6, 16, 1 +; GFX10-NEXT: v_add3_u32 v2, v2, v8, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s4, v8, v8 +; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v35 +; GFX10-NEXT: v_add3_u32 v18, v18, v7, 0x7fff +; 
GFX10-NEXT: v_cmp_u_f32_e64 s6, v7, v7 +; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v5 +; GFX10-NEXT: v_add3_u32 v21, v21, v34, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s7, v34, v34 +; GFX10-NEXT: v_bfe_u32 v34, v39, 16, 1 +; GFX10-NEXT: v_add3_u32 v48, v48, v35, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s9, v35, v35 +; GFX10-NEXT: v_bfe_u32 v35, v23, 16, 1 +; GFX10-NEXT: v_add3_u32 v33, v33, v5, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s10, v5, v5 +; GFX10-NEXT: v_bfe_u32 v5, v37, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v49, 0x400000, v6 +; GFX10-NEXT: v_add3_u32 v24, v24, v6, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s8, v6, v6 +; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v39 +; GFX10-NEXT: v_add3_u32 v34, v34, v39, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s11, v39, v39 +; GFX10-NEXT: v_or_b32_e32 v39, 0x400000, v23 +; GFX10-NEXT: v_add3_u32 v35, v35, v23, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s12, v23, v23 +; GFX10-NEXT: v_or_b32_e32 v23, 0x400000, v37 +; GFX10-NEXT: v_add3_u32 v5, v5, v37, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s13, v37, v37 +; GFX10-NEXT: v_bfe_u32 v37, v31, 16, 1 +; GFX10-NEXT: v_cndmask_b32_e64 v53, v2, v4, s4 +; GFX10-NEXT: v_bfe_u32 v4, v3, 16, 1 +; GFX10-NEXT: v_cndmask_b32_e64 v16, v16, v17, s5 +; GFX10-NEXT: v_cndmask_b32_e64 v17, v18, v20, s6 +; GFX10-NEXT: v_add3_u32 v37, v37, v31, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v18, v21, v22, s7 +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v3 +; GFX10-NEXT: v_bfe_u32 v22, v19, 16, 1 +; GFX10-NEXT: v_add3_u32 v4, v4, v3, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v31, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v15, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v15 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v15, v15 +; GFX10-NEXT: v_cndmask_b32_e64 v21, v24, v49, s8 +; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v19 +; GFX10-NEXT: v_add3_u32 v37, v37, v15, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v7, v33, v7, s10 +; GFX10-NEXT: v_bfe_u32 v33, v51, 16, 1 +; GFX10-NEXT: v_add3_u32 v22, v22, v19, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v6, v34, v6, s11 +; GFX10-NEXT: v_cndmask_b32_e64 v15, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v32, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v32 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v32, v32 +; GFX10-NEXT: v_or_b32_e32 v34, 0x400000, v51 +; GFX10-NEXT: v_cndmask_b32_e64 v35, v35, v39, s12 +; GFX10-NEXT: v_add3_u32 v37, v37, v32, 0x7fff +; GFX10-NEXT: v_bfe_u32 v39, v38, 16, 1 +; GFX10-NEXT: v_add3_u32 v33, v33, v51, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v23, s13 +; GFX10-NEXT: v_or_b32_e32 v23, 0x400000, v38 +; GFX10-NEXT: v_cndmask_b32_e64 v32, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v14, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v14 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v14, v14 +; GFX10-NEXT: v_add3_u32 v39, v39, v38, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v50 +; GFX10-NEXT: v_add3_u32 v37, v37, v14, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v8, v48, v8, s9 +; GFX10-NEXT: v_perm_b32 v15, v15, v31, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e64 v14, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v30, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v30 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v30, v30 +; GFX10-NEXT: v_perm_b32 v14, v14, v32, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v30, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v30, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v13, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v13 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v13, v13 +; GFX10-NEXT: v_add3_u32 v37, v37, v13, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v13, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 
v37, v29, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v29 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v29, v29 +; GFX10-NEXT: v_perm_b32 v13, v13, v30, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v29, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v29, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v12, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v12 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v12, v12 +; GFX10-NEXT: v_add3_u32 v37, v37, v12, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v12, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v28, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v28 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v28, v28 +; GFX10-NEXT: v_perm_b32 v12, v12, v29, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v28, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v28, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v11, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v11 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v11, v11 +; GFX10-NEXT: v_add3_u32 v37, v37, v11, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v11, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v27, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v27 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v27, v27 +; GFX10-NEXT: v_perm_b32 v11, v11, v28, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v27, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v27, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v10, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v10 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v10, v10 +; GFX10-NEXT: v_add3_u32 v37, v37, v10, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v10, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v26, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v26 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v26, v26 +; GFX10-NEXT: v_perm_b32 v10, v10, v27, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v26, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v26, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v9, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v9 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v9, v9 +; GFX10-NEXT: v_add3_u32 v37, v37, v9, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v9, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v25, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v25 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v25, v25 +; GFX10-NEXT: v_perm_b32 v9, v9, v26, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v25, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v25, v37, v52, s14 +; GFX10-NEXT: v_cndmask_b32_e32 v52, v0, v1, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_bfe_u32 v1, v50, 16, 1 +; GFX10-NEXT: v_bfe_u32 v37, v36, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v0, 0x400000, v36 +; GFX10-NEXT: v_cndmask_b32_e32 v3, v4, v20, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX10-NEXT: v_add3_u32 v1, v1, v50, 0x7fff +; GFX10-NEXT: v_add3_u32 v37, v37, v36, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v4, v22, v24, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51 +; GFX10-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38 +; GFX10-NEXT: v_cndmask_b32_e32 v20, v39, v23, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 +; GFX10-NEXT: v_perm_b32 v1, v4, v3, 0x7060302 +; GFX10-NEXT: v_perm_b32 v3, v35, v6, 0x7060302 +; GFX10-NEXT: v_perm_b32 v6, v18, v17, 0x7060302 +; GFX10-NEXT: v_perm_b32 v2, v2, v5, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v22, v37, v0, vcc_lo +; GFX10-NEXT: v_perm_b32 v0, v20, v19, 0x7060302 +; GFX10-NEXT: v_perm_b32 v5, v8, v21, 0x7060302 +; GFX10-NEXT: v_perm_b32 v8, v52, v25, 0x7060302 +; 
GFX10-NEXT: v_perm_b32 v4, v22, v7, 0x7060302 +; GFX10-NEXT: v_perm_b32 v7, v16, v53, 0x7060302 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11TRUE16-LABEL: v_fma_v32bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: s_clause 0x10 +; GFX11TRUE16-NEXT: scratch_load_b32 v31, off, s32 offset:64 +; GFX11TRUE16-NEXT: scratch_load_b32 v32, off, s32 +; GFX11TRUE16-NEXT: scratch_load_b32 v33, off, s32 offset:60 +; GFX11TRUE16-NEXT: scratch_load_b32 v34, off, s32 offset:56 +; GFX11TRUE16-NEXT: scratch_load_b32 v35, off, s32 offset:52 +; GFX11TRUE16-NEXT: scratch_load_b32 v36, off, s32 offset:48 +; GFX11TRUE16-NEXT: scratch_load_b32 v37, off, s32 offset:44 +; GFX11TRUE16-NEXT: scratch_load_b32 v38, off, s32 offset:40 +; GFX11TRUE16-NEXT: scratch_load_b32 v39, off, s32 offset:36 +; GFX11TRUE16-NEXT: scratch_load_b32 v48, off, s32 offset:32 +; GFX11TRUE16-NEXT: scratch_load_b32 v49, off, s32 offset:28 +; GFX11TRUE16-NEXT: scratch_load_b32 v50, off, s32 offset:24 +; GFX11TRUE16-NEXT: scratch_load_b32 v51, off, s32 offset:20 +; GFX11TRUE16-NEXT: scratch_load_b32 v52, off, s32 offset:16 +; GFX11TRUE16-NEXT: scratch_load_b32 v53, off, s32 offset:12 +; GFX11TRUE16-NEXT: scratch_load_b32 v54, off, s32 offset:8 +; GFX11TRUE16-NEXT: scratch_load_b32 v55, off, s32 offset:4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v99, 0xffff0000, v21 +; GFX11TRUE16-NEXT: v_and_b32_e32 v100, 0xffff0000, v5 +; GFX11TRUE16-NEXT: v_and_b32_e32 v101, 0xffff0000, v20 +; GFX11TRUE16-NEXT: v_and_b32_e32 v102, 0xffff0000, v4 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v115, 0xffff0000, v17 +; GFX11TRUE16-NEXT: v_and_b32_e32 v116, 0xffff0000, v1 +; GFX11TRUE16-NEXT: v_and_b32_e32 v97, 0xffff0000, v22 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX11TRUE16-NEXT: v_and_b32_e32 v117, 0xffff0000, v16 +; GFX11TRUE16-NEXT: v_and_b32_e32 v118, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v103, 0xffff0000, v19 +; GFX11TRUE16-NEXT: v_and_b32_e32 v112, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v85, 0xffff0000, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; GFX11TRUE16-NEXT: v_and_b32_e32 v113, 0xffff0000, v18 +; GFX11TRUE16-NEXT: v_and_b32_e32 v114, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(16) +; GFX11TRUE16-NEXT: v_and_b32_e32 v119, 0xffff0000, v31 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(15) +; GFX11TRUE16-NEXT: v_and_b32_e32 v128, 0xffff0000, v32 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(14) +; GFX11TRUE16-NEXT: v_and_b32_e32 v129, 0xffff0000, v33 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v33, 16, v33 +; GFX11TRUE16-NEXT: v_and_b32_e32 v68, 0xffff0000, v13 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(12) +; GFX11TRUE16-NEXT: v_and_b32_e32 v131, 0xffff0000, v35 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(10) +; GFX11TRUE16-NEXT: v_and_b32_e32 v133, 0xffff0000, v37 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(9) +; GFX11TRUE16-NEXT: v_and_b32_e32 v134, 0xffff0000, v38 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v37, 16, v37 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(7) +; GFX11TRUE16-NEXT: v_and_b32_e32 v144, 0xffff0000, v48 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v48, 16, v48 +; GFX11TRUE16-NEXT: 
s_waitcnt vmcnt(5) +; GFX11TRUE16-NEXT: v_and_b32_e32 v146, 0xffff0000, v50 +; GFX11TRUE16-NEXT: v_and_b32_e32 v145, 0xffff0000, v49 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v49, 16, v49 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(4) +; GFX11TRUE16-NEXT: v_and_b32_e32 v147, 0xffff0000, v51 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v51, 16, v51 +; GFX11TRUE16-NEXT: v_and_b32_e32 v96, 0xffff0000, v7 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11TRUE16-NEXT: v_and_b32_e32 v148, 0xffff0000, v55 +; GFX11TRUE16-NEXT: v_and_b32_e32 v87, 0xffff0000, v23 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX11TRUE16-NEXT: v_and_b32_e32 v83, 0xffff0000, v25 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v146, v100, v99 :: v_dual_lshlrev_b32 v25, 16, v25 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v55, 16, v55 +; GFX11TRUE16-NEXT: v_and_b32_e32 v98, 0xffff0000, v6 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v84, 0xffff0000, v9 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v48, v7, v23 +; GFX11TRUE16-NEXT: v_and_b32_e32 v135, 0xffff0000, v39 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v39, 16, v39 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v49, v6, v22 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v134, v84, v83 :: v_dual_lshlrev_b32 v13, 16, v13 +; GFX11TRUE16-NEXT: v_bfe_u32 v83, v146, 16, 1 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v51, v4, v20 :: v_dual_fmac_f32 v148, v118, v117 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v144, v96, v87 :: v_dual_and_b32 v81, 0xffff0000, v26 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v55, v0, v16 :: v_dual_lshlrev_b32 v26, 16, v26 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v145, v98, v97 +; GFX11TRUE16-NEXT: v_or_b32_e32 v84, 0x400000, v146 +; GFX11TRUE16-NEXT: v_add3_u32 v83, v83, v146, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX11TRUE16-NEXT: v_and_b32_e32 v86, 0xffff0000, v8 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX11TRUE16-NEXT: v_and_b32_e32 v82, 0xffff0000, v10 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v147, v102, v101 :: v_dual_lshlrev_b32 v10, 16, v10 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v38, 16, v38 +; GFX11TRUE16-NEXT: v_and_b32_e32 v69, 0xffff0000, v28 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v37, v10, v26 :: v_dual_lshlrev_b32 v28, 16, v28 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v39, v8, v24 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v133, v82, v81 :: v_dual_and_b32 v70, 0xffff0000, v12 +; GFX11TRUE16-NEXT: v_bfe_u32 v97, v51, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v37, 16, 1 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v135, v86, v85 :: v_dual_lshlrev_b32 v12, 16, v12 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v35, 16, v35 +; GFX11TRUE16-NEXT: v_and_b32_e32 v80, 0xffff0000, v11 +; GFX11TRUE16-NEXT: v_and_b32_e32 v132, 0xffff0000, v36 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v36, 16, v36 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v50, 16, v50 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v133 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX11TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v37 +; GFX11TRUE16-NEXT: v_or_b32_e32 v98, 0x400000, v51 +; GFX11TRUE16-NEXT: v_add3_u32 v23, v23, v37, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX11TRUE16-NEXT: v_and_b32_e32 v71, 0xffff0000, v27 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; GFX11TRUE16-NEXT: v_add3_u32 v97, v97, v51, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX11TRUE16-NEXT: 
v_lshlrev_b32_e32 v11, 16, v11 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v32, 16, v32 +; GFX11TRUE16-NEXT: v_and_b32_e32 v64, 0xffff0000, v15 +; GFX11TRUE16-NEXT: v_and_b32_e32 v130, 0xffff0000, v34 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v35, v12, v28 :: v_dual_lshlrev_b32 v34, 16, v34 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v36, v11, v27 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v50, v5, v21 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v132, v80, v71 :: v_dual_and_b32 v67, 0xffff0000, v29 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v29, 16, v29 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v130, v68, v67 :: v_dual_and_b32 v65, 0xffff0000, v30 +; GFX11TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v36 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v34, v13, v29 :: v_dual_fmac_f32 v31, v15, v32 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v119, v64, v128 :: v_dual_and_b32 v66, 0xffff0000, v14 +; GFX11TRUE16-NEXT: v_and_b32_e32 v64, 0xffff0000, v52 +; GFX11TRUE16-NEXT: v_and_b32_e32 v128, 0xffff0000, v53 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v53, 16, v53 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v129, v66, v65 :: v_dual_lshlrev_b32 v30, 16, v30 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v52, 16, v52 +; GFX11TRUE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v54 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v54, 16, v54 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v64, v112, v103 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v38, v9, v25 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v131, v70, v69 :: v_dual_lshlrev_b32 v14, 16, v14 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v53, v2, v18 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v119, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v2, v31, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v33, v14, v30 :: v_dual_fmac_f32 v52, v3, v19 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v54, v1, v17 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v119 +; GFX11TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v31 +; GFX11TRUE16-NEXT: v_bfe_u32 v4, v129, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v119, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v119, v119 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v31, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e64 s0, v31, v31 +; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v129 +; GFX11TRUE16-NEXT: v_bfe_u32 v6, v33, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v14, v132, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v15, v0, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cndmask_b32_e64 v149, v2, v3, s0 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v4, v129, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v129, v129 +; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v33 +; GFX11TRUE16-NEXT: v_bfe_u32 v8, v130, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v3, v6, v33, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v150, v14, v132, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v14, v2, v5, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33 +; GFX11TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v130 +; GFX11TRUE16-NEXT: v_bfe_u32 v10, v34, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v13, v35, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v4, v8, v130, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v33, v3, v7, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v130, v130 +; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v34 +; GFX11TRUE16-NEXT: v_bfe_u32 v12, v131, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v6, v10, v34, 0x7fff +; 
GFX11TRUE16-NEXT: v_add3_u32 v10, v13, v35, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v13, v4, v9, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34 +; GFX11TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v131 +; GFX11TRUE16-NEXT: v_add3_u32 v8, v12, v131, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v35 +; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v132 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v34, v6, v11, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v131, v131 +; GFX11TRUE16-NEXT: v_bfe_u32 v19, v36, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v21, v133, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v25, v134, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v134 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v12, v8, v16, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35 +; GFX11TRUE16-NEXT: v_add3_u32 v19, v19, v36, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v21, v21, v133, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v27, v38, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v25, v25, v134, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v16, v10, v17, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v132, v132 +; GFX11TRUE16-NEXT: v_or_b32_e32 v28, 0x400000, v38 +; GFX11TRUE16-NEXT: v_bfe_u32 v29, v135, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v27, v27, v38, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v30, 0x400000, v135 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v150, v18, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 +; GFX11TRUE16-NEXT: v_bfe_u32 v65, v39, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v29, v29, v135, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v66, 0x400000, v39 +; GFX11TRUE16-NEXT: v_bfe_u32 v67, v144, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v17, v19, v20, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v133, v133 +; GFX11TRUE16-NEXT: v_add3_u32 v65, v65, v39, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v68, 0x400000, v144 +; GFX11TRUE16-NEXT: v_bfe_u32 v69, v48, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v67, v67, v144, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v10, v21, v22, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37 +; GFX11TRUE16-NEXT: v_or_b32_e32 v70, 0x400000, v48 +; GFX11TRUE16-NEXT: v_bfe_u32 v71, v145, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v69, v69, v48, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v80, 0x400000, v145 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v18, v23, v24, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v134, v134 +; GFX11TRUE16-NEXT: v_bfe_u32 v81, v49, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v71, v71, v145, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v82, 0x400000, v49 +; GFX11TRUE16-NEXT: v_bfe_u32 v85, v50, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v9, v25, v26, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38 +; GFX11TRUE16-NEXT: v_add3_u32 v81, v81, v49, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v86, 0x400000, v50 +; GFX11TRUE16-NEXT: v_bfe_u32 v87, v147, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v85, v85, v50, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v19, v27, v28, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v135, v135 +; GFX11TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v147 +; GFX11TRUE16-NEXT: v_add3_u32 v87, v87, v147, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v99, v64, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v100, 0x400000, v64 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v8, v29, v30, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39 +; GFX11TRUE16-NEXT: v_bfe_u32 v101, v52, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v99, v99, v64, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v102, 0x400000, v52 +; 
GFX11TRUE16-NEXT: v_bfe_u32 v117, v54, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v20, v65, v66, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v144, v144 +; GFX11TRUE16-NEXT: v_add3_u32 v101, v101, v52, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v118, 0x400000, v54 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v55, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v117, v117, v54, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v67, v68, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v55 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v55, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v119, v148, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v148 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v21, v69, v70, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v145, v145 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v8.l, v20.h +; GFX11TRUE16-NEXT: v_add3_u32 v119, v119, v148, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v9.l, v19.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v71, v80, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v10.l, v18.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v11.l, v17.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v12.l, v16.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v13.l, v34.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v22, v81, v82, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v146, v146 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v14.l, v33.h +; GFX11TRUE16-NEXT: v_mov_b16_e64 v15.l, v149.h +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v6.l, v22.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v5, v83, v84, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v23, v85, v86, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v147, v147 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, v23.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v4, v87, v96, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v128, v114, v113 +; GFX11TRUE16-NEXT: v_bfe_u32 v113, v53, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v114, 0x400000, v53 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v24, v97, v98, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64 +; GFX11TRUE16-NEXT: v_bfe_u32 v103, v128, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v112, 0x400000, v128 +; GFX11TRUE16-NEXT: v_add3_u32 v113, v113, v53, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, v24.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v99, v100, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52 +; GFX11TRUE16-NEXT: v_add3_u32 v103, v103, v128, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v25, v101, v102, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v128, v128 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v32, v116, v115 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v25.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v103, v112, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53 +; GFX11TRUE16-NEXT: v_bfe_u32 v115, v32, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v116, 0x400000, v32 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v26, v113, v114, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_add3_u32 v115, v115, v32, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v26.h 
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v27, v117, v118, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v28, v0, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v115, v116, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v148, v148 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v27.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v119, v31, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v28.h +; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11FAKE16-LABEL: v_fma_v32bf16: +; GFX11FAKE16: ; %bb.0: +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: s_clause 0x10 +; GFX11FAKE16-NEXT: scratch_load_b32 v31, off, s32 offset:64 +; GFX11FAKE16-NEXT: scratch_load_b32 v32, off, s32 +; GFX11FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:60 +; GFX11FAKE16-NEXT: scratch_load_b32 v34, off, s32 offset:56 +; GFX11FAKE16-NEXT: scratch_load_b32 v35, off, s32 offset:52 +; GFX11FAKE16-NEXT: scratch_load_b32 v36, off, s32 offset:48 +; GFX11FAKE16-NEXT: scratch_load_b32 v37, off, s32 offset:44 +; GFX11FAKE16-NEXT: scratch_load_b32 v38, off, s32 offset:40 +; GFX11FAKE16-NEXT: scratch_load_b32 v39, off, s32 offset:36 +; GFX11FAKE16-NEXT: scratch_load_b32 v48, off, s32 offset:32 +; GFX11FAKE16-NEXT: scratch_load_b32 v49, off, s32 offset:28 +; GFX11FAKE16-NEXT: scratch_load_b32 v50, off, s32 offset:24 +; GFX11FAKE16-NEXT: scratch_load_b32 v51, off, s32 offset:20 +; GFX11FAKE16-NEXT: scratch_load_b32 v52, off, s32 offset:16 +; GFX11FAKE16-NEXT: scratch_load_b32 v53, off, s32 offset:12 +; GFX11FAKE16-NEXT: scratch_load_b32 v54, off, s32 offset:8 +; GFX11FAKE16-NEXT: scratch_load_b32 v55, off, s32 offset:4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v99, 16, v21 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v100, 16, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v97, 16, v22 +; GFX11FAKE16-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v101, 16, v20 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v102, 16, v4 +; GFX11FAKE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v117, 16, v16 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v118, 16, v0 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v87, 16, v23 +; GFX11FAKE16-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX11FAKE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v98, 16, v6 +; GFX11FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v103, 16, v19 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v112, 16, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v85, 16, v24 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v113, 16, v18 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v114, 16, v2 +; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v115, 16, v17 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v116, 16, v1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX11FAKE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(15) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v128, 16, v32 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(14) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v129, 16, v33 +; GFX11FAKE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; 
GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v68, 16, v13 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(12) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v131, 16, v35 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(10) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v133, 16, v37 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(9) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v134, 16, v38 +; GFX11FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v37 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(7) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v144, 16, v48 +; GFX11FAKE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v48 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(5) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v146, 16, v50 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v145, 16, v49 +; GFX11FAKE16-NEXT: v_and_b32_e32 v49, 0xffff0000, v49 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v84, 16, v9 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(4) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v147, 16, v51 +; GFX11FAKE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v51 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v96, 16, v7 +; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v83, 16, v25 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v146, v100, v99 :: v_dual_and_b32 v25, 0xffff0000, v25 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v135, 16, v39 +; GFX11FAKE16-NEXT: v_and_b32_e32 v39, 0xffff0000, v39 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v48, v7, v23 :: v_dual_fmac_f32 v49, v6, v22 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v134, v84, v83 :: v_dual_and_b32 v13, 0xffff0000, v13 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v51, v4, v20 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v144, v96, v87 :: v_dual_lshlrev_b32 v81, 16, v26 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v145, v98, v97 :: v_dual_and_b32 v26, 0xffff0000, v26 +; GFX11FAKE16-NEXT: v_or_b32_e32 v84, 0x400000, v146 +; GFX11FAKE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v86, 16, v8 +; GFX11FAKE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v82, 16, v10 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v147, v102, v101 :: v_dual_and_b32 v10, 0xffff0000, v10 +; GFX11FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX11FAKE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v38 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v69, 16, v28 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v37, v10, v26 :: v_dual_and_b32 v28, 0xffff0000, v28 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v39, v8, v24 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v133, v82, v81 :: v_dual_lshlrev_b32 v70, 16, v12 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v135, v86, v85 :: v_dual_and_b32 v12, 0xffff0000, v12 +; GFX11FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v35 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v80, 16, v11 +; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v132, 16, v36 +; GFX11FAKE16-NEXT: v_and_b32_e32 v36, 0xffff0000, v36 +; GFX11FAKE16-NEXT: v_and_b32_e32 v50, 0xffff0000, v50 +; GFX11FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v133 +; GFX11FAKE16-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v37 +; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v71, 16, v27 +; GFX11FAKE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX11FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v130, 16, v34 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v35, v12, v28 :: v_dual_and_b32 v34, 0xffff0000, v34 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v36, v11, v27 +; 
GFX11FAKE16-NEXT: v_fmac_f32_e32 v50, v5, v21 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v132, v80, v71 :: v_dual_lshlrev_b32 v67, 16, v29 +; GFX11FAKE16-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX11FAKE16-NEXT: v_or_b32_e32 v98, 0x400000, v51 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v119, 16, v31 +; GFX11FAKE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v64, 16, v15 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v34, v13, v29 :: v_dual_and_b32 v15, 0xffff0000, v15 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v130, v68, v67 :: v_dual_lshlrev_b32 v65, 16, v30 +; GFX11FAKE16-NEXT: v_bfe_u32 v23, v37, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v31, v15, v32 :: v_dual_lshlrev_b32 v66, 16, v14 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v119, v64, v128 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(3) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v64, 16, v52 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(2) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v128, 16, v53 +; GFX11FAKE16-NEXT: v_and_b32_e32 v53, 0xffff0000, v53 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(1) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v54 +; GFX11FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v54 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v54, 16, v55 +; GFX11FAKE16-NEXT: v_and_b32_e32 v55, 0xffff0000, v55 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v129, v66, v65 :: v_dual_and_b32 v30, 0xffff0000, v30 +; GFX11FAKE16-NEXT: v_and_b32_e32 v52, 0xffff0000, v52 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v64, v112, v103 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v38, v9, v25 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v131, v70, v69 :: v_dual_and_b32 v14, 0xffff0000, v14 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v53, v2, v18 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v55, v0, v16 +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v119, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v2, v31, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v33, v14, v30 :: v_dual_fmac_f32 v52, v3, v19 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v32, v1, v17 +; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v119 +; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v31 +; GFX11FAKE16-NEXT: v_bfe_u32 v4, v129, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v119, 0x7fff +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v119, v119 +; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v31, 0x7fff +; GFX11FAKE16-NEXT: v_cmp_u_f32_e64 s0, v31, v31 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v128, v114, v113 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v54, v118, v117 +; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v129 +; GFX11FAKE16-NEXT: v_bfe_u32 v6, v33, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v10, v34, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v14, v35, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v19, v36, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v27, v38, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v65, v39, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v69, v48, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v81, v49, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v85, v50, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v97, v51, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v101, v52, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v113, v53, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v117, v32, 16, 1 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v148, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: v_cndmask_b32_e64 v149, v2, v3, s0 +; GFX11FAKE16-NEXT: v_add3_u32 v2, v4, v129, 0x7fff +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v129, v129 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v15, v116, v115 +; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v33 +; GFX11FAKE16-NEXT: v_bfe_u32 v8, v130, 16, 1 +; GFX11FAKE16-NEXT: 
v_add3_u32 v3, v6, v33, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v6, v10, v34, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v10, v14, v35, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v14, v19, v36, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v19, v23, v37, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v23, v27, v38, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v27, v65, v39, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v65, v69, v48, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v69, v81, v49, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v81, v85, v50, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v85, v97, v51, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v97, v101, v52, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v101, v113, v53, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v113, v117, v32, 0x7fff +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v117, v2, v5, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33 +; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v130 +; GFX11FAKE16-NEXT: v_bfe_u32 v12, v131, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v17, v132, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v21, v133, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v134, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v29, v135, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v67, v144, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v71, v145, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v83, v146, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v87, v147, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v99, v64, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v103, v128, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v115, v15, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v119, v54, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v4, v8, v130, 0x7fff +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v33, v3, v7, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v130, v130 +; GFX11FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v34 +; GFX11FAKE16-NEXT: v_add3_u32 v8, v12, v131, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v12, v17, v132, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v17, v21, v133, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v21, v25, v134, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v25, v29, v135, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v29, v67, v144, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v67, v71, v145, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v71, v83, v146, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v83, v87, v147, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v87, v99, v64, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v99, v103, v128, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v103, v115, v15, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v115, v119, v54, 0x7fff +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v119, v4, v9, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34 +; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v131 +; GFX11FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v35 +; GFX11FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v132 +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v36 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v34, v6, v11, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v131, v131 +; GFX11FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v134 +; GFX11FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v38 +; GFX11FAKE16-NEXT: v_or_b32_e32 v30, 0x400000, v135 +; GFX11FAKE16-NEXT: v_or_b32_e32 v66, 0x400000, v39 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v13, v8, v13, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35 +; GFX11FAKE16-NEXT: v_or_b32_e32 v68, 0x400000, v144 +; GFX11FAKE16-NEXT: v_or_b32_e32 v70, 0x400000, v48 +; GFX11FAKE16-NEXT: v_or_b32_e32 v80, 0x400000, v145 +; GFX11FAKE16-NEXT: v_or_b32_e32 v82, 0x400000, v49 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v16, v10, v16, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 
vcc_lo, v132, v132 +; GFX11FAKE16-NEXT: v_or_b32_e32 v86, 0x400000, v50 +; GFX11FAKE16-NEXT: v_or_b32_e32 v96, 0x400000, v147 +; GFX11FAKE16-NEXT: v_or_b32_e32 v100, 0x400000, v64 +; GFX11FAKE16-NEXT: v_or_b32_e32 v102, 0x400000, v52 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v18, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 +; GFX11FAKE16-NEXT: v_or_b32_e32 v112, 0x400000, v128 +; GFX11FAKE16-NEXT: v_or_b32_e32 v116, 0x400000, v15 +; GFX11FAKE16-NEXT: v_or_b32_e32 v118, 0x400000, v32 +; GFX11FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v54 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v20, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v133, v133 +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v55, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v55 +; GFX11FAKE16-NEXT: v_or_b32_e32 v114, 0x400000, v53 +; GFX11FAKE16-NEXT: v_perm_b32 v11, v12, v11, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v10, v17, v22, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v55, 0x7fff +; GFX11FAKE16-NEXT: v_perm_b32 v12, v16, v13, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v13, v34, v119, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v14, v19, v24, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v134, v134 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v10, v14, v10, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v9, v21, v26, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38 +; GFX11FAKE16-NEXT: v_perm_b32 v14, v33, v117, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v17, v23, v28, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v135, v135 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v9, v17, v9, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v8, v25, v30, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v18, v27, v66, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v144, v144 +; GFX11FAKE16-NEXT: v_perm_b32 v8, v18, v8, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v7, v29, v68, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v19, v65, v70, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v145, v145 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v7, v19, v7, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v6, v67, v80, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v20, v69, v82, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v146, v146 +; GFX11FAKE16-NEXT: v_perm_b32 v6, v20, v6, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v5, v71, v84, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v21, v81, v86, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v147, v147 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v5, v21, v5, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v83, v96, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v87, v100, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v22, v97, v102, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v128, v128 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: 
v_perm_b32 v3, v22, v3, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v99, v112, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v15, v103, v116, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v23, v113, v118, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v24, v115, v31, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53 +; GFX11FAKE16-NEXT: v_perm_b32 v1, v23, v15, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v15, v149, v148, 0x7060302 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v24, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v25, v101, v114, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51 +; GFX11FAKE16-NEXT: v_perm_b32 v2, v25, v2, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v26, v85, v98, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_perm_b32 v4, v26, v4, 0x7060302 +; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; ; GFX1250-LABEL: v_fma_v32bf16: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_clause 0x10 -; GFX1250-NEXT: scratch_load_b32 v31, off, s32 offset:64 -; GFX1250-NEXT: scratch_load_b32 v32, off, s32 offset:4 -; GFX1250-NEXT: scratch_load_b32 v33, off, s32 offset:8 -; GFX1250-NEXT: scratch_load_b32 v34, off, s32 offset:12 -; GFX1250-NEXT: scratch_load_b32 v35, off, s32 offset:16 -; GFX1250-NEXT: scratch_load_b32 v36, off, s32 offset:20 -; GFX1250-NEXT: scratch_load_b32 v37, off, s32 offset:24 -; GFX1250-NEXT: scratch_load_b32 v38, off, s32 offset:28 -; GFX1250-NEXT: scratch_load_b32 v39, off, s32 offset:32 -; GFX1250-NEXT: scratch_load_b32 v48, off, s32 offset:36 -; GFX1250-NEXT: scratch_load_b32 v49, off, s32 offset:40 -; GFX1250-NEXT: scratch_load_b32 v50, off, s32 offset:44 -; GFX1250-NEXT: scratch_load_b32 v51, off, s32 offset:48 -; GFX1250-NEXT: scratch_load_b32 v52, off, s32 offset:52 -; GFX1250-NEXT: scratch_load_b32 v53, off, s32 offset:56 -; GFX1250-NEXT: scratch_load_b32 v54, off, s32 offset:60 -; GFX1250-NEXT: scratch_load_b32 v55, off, s32 -; GFX1250-NEXT: s_wait_loadcnt 0xf -; GFX1250-NEXT: v_pk_fma_bf16 v0, v0, v16, v32 -; GFX1250-NEXT: s_wait_loadcnt 0xe -; GFX1250-NEXT: v_pk_fma_bf16 v1, v1, v17, v33 -; GFX1250-NEXT: s_wait_loadcnt 0xd -; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v18, v34 -; GFX1250-NEXT: s_wait_loadcnt 0xc -; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v19, v35 -; GFX1250-NEXT: s_wait_loadcnt 0xb -; GFX1250-NEXT: v_pk_fma_bf16 v4, v4, v20, v36 -; GFX1250-NEXT: s_wait_loadcnt 0xa -; GFX1250-NEXT: v_pk_fma_bf16 v5, v5, v21, v37 -; GFX1250-NEXT: s_wait_loadcnt 0x9 -; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v22, v38 -; GFX1250-NEXT: s_wait_loadcnt 0x8 -; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v23, v39 -; GFX1250-NEXT: s_wait_loadcnt 0x7 -; GFX1250-NEXT: v_pk_fma_bf16 v8, v8, v24, v48 -; GFX1250-NEXT: s_wait_loadcnt 0x6 -; GFX1250-NEXT: v_pk_fma_bf16 v9, v9, v25, v49 -; GFX1250-NEXT: s_wait_loadcnt 0x5 -; GFX1250-NEXT: v_pk_fma_bf16 v10, v10, v26, v50 -; GFX1250-NEXT: s_wait_loadcnt 0x4 -; GFX1250-NEXT: v_pk_fma_bf16 v11, v11, v27, v51 -; GFX1250-NEXT: s_wait_loadcnt 0x3 -; GFX1250-NEXT: v_pk_fma_bf16 v12, v12, v28, v52 -; GFX1250-NEXT: s_wait_loadcnt 0x2 -; GFX1250-NEXT: 
v_pk_fma_bf16 v13, v13, v29, v53 -; GFX1250-NEXT: s_wait_loadcnt 0x1 -; GFX1250-NEXT: v_pk_fma_bf16 v14, v14, v30, v54 -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_fma_bf16 v15, v15, v55, v31 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] -define <32 x bfloat> @v_fma_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c) { +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_clause 0x10 +; GFX1250-NEXT: scratch_load_b32 v31, off, s32 offset:64 +; GFX1250-NEXT: scratch_load_b32 v32, off, s32 offset:4 +; GFX1250-NEXT: scratch_load_b32 v33, off, s32 offset:8 +; GFX1250-NEXT: scratch_load_b32 v34, off, s32 offset:12 +; GFX1250-NEXT: scratch_load_b32 v35, off, s32 offset:16 +; GFX1250-NEXT: scratch_load_b32 v36, off, s32 offset:20 +; GFX1250-NEXT: scratch_load_b32 v37, off, s32 offset:24 +; GFX1250-NEXT: scratch_load_b32 v38, off, s32 offset:28 +; GFX1250-NEXT: scratch_load_b32 v39, off, s32 offset:32 +; GFX1250-NEXT: scratch_load_b32 v48, off, s32 offset:36 +; GFX1250-NEXT: scratch_load_b32 v49, off, s32 offset:40 +; GFX1250-NEXT: scratch_load_b32 v50, off, s32 offset:44 +; GFX1250-NEXT: scratch_load_b32 v51, off, s32 offset:48 +; GFX1250-NEXT: scratch_load_b32 v52, off, s32 offset:52 +; GFX1250-NEXT: scratch_load_b32 v53, off, s32 offset:56 +; GFX1250-NEXT: scratch_load_b32 v54, off, s32 offset:60 +; GFX1250-NEXT: scratch_load_b32 v55, off, s32 +; GFX1250-NEXT: s_wait_loadcnt 0xf +; GFX1250-NEXT: v_pk_fma_bf16 v0, v0, v16, v32 +; GFX1250-NEXT: s_wait_loadcnt 0xe +; GFX1250-NEXT: v_pk_fma_bf16 v1, v1, v17, v33 +; GFX1250-NEXT: s_wait_loadcnt 0xd +; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v18, v34 +; GFX1250-NEXT: s_wait_loadcnt 0xc +; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v19, v35 +; GFX1250-NEXT: s_wait_loadcnt 0xb +; GFX1250-NEXT: v_pk_fma_bf16 v4, v4, v20, v36 +; GFX1250-NEXT: s_wait_loadcnt 0xa +; GFX1250-NEXT: v_pk_fma_bf16 v5, v5, v21, v37 +; GFX1250-NEXT: s_wait_loadcnt 0x9 +; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v22, v38 +; GFX1250-NEXT: s_wait_loadcnt 0x8 +; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v23, v39 +; GFX1250-NEXT: s_wait_loadcnt 0x7 +; GFX1250-NEXT: v_pk_fma_bf16 v8, v8, v24, v48 +; GFX1250-NEXT: s_wait_loadcnt 0x6 +; GFX1250-NEXT: v_pk_fma_bf16 v9, v9, v25, v49 +; GFX1250-NEXT: s_wait_loadcnt 0x5 +; GFX1250-NEXT: v_pk_fma_bf16 v10, v10, v26, v50 +; GFX1250-NEXT: s_wait_loadcnt 0x4 +; GFX1250-NEXT: v_pk_fma_bf16 v11, v11, v27, v51 +; GFX1250-NEXT: s_wait_loadcnt 0x3 +; GFX1250-NEXT: v_pk_fma_bf16 v12, v12, v28, v52 +; GFX1250-NEXT: s_wait_loadcnt 0x2 +; GFX1250-NEXT: v_pk_fma_bf16 v13, v13, v29, v53 +; GFX1250-NEXT: s_wait_loadcnt 0x1 +; GFX1250-NEXT: v_pk_fma_bf16 v14, v14, v30, v54 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_pk_fma_bf16 v15, v15, v55, v31 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %op = call <32 x bfloat> @llvm.fma.v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c) ret <32 x bfloat> %op } diff --git a/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll b/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll index ed48999..bd28f72 100644 --- a/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll @@ -1,734 +1,759 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-NNAN %s +; RUN: llc 
-mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s -; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NNAN %s +; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI %s -; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefixes=SI-NNAN %s +; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI %s -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-TRUE16 %s -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-FAKE16 %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-TRUE16 %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-FAKE16 %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-TRUE16 %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-FAKE16 %s define half @test_fmax_legacy_ugt_f16(half %a, half %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_max_f16_e32 v0, v0, v1 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v1 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v1, v0 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v1 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_f16: -; 
GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.l, v1.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-TRUE16-LABEL: test_fmax_legacy_ugt_f16: -; GFX11-NNAN-TRUE16: ; %bb.0: -; GFX11-NNAN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l -; GFX11-NNAN-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-FAKE16-LABEL: test_fmax_legacy_ugt_f16: -; GFX11-NNAN-FAKE16: ; %bb.0: -; GFX11-NNAN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1 -; GFX11-NNAN-FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_max_legacy_f32_e32 v0, v1, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.l, v1.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt half %a, %b %val = select i1 %cmp, half %a, half %b ret half %val } +define half @test_fmax_legacy_ugt_f16_fast(half %a, half %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_max_f16_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_e32 v0, v0, v1 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_f16_fast: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: 
v_max_f16_e32 v0.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_f16_fast: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt half %a, %b + %val = select nnan nsz i1 %cmp, half %a, half %b + ret half %val +} + define <2 x half> @test_fmax_legacy_ugt_v2f16(<2 x half> %a, <2 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v2, v0, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v1 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_v2f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2 -; VI-SAFE-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_v2f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v1 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v2 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_v2f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v2, v0 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v3, v1 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_v2f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v2 -; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v3 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; 
GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v1.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v1.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v1 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_v2f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v2, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v2f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2 +; VI-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v2f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_max_legacy_f32_e32 v0, v2, v0 +; SI-NEXT: v_max_legacy_f32_e32 v1, v3, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v2f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v1.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v1.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v2f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt <2 x half> %a, %b %val = select <2 x i1> %cmp, <2 x half> %a, <2 x half> %b ret <2 x half> %val } +define <2 x half> @test_fmax_legacy_ugt_v2f16_fast(<2 x half> %a, <2 x half> %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_v2f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v2f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_e32 v0, v0, v1 +; VI-NEXT: v_or_b32_e32 v0, v0, v2 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v2f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v2 +; SI-NEXT: v_max_f32_e32 v1, v1, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmax_legacy_ugt_v2f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v0, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt <2 x half> %a, %b + %val = select nnan nsz <2 x i1> %cmp, <2 x half> %a, <2 x half> %b + ret <2 x half> %val +} + define <3 x half> @test_fmax_legacy_ugt_v3f16(<3 x half> %a, <3 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v3 -; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v2 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_v3f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 -; VI-SAFE-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, 
v1, v3 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_v3f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v2 -; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v3 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v4 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_v3f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v3, v0 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v4, v1 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v5, v2 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_v3f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v3 -; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v4 -; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v5 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v2.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v2.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v1.l, v3.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4 -; 
GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v2 -; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v3 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_v3f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v3f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 +; VI-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 +; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v3f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_max_legacy_f32_e32 v0, v3, v0 +; SI-NEXT: v_max_legacy_f32_e32 v1, v4, v1 +; SI-NEXT: v_max_legacy_f32_e32 v2, v5, v2 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v3f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v2.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v2.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v1.l, v3.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v3f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo +; GFX11-FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt <3 x half> %a, %b %val = select <3 x i1> %cmp, <3 x half> %a, <3 x half> %b ret <3 x half> %val } +define <3 x half> @test_fmax_legacy_ugt_v3f16_fast(<3 x half> %a, <3 x half> %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_v3f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v3, v3, v3 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v1 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v3 +; GFX9-NEXT: v_pk_max_f16 v0, v0, v2 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v3f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_e32 v0, v0, v2 +; VI-NEXT: v_max_f16_e32 v1, v1, v3 +; VI-NEXT: v_or_b32_e32 v0, v0, v4 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v3f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v3 +; SI-NEXT: v_max_f32_e32 v1, v1, v4 +; SI-NEXT: v_max_f32_e32 v2, v2, v5 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmax_legacy_ugt_v3f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v3, v3, v3 +; GFX11-NEXT: v_pk_max_f16 v1, v1, v1 +; GFX11-NEXT: v_pk_max_f16 v0, v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_pk_max_f16 v1, v1, v3 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt <3 x half> %a, %b + %val = select nnan nsz <3 x i1> %cmp, <3 x half> %a, <3 x half> %b + ret <3 x half> %val +} + define <4 x half> @test_fmax_legacy_ugt_v4f16(<4 x half> %a, <4 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v1, v6, v1, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v2 -; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v3 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_v4f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v6 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_v4f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v3 -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v2 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v5 -; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v4 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_v4f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v4, v0 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v5, v1 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v6, v2 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v3, v7, v3 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_v4f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; 
SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v4 -; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v5 -; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v6 -; SI-NNAN-NEXT: v_max_f32_e32 v3, v3, v7 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.h, v3.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.h, v2.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v0.l, v2.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v1.l, v3.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v7, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v2 -; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v3 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_v4f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v6, v1, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v4f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: 
v_lshrrev_b32_e32 v6, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6 +; VI-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 +; VI-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 +; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v4 +; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v6 +; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v4f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_max_legacy_f32_e32 v0, v4, v0 +; SI-NEXT: v_max_legacy_f32_e32 v1, v5, v1 +; SI-NEXT: v_max_legacy_f32_e32 v2, v6, v2 +; SI-NEXT: v_max_legacy_f32_e32 v3, v7, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v4f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.h, v3.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.h, v2.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v0.l, v2.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v1.l, v3.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v4f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v7, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v0, 
v5, v0, 0x5040100 +; GFX11-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt <4 x half> %a, %b %val = select <4 x i1> %cmp, <4 x half> %a, <4 x half> %b ret <4 x half> %val } +define <4 x half> @test_fmax_legacy_ugt_v4f16_fast(<4 x half> %a, <4 x half> %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_v4f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v0, v0, v2 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v3 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v4f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_e32 v1, v1, v3 +; VI-NEXT: v_max_f16_e32 v0, v0, v2 +; VI-NEXT: v_or_b32_e32 v0, v0, v5 +; VI-NEXT: v_or_b32_e32 v1, v1, v4 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v4f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v4 +; SI-NEXT: v_max_f32_e32 v1, v1, v5 +; SI-NEXT: v_max_f32_e32 v2, v2, v6 +; SI-NEXT: v_max_f32_e32 v3, v3, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmax_legacy_ugt_v4f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v0, v0, v2 +; GFX11-NEXT: v_pk_max_f16 v1, v1, v3 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt <4 x half> %a, %b + %val = select nnan nsz <4 x i1> %cmp, <4 x half> %a, <4 x half> %b + ret <4 x half> %val +} + define <8 x half> @test_fmax_legacy_ugt_v8f16(<8 x half> %a, <8 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; 
GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v8, v0, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v1, v10, v1, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v2, v12, v2, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v3, v14, v3, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v4 -; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v5 -; GFX9-NNAN-NEXT: v_pk_max_f16 v2, v2, v6 -; GFX9-NNAN-NEXT: v_pk_max_f16 v3, v3, v7 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_v8f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v10 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v12 -; VI-SAFE-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v14 -; VI-SAFE-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_v8f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_e32 v3, v3, v7 -; VI-NNAN-NEXT: v_max_f16_e32 v2, v2, v6 -; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v5 -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v4 -; VI-NNAN-NEXT: v_or_b32_e32 v0, 
v0, v11 -; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v10 -; VI-NNAN-NEXT: v_or_b32_e32 v2, v2, v9 -; VI-NNAN-NEXT: v_or_b32_e32 v3, v3, v8 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_v8f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v8, v0 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v9, v1 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v10, v2 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v3, v11, v3 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v4, v12, v4 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v5, v13, v5 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v6, v14, v6 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v7, v15, v7 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_v8f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v8, v8 
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v8 -; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v9 -; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v10 -; SI-NNAN-NEXT: v_max_f32_e32 v3, v3, v11 -; SI-NNAN-NEXT: v_max_f32_e32 v4, v4, v12 -; SI-NNAN-NEXT: v_max_f32_e32 v5, v5, v13 -; SI-NNAN-NEXT: v_max_f32_e32 v6, v6, v14 -; SI-NNAN-NEXT: v_max_f32_e32 v7, v7, v15 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v4.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v1.h, v5.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v2.h, v6.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v3.h, v7.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s3, v0.l, v4.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s4, v1.l, v5.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s5, v2.l, v6.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s6, v3.l, v7.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v11, v10 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v13, v12 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v15, v14 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v9, v8 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v2, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v4 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v5 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v7 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v4 -; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v5 -; GFX11-NNAN-NEXT: v_pk_max_f16 v2, v2, v6 -; GFX11-NNAN-NEXT: v_pk_max_f16 v3, v3, v7 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_v8f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14 +; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12 +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10 +; GFX9-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8 +; GFX9-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7 +; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v8, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v10, v1, s4 +; GFX9-NEXT: v_perm_b32 v2, v12, v2, s4 +; GFX9-NEXT: v_perm_b32 v3, v14, v3, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v8f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; VI-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14 +; VI-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; VI-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12 +; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; VI-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; VI-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10 +; VI-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8 +; VI-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7 +; VI-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6 +; VI-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5 +; VI-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4 +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v8 +; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v10 +; VI-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v12 +; VI-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: 
v_lshlrev_b32_e32 v4, 16, v14 +; VI-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v8f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_max_legacy_f32_e32 v0, v8, v0 +; SI-NEXT: v_max_legacy_f32_e32 v1, v9, v1 +; SI-NEXT: v_max_legacy_f32_e32 v2, v10, v2 +; SI-NEXT: v_max_legacy_f32_e32 v3, v11, v3 +; SI-NEXT: v_max_legacy_f32_e32 v4, v12, v4 +; SI-NEXT: v_max_legacy_f32_e32 v5, v13, v5 +; SI-NEXT: v_max_legacy_f32_e32 v6, v14, v6 +; SI-NEXT: v_max_legacy_f32_e32 v7, v15, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v8f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v4.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v1.h, v5.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v2.h, v6.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v3.h, v7.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s3, v0.l, v4.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s4, v1.l, v5.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s5, v2.l, v6.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s6, v3.l, v7.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v8f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v11, v10 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX11-FAKE16-NEXT: 
v_lshrrev_b32_e32 v9, 16, v0 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v13, v12 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v15, v14 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v9, v8 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v2, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v5 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v7 +; GFX11-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt <8 x half> %a, %b %val = select <8 x i1> %cmp, <8 x half> %a, <8 x half> %b ret <8 x half> %val } +define <8 x half> @test_fmax_legacy_ugt_v8f16_fast(<8 x half> %a, <8 x half> %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_v8f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v0, v0, v4 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v5 +; GFX9-NEXT: v_pk_max_f16 v2, v2, v6 +; GFX9-NEXT: v_pk_max_f16 v3, v3, v7 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v8f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_e32 v3, v3, v7 +; VI-NEXT: v_max_f16_e32 v2, v2, v6 +; VI-NEXT: v_max_f16_e32 v1, v1, v5 +; VI-NEXT: v_max_f16_e32 v0, v0, v4 +; VI-NEXT: v_or_b32_e32 v0, v0, v11 +; VI-NEXT: v_or_b32_e32 v1, v1, v10 +; VI-NEXT: v_or_b32_e32 v2, v2, v9 +; VI-NEXT: v_or_b32_e32 v3, v3, v8 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v8f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: 
v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v8 +; SI-NEXT: v_max_f32_e32 v1, v1, v9 +; SI-NEXT: v_max_f32_e32 v2, v2, v10 +; SI-NEXT: v_max_f32_e32 v3, v3, v11 +; SI-NEXT: v_max_f32_e32 v4, v4, v12 +; SI-NEXT: v_max_f32_e32 v5, v5, v13 +; SI-NEXT: v_max_f32_e32 v6, v6, v14 +; SI-NEXT: v_max_f32_e32 v7, v7, v15 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmax_legacy_ugt_v8f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v0, v0, v4 +; GFX11-NEXT: v_pk_max_f16 v1, v1, v5 +; GFX11-NEXT: v_pk_max_f16 v2, v2, v6 +; GFX11-NEXT: v_pk_max_f16 v3, v3, v7 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt <8 x half> %a, %b + %val = select nnan nsz <8 x i1> %cmp, <8 x half> %a, <8 x half> %b + ret <8 x half> %val +} + attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll index eee2bd1..f3a84e6 100644 --- a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll +++ b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll @@ -1,8 +1,6 @@ -; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN,FUNC %s -; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN,FUNC %s ; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope --check-prefixes=EG,FUNC %s @@ -12,12 +10,10 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1 ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] -; VI-SAFE: v_cmp_nlt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] - -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; VI: v_cmp_nlt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_uge_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { @@ -34,18 +30,38 @@ define amdgpu_kernel void @test_fmax_legacy_uge_f32(ptr addrspace(1) %out, ptr a ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_uge_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = 
getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 + + %cmp = fcmp uge float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_nnan_src: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] ; GCN-DAG: v_add_f32_e32 [[ADD_A:v[0-9]+]], 1.0, [[A]] ; GCN-DAG: v_add_f32_e32 [[ADD_B:v[0-9]+]], 2.0, [[B]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] -; VI-SAFE: v_cmp_nlt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[ADD_B]], [[ADD_A]] +; VI: v_cmp_nlt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[ADD_B]], [[ADD_A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { @@ -64,16 +80,40 @@ define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src(ptr addrspace(1) %o ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_nnan_src_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] +; GCN-DAG: v_add_f32_e32 [[ADD_A:v[0-9]+]], 1.0, [[A]] +; GCN-DAG: v_add_f32_e32 [[ADD_B:v[0-9]+]], 2.0, [[B]] + +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] + +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 + %a.nnan = fadd nnan float %a, 1.0 + %b.nnan = fadd nnan float %b, 2.0 + + %cmp = fcmp uge float %a.nnan, %b.nnan + %val = select nnan nsz i1 %cmp, float %a.nnan, float %b.nnan + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_oge_f32: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] -; VI-SAFE: v_cmp_ge_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] +; VI: v_cmp_ge_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_oge_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 @@ -89,17 +129,35 @@ define amdgpu_kernel void @test_fmax_legacy_oge_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32: +; FUNC-LABEL: {{^}}test_fmax_legacy_oge_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_oge_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid 
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; VI-SAFE: v_cmp_nle_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %cmp = fcmp oge float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} +; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] + +; VI: v_cmp_nle_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_ugt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 @@ -115,16 +173,35 @@ define amdgpu_kernel void @test_fmax_legacy_ugt_f32(ptr addrspace(1) %out, ptr a ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_ugt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 + + %cmp = fcmp ugt float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] -; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] +; VI: v_cmp_gt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_ogt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 @@ -140,17 +217,35 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32: +; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_ogt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %cmp = fcmp ogt float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr 
addrspace(1) %out, align 4 + ret void +} +; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + +; VI: v_cmp_gt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 @@ -166,23 +261,39 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <1 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile <1 x float>, ptr addrspace(1) %gep.0 + %b = load volatile <1 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ogt <1 x float> %a, %b + %val = select nnan nsz <1 x i1> %cmp, <1 x float> %a, <1 x float> %b + store <1 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v3f32: -; SI-SAFE: v_max_legacy_f32_e32 -; SI-SAFE: v_max_legacy_f32_e32 -; SI-SAFE: v_max_legacy_f32_e32 - -; VI-SAFE: v_cmp_gt_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_gt_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_gt_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE-NOT: v_cmp -; VI-SAFE-NOT: v_cndmask - -; GCN-NONAN: v_max_f32_e32 -; GCN-NONAN: v_max_f32_e32 -; GCN-NONAN: v_max_f32_e32 +; SI: v_max_legacy_f32_e32 +; SI: v_max_legacy_f32_e32 +; SI: v_max_legacy_f32_e32 + +; VI: v_cmp_gt_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_gt_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_gt_f32_e32 +; VI: v_cndmask_b32_e32 +; VI-NOT: v_cmp +; VI-NOT: v_cndmask ; GCN-NOT: v_max define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { @@ -199,6 +310,27 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v3f32_fast: + +; GCN: v_max_f32_e32 +; GCN: v_max_f32_e32 +; GCN: v_max_f32_e32 + +; GCN-NOT: v_max +define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <3 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load <3 x float>, ptr addrspace(1) %gep.0 + %b = load <3 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ogt <3 x float> %a, %b + %val = select nnan nsz <3 x i1> %cmp, <3 x float> %a, <3 x float> %b + store <3 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32_multi_use: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] diff --git a/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll b/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll index 2ac5891..37f077d5 100644 --- a/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll +++ 
b/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll @@ -1,16 +1,12 @@ -; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope --check-prefixes=GCN %s +; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN %s -; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope --check-prefixes=GCN,VI-NNAN %s +; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN %s ; GCN-LABEL: {{^}}min_fneg_select_regression_0: ; GCN-NOT: v_mul -; SI: v_max_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0 - -; VI-SAFE: v_cmp_nle_f32_e32 vcc, 1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc +; VI: v_cmp_nle_f32_e32 vcc, 1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc define amdgpu_ps float @min_fneg_select_regression_0(float %a, float %b) #0 { %fneg.a = fsub float -0.0, %a %cmp.a = fcmp ult float %a, 1.0 @@ -18,15 +14,23 @@ define amdgpu_ps float @min_fneg_select_regression_0(float %a, float %b) #0 { ret float %min.a } +; GCN-LABEL: {{^}}min_fneg_select_regression_0_fast: +; GCN-NOT: v_mul + +define amdgpu_ps float @min_fneg_select_regression_0_fast(float %a, float %b) #0 { + %fneg.a = fsub float -0.0, %a + %cmp.a = fcmp ult float %a, 1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}min_fneg_select_regression_posk_0: ; GCN-NOT: v_mul ; SI: v_max_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0 -; VI-SAFE: v_cmp_nle_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NNAN: v_max_f32_e64 v{{[0-9]+}}, -v0, 1.0 +; VI: v_cmp_nle_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @min_fneg_select_regression_posk_0(float %a, float %b) #0 { %fneg.a = fsub float -0.0, %a %cmp.a = fcmp ult float %a, -1.0 @@ -34,15 +38,24 @@ define amdgpu_ps float @min_fneg_select_regression_posk_0(float %a, float %b) #0 ret float %min.a } -; GCN-LABEL: {{^}}max_fneg_select_regression_0: +; GCN-LABEL: {{^}}min_fneg_select_regression_posk_0_fast: ; GCN-NOT: v_mul -; SI-SAFE: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0 +; VI: v_max_f32_e64 v{{[0-9]+}}, -v0, 1.0 +define amdgpu_ps float @min_fneg_select_regression_posk_0_fast(float %a, float %b) #0 { + %fneg.a = fsub float -0.0, %a + %cmp.a = fcmp ult float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + +; GCN-LABEL: {{^}}max_fneg_select_regression_0: +; GCN-NOT: v_mul -; VI-SAFE: v_cmp_nge_f32_e32 vcc, 1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc +; SI: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0 -; GCN-NONAN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, -1.0 +; VI: v_cmp_nge_f32_e32 vcc, 1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc define amdgpu_ps float @max_fneg_select_regression_0(float %a) #0 { %fneg.a = fsub float -0.0, %a %cmp.a = fcmp ugt float %a, 1.0 @@ -50,15 +63,24 @@ define amdgpu_ps float @max_fneg_select_regression_0(float %a) #0 { ret float %min.a } -; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0: +; GCN-LABEL: {{^}}max_fneg_select_regression_0_fast: ; GCN-NOT: v_mul -; SI-SAFE: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0 +; GCN: v_min_f32_e64 [[MIN:v[0-9]+]], 
-v0, -1.0 +define amdgpu_ps float @max_fneg_select_regression_0_fast(float %a) #0 { + %fneg.a = fsub float -0.0, %a + %cmp.a = fcmp ugt float %a, 1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0 + ret float %min.a +} + +; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0: +; GCN-NOT: v_mul -; VI-SAFE: v_cmp_nge_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc +; SI: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0 -; GCN-NONAN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, 1.0 +; VI: v_cmp_nge_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @max_fneg_select_regression_posk_0(float %a) #0 { %fneg.a = fsub float -0.0, %a %cmp.a = fcmp ugt float %a, -1.0 @@ -66,13 +88,22 @@ define amdgpu_ps float @max_fneg_select_regression_posk_0(float %a) #0 { ret float %min.a } +; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0_fast: +; GCN-NOT: v_mul + +; GCN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, 1.0 +define amdgpu_ps float @max_fneg_select_regression_posk_0_fast(float %a) #0 { + %fneg.a = fsub float -0.0, %a + %cmp.a = fcmp ugt float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg1: ; SI: v_min_legacy_f32_e64 v0, 1.0, -v0 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NNAN: v_min_f32_e64 v0, -v0, 1.0 +; VI: v_cmp_nge_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ugt float %a, -1.0 @@ -80,13 +111,21 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg1_fast: + +; VI: v_min_f32_e64 v0, -v0, 1.0 +define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ugt float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg1: ; SI: v_max_legacy_f32_e64 v0, 1.0, -v0 -; VI-SAFE: v_cmp_nle_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NNAN: v_max_f32_e64 v0, -v0, 1.0 +; VI: v_cmp_nle_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ult float %a, -1.0 @@ -94,13 +133,21 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg1_fast: + +; VI: v_max_f32_e64 v0, -v0, 1.0 +define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ult float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg1: ; SI: v_min_legacy_f32_e64 v0, -v0, 1.0 -; VI-SAFE: v_cmp_lt_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NNAN: v_min_f32_e64 v0, -v0, 1.0 +; VI: v_cmp_lt_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ogt float %a, -1.0 @@ -108,13 +155,21 @@ define amdgpu_ps float 
@select_fneg_a_or_q_cmp_ogt_a_neg1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg1_fast: + +; VI: v_min_f32_e64 v0, -v0, 1.0 +define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ogt float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg1: ; SI: v_max_legacy_f32_e64 v0, -v0, 1.0 -; VI-SAFE: v_cmp_gt_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NANN: v_max_f32_e64 v0, -v0, 1.0 +; VI: v_cmp_gt_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp olt float %a, -1.0 @@ -122,17 +177,24 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg1_fast: + +; VI-NANN: v_max_f32_e64 v0, -v0, 1.0 +define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp olt float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg8: ; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 ; SI-NEXT: v_min_legacy_f32_e64 v0, [[K]], -v0 -; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 -; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[K0]], v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc - -; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000 -; VI-NNAN-NEXT: v_min_f32_e64 v0, -v0, [[K]] +; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 +; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 +; VI: v_cmp_nge_f32_e32 vcc, [[K0]], v0 +; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ugt float %a, -8.0 @@ -140,17 +202,25 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg8_fast: + +; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 +; VI-NEXT: v_min_f32_e64 v0, -v0, [[K]] +define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ugt float %a, -8.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg8: ; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 ; SI-NEXT: v_max_legacy_f32_e64 v0, [[K]], -v0 -; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 -; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 -; VI-SAFE: v_cmp_nle_f32_e32 vcc, [[K0]], v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc - -; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000 -; VI-NNAN-NEXT: v_max_f32_e64 v0, -v0, [[K]] +; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 +; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 +; VI: v_cmp_nle_f32_e32 vcc, [[K0]], v0 +; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ult float %a, -8.0 @@ -158,17 +228,25 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: 
{{^}}select_fneg_a_or_q_cmp_ult_a_neg8_fast: + +; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 +; VI-NEXT: v_max_f32_e64 v0, -v0, [[K]] +define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ult float %a, -8.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg8: ; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 ; SI-NEXT: v_min_legacy_f32_e64 v0, -v0, [[K]] -; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 -; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 -; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[K0]], v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc - -; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000 -; VI-NNAN-NEXT: v_min_f32_e64 v0, -v0, [[K]] +; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 +; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 +; VI: v_cmp_lt_f32_e32 vcc, [[K0]], v0 +; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ogt float %a, -8.0 @@ -176,18 +254,26 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg8_fast: + +; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 +; VI-NEXT: v_min_f32_e64 v0, -v0, [[K]] +define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ogt float %a, -8.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg8: ; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 ; SI-NEXT: v_max_legacy_f32_e64 v0, -v0, [[K]] -; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 -; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 -; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[K0]], v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc - -; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000 -; VI-NNAN-NEXT: v_max_f32_e64 v0, -v0, [[K]] +; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 +; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 +; VI: v_cmp_gt_f32_e32 vcc, [[K0]], v0 +; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp olt float %a, -8.0 @@ -195,13 +281,22 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg8_fast: + +; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 +; VI-NEXT: v_max_f32_e64 v0, -v0, [[K]] +define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp olt float %a, -8.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_neg1_cmp_olt_a_1: ; SI: v_max_legacy_f32_e64 v0, -v0, -1.0 -; VI-SAFE: v_cmp_gt_f32_e32 vcc, 1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc - -; VI-NNAN: v_max_f32_e64 v0, -v0, -1.0 +; VI: v_cmp_gt_f32_e32 vcc, 1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp olt float %a, 1.0 @@ -209,15 +304,22 @@ define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: 
{{^}}select_fneg_a_or_neg1_cmp_olt_a_1_fast: + +; VI: v_max_f32_e64 v0, -v0, -1.0 +define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp olt float %a, 1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}ult_a_select_fneg_a_b: ; SI: v_cmp_nge_f32_e32 vcc, v0, v1 ; SI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc -; VI-SAFE: v_cmp_nge_f32_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc - -; VI-NNAN: v_cmp_lt_f32_e32 vcc, v0, v1 -; VI-NNAN-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc +; VI: v_cmp_nge_f32_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc define amdgpu_ps float @ult_a_select_fneg_a_b(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ult float %a, %b @@ -225,15 +327,23 @@ define amdgpu_ps float @ult_a_select_fneg_a_b(float %a, float %b) #0 { ret float %min.a } +; GCN-LABEL: {{^}}ult_a_select_fneg_a_b_fast: + +; VI: v_cmp_lt_f32_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc +define amdgpu_ps float @ult_a_select_fneg_a_b_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp nnan nsz ult float %a, %b + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float %b + ret float %min.a +} + ; GCN-LABEL: {{^}}ugt_a_select_fneg_a_b: ; SI: v_cmp_nle_f32_e32 vcc, v0, v1 ; SI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc -; VI-SAFE: v_cmp_nle_f32_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc - -; VI-NNAN: v_cmp_gt_f32_e32 vcc, v0, v1 -; VI-NNAN-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc +; VI: v_cmp_nle_f32_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc define amdgpu_ps float @ugt_a_select_fneg_a_b(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ugt float %a, %b @@ -241,5 +351,16 @@ define amdgpu_ps float @ugt_a_select_fneg_a_b(float %a, float %b) #0 { ret float %min.a } +; GCN-LABEL: {{^}}ugt_a_select_fneg_a_b_fast: + +; VI: v_cmp_gt_f32_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc +define amdgpu_ps float @ugt_a_select_fneg_a_b_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp nnan nsz ugt float %a, %b + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float %b + ret float %min.a +} + attributes #0 = { nounwind } attributes #1 = { nounwind readnone } diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll index 34cb0b1..40c2ec0 100644 --- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll @@ -1,735 +1,760 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-NNAN %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s -; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NNAN %s +; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI %s -; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI-SAFE %s -; RUN: llc 
-enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefixes=SI-NNAN %s +; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI %s -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-TRUE16 %s -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-FAKE16 %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-TRUE16 %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-FAKE16 %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-TRUE16 %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-FAKE16 %s define half @test_fmin_legacy_ule_f16(half %a, half %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_min_f16_e32 v0, v0, v1 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v1 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v1, v0 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v1 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.l, v1.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo -; 
GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-TRUE16-LABEL: test_fmin_legacy_ule_f16: -; GFX11-NNAN-TRUE16: ; %bb.0: -; GFX11-NNAN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l -; GFX11-NNAN-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-FAKE16-LABEL: test_fmin_legacy_ule_f16: -; GFX11-NNAN-FAKE16: ; %bb.0: -; GFX11-NNAN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1 -; GFX11-NNAN-FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_min_legacy_f32_e32 v0, v1, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.l, v1.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule half %a, %b %val = select i1 %cmp, half %a, half %b ret half %val } +define half @test_fmin_legacy_ule_f16_fast(half %a, half %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_min_f16_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_e32 v0, v0, v1 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_f16_fast: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_f16_fast: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule half %a, %b + %val = select nnan nsz i1 %cmp, half %a, half %b + ret half %val +} + define <2 x half> @test_fmin_legacy_ule_v2f16(<2 x half> %a, <2 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v2f16: -; GFX9-SAFE: ; 
%bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v2, v0, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v2f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v1 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v2f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2 -; VI-SAFE-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v2f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v1 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v2 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v2f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v2, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v3, v1 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v2f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v2 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v3 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v2f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v1.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v1.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v2f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) 
lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v2f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v1 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_v2f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v2, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v2f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2 +; VI-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v2f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_min_legacy_f32_e32 v0, v2, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v3, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v2f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v1.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v1.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v2f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule 
<2 x half> %a, %b %val = select <2 x i1> %cmp, <2 x half> %a, <2 x half> %b ret <2 x half> %val } +define <2 x half> @test_fmin_legacy_ule_v2f16_fast(<2 x half> %a, <2 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v2f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_min_f16 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v2f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v0, v0, v1 +; VI-NEXT: v_or_b32_e32 v0, v0, v2 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v2f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v2 +; SI-NEXT: v_min_f32_e32 v1, v1, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v2f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_min_f16 v0, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <2 x half> %a, %b + %val = select nnan nsz <2 x i1> %cmp, <2 x half> %a, <2 x half> %b + ret <2 x half> %val +} + define <3 x half> @test_fmin_legacy_ule_v3f16(<3 x half> %a, <3 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v3f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v3f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v3f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 -; VI-SAFE-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v3f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: 
v_min_f16_e32 v0, v0, v2 -; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v3 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v4 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v3f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v3, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v4, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v5, v2 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v3f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v3 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v4 -; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v5 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v3f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v2.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v2.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v1.l, v3.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v3f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v3f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; 
GFX9-LABEL: test_fmin_legacy_ule_v3f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v3f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; VI-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v3f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_min_legacy_f32_e32 v0, v3, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v4, v1 +; SI-NEXT: v_min_legacy_f32_e32 v2, v5, v2 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v3f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v2.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v2.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v1.l, v3.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v3f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule <3 x half> %a, %b %val = select <3 x i1> %cmp, <3 x half> %a, <3 x half> %b ret <3 x half> %val } +define <3 x half> @test_fmin_legacy_ule_v3f16_fast(<3 x half> %a, 
<3 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v3f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v3, v3, v3 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v1 +; GFX9-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX9-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v3f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v0, v0, v2 +; VI-NEXT: v_min_f16_e32 v1, v1, v3 +; VI-NEXT: v_or_b32_e32 v0, v0, v4 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v3f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v3 +; SI-NEXT: v_min_f32_e32 v1, v1, v4 +; SI-NEXT: v_min_f32_e32 v2, v2, v5 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v3f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v3, v3, v3 +; GFX11-NEXT: v_pk_max_f16 v1, v1, v1 +; GFX11-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <3 x half> %a, %b + %val = select nnan nsz <3 x i1> %cmp, <3 x half> %a, <3 x half> %b + ret <3 x half> %val +} + define <4 x half> @test_fmin_legacy_ule_v4f16(<4 x half> %a, <4 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v4f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v1, v6, v1, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v4f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; 
VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v6 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v3 -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v2 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v5 -; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v4 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v4f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v4, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v5, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v6, v2 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v3, v7, v3 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v4 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v5 -; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v6 -; SI-NNAN-NEXT: v_min_f32_e32 v3, v3, v7 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v4f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1.h, v3.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.h, v2.h -; GFX11-SAFE-TRUE16-NEXT: 
v_cmp_ngt_f16_e64 s1, v0.l, v2.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v1.l, v3.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v4f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v7, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_v4f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v6, v1, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v4f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 +; VI-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; VI-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v4 +; VI-NEXT: 
v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v6 +; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v4f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_min_legacy_f32_e32 v0, v4, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v5, v1 +; SI-NEXT: v_min_legacy_f32_e32 v2, v6, v2 +; SI-NEXT: v_min_legacy_f32_e32 v3, v7, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v4f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1.h, v3.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.h, v2.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v0.l, v2.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v1.l, v3.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v4f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v7, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 +; GFX11-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule <4 x half> %a, %b %val = select <4 x i1> %cmp, <4 x half> %a, <4 x half> %b ret <4 x half> %val } +define <4 x half> @test_fmin_legacy_ule_v4f16_fast(<4 x half> %a, <4 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v4f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX9-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; 
VI-LABEL: test_fmin_legacy_ule_v4f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v1, v1, v3 +; VI-NEXT: v_min_f16_e32 v0, v0, v2 +; VI-NEXT: v_or_b32_e32 v0, v0, v5 +; VI-NEXT: v_or_b32_e32 v1, v1, v4 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v4f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v4 +; SI-NEXT: v_min_f32_e32 v1, v1, v5 +; SI-NEXT: v_min_f32_e32 v2, v2, v6 +; SI-NEXT: v_min_f32_e32 v3, v3, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v4f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX11-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <4 x half> %a, %b + %val = select nnan nsz <4 x i1> %cmp, <4 x half> %a, <4 x half> %b + ret <4 x half> %val +} + define <8 x half> @test_fmin_legacy_ule_v8f16(<8 x half> %a, <8 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v8f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v8, v0, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v1, v10, v1, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v2, v12, v2, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v3, v14, v3, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; GFX9-NNAN: ; %bb.0: -; 
GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v4 -; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v5 -; GFX9-NNAN-NEXT: v_pk_min_f16 v2, v2, v6 -; GFX9-NNAN-NEXT: v_pk_min_f16 v3, v3, v7 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v8f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v10 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v12 -; VI-SAFE-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v14 -; VI-SAFE-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_e32 v3, v3, v7 -; VI-NNAN-NEXT: v_min_f16_e32 v2, v2, v6 -; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v5 -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v4 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v11 -; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v10 -; VI-NNAN-NEXT: v_or_b32_e32 v2, v2, v9 -; VI-NNAN-NEXT: v_or_b32_e32 v3, v3, v8 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v8f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 
v13, v13 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v8, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v9, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v10, v2 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v3, v11, v3 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v4, v12, v4 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v5, v13, v5 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v6, v14, v6 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v7, v15, v7 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v8 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v9 -; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v10 -; SI-NNAN-NEXT: v_min_f32_e32 v3, v3, v11 -; SI-NNAN-NEXT: v_min_f32_e32 v4, v4, v12 -; SI-NNAN-NEXT: v_min_f32_e32 v5, v5, v13 -; SI-NNAN-NEXT: v_min_f32_e32 v6, v6, v14 -; SI-NNAN-NEXT: v_min_f32_e32 v7, v7, v15 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v8f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: 
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v4.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v1.h, v5.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v2.h, v6.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v3.h, v7.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s3, v0.l, v4.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s4, v1.l, v5.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s5, v2.l, v6.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s6, v3.l, v7.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v8f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v11, v10 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v13, v12 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v15, v14 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v9, v8 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v2, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v4 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v5 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v7 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v4 -; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v5 -; GFX11-NNAN-NEXT: v_pk_min_f16 v2, v2, v6 -; GFX11-NNAN-NEXT: v_pk_min_f16 v3, v3, v7 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_v8f16: +; GFX9: ; %bb.0: 
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 +; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 +; GFX9-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 +; GFX9-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 +; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v8, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v10, v1, s4 +; GFX9-NEXT: v_perm_b32 v2, v12, v2, s4 +; GFX9-NEXT: v_perm_b32 v3, v14, v3, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v8f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; VI-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 +; VI-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; VI-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 +; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; VI-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; VI-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 +; VI-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 +; VI-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 +; VI-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 +; VI-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 +; VI-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v8 +; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v10 +; VI-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v12 +; VI-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v14 +; VI-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v8f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; 
SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_min_legacy_f32_e32 v0, v8, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v9, v1 +; SI-NEXT: v_min_legacy_f32_e32 v2, v10, v2 +; SI-NEXT: v_min_legacy_f32_e32 v3, v11, v3 +; SI-NEXT: v_min_legacy_f32_e32 v4, v12, v4 +; SI-NEXT: v_min_legacy_f32_e32 v5, v13, v5 +; SI-NEXT: v_min_legacy_f32_e32 v6, v14, v6 +; SI-NEXT: v_min_legacy_f32_e32 v7, v15, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v8f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v4.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v1.h, v5.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v2.h, v6.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v3.h, v7.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s3, v0.l, v4.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s4, v1.l, v5.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s5, v2.l, v6.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s6, v3.l, v7.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v8f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v11, v10 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v13, v12 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v15, v14 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v9, v8 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v2, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, 
vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v5 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v7 +; GFX11-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule <8 x half> %a, %b %val = select <8 x i1> %cmp, <8 x half> %a, <8 x half> %b ret <8 x half> %val } +define <8 x half> @test_fmin_legacy_ule_v8f16_fast(<8 x half> %a, <8 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v8f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_min_f16 v0, v0, v4 +; GFX9-NEXT: v_pk_min_f16 v1, v1, v5 +; GFX9-NEXT: v_pk_min_f16 v2, v2, v6 +; GFX9-NEXT: v_pk_min_f16 v3, v3, v7 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v8f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v3, v3, v7 +; VI-NEXT: v_min_f16_e32 v2, v2, v6 +; VI-NEXT: v_min_f16_e32 v1, v1, v5 +; VI-NEXT: v_min_f16_e32 v0, v0, v4 +; VI-NEXT: v_or_b32_e32 v0, v0, v11 +; VI-NEXT: v_or_b32_e32 v1, v1, v10 +; VI-NEXT: v_or_b32_e32 v2, v2, v9 +; VI-NEXT: v_or_b32_e32 v3, v3, v8 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v8f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v8 +; SI-NEXT: v_min_f32_e32 v1, v1, v9 +; 
SI-NEXT: v_min_f32_e32 v2, v2, v10 +; SI-NEXT: v_min_f32_e32 v3, v3, v11 +; SI-NEXT: v_min_f32_e32 v4, v4, v12 +; SI-NEXT: v_min_f32_e32 v5, v5, v13 +; SI-NEXT: v_min_f32_e32 v6, v6, v14 +; SI-NEXT: v_min_f32_e32 v7, v7, v15 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v8f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_min_f16 v0, v0, v4 +; GFX11-NEXT: v_pk_min_f16 v1, v1, v5 +; GFX11-NEXT: v_pk_min_f16 v2, v2, v6 +; GFX11-NEXT: v_pk_min_f16 v3, v3, v7 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <8 x half> %a, %b + %val = select nnan nsz <8 x i1> %cmp, <8 x half> %a, <8 x half> %b + ret <8 x half> %val +} + attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll index ec4dd85..defcffa 100644 --- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll +++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll @@ -1,8 +1,6 @@ -; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-NONAN,GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN,FUNC %s -; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NONAN,GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN,FUNC %s ; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope --check-prefixes=EG,FUNC %s @@ -14,13 +12,9 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1 ; FUNC-LABEL: {{^}}s_test_fmin_legacy_subreg_inputs_f32: ; EG: MIN * -; SI-SAFE: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +; SI: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} -; SI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} - -; VI-SAFE: v_cmp_nlt_f32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}} - -; VI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +; VI: v_cmp_nlt_f32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}} define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32(ptr addrspace(1) %out, <4 x float> %reg0) #0 { %r0 = extractelement <4 x float> %reg0, i32 0 %r1 = extractelement <4 x float> %reg0, i32 1 @@ -30,22 +24,32 @@ define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32(ptr addrspace(1) ret void } -; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32: -; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} +; FUNC-LABEL: {{^}}s_test_fmin_legacy_subreg_inputs_f32_fast: -; SI-SAFE: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] +; SI: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} -; GCN-NONAN: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] +; VI: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32_fast(ptr addrspace(1) %out, <4 x float> %reg0) #0 { + %r0 = extractelement <4 x float> %reg0, i32 0 + %r1 = extractelement <4 x float> %reg0, i32 1 + %r2 = fcmp nnan nsz uge float %r0, %r1 + %r3 = select nnan nsz i1 %r2, float %r1, float %r0 + store float %r3, ptr addrspace(1) %out + ret void +} -; VI-SAFE: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] +; FUNC-LABEL: 
{{^}}s_test_fmin_legacy_ule_f32: +; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} + +; SI: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, s[[#LOAD + 3]], [[VA]] +; VI: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] -; VI-SAFE: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] -; VI-SAFE: v_cmp_ngt_f32_e32 vcc, s[[#LOAD + 2]], [[VB]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[VB]], [[VA]] +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, s[[#LOAD + 3]], [[VA]] -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, s[[#LOAD + 2]], [[VB]] +; VI: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] +; VI: v_cmp_ngt_f32_e32 vcc, s[[#LOAD + 2]], [[VB]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[VB]], [[VA]] define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, float %a, float %b) #0 { %cmp = fcmp ule float %a, %b %val = select i1 %cmp, float %a, float %b @@ -53,6 +57,19 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, flo ret void } +; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32_fast: +; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} + +; GCN: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] + +; GCN: v_min_f32_e32 {{v[0-9]+}}, s[[#LOAD + 2]], [[VB]] +define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out, float %a, float %b) #0 { + %cmp = fcmp ule float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; Nsz also needed ; FIXME: Should separate tests ; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src: @@ -61,12 +78,10 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, flo ; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0 ; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0 -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] - -; VI-SAFE: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] -; VI-SAFE: v_cndmask_b32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]], vcc +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] +; VI: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] +; VI: v_cndmask_b32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]], vcc define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1) %out, float %a, float %b) #0 { %a.nnan = fadd nnan float %a, 1.0 %b.nnan = fadd nnan float %b, 2.0 @@ -76,16 +91,32 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1) ret void } +; Nsz also needed +; FIXME: Should separate tests +; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src_fast: +; GCN: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} + +; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0 +; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0 + +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] +define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src_fast(ptr addrspace(1) %out, float %a, float %b) #0 { + %a.nnan = fadd nnan float %a, 1.0 + %b.nnan = fadd nnan float %b, 2.0 + %cmp = fcmp ule float %a.nnan, %b.nnan + %val = select nnan nsz i1 %cmp, float %a.nnan, float %b.nnan + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ule_f32: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, 
[[B]], [[A]] +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] -; VI-SAFE: v_cmp_ngt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] - -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; VI: v_cmp_ngt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ule_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -100,16 +131,33 @@ define amdgpu_kernel void @test_fmin_legacy_ule_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_ule_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; VI-SAFE: v_cmp_le_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %cmp = fcmp ule float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + +; VI: v_cmp_le_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ole_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -124,16 +172,33 @@ define amdgpu_kernel void @test_fmin_legacy_ole_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ole_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 -; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 + + %cmp = fcmp ole float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; VI: v_cmp_lt_f32_e32 vcc, [[A]], [[B]] +; VI: 
v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_olt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -148,16 +213,33 @@ define amdgpu_kernel void @test_fmin_legacy_olt_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_olt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + %cmp = fcmp olt float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] + +; VI: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ult_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -172,16 +254,33 @@ define amdgpu_kernel void @test_fmin_legacy_ult_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ult_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + %cmp = fcmp ult float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] + +; VI: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid @@ -196,19 
+295,35 @@ define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ult_v1f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <1 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile <1 x float>, ptr addrspace(1) %gep.0 + %b = load volatile <1 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ult <1 x float> %a, %b + %val = select nnan nsz <1 x i1> %cmp, <1 x float> %a, <1 x float> %b + store <1 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v2f32: ; GCN: {{buffer|flat}}_load_dwordx2 ; GCN: {{buffer|flat}}_load_dwordx2 -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE: v_min_legacy_f32_e32 - -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 +; SI: v_min_legacy_f32_e32 +; SI: v_min_legacy_f32_e32 -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN: v_min_f32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 define amdgpu_kernel void @test_fmin_legacy_ult_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr <2 x float>, ptr addrspace(1) %in, i32 %tid @@ -223,25 +338,40 @@ define amdgpu_kernel void @test_fmin_legacy_ult_v2f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v2f32_fast: +; GCN: {{buffer|flat}}_load_dwordx2 +; GCN: {{buffer|flat}}_load_dwordx2 + +; GCN: v_min_f32_e32 +; GCN: v_min_f32_e32 +define amdgpu_kernel void @test_fmin_legacy_ult_v2f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <2 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <2 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile <2 x float>, ptr addrspace(1) %gep.0 + %b = load volatile <2 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ult <2 x float> %a, %b + %val = select nnan nsz <2 x i1> %cmp, <2 x float> %a, <2 x float> %b + store <2 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v3f32: -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE-NOT: v_min_ - -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 +; SI: v_min_legacy_f32_e32 +; SI: v_min_legacy_f32_e32 +; SI: v_min_legacy_f32_e32 +; SI-NOT: v_min_ + +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 ; VI-NOT: v_cmp ; VI-NOT: v_cndmask - -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN-NOT: v_min_ define amdgpu_kernel void @test_fmin_legacy_ult_v3f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid @@ -256,6 +386,28 @@ define amdgpu_kernel void 
@test_fmin_legacy_ult_v3f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v3f32_fast: +; VI-NOT: v_cmp +; VI-NOT: v_cndmask + +; GCN: v_min_f32_e32 +; GCN: v_min_f32_e32 +; GCN: v_min_f32_e32 +; GCN-NOT: v_min_ +define amdgpu_kernel void @test_fmin_legacy_ult_v3f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <3 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load <3 x float>, ptr addrspace(1) %gep.0 + %b = load <3 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ult <3 x float> %a, %b + %val = select nnan nsz <3 x i1> %cmp, <3 x float> %a, <3 x float> %b + store <3 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32_multi_use: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll index 12e9888..aaea4f7 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll @@ -5015,7 +5015,7 @@ define amdgpu_kernel void @v_fneg_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i64 %tid.ext %a = load volatile double, ptr addrspace(1) %a.gep - %fneg.a = fsub double -0.000000e+00, %a + %fneg.a = fsub nsz double -0.000000e+00, %a %fpround = fptrunc double %fneg.a to float %fneg = fneg float %fpround store float %fneg, ptr addrspace(1) %out.gep diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll index c4ca79d..3de6df2 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll @@ -4441,25 +4441,40 @@ define float @v_fneg_fabs_select_infloop_regression(float %arg, i1 %arg1) { ret float %i3 } -define float @v_fmul_0_fsub_0_infloop_regression(float %arg) { -; GCN-SAFE-LABEL: v_fmul_0_fsub_0_infloop_regression: +define float @v_fmul_0_fsub_0_safe_infloop_regression(float %arg) { +; GCN-SAFE-LABEL: v_fmul_0_fsub_0_safe_infloop_regression: ; GCN-SAFE: ; %bb.0: ; %bb ; GCN-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GCN-SAFE-NEXT: v_mul_f32_e32 v0, 0, v0 ; GCN-SAFE-NEXT: v_sub_f32_e32 v0, 0, v0 ; GCN-SAFE-NEXT: s_setpc_b64 s[30:31] ; -; GCN-NSZ-LABEL: v_fmul_0_fsub_0_infloop_regression: -; GCN-NSZ: ; %bb.0: ; %bb -; GCN-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NSZ-NEXT: v_mul_f32_e32 v0, 0x80000000, v0 -; GCN-NSZ-NEXT: s_setpc_b64 s[30:31] +; SI-NSZ-LABEL: v_fmul_0_fsub_0_safe_infloop_regression: +; SI-NSZ: ; %bb.0: ; %bb +; SI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NSZ-NEXT: s_brev_b32 s4, 1 +; SI-NSZ-NEXT: v_fma_f32 v0, v0, s4, 0 +; SI-NSZ-NEXT: s_setpc_b64 s[30:31] +; FIXME: utils/update_llc_test_checks.py will generate redundant VI +; labels; remove them, as they will cause test failures.
bb: %i = fmul float %arg, 0.0 %i1 = fsub float 0.0, %i ret float %i1 } +define float @v_fmul_0_fsub_0_nsz_infloop_regression(float %arg) { +; GCN-LABEL: v_fmul_0_fsub_0_nsz_infloop_regression: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v0, 0x80000000, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] +bb: + %i = fmul float %arg, 0.0 + %i1 = fsub nsz float 0.0, %i + ret float %i1 +} + declare i32 @llvm.amdgcn.workitem.id.x() #1 declare float @llvm.fma.f32(float, float, float) #1 declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-dead-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-dead-intrinsics.ll new file mode 100644 index 0000000..d6198f5 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-dead-intrinsics.ll @@ -0,0 +1,9 @@ +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-lower-buffer-fat-pointers < %s | FileCheck %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=amdgpu-lower-buffer-fat-pointers < %s | FileCheck %s + +; CHECK: @arbitrary +declare amdgpu_kernel void @arbitrary(ptr addrspace(1)) + +; COM: This used to cause verifier errors when "lowered" +declare <4 x i8> @llvm.masked.load.v4i8.p7(ptr addrspace(7) captures(none), i32 immarg, <4 x i1>, <4 x i8>) +; CHECK-NOT: llvm.masked.load diff --git a/llvm/test/CodeGen/AMDGPU/lro-phi-samebb-nonlookthrough-store.ll b/llvm/test/CodeGen/AMDGPU/lro-phi-samebb-nonlookthrough-store.ll new file mode 100644 index 0000000..b508f73 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/lro-phi-samebb-nonlookthrough-store.ll @@ -0,0 +1,46 @@ +; RUN: opt -S -passes=amdgpu-late-codegenprepare \ +; RUN: -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a %s | FileCheck %s + +; Goal: With a loop-header PHI in illegal vector type and a same-BB +; non-lookthrough user (vector add) in the header, LRO should still coerce +; the PHI to i32 because a profitable sink (store) exists across BB. + +define amdgpu_kernel void @phi_samebb_nonlookthrough_store( + ptr addrspace(1) %out, <4 x i8> %v, i1 %exit) { +; CHECK-LABEL: @phi_samebb_nonlookthrough_store( +entry: + br label %loop + +loop: ; preds = %entry, %loop + ; Loop-carried PHI in illegal vector type. + %acc = phi <4 x i8> [ zeroinitializer, %entry ], [ %acc.next, %loop ] + + ; Same-BB non-lookthrough use in header. + %acc.next = add <4 x i8> %acc, %v + + ; Make it a real loop: either iterate or exit to the sink block. + br i1 %exit, label %store, label %loop + +store: ; preds = %loop + ; The across-BB sink: storing the PHI coerced to i32. + %acc.bc = bitcast <4 x i8> %acc to i32 + store i32 %acc.bc, ptr addrspace(1) %out, align 4 + ret void +} + +; After AMDGPULateCodeGenPrepare we expect: +; - PHI is coerced to i32 +; - A header bitcast materializes for the add +; This proves the same-BB non-lookthrough user (add) did not get pruned +; when the def is a PHI. 
+ +; CHECK: loop: +; CHECK: %[[ACC_TC:[^ ]+]] = phi i32 +; CHECK: %[[ACC_TC_BC:[^ ]+]] = bitcast i32 %[[ACC_TC]] to <4 x i8> +; CHECK: %[[ACC_NEXT:[^ ]+]] = add <4 x i8> %[[ACC_TC_BC]], %v +; CHECK: br i1 %exit, label %store, label %loop +; CHECK: store: +; CHECK: %[[ACC_TC_BC2:[^ ]+]] = bitcast i32 %[[ACC_TC]] to <4 x i8> +; CHECK: %[[ST_I32:[^ ]+]] = bitcast <4 x i8> %[[ACC_TC_BC2]] to i32 +; CHECK: store i32 %[[ST_I32]], + diff --git a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll index 92d3277..bb22144 100644 --- a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll +++ b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll @@ -4148,28 +4148,28 @@ define <2 x half> @mul_select_negk_negfabs_v2f16(<2 x i32> %c, <2 x half> %x, <2 ; -------------------------------------------------------------------------------- define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, <2 x half> %y) { -; CI-SAFE-LABEL: select_fneg_posk_src_add_v2f16: -; CI-SAFE: ; %bb.0: -; CI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_add_f32_e32 v3, 4.0, v3 -; CI-SAFE-NEXT: v_add_f32_e32 v2, 4.0, v2 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; CI-SAFE-NEXT: v_or_b32_e32 v2, v2, v3 -; CI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v2 -; CI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-SAFE-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc -; CI-SAFE-NEXT: s_setpc_b64 s[30:31] +; CI-LABEL: select_fneg_posk_src_add_v2f16: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_add_f32_e32 v3, 4.0, v3 +; CI-NEXT: v_add_f32_e32 v2, 4.0, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; CI-NEXT: v_or_b32_e32 v2, v2, v3 +; CI-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v2 +; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc +; CI-NEXT: s_setpc_b64 s[30:31] ; ; VI-SAFE-LABEL: select_fneg_posk_src_add_v2f16: ; VI-SAFE: ; %bb.0: @@ -4229,21 +4229,6 @@ define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, < ; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 ; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] ; -; CI-NSZ-LABEL: select_fneg_posk_src_add_v2f16: -; CI-NSZ: ; %bb.0: -; CI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-NSZ-NEXT: v_sub_f32_e32 v2, -4.0, v2 -; CI-NSZ-NEXT: v_sub_f32_e32 v3, -4.0, v3 -; CI-NSZ-NEXT: 
v_cndmask_b32_e32 v0, 2.0, v2, vcc -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc -; CI-NSZ-NEXT: s_setpc_b64 s[30:31] -; ; VI-NSZ-LABEL: select_fneg_posk_src_add_v2f16: ; VI-NSZ: ; %bb.0: ; VI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -4302,6 +4287,105 @@ define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, < ret <2 x half> %select } +define <2 x half> @select_fneg_posk_src_add_v2f16_nsz(<2 x i32> %c, <2 x half> %x, <2 x half> %y) { +; CI-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_sub_f32_e32 v2, -4.0, v2 +; CI-NEXT: v_sub_f32_e32 v3, -4.0, v3 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc +; CI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; VI-NEXT: v_mov_b32_e32 v1, 0xc400 +; VI-NEXT: v_sub_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; VI-NEXT: v_sub_f16_e32 v2, -4.0, v2 +; VI-NEXT: v_mov_b32_e32 v3, 0x4000 +; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[4:5] +; VI-NEXT: v_cndmask_b32_sdwa v1, v3, v1, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GFX9-NEXT: v_pk_add_f16 v1, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX9-NEXT: v_mov_b32_e32 v2, 0x4000 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[4:5] +; GFX9-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v1, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-TRUE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-SAFE-TRUE16: ; %bb.0: +; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-TRUE16-NEXT: v_pk_add_f16 v0, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-FAKE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-SAFE-FAKE16: ; %bb.0: +; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-FAKE16-NEXT: v_pk_add_f16 v2, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; 
GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-TRUE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-NSZ-TRUE16: ; %bb.0: +; GFX11-NSZ-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-TRUE16-NEXT: v_pk_add_f16 v0, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-NSZ-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-FAKE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-NSZ-FAKE16: ; %bb.0: +; GFX11-NSZ-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-FAKE16-NEXT: v_pk_add_f16 v2, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-NSZ-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NSZ-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-NSZ-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = icmp eq <2 x i32> %c, zeroinitializer + %add = fadd nsz <2 x half> %x, <half 4.0, half 4.0> + %fneg = fneg <2 x half> %add + %select = select <2 x i1> %cmp, <2 x half> %fneg, <2 x half> <half 2.0, half 2.0> + ret <2 x half> %select +} + define <2 x half> @select_fneg_posk_src_sub_v2f16(<2 x i32> %c, <2 x half> %x) { ; CI-SAFE-LABEL: select_fneg_posk_src_sub_v2f16: ; CI-SAFE: ; %bb.0: @@ -4704,34 +4788,34 @@ define <2 x half> @select_fneg_posk_src_fma_v2f16(<2 x i32> %c, <2 x half> %x, < } define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, <2 x half> %z) { -; CI-SAFE-LABEL: select_fneg_posk_src_fmad_v2f16: -; CI-SAFE: ; %bb.0: -; CI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; CI-SAFE-NEXT: v_mul_f32_e32 v3, 4.0, v3 -; CI-SAFE-NEXT: v_add_f32_e32 v3, v3, v5 -; CI-SAFE-NEXT: v_mul_f32_e32 v2, 4.0, v2 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_add_f32_e32 v2, v2, v4 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-SAFE-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; CI-SAFE-NEXT: v_or_b32_e32 v2, v2, v3 -; CI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v2 -; CI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; 
CI-SAFE-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc -; CI-SAFE-NEXT: s_setpc_b64 s[30:31] +; CI-LABEL: select_fneg_posk_src_fmad_v2f16: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; CI-NEXT: v_mul_f32_e32 v3, 4.0, v3 +; CI-NEXT: v_add_f32_e32 v3, v3, v5 +; CI-NEXT: v_mul_f32_e32 v2, 4.0, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_add_f32_e32 v2, v2, v4 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; CI-NEXT: v_or_b32_e32 v2, v2, v3 +; CI-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v2 +; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc +; CI-NEXT: s_setpc_b64 s[30:31] ; ; VI-SAFE-LABEL: select_fneg_posk_src_fmad_v2f16: ; VI-SAFE: ; %bb.0: @@ -4793,27 +4877,6 @@ define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, ; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 ; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] ; -; CI-NSZ-LABEL: select_fneg_posk_src_fmad_v2f16: -; CI-NSZ: ; %bb.0: -; CI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v4, v4 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v5, v5 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v4, v4 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v5, v5 -; CI-NSZ-NEXT: v_mul_f32_e32 v2, -4.0, v2 -; CI-NSZ-NEXT: v_mul_f32_e32 v3, -4.0, v3 -; CI-NSZ-NEXT: v_sub_f32_e32 v2, v2, v4 -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-NSZ-NEXT: v_sub_f32_e32 v3, v3, v5 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc -; CI-NSZ-NEXT: s_setpc_b64 s[30:31] -; ; VI-NSZ-LABEL: select_fneg_posk_src_fmad_v2f16: ; VI-NSZ: ; %bb.0: ; VI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -4873,6 +4936,112 @@ define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, ret <2 x half> %select } +define <2 x half> @select_fneg_posk_src_fmad_v2f16_nsz(<2 x i32> %c, <2 x half> %x, <2 x half> %z) { +; CI-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; CI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; CI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; CI-NEXT: v_mul_f32_e32 v2, -4.0, v2 +; CI-NEXT: v_mul_f32_e32 v3, -4.0, v3 +; CI-NEXT: v_sub_f32_e32 v2, v2, v4 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_sub_f32_e32 v3, v3, v5 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc +; CI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; 
VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_fma_f16 v1, v4, -4.0, -v1 +; VI-NEXT: v_fma_f16 v2, v2, -4.0, -v3 +; VI-NEXT: v_mov_b32_e32 v3, 0x4000 +; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[4:5] +; VI-NEXT: v_cndmask_b32_sdwa v1, v3, v1, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GFX9-NEXT: v_pk_fma_f16 v1, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX9-NEXT: v_mov_b32_e32 v2, 0x4000 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[4:5] +; GFX9-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v1, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-TRUE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-SAFE-TRUE16: ; %bb.0: +; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-TRUE16-NEXT: v_pk_fma_f16 v0, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-FAKE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-SAFE-FAKE16: ; %bb.0: +; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-FAKE16-NEXT: v_pk_fma_f16 v2, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-TRUE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-NSZ-TRUE16: ; %bb.0: +; GFX11-NSZ-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-TRUE16-NEXT: v_pk_fma_f16 v0, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-NSZ-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-FAKE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-NSZ-FAKE16: ; %bb.0: +; GFX11-NSZ-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-FAKE16-NEXT: 
v_pk_fma_f16 v2, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-NSZ-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NSZ-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-NSZ-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = icmp eq <2 x i32> %c, zeroinitializer + %fmad = call nsz <2 x half> @llvm.fmuladd.v2f16(<2 x half> %x, <2 x half> <half 4.0, half 4.0>, <2 x half> %z) + %fneg = fneg <2 x half> %fmad + %select = select <2 x i1> %cmp, <2 x half> %fneg, <2 x half> <half 2.0, half 2.0> + ret <2 x half> %select +} + declare <2 x half> @llvm.fabs.v2f16(<2 x half>) #0 declare <2 x half> @llvm.fma.v2f16(<2 x half>, <2 x half>, <2 x half>) #0 declare <2 x half> @llvm.fmuladd.v2f16(<2 x half>, <2 x half>, <2 x half>) #0 diff --git a/llvm/test/CodeGen/AMDGPU/v_mac.ll b/llvm/test/CodeGen/AMDGPU/v_mac.ll index c128715..f5dc824 100644 --- a/llvm/test/CodeGen/AMDGPU/v_mac.ll +++ b/llvm/test/CodeGen/AMDGPU/v_mac.ll @@ -116,7 +116,7 @@ entry: ; GCN-LABEL: {{^}}nsz_mad_sub0_src0: ; GCN-NOT: v_mac_f32 ; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}} -define amdgpu_kernel void @nsz_mad_sub0_src0(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 { +define amdgpu_kernel void @nsz_mad_sub0_src0(ptr addrspace(1) %out, ptr addrspace(1) %in) { entry: %b_ptr = getelementptr float, ptr addrspace(1) %in, i32 1 %c_ptr = getelementptr float, ptr addrspace(1) %in, i32 2 @@ -125,7 +125,7 @@ entry: %b = load float, ptr addrspace(1) %b_ptr %c = load float, ptr addrspace(1) %c_ptr - %neg_a = fsub float 0.0, %a + %neg_a = fsub nsz float 0.0, %a %tmp0 = fmul float %neg_a, %b %tmp1 = fadd float %tmp0, %c @@ -176,7 +176,7 @@ entry: ; GCN-LABEL: {{^}}nsz_mad_sub0_src1: ; GCN-NOT: v_mac_f32 ; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}} -define amdgpu_kernel void @nsz_mad_sub0_src1(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 { +define amdgpu_kernel void @nsz_mad_sub0_src1(ptr addrspace(1) %out, ptr addrspace(1) %in) { entry: %b_ptr = getelementptr float, ptr addrspace(1) %in, i32 1 %c_ptr = getelementptr float, ptr addrspace(1) %in, i32 2 @@ -185,7 +185,7 @@ entry: %b = load float, ptr addrspace(1) %b_ptr %c = load float, ptr addrspace(1) %c_ptr - %neg_b = fsub float 0.0, %b + %neg_b = fsub nsz float 0.0, %b %tmp0 = fmul float %a, %neg_b %tmp1 = fadd float %tmp0, %c @@ -310,6 +310,5 @@ define float @v_mac_f32_dynamic_ftz(float %a, float %b, float %c) "denormal-fp-m declare i32 @llvm.amdgcn.workitem.id.x() #2 attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" } -attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" } attributes #2 = { nounwind readnone } attributes #3 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll index bcc60b0..8da6f23 100644 --- a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll +++ b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll @@ -236,7 +236,7 @@ entry: %b.val = load half, ptr addrspace(1) %b %c.val = load half, ptr addrspace(1) %c - %a.neg = fsub half 0.0, %a.val + %a.neg = fsub nsz half 0.0, %a.val %t.val = fmul half %a.neg, %b.val %r.val = fadd half %t.val, %c.val @@ -263,7 +263,7 @@ 
entry: %b.val = load half, ptr addrspace(1) %b %c.val = load half, ptr addrspace(1) %c - %b.neg = fsub half 0.0, %b.val + %b.neg = fsub nsz half 0.0, %b.val %t.val = fmul half %a.val, %b.neg %r.val = fadd half %t.val, %c.val @@ -290,7 +290,7 @@ entry: %b.val = load half, ptr addrspace(1) %b %c.val = load half, ptr addrspace(1) %c - %c.neg = fsub half 0.0, %c.val + %c.neg = fsub nsz half 0.0, %c.val %t.val = fmul half %a.val, %b.val %r.val = fadd half %t.val, %c.neg @@ -601,7 +601,7 @@ entry: %b.val = load <2 x half>, ptr addrspace(1) %b %c.val = load <2 x half>, ptr addrspace(1) %c - %a.neg = fsub <2 x half> <half 0.0, half 0.0>, %a.val + %a.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %a.val %t.val = fmul <2 x half> %a.neg, %b.val %r.val = fadd <2 x half> %t.val, %c.val @@ -634,7 +634,7 @@ entry: %b.val = load <2 x half>, ptr addrspace(1) %b %c.val = load <2 x half>, ptr addrspace(1) %c - %b.neg = fsub <2 x half> <half 0.0, half 0.0>, %b.val + %b.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %b.val %t.val = fmul <2 x half> %a.val, %b.neg %r.val = fadd <2 x half> %t.val, %c.val @@ -667,7 +667,7 @@ entry: %b.val = load <2 x half>, ptr addrspace(1) %b %c.val = load <2 x half>, ptr addrspace(1) %c - %c.neg = fsub <2 x half> <half 0.0, half 0.0>, %c.val + %c.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %c.val %t.val = fmul <2 x half> %a.val, %b.val %r.val = fadd <2 x half> %t.val, %c.neg @@ -678,5 +678,5 @@ entry: declare void @llvm.amdgcn.s.barrier() #2 attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" } -attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" "denormal-fp-math"="preserve-sign,preserve-sign" } +attributes #1 = { nounwind "denormal-fp-math"="preserve-sign,preserve-sign" } attributes #2 = { nounwind convergent } diff --git a/llvm/test/CodeGen/ARM/build-attributes.ll b/llvm/test/CodeGen/ARM/build-attributes.ll index 68844ae..306a4a3 100644 --- a/llvm/test/CodeGen/ARM/build-attributes.ll +++ b/llvm/test/CodeGen/ARM/build-attributes.ll @@ -3,23 +3,16 @@ ; RUN: llc < %s -mtriple=thumbv5-linux-gnueabi -mcpu=xscale -mattr=+strict-align | FileCheck %s --check-prefix=XSCALE ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6 -; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6-FAST ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6M -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST ; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6M -; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align | FileCheck %s --check-prefix=ARM1156T2F-S -; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all 
-enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=ARM1156T2F-S-FAST ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi | FileCheck %s --check-prefix=V7M -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7M-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=V7 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V8-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi | FileCheck %s --check-prefix=Vt8 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING @@ -31,35 +24,24 @@ ; RUN: llc < %s -mtriple=thumbv8m.main-linux-gnueabi | FileCheck %s --check-prefix=V8MMAINLINE ; RUN: llc < %s -mtriple=thumbv8m.main-linux-gnueabi -mattr=+dsp | FileCheck %s --check-prefix=V8MMAINLINE_DSP ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-neon,-d32 | FileCheck %s --check-prefix=CORTEX-A5-NONEON ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-A5-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A8-SOFT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-SOFT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A8-HARD -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | 
FileCheck %s --check-prefix=CORTEX-A8-HARD-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A8-SOFT ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-SOFT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A9-HARD -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-HARD-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-A12-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 | FileCheck %s --check-prefix=CORTEX-A15 -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A15-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 | FileCheck %s --check-prefix=CORTEX-A17-DEFAULT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-A17-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-no-trapping-fp-math | FileCheck %s --check-prefix=NO-TRAPPING-MATH ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -denormal-fp-math=ieee | FileCheck %s 
--check-prefix=DENORMAL-IEEE @@ -74,37 +56,26 @@ ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 | FileCheck %s --check-prefix=CORTEX-M0 -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus | FileCheck %s --check-prefix=CORTEX-M0PLUS -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0PLUS-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 | FileCheck %s --check-prefix=CORTEX-M1 -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M1-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align | FileCheck %s --check-prefix=SC000 -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC000-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 | FileCheck %s --check-prefix=CORTEX-M3 -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M3-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 | FileCheck %s --check-prefix=SC300 -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC300-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-M4-SOFT -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-SOFT-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard | FileCheck %s 
--check-prefix=CORTEX-M4-HARD -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-HARD-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SOFT -; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-NOFPU-FAST ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-fp64 | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SINGLE -; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-fp64 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-FAST ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 | FileCheck %s --check-prefix=CORTEX-M7-DOUBLE ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m23 | FileCheck %s --check-prefix=CORTEX-M23 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 | FileCheck %s --check-prefix=CORTEX-M33 -; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M33-FAST ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m35p | FileCheck %s --check-prefix=CORTEX-M35P @@ -113,49 +84,34 @@ ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4 | FileCheck %s --check-prefix=CORTEX-R4 ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4f | FileCheck %s --check-prefix=CORTEX-R4F ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 | FileCheck %s --check-prefix=CORTEX-R5 -; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R5-FAST ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 | FileCheck %s --check-prefix=CORTEX-R7 -; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R7-FAST ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 | FileCheck %s --check-prefix=CORTEX-R8 -; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math 
-fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R8-FAST ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 | FileCheck %s --check-prefix=CORTEX-A32 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A32-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 | FileCheck %s --check-prefix=CORTEX-A35 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A35-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 | FileCheck %s --check-prefix=CORTEX-A53 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A53-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 | FileCheck %s --check-prefix=CORTEX-A57 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A57-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 | FileCheck %s --check-prefix=CORTEX-A72 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A72-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a73 | FileCheck %s --check-prefix=CORTEX-A73 ; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi | FileCheck %s --check-prefix=GENERIC-ARMV8_1-A ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 | FileCheck %s --check-prefix=EXYNOS-M3 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 | FileCheck %s --check-prefix=EXYNOS-M4 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi 
-mcpu=exynos-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m5 | FileCheck %s --check-prefix=EXYNOS-M5 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=GENERIC-ARMV8_1-A-FAST ; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 | FileCheck %s --check-prefix=CORTEX-A7-CHECK -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-CHECK-FAST ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2sp,-vfp3,-vfp4,-neon,-fp16 | FileCheck %s --check-prefix=CORTEX-A7-NOFPU -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2sp,-vfp3,-vfp4,-neon,-fp16 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4 ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-FPUV4-FAST ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,,-d32,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4 ; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=pic | FileCheck %s --check-prefix=RELOC-PIC ; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=static | FileCheck %s --check-prefix=RELOC-OTHER @@ -278,15 +234,6 @@ ; V6-NOT: .eabi_attribute 28 ; V6: .eabi_attribute 38, 1 -; V6-FAST-NOT: .eabi_attribute 19 -;; Despite the V6 CPU having no FPU by default, we chose to flush to -;; positive zero here. There's no hardware support doing this, but the -;; fast maths software library might. -; V6-FAST-NOT: .eabi_attribute 20 -; V6-FAST-NOT: .eabi_attribute 21 -; V6-FAST-NOT: .eabi_attribute 22 -; V6-FAST: .eabi_attribute 23, 1 - ;; We emit 6, 12 for both v6-M and v6S-M, technically this is incorrect for ;; V6-M, however we don't model the OS extension so this is fine. ; V6M: .eabi_attribute 6, 12 @@ -312,14 +259,6 @@ ; V6M-NOT: .eabi_attribute 28 ; V6M: .eabi_attribute 38, 1 -; V6M-FAST-NOT: .eabi_attribute 19 -;; Despite the V6M CPU having no FPU by default, we chose to flush to -;; positive zero here. There's no hardware support doing this, but the -;; fast maths software library might. 
-; V6M-FAST-NOT: .eabi_attribute 20 -; V6M-FAST-NOT: .eabi_attribute 21 -; V6M-FAST-NOT: .eabi_attribute 22 -; V6M-FAST: .eabi_attribute 23, 1 ; ARM1156T2F-S: .cpu arm1156t2f-s ; ARM1156T2F-S: .eabi_attribute 6, 8 @@ -342,14 +281,6 @@ ; ARM1156T2F-S-NOT: .eabi_attribute 28 ; ARM1156T2F-S: .eabi_attribute 38, 1 -; ARM1156T2F-S-FAST-NOT: .eabi_attribute 19 -;; V6 cores default to flush to positive zero (value 0). Note that value 2 is also equally -;; valid for this core, it's an implementation defined question as to which of 0 and 2 you -;; select. LLVM historically picks 0. -; ARM1156T2F-S-FAST-NOT: .eabi_attribute 20 -; ARM1156T2F-S-FAST-NOT: .eabi_attribute 21 -; ARM1156T2F-S-FAST-NOT: .eabi_attribute 22 -; ARM1156T2F-S-FAST: .eabi_attribute 23, 1 ; V7M: .eabi_attribute 6, 10 ; V7M: .eabi_attribute 7, 77 @@ -374,15 +305,6 @@ ; V7M-NOT: .eabi_attribute 28 ; V7M: .eabi_attribute 38, 1 -; V7M-FAST-NOT: .eabi_attribute 19 -;; Despite the V7M CPU having no FPU by default, we chose to flush -;; preserving sign. This matches what the hardware would do in the -;; architecture revision were to exist on the current target. -; V7M-FAST: .eabi_attribute 20, 2 -; V7M-FAST-NOT: .eabi_attribute 21 -; V7M-FAST-NOT: .eabi_attribute 22 -; V7M-FAST: .eabi_attribute 23, 1 - ; V7: .syntax unified ; V7: .eabi_attribute 6, 10 ; V7-NOT: .eabi_attribute 27 @@ -401,13 +323,6 @@ ; V7-NOT: .eabi_attribute 28 ; V7: .eabi_attribute 38, 1 -; V7-FAST-NOT: .eabi_attribute 19 -;; The default CPU does have an FPU and it must be VFPv3 or better, so it flushes -;; denormals to zero preserving the sign. -; V7-FAST: .eabi_attribute 20, 2 -; V7-FAST-NOT: .eabi_attribute 21 -; V7-FAST-NOT: .eabi_attribute 22 -; V7-FAST: .eabi_attribute 23, 1 ; V7VE: .syntax unified ; V7VE: .eabi_attribute 6, 10 @ Tag_CPU_arch @@ -435,12 +350,6 @@ ; V8-NOT: .eabi_attribute 22 ; V8: .eabi_attribute 23, 3 -; V8-FAST-NOT: .eabi_attribute 19 -;; The default does have an FPU, and for V8-A, it flushes preserving sign. -; V8-FAST: .eabi_attribute 20, 2 -; V8-FAST-NOT: .eabi_attribute 21 -; V8-FAST-NOT: .eabi_attribute 22 -; V8-FAST: .eabi_attribute 23, 1 ; Vt8: .syntax unified ; Vt8: .eabi_attribute 6, 14 @@ -552,15 +461,11 @@ ;; We default to IEEE 754 compliance ; CORTEX-A7-CHECK: .eabi_attribute 20, 1 ;; The A7 has VFPv3 support by default, so flush preserving sign. -; CORTEX-A7-CHECK-FAST: .eabi_attribute 20, 2 ; CORTEX-A7-NOFPU: .eabi_attribute 20, 1 ;; Despite there being no FPU, we chose to flush to zero preserving ;; sign. This matches what the hardware would do for this architecture ;; revision. -; CORTEX-A7-NOFPU-FAST: .eabi_attribute 20, 2 ; CORTEX-A7-FPUV4: .eabi_attribute 20, 1 -;; The VFPv4 FPU flushes preserving sign. -; CORTEX-A7-FPUV4-FAST: .eabi_attribute 20, 2 ; Tag_ABI_FP_exceptions ; CORTEX-A7-CHECK: .eabi_attribute 21, 1 @@ -610,13 +515,6 @@ ; CORTEX-A5-DEFAULT: .eabi_attribute 24, 1 ; CORTEX-A5-DEFAULT: .eabi_attribute 25, 1 -; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 19 -;; The A5 defaults to a VFPv4 FPU, so it flushed preserving the sign when -ffast-math -;; is given. 
-; CORTEX-A5-DEFAULT-FAST: .eabi_attribute 20, 2 -; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 21 -; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 22 -; CORTEX-A5-DEFAULT-FAST: .eabi_attribute 23, 1 ; CORTEX-A5-NONEON: .cpu cortex-a5 ; CORTEX-A5-NONEON: .eabi_attribute 6, 10 @@ -634,13 +532,6 @@ ; CORTEX-A5-NONEON: .eabi_attribute 24, 1 ; CORTEX-A5-NONEON: .eabi_attribute 25, 1 -; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 19 -;; The A5 defaults to a VFPv4 FPU, so it flushed preserving sign when -ffast-math -;; is given. -; CORTEX-A5-NONEON-FAST: .eabi_attribute 20, 2 -; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 21 -; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 22 -; CORTEX-A5-NONEON-FAST: .eabi_attribute 23, 1 ; CORTEX-A5-NOFPU: .cpu cortex-a5 ; CORTEX-A5-NOFPU: .eabi_attribute 6, 10 @@ -659,14 +550,9 @@ ; CORTEX-A5-NOFPU: .eabi_attribute 24, 1 ; CORTEX-A5-NOFPU: .eabi_attribute 25, 1 -; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 19 ;; Despite there being no FPU, we chose to flush to zero preserving ;; sign. This matches what the hardware would do for this architecture ;; revision. -; CORTEX-A5-NOFPU-FAST: .eabi_attribute 20, 2 -; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 21 -; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 22 -; CORTEX-A5-NOFPU-FAST: .eabi_attribute 23, 1 ; CORTEX-A8-SOFT: .cpu cortex-a8 ; CORTEX-A8-SOFT: .eabi_attribute 6, 10 @@ -712,15 +598,6 @@ ; CORTEX-A9-SOFT-NOT: .eabi_attribute 28 ; CORTEX-A9-SOFT: .eabi_attribute 38, 1 -; CORTEX-A8-SOFT-FAST-NOT: .eabi_attribute 19 -; CORTEX-A9-SOFT-FAST-NOT: .eabi_attribute 19 -;; The A9 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A8-SOFT-FAST: .eabi_attribute 20, 2 -; CORTEX-A9-SOFT-FAST: .eabi_attribute 20, 2 -; CORTEX-A5-SOFT-FAST-NOT: .eabi_attribute 21 -; CORTEX-A5-SOFT-FAST-NOT: .eabi_attribute 22 -; CORTEX-A5-SOFT-FAST: .eabi_attribute 23, 1 ; CORTEX-A8-HARD: .cpu cortex-a8 ; CORTEX-A8-HARD: .eabi_attribute 6, 10 @@ -766,21 +643,6 @@ ; CORTEX-A9-HARD: .eabi_attribute 28, 1 ; CORTEX-A9-HARD: .eabi_attribute 38, 1 -; CORTEX-A8-HARD-FAST-NOT: .eabi_attribute 19 -;; The A8 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A8-HARD-FAST: .eabi_attribute 20, 2 -; CORTEX-A8-HARD-FAST-NOT: .eabi_attribute 21 -; CORTEX-A8-HARD-FAST-NOT: .eabi_attribute 22 -; CORTEX-A8-HARD-FAST: .eabi_attribute 23, 1 - -; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 19 -;; The A9 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A9-HARD-FAST: .eabi_attribute 20, 2 -; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 21 -; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 22 -; CORTEX-A9-HARD-FAST: .eabi_attribute 23, 1 ; CORTEX-A12-DEFAULT: .cpu cortex-a12 ; CORTEX-A12-DEFAULT: .eabi_attribute 6, 10 @@ -800,13 +662,6 @@ ; CORTEX-A12-DEFAULT: .eabi_attribute 24, 1 ; CORTEX-A12-DEFAULT: .eabi_attribute 25, 1 -; CORTEX-A12-DEFAULT-FAST-NOT: .eabi_attribute 19 -;; The A12 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. 
-; CORTEX-A12-DEFAULT-FAST: .eabi_attribute 20, 2 -; CORTEX-A12-HARD-FAST-NOT: .eabi_attribute 21 -; CORTEX-A12-HARD-FAST-NOT: .eabi_attribute 22 -; CORTEX-A12-HARD-FAST: .eabi_attribute 23, 1 ; CORTEX-A12-NOFPU: .cpu cortex-a12 ; CORTEX-A12-NOFPU: .eabi_attribute 6, 10 @@ -826,14 +681,6 @@ ; CORTEX-A12-NOFPU: .eabi_attribute 24, 1 ; CORTEX-A12-NOFPU: .eabi_attribute 25, 1 -; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 19 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. -; CORTEX-A12-NOFPU-FAST: .eabi_attribute 20, 2 -; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 21 -; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 22 -; CORTEX-A12-NOFPU-FAST: .eabi_attribute 23, 1 ; CORTEX-A15: .cpu cortex-a15 ; CORTEX-A15: .eabi_attribute 6, 10 @@ -857,13 +704,6 @@ ; CORTEX-A15-NOT: .eabi_attribute 28 ; CORTEX-A15: .eabi_attribute 38, 1 -; CORTEX-A15-FAST-NOT: .eabi_attribute 19 -;; The A15 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A15-FAST: .eabi_attribute 20, 2 -; CORTEX-A15-FAST-NOT: .eabi_attribute 21 -; CORTEX-A15-FAST-NOT: .eabi_attribute 22 -; CORTEX-A15-FAST: .eabi_attribute 23, 1 ; CORTEX-A17-DEFAULT: .cpu cortex-a17 ; CORTEX-A17-DEFAULT: .eabi_attribute 6, 10 @@ -883,13 +723,6 @@ ; CORTEX-A17-DEFAULT: .eabi_attribute 24, 1 ; CORTEX-A17-DEFAULT: .eabi_attribute 25, 1 -; CORTEX-A17-FAST-NOT: .eabi_attribute 19 -;; The A17 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A17-FAST: .eabi_attribute 20, 2 -; CORTEX-A17-FAST-NOT: .eabi_attribute 21 -; CORTEX-A17-FAST-NOT: .eabi_attribute 22 -; CORTEX-A17-FAST: .eabi_attribute 23, 1 ; CORTEX-A17-NOFPU: .cpu cortex-a17 ; CORTEX-A17-NOFPU: .eabi_attribute 6, 10 @@ -910,13 +743,6 @@ ; CORTEX-A17-NOFPU: .eabi_attribute 25, 1 ; CORTEX-A17-NOFPU-NOT: .eabi_attribute 19 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. -; CORTEX-A17-NOFPU-FAST: .eabi_attribute 20, 2 -; CORTEX-A17-NOFPU-FAST-NOT: .eabi_attribute 21 -; CORTEX-A17-NOFPU-FAST-NOT: .eabi_attribute 22 -; CORTEX-A17-NOFPU-FAST: .eabi_attribute 23, 1 ; Test flags -enable-no-trapping-fp-math and -denormal-fp-math: ; NO-TRAPPING-MATH: .eabi_attribute 21, 0 @@ -946,16 +772,6 @@ ; CORTEX-M0-NOT: .eabi_attribute 28 ; CORTEX-M0: .eabi_attribute 38, 1 -; CORTEX-M0-FAST-NOT: .eabi_attribute 19 -;; Despite the M0 CPU having no FPU in this scenario, we chose to -;; flush to positive zero here. There's no hardware support doing -;; this, but the fast maths software library might and such behaviour -;; would match hardware support on this architecture revision if it -;; existed. -; CORTEX-M0-FAST-NOT: .eabi_attribute 20 -; CORTEX-M0-FAST-NOT: .eabi_attribute 21 -; CORTEX-M0-FAST-NOT: .eabi_attribute 22 -; CORTEX-M0-FAST: .eabi_attribute 23, 1 ; CORTEX-M0PLUS: .cpu cortex-m0plus ; CORTEX-M0PLUS: .eabi_attribute 6, 12 @@ -978,16 +794,6 @@ ; CORTEX-M0PLUS-NOT: .eabi_attribute 28 ; CORTEX-M0PLUS: .eabi_attribute 38, 1 -; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 19 -;; Despite the M0+ CPU having no FPU in this scenario, we chose to -;; flush to positive zero here. There's no hardware support doing -;; this, but the fast maths software library might and such behaviour -;; would match hardware support on this architecture revision if it -;; existed. 
-; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 20 -; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 21 -; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 22 -; CORTEX-M0PLUS-FAST: .eabi_attribute 23, 1 ; CORTEX-M1: .cpu cortex-m1 ; CORTEX-M1: .eabi_attribute 6, 12 @@ -1010,16 +816,6 @@ ; CORTEX-M1-NOT: .eabi_attribute 28 ; CORTEX-M1: .eabi_attribute 38, 1 -; CORTEX-M1-FAST-NOT: .eabi_attribute 19 -;; Despite the M1 CPU having no FPU in this scenario, we chose to -;; flush to positive zero here. There's no hardware support doing -;; this, but the fast maths software library might and such behaviour -;; would match hardware support on this architecture revision if it -;; existed. -; CORTEX-M1-FAST-NOT: .eabi_attribute 20 -; CORTEX-M1-FAST-NOT: .eabi_attribute 21 -; CORTEX-M1-FAST-NOT: .eabi_attribute 22 -; CORTEX-M1-FAST: .eabi_attribute 23, 1 ; SC000: .cpu sc000 ; SC000: .eabi_attribute 6, 12 @@ -1041,16 +837,6 @@ ; SC000-NOT: .eabi_attribute 28 ; SC000: .eabi_attribute 38, 1 -; SC000-FAST-NOT: .eabi_attribute 19 -;; Despite the SC000 CPU having no FPU in this scenario, we chose to -;; flush to positive zero here. There's no hardware support doing -;; this, but the fast maths software library might and such behaviour -;; would match hardware support on this architecture revision if it -;; existed. -; SC000-FAST-NOT: .eabi_attribute 20 -; SC000-FAST-NOT: .eabi_attribute 21 -; SC000-FAST-NOT: .eabi_attribute 22 -; SC000-FAST: .eabi_attribute 23, 1 ; CORTEX-M3: .cpu cortex-m3 ; CORTEX-M3: .eabi_attribute 6, 10 @@ -1073,14 +859,6 @@ ; CORTEX-M3-NOT: .eabi_attribute 28 ; CORTEX-M3: .eabi_attribute 38, 1 -; CORTEX-M3-FAST-NOT: .eabi_attribute 19 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. -; CORTEX-M3-FAST: .eabi_attribute 20, 2 -; CORTEX-M3-FAST-NOT: .eabi_attribute 21 -; CORTEX-M3-FAST-NOT: .eabi_attribute 22 -; CORTEX-M3-FAST: .eabi_attribute 23, 1 ; SC300: .cpu sc300 ; SC300: .eabi_attribute 6, 10 @@ -1103,14 +881,6 @@ ; SC300-NOT: .eabi_attribute 28 ; SC300: .eabi_attribute 38, 1 -; SC300-FAST-NOT: .eabi_attribute 19 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. -; SC300-FAST: .eabi_attribute 20, 2 -; SC300-FAST-NOT: .eabi_attribute 21 -; SC300-FAST-NOT: .eabi_attribute 22 -; SC300-FAST: .eabi_attribute 23, 1 ; CORTEX-M4-SOFT: .cpu cortex-m4 ; CORTEX-M4-SOFT: .eabi_attribute 6, 13 @@ -1134,13 +904,6 @@ ; CORTEX-M4-SOFT-NOT: .eabi_attribute 28 ; CORTEX-M4-SOFT: .eabi_attribute 38, 1 -; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 19 -;; The M4 defaults to a VFPv4 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-M4-SOFT-FAST: .eabi_attribute 20, 2 -; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 21 -; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 22 -; CORTEX-M4-SOFT-FAST: .eabi_attribute 23, 1 ; CORTEX-M4-HARD: .cpu cortex-m4 ; CORTEX-M4-HARD: .eabi_attribute 6, 13 @@ -1164,13 +927,6 @@ ; CORTEX-M4-HARD: .eabi_attribute 28, 1 ; CORTEX-M4-HARD: .eabi_attribute 38, 1 -; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 19 -;; The M4 defaults to a VFPv4 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. 
-; CORTEX-M4-HARD-FAST: .eabi_attribute 20, 2 -; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 21 -; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 22 -; CORTEX-M4-HARD-FAST: .eabi_attribute 23, 1 ; CORTEX-M7: .cpu cortex-m7 ; CORTEX-M7: .eabi_attribute 6, 13 @@ -1197,16 +953,6 @@ ; CORTEX-M7: .eabi_attribute 38, 1 ; CORTEX-M7: .eabi_attribute 14, 0 -; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 19 -;; The M7 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-M7-FAST: .eabi_attribute 20, 2 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. -; CORTEX-M7-NOFPU-FAST: .eabi_attribute 20, 2 -; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 21 -; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 22 -; CORTEX-M7-NOFPU-FAST: .eabi_attribute 23, 1 ; CORTEX-R4: .cpu cortex-r4 ; CORTEX-R4: .eabi_attribute 6, 10 @@ -1273,12 +1019,6 @@ ; CORTEX-R5-NOT: .eabi_attribute 28 ; CORTEX-R5: .eabi_attribute 38, 1 -; CORTEX-R5-FAST-NOT: .eabi_attribute 19 -;; The R5 has the VFPv3 FP unit, which always flushes preserving sign. -; CORTEX-R5-FAST: .eabi_attribute 20, 2 -; CORTEX-R5-FAST-NOT: .eabi_attribute 21 -; CORTEX-R5-FAST-NOT: .eabi_attribute 22 -; CORTEX-R5-FAST: .eabi_attribute 23, 1 ; CORTEX-R7: .cpu cortex-r7 ; CORTEX-R7: .eabi_attribute 6, 10 @@ -1301,12 +1041,6 @@ ; CORTEX-R7-NOT: .eabi_attribute 28 ; CORTEX-R7: .eabi_attribute 38, 1 -; CORTEX-R7-FAST-NOT: .eabi_attribute 19 -;; The R7 has the VFPv3 FP unit, which always flushes preserving sign. -; CORTEX-R7-FAST: .eabi_attribute 20, 2 -; CORTEX-R7-FAST-NOT: .eabi_attribute 21 -; CORTEX-R7-FAST-NOT: .eabi_attribute 22 -; CORTEX-R7-FAST: .eabi_attribute 23, 1 ; CORTEX-R8: .cpu cortex-r8 ; CORTEX-R8: .eabi_attribute 6, 10 @@ -1329,12 +1063,6 @@ ; CORTEX-R8-NOT: .eabi_attribute 28 ; CORTEX-R8: .eabi_attribute 38, 1 -; CORTEX-R8-FAST-NOT: .eabi_attribute 19 -;; The R8 has the VFPv3 FP unit, which always flushes preserving sign. -; CORTEX-R8-FAST: .eabi_attribute 20, 2 -; CORTEX-R8-FAST-NOT: .eabi_attribute 21 -; CORTEX-R8-FAST-NOT: .eabi_attribute 22 -; CORTEX-R8-FAST: .eabi_attribute 23, 1 ; CORTEX-A32: .cpu cortex-a32 ; CORTEX-A32: .eabi_attribute 6, 14 @@ -1359,12 +1087,6 @@ ; CORTEX-A32-NOT: .eabi_attribute 28 ; CORTEX-A32: .eabi_attribute 38, 1 -; CORTEX-A32-FAST-NOT: .eabi_attribute 19 -;; The A32 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-A32-FAST: .eabi_attribute 20, 2 -; CORTEX-A32-FAST-NOT: .eabi_attribute 21 -; CORTEX-A32-FAST-NOT: .eabi_attribute 22 -; CORTEX-A32-FAST: .eabi_attribute 23, 1 ; CORTEX-M23: .cpu cortex-m23 ; CORTEX-M23: .eabi_attribute 6, 16 @@ -1430,11 +1152,6 @@ ; CORTEX-M35P: .eabi_attribute 38, 1 ; CORTEX-M35P: .eabi_attribute 14, 0 -; CORTEX-M33-FAST-NOT: .eabi_attribute 19 -; CORTEX-M33-FAST: .eabi_attribute 20, 2 -; CORTEX-M33-FAST-NOT: .eabi_attribute 21 -; CORTEX-M33-FAST-NOT: .eabi_attribute 22 -; CORTEX-M33-FAST: .eabi_attribute 23, 1 ; CORTEX-A35: .cpu cortex-a35 ; CORTEX-A35: .eabi_attribute 6, 14 @@ -1459,12 +1176,6 @@ ; CORTEX-A35-NOT: .eabi_attribute 28 ; CORTEX-A35: .eabi_attribute 38, 1 -; CORTEX-A35-FAST-NOT: .eabi_attribute 19 -;; The A35 has the ARMv8 FP unit, which always flushes preserving sign. 
-; CORTEX-A35-FAST: .eabi_attribute 20, 2 -; CORTEX-A35-FAST-NOT: .eabi_attribute 21 -; CORTEX-A35-FAST-NOT: .eabi_attribute 22 -; CORTEX-A35-FAST: .eabi_attribute 23, 1 ; CORTEX-A53: .cpu cortex-a53 ; CORTEX-A53: .eabi_attribute 6, 14 @@ -1489,12 +1200,6 @@ ; CORTEX-A53-NOT: .eabi_attribute 28 ; CORTEX-A53: .eabi_attribute 38, 1 -; CORTEX-A53-FAST-NOT: .eabi_attribute 19 -;; The A53 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-A53-FAST: .eabi_attribute 20, 2 -; CORTEX-A53-FAST-NOT: .eabi_attribute 21 -; CORTEX-A53-FAST-NOT: .eabi_attribute 22 -; CORTEX-A53-FAST: .eabi_attribute 23, 1 ; CORTEX-A57: .cpu cortex-a57 ; CORTEX-A57: .eabi_attribute 6, 14 @@ -1519,12 +1224,6 @@ ; CORTEX-A57-NOT: .eabi_attribute 28 ; CORTEX-A57: .eabi_attribute 38, 1 -; CORTEX-A57-FAST-NOT: .eabi_attribute 19 -;; The A57 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-A57-FAST: .eabi_attribute 20, 2 -; CORTEX-A57-FAST-NOT: .eabi_attribute 21 -; CORTEX-A57-FAST-NOT: .eabi_attribute 22 -; CORTEX-A57-FAST: .eabi_attribute 23, 1 ; CORTEX-A72: .cpu cortex-a72 ; CORTEX-A72: .eabi_attribute 6, 14 @@ -1549,12 +1248,6 @@ ; CORTEX-A72-NOT: .eabi_attribute 28 ; CORTEX-A72: .eabi_attribute 38, 1 -; CORTEX-A72-FAST-NOT: .eabi_attribute 19 -;; The A72 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-A72-FAST: .eabi_attribute 20, 2 -; CORTEX-A72-FAST-NOT: .eabi_attribute 21 -; CORTEX-A72-FAST-NOT: .eabi_attribute 22 -; CORTEX-A72-FAST: .eabi_attribute 23, 1 ; CORTEX-A73: .cpu cortex-a73 ; CORTEX-A73: .eabi_attribute 6, 14 @@ -1580,12 +1273,6 @@ ; CORTEX-A73: .eabi_attribute 38, 1 ; CORTEX-A73: .eabi_attribute 14, 0 -; EXYNOS-FAST-NOT: .eabi_attribute 19 -;; The Exynos processors have the ARMv8 FP unit, which always flushes preserving sign. -; EXYNOS-FAST: .eabi_attribute 20, 2 -; EXYNOS-FAST-NOT: .eabi_attribute 21 -; EXYNOS-FAST-NOT: .eabi_attribute 22 -; EXYNOS-FAST: .eabi_attribute 23, 1 ; EXYNOS-M3: .cpu exynos-m3 ; EXYNOS-M3: .eabi_attribute 6, 14 @@ -1684,12 +1371,6 @@ ; GENERIC-ARMV8_1-A-NOT: .eabi_attribute 28 ; GENERIC-ARMV8_1-A: .eabi_attribute 38, 1 -; GENERIC-ARMV8_1-A-FAST-NOT: .eabi_attribute 19 -;; GENERIC-ARMV8_1-A has the ARMv8 FP unit, which always flushes preserving sign. 
-; GENERIC-ARMV8_1-A-FAST: .eabi_attribute 20, 2 -; GENERIC-ARMV8_1-A-FAST-NOT: .eabi_attribute 21 -; GENERIC-ARMV8_1-A-FAST-NOT: .eabi_attribute 22 -; GENERIC-ARMV8_1-A-FAST: .eabi_attribute 23, 1 ; RELOC-PIC: .eabi_attribute 15, 1 ; RELOC-PIC: .eabi_attribute 16, 1 diff --git a/llvm/test/CodeGen/LoongArch/lasx/abs.ll b/llvm/test/CodeGen/LoongArch/lasx/abs.ll new file mode 100644 index 0000000..e3b0d04d --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/abs.ll @@ -0,0 +1,128 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +define void @vabs_b(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_b: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.b $xr1, $xr0 +; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <32 x i8>, ptr %src + %b = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 true) + store <32 x i8> %b, ptr %dst + ret void +} + +define void @vabs_b_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_b_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.b $xr1, $xr0 +; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <32 x i8>, ptr %src + %b = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 false) + store <32 x i8> %b, ptr %dst + ret void +} + +define void @vabs_h(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.h $xr1, $xr0 +; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <16 x i16>, ptr %src + %b = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 true) + store <16 x i16> %b, ptr %dst + ret void +} + +define void @vabs_h_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_h_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.h $xr1, $xr0 +; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <16 x i16>, ptr %src + %b = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 false) + store <16 x i16> %b, ptr %dst + ret void +} + +define void @vabs_w(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_w: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.w $xr1, $xr0 +; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <8 x i32>, ptr %src + %b = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 true) + store <8 x i32> %b, ptr %dst + ret void +} + +define void @vabs_w_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_w_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.w $xr1, $xr0 +; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <8 x i32>, ptr %src + %b = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 false) + store <8 x i32> %b, ptr %dst + ret void +} + +define void @vabs_d(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.d $xr1, $xr0 +; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <4 x i64>, ptr %src + %b = tail call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 
true) + store <4 x i64> %b, ptr %dst + ret void +} + +define void @vabs_d_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_d_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.d $xr1, $xr0 +; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <4 x i64>, ptr %src + %b = tail call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false) + store <4 x i64> %b, ptr %dst + ret void +} + +declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1) +declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1) +declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1) +declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1) diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll index e66a152..9868775 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll @@ -7,11 +7,7 @@ define void @vadda_b(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xvld $xr0, $a1, 0 ; CHECK-NEXT: xvld $xr1, $a2, 0 -; CHECK-NEXT: xvneg.b $xr2, $xr0 -; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr2 -; CHECK-NEXT: xvneg.b $xr2, $xr1 -; CHECK-NEXT: xvmax.b $xr1, $xr1, $xr2 -; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvadda.b $xr0, $xr0, $xr1 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -33,11 +29,7 @@ define void @vadda_h(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xvld $xr0, $a1, 0 ; CHECK-NEXT: xvld $xr1, $a2, 0 -; CHECK-NEXT: xvneg.h $xr2, $xr0 -; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr2 -; CHECK-NEXT: xvneg.h $xr2, $xr1 -; CHECK-NEXT: xvmax.h $xr1, $xr1, $xr2 -; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvadda.h $xr0, $xr0, $xr1 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -59,11 +51,7 @@ define void @vadda_w(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xvld $xr0, $a1, 0 ; CHECK-NEXT: xvld $xr1, $a2, 0 -; CHECK-NEXT: xvneg.w $xr2, $xr0 -; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr2 -; CHECK-NEXT: xvneg.w $xr2, $xr1 -; CHECK-NEXT: xvmax.w $xr1, $xr1, $xr2 -; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvadda.w $xr0, $xr0, $xr1 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -85,11 +73,7 @@ define void @vadda_d(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xvld $xr0, $a1, 0 ; CHECK-NEXT: xvld $xr1, $a2, 0 -; CHECK-NEXT: xvneg.d $xr2, $xr0 -; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr2 -; CHECK-NEXT: xvneg.d $xr2, $xr1 -; CHECK-NEXT: xvmax.d $xr1, $xr1, $xr2 -; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvadda.d $xr0, $xr0, $xr1 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll new file mode 100644 index 0000000..e1784f8 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll @@ -0,0 +1,197 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +;; xvinsve0.w +define void @xvinsve0_v8i32_l_0(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_l_0: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: 
xvinsve0.w $xr0, $xr1, 0 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8i32_l_4(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_l_4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 4 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 5, i32 6, i32 7> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8f32_l(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8f32_l: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 0 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x float>, ptr %a + %vb = load <8 x float>, ptr %b + %vc = shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> <i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + store <8 x float> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8i32_h_1(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_h_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 1 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 0, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8i32_h_6(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_h_6: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 6 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 0, i32 15> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8f32_h(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8f32_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 0 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x float>, ptr %a + %vb = load <8 x float>, ptr %b + %vc = shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + store <8 x float> %vc, ptr %d + ret void +} + +;; xvinsve0.d +define void @xvinsve0_v4i64_l_1(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_l_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 4, i32 2, i32 3> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4i64_l_2(ptr %d, ptr %a, ptr %b) nounwind { +; 
CHECK-LABEL: xvinsve0_v4i64_l_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 1, i32 4, i32 3> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4f64_l(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4f64_l: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 0 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x double>, ptr %a + %vb = load <4 x double>, ptr %b + %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> <i32 4, i32 1, i32 2, i32 3> + store <4 x double> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4i64_h_0(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_h_0: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 0 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 5, i32 6, i32 7> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4i64_h_2(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_h_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 2 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 4, i32 5, i32 0, i32 7> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4f64_h(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4f64_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 0 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x double>, ptr %a + %vb = load <4 x double>, ptr %b + %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> <i32 0, i32 5, i32 6, i32 7> + store <4 x double> %vc, ptr %d + ret void +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll b/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll new file mode 100644 index 0000000..39ac647 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll @@ -0,0 +1,58 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s + +define <8 x float> @fadd_elt0_v8f32(float %a) nounwind { +; CHECK-LABEL: fadd_elt0_v8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -1168 +; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %b = insertelement <8 x float> poison, float %a, i32 0 + %c = fadd <8 x float> %b, <float 1.0, float poison, float poison, float poison, float poison, float poison, float poison, float poison> + ret <8 x float> %c +} + +define <4 x double> @fadd_elt0_v4f64(double %a) nounwind { +; CHECK-LABEL: fadd_elt0_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -912 +; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1 +; 
CHECK-NEXT: ret +entry: + %b = insertelement <4 x double> poison, double %a, i32 0 + %c = fadd <4 x double> %b, <double 1.0, double poison, double poison, double poison> + ret <4 x double> %c +} + +define <8 x float> @fsub_splat_v8f32(float %a, float %b) nounwind { +; CHECK-LABEL: fsub_splat_v8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsub.s $fa0, $fa0, $fa1 +; CHECK-NEXT: xvreplve0.w $xr0, $xr0 +; CHECK-NEXT: ret +entry: + %insa = insertelement <8 x float> poison, float %a, i32 0 + %insb = insertelement <8 x float> poison, float %b, i32 0 + %va = shufflevector <8 x float> %insa, <8 x float> poison, <8 x i32> zeroinitializer + %vb = shufflevector <8 x float> %insb, <8 x float> poison, <8 x i32> zeroinitializer + %c = fsub <8 x float> %va, %vb + ret <8 x float> %c +} + +define <4 x double> @fsub_splat_v4f64(double %a) nounwind { +; CHECK-LABEL: fsub_splat_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -784 +; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1 +; CHECK-NEXT: xvreplve0.d $xr0, $xr0 +; CHECK-NEXT: ret +entry: + %insa = insertelement <4 x double> poison, double %a, i32 0 + %insb = insertelement <4 x double> poison, double 1.0, i32 0 + %va = shufflevector <4 x double> %insa, <4 x double> poison, <4 x i32> zeroinitializer + %vb = shufflevector <4 x double> %insb, <4 x double> poison, <4 x i32> zeroinitializer + %c = fsub <4 x double> %va, %vb + ret <4 x double> %c +} diff --git a/llvm/test/CodeGen/LoongArch/lsx/abs.ll b/llvm/test/CodeGen/LoongArch/lsx/abs.ll new file mode 100644 index 0000000..85fe1fe --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/abs.ll @@ -0,0 +1,128 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s + +define void @vabs_b(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_b: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.b $vr1, $vr0 +; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <16 x i8>, ptr %src + %b = tail call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 true) + store <16 x i8> %b, ptr %dst + ret void +} + +define void @vabs_b_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_b_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.b $vr1, $vr0 +; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <16 x i8>, ptr %src + %b = tail call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 false) + store <16 x i8> %b, ptr %dst + ret void +} + +define void @vabs_h(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.h $vr1, $vr0 +; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <8 x i16>, ptr %src + %b = tail call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 true) + store <8 x i16> %b, ptr %dst + ret void +} + +define void @vabs_h_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_h_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.h $vr1, $vr0 +; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <8 x i16>, ptr %src + %b = tail call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 false) + store <8 x i16> %b, ptr %dst + ret void +} + +define void @vabs_w(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_w: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.w $vr1, $vr0 +; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <4 x i32>, ptr %src + %b = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 true) + store <4 x i32> %b, ptr %dst + ret void +} + +define void @vabs_w_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_w_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.w $vr1, $vr0 +; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <4 x i32>, ptr %src + %b = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 false) + store <4 x i32> %b, ptr %dst + ret void +} + +define void @vabs_d(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.d $vr1, $vr0 +; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <2 x i64>, ptr %src + %b = tail call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 true) + store <2 x i64> %b, ptr %dst + ret void +} + +define void @vabs_d_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_d_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.d $vr1, $vr0 +; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <2 x i64>, ptr %src + %b = tail call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 false) + store <2 x i64> %b, ptr %dst + ret void +} + +declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1) +declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1) +declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1) +declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1) diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll index 2bd0b59..34f22e1f 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll @@ -7,11 +7,7 @@ define void @vadda_b(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vld $vr0, $a1, 0 ; CHECK-NEXT: vld $vr1, $a2, 0 -; CHECK-NEXT: vneg.b $vr2, $vr0 -; CHECK-NEXT: vmax.b $vr0, $vr0, $vr2 -; CHECK-NEXT: vneg.b $vr2, $vr1 -; CHECK-NEXT: vmax.b $vr1, $vr1, $vr2 -; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vadda.b $vr0, $vr0, $vr1 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -33,11 +29,7 @@ define void @vadda_h(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vld $vr0, $a1, 0 ; CHECK-NEXT: vld $vr1, $a2, 0 -; CHECK-NEXT: vneg.h $vr2, $vr0 -; CHECK-NEXT: vmax.h $vr0, $vr0, $vr2 -; CHECK-NEXT: vneg.h $vr2, $vr1 -; CHECK-NEXT: vmax.h $vr1, $vr1, $vr2 -; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vadda.h $vr0, $vr0, $vr1 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -59,11 +51,7 @@ define void @vadda_w(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vld $vr0, $a1, 0 ; CHECK-NEXT: vld $vr1, $a2, 0 -; CHECK-NEXT: vneg.w $vr2, $vr0 -; CHECK-NEXT: vmax.w $vr0, $vr0, $vr2 -; CHECK-NEXT: vneg.w $vr2, $vr1 -; CHECK-NEXT: vmax.w $vr1, $vr1, $vr2 -; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vadda.w $vr0, $vr0, $vr1 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -85,11 +73,7 @@ define void @vadda_d(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vld $vr0, $a1, 0 ; CHECK-NEXT: vld $vr1, $a2, 0 -; CHECK-NEXT: vneg.d 
$vr2, $vr0 -; CHECK-NEXT: vmax.d $vr0, $vr0, $vr2 -; CHECK-NEXT: vneg.d $vr2, $vr1 -; CHECK-NEXT: vmax.d $vr1, $vr1, $vr2 -; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vadda.d $vr0, $vr0, $vr1 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll b/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll new file mode 100644 index 0000000..b651f11 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll @@ -0,0 +1,58 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s + +define <4 x float> @fadd_elt0_v4f32(float %a) nounwind { +; CHECK-LABEL: fadd_elt0_v4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -1168 +; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %b = insertelement <4 x float> poison, float %a, i32 0 + %c = fadd <4 x float> %b, <float 1.0, float poison, float poison, float poison> + ret <4 x float> %c +} + +define <2 x double> @fadd_elt0_v2f64(double %a) nounwind { +; CHECK-LABEL: fadd_elt0_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -912 +; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %b = insertelement <2 x double> poison, double %a, i32 0 + %c = fadd <2 x double> %b, <double 1.0, double poison> + ret <2 x double> %c +} + +define <4 x float> @fsub_splat_v4f32(float %b) nounwind { +; CHECK-LABEL: fsub_splat_v4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -1168 +; CHECK-NEXT: fsub.s $fa0, $fa1, $fa0 +; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0 +; CHECK-NEXT: ret +entry: + %insa = insertelement <4 x float> poison, float 1.0, i32 0 + %insb = insertelement <4 x float> poison, float %b, i32 0 + %va = shufflevector <4 x float> %insa, <4 x float> poison, <4 x i32> zeroinitializer + %vb = shufflevector <4 x float> %insb, <4 x float> poison, <4 x i32> zeroinitializer + %c = fsub <4 x float> %va, %vb + ret <4 x float> %c +} + +define <2 x double> @fsub_splat_v2f64(double %a, double %b) nounwind { +; CHECK-LABEL: fsub_splat_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsub.d $fa0, $fa0, $fa1 +; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0 +; CHECK-NEXT: ret +entry: + %insa = insertelement <2 x double> poison, double %a, i32 0 + %insb = insertelement <2 x double> poison, double %b, i32 0 + %va = shufflevector <2 x double> %insa, <2 x double> poison, <2 x i32> zeroinitializer + %vb = shufflevector <2 x double> %insb, <2 x double> poison, <2 x i32> zeroinitializer + %c = fsub <2 x double> %va, %vb + ret <2 x double> %c +} diff --git a/llvm/test/CodeGen/LoongArch/merge-offset-option.ll b/llvm/test/CodeGen/LoongArch/merge-offset-option.ll new file mode 100644 index 0000000..e5351a6 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/merge-offset-option.ll @@ -0,0 +1,24 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch64 -mattr=+d --relocation-model=static -O1 \ +; RUN: < %s | FileCheck %s --check-prefix=MERGE +; RUN: llc --mtriple=loongarch64 -mattr=+d --relocation-model=static -O1 \ +; RUN: --loongarch-enable-merge-offset=false < %s | FileCheck %s --check-prefix=NO_MERGE + +@g = dso_local global i32 zeroinitializer, align 4 + +define void @foo() nounwind { +; MERGE-LABEL: foo: +; MERGE: # %bb.0: +; MERGE-NEXT: pcalau12i $a0, %pc_hi20(g) +; MERGE-NEXT: ld.w $zero, $a0, %pc_lo12(g) +; MERGE-NEXT: ret +; +; 
NO_MERGE-LABEL: foo: +; NO_MERGE: # %bb.0: +; NO_MERGE-NEXT: pcalau12i $a0, %pc_hi20(g) +; NO_MERGE-NEXT: addi.d $a0, $a0, %pc_lo12(g) +; NO_MERGE-NEXT: ld.w $zero, $a0, 0 +; NO_MERGE-NEXT: ret + %v = load volatile i32, ptr @g + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/bug22322.ll b/llvm/test/CodeGen/NVPTX/bug22322.ll index 055c512..71e180b 100644 --- a/llvm/test/CodeGen/NVPTX/bug22322.ll +++ b/llvm/test/CodeGen/NVPTX/bug22322.ll @@ -20,12 +20,12 @@ _ZL11compute_vecRK6float3jb.exit: call void @llvm.lifetime.start.p0(i64 4, ptr %ret_vec.sroa.8.i) %6 = and i32 %4, 15 %7 = icmp eq i32 %6, 0 - %8 = select i1 %7, float 0.000000e+00, float -1.000000e+00 + %8 = select nnan nsz i1 %7, float 0.000000e+00, float -1.000000e+00 store float %8, ptr %ret_vec.sroa.8.i, align 4 ; CHECK: max.f32 %r{{[0-9]+}}, %r{{[0-9]+}}, 0f00000000 %9 = fcmp olt float %8, 0.000000e+00 %ret_vec.sroa.8.i.val = load float, ptr %ret_vec.sroa.8.i, align 4 - %10 = select i1 %9, float 0.000000e+00, float %ret_vec.sroa.8.i.val + %10 = select nnan nsz i1 %9, float 0.000000e+00, float %ret_vec.sroa.8.i.val call void @llvm.lifetime.end.p0(i64 4, ptr %ret_vec.sroa.8.i) %11 = getelementptr inbounds %class.float3, ptr %dst, i64 %5, i32 0 store float 0.000000e+00, ptr %11, align 4 @@ -51,7 +51,7 @@ declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2 ; Function Attrs: nounwind declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2 -attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "no-signed-zeros-fp-math"="true" "use-soft-float"="false" } +attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "use-soft-float"="false" } attributes #1 = { nounwind readnone } attributes #2 = { nounwind } diff --git a/llvm/test/CodeGen/PowerPC/scalar-min-max.ll b/llvm/test/CodeGen/PowerPC/scalar-min-max.ll index 216d498..5f637e3 100644 --- a/llvm/test/CodeGen/PowerPC/scalar-min-max.ll +++ b/llvm/test/CodeGen/PowerPC/scalar-min-max.ll @@ -1,36 +1,23 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names --enable-unsafe-fp-math \ -; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \ -; RUN: --enable-no-nans-fp-math \ -; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s -; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names --enable-unsafe-fp-math \ -; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \ -; RUN: --enable-no-nans-fp-math \ -; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s ; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -verify-machineinstrs \ ; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \ -; RUN: --check-prefix=NO-FAST-P9 +; RUN: --check-prefix=P9 ; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -verify-machineinstrs \ ; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \ -; RUN: --check-prefix=NO-FAST-P8 +; RUN: --check-prefix=P8 define dso_local float @testfmax(float %a, float %b) local_unnamed_addr { -; CHECK-LABEL: testfmax: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmaxdp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testfmax: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testfmax: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P8-NEXT: bgtlr cr0 -; 
NO-FAST-P8-NEXT: # %bb.1: # %entry -; NO-FAST-P8-NEXT: fmr f1, f2 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testfmax: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmaxcdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testfmax: +; P8: # %bb.0: # %entry +; P8-NEXT: fcmpu cr0, f1, f2 +; P8-NEXT: bgtlr cr0 +; P8-NEXT: # %bb.1: # %entry +; P8-NEXT: fmr f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp ogt float %a, %b %cond = select i1 %cmp, float %a, float %b @@ -38,23 +25,18 @@ entry: } define dso_local double @testdmax(double %a, double %b) local_unnamed_addr { -; CHECK-LABEL: testdmax: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmaxdp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testdmax: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testdmax: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: bgtlr cr0 -; NO-FAST-P8-NEXT: # %bb.1: # %entry -; NO-FAST-P8-NEXT: fmr f1, f2 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testdmax: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmaxcdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testdmax: +; P8: # %bb.0: # %entry +; P8-NEXT: xscmpudp cr0, f1, f2 +; P8-NEXT: bgtlr cr0 +; P8-NEXT: # %bb.1: # %entry +; P8-NEXT: fmr f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp ogt double %a, %b %cond = select i1 %cmp, double %a, double %b @@ -62,23 +44,18 @@ entry: } define dso_local float @testfmin(float %a, float %b) local_unnamed_addr { -; CHECK-LABEL: testfmin: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmindp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testfmin: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testfmin: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P8-NEXT: bltlr cr0 -; NO-FAST-P8-NEXT: # %bb.1: # %entry -; NO-FAST-P8-NEXT: fmr f1, f2 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testfmin: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmincdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testfmin: +; P8: # %bb.0: # %entry +; P8-NEXT: fcmpu cr0, f1, f2 +; P8-NEXT: bltlr cr0 +; P8-NEXT: # %bb.1: # %entry +; P8-NEXT: fmr f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp olt float %a, %b %cond = select i1 %cmp, float %a, float %b @@ -86,23 +63,18 @@ entry: } define dso_local double @testdmin(double %a, double %b) local_unnamed_addr { -; CHECK-LABEL: testdmin: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmindp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testdmin: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testdmin: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: bltlr cr0 -; NO-FAST-P8-NEXT: # %bb.1: # %entry -; NO-FAST-P8-NEXT: fmr f1, f2 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testdmin: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmincdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testdmin: +; P8: # %bb.0: # %entry +; P8-NEXT: xscmpudp cr0, f1, f2 +; P8-NEXT: bltlr cr0 +; P8-NEXT: # %bb.1: # %entry +; P8-NEXT: fmr f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp olt double %a, %b %cond = select i1 %cmp, double %a, double %b @@ -110,86 +82,62 @@ entry: } define dso_local float @testfmax_fast(float %a, float %b) local_unnamed_addr { -; CHECK-LABEL: testfmax_fast: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmaxdp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testfmax_fast: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2 -; 
NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testfmax_fast: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xssubsp f0, f2, f1 -; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testfmax_fast: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmaxdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testfmax_fast: +; P8: # %bb.0: # %entry +; P8-NEXT: xsmaxdp f1, f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp nnan ninf ogt float %a, %b - %cond = select i1 %cmp, float %a, float %b + %cond = select nnan nsz i1 %cmp, float %a, float %b ret float %cond } define dso_local double @testdmax_fast(double %a, double %b) local_unnamed_addr { -; CHECK-LABEL: testdmax_fast: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmaxdp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testdmax_fast: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testdmax_fast: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xssubdp f0, f2, f1 -; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testdmax_fast: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmaxdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testdmax_fast: +; P8: # %bb.0: # %entry +; P8-NEXT: xsmaxdp f1, f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp nnan ninf ogt double %a, %b - %cond = select i1 %cmp, double %a, double %b + %cond = select nnan nsz i1 %cmp, double %a, double %b ret double %cond } define dso_local float @testfmin_fast(float %a, float %b) local_unnamed_addr { -; CHECK-LABEL: testfmin_fast: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmindp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testfmin_fast: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testfmin_fast: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xssubsp f0, f1, f2 -; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testfmin_fast: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmindp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testfmin_fast: +; P8: # %bb.0: # %entry +; P8-NEXT: xsmindp f1, f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp nnan ninf olt float %a, %b - %cond = select i1 %cmp, float %a, float %b + %cond = select nnan nsz i1 %cmp, float %a, float %b ret float %cond } define dso_local double @testdmin_fast(double %a, double %b) local_unnamed_addr { -; CHECK-LABEL: testdmin_fast: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmindp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testdmin_fast: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testdmin_fast: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xssubdp f0, f1, f2 -; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testdmin_fast: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmindp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testdmin_fast: +; P8: # %bb.0: # %entry +; P8-NEXT: xsmindp f1, f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp nnan ninf olt double %a, %b - %cond = select i1 %cmp, double %a, double %b + %cond = select nnan nsz i1 %cmp, double %a, double %b ret double %cond } diff --git a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll index aaabd76e..fd0b494 100644 --- a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll +++ b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll @@ -20,18 +20,18 @@ define float @select_oeq_float(float %a, float %b, float %c, float %d) { ; FAST-P8-LABEL: select_oeq_float: ; FAST-P8: # %bb.0: # 
%entry -; FAST-P8-NEXT: xssubsp f0, f2, f1 -; FAST-P8-NEXT: xssubsp f1, f1, f2 -; FAST-P8-NEXT: fsel f1, f1, f3, f4 -; FAST-P8-NEXT: fsel f1, f0, f1, f4 +; FAST-P8-NEXT: xssubsp f0, f1, f2 +; FAST-P8-NEXT: xsnegdp f1, f0 +; FAST-P8-NEXT: fsel f0, f0, f3, f4 +; FAST-P8-NEXT: fsel f1, f1, f0, f4 ; FAST-P8-NEXT: blr ; ; FAST-P9-LABEL: select_oeq_float: ; FAST-P9: # %bb.0: # %entry -; FAST-P9-NEXT: xssubsp f0, f2, f1 -; FAST-P9-NEXT: xssubsp f1, f1, f2 -; FAST-P9-NEXT: fsel f1, f1, f3, f4 -; FAST-P9-NEXT: fsel f1, f0, f1, f4 +; FAST-P9-NEXT: xssubsp f0, f1, f2 +; FAST-P9-NEXT: xsnegdp f1, f0 +; FAST-P9-NEXT: fsel f0, f0, f3, f4 +; FAST-P9-NEXT: fsel f1, f1, f0, f4 ; FAST-P9-NEXT: blr ; ; NO-FAST-P8-LABEL: select_oeq_float: @@ -59,6 +59,48 @@ entry: ret float %cond } +define float @select_oeq_float_nsz(float %a, float %b, float %c, float %d) { +; FAST-P8-LABEL: select_oeq_float_nsz: +; FAST-P8: # %bb.0: # %entry +; FAST-P8-NEXT: xssubsp f0, f2, f1 +; FAST-P8-NEXT: xssubsp f1, f1, f2 +; FAST-P8-NEXT: fsel f1, f1, f3, f4 +; FAST-P8-NEXT: fsel f1, f0, f1, f4 +; FAST-P8-NEXT: blr +; +; FAST-P9-LABEL: select_oeq_float_nsz: +; FAST-P9: # %bb.0: # %entry +; FAST-P9-NEXT: xssubsp f0, f2, f1 +; FAST-P9-NEXT: xssubsp f1, f1, f2 +; FAST-P9-NEXT: fsel f1, f1, f3, f4 +; FAST-P9-NEXT: fsel f1, f0, f1, f4 +; FAST-P9-NEXT: blr +; +; NO-FAST-P8-LABEL: select_oeq_float_nsz: +; NO-FAST-P8: # %bb.0: # %entry +; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 +; NO-FAST-P8-NEXT: beq cr0, .LBB1_2 +; NO-FAST-P8-NEXT: # %bb.1: # %entry +; NO-FAST-P8-NEXT: fmr f3, f4 +; NO-FAST-P8-NEXT: .LBB1_2: # %entry +; NO-FAST-P8-NEXT: fmr f1, f3 +; NO-FAST-P8-NEXT: blr +; +; NO-FAST-P9-LABEL: select_oeq_float_nsz: +; NO-FAST-P9: # %bb.0: # %entry +; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 +; NO-FAST-P9-NEXT: beq cr0, .LBB1_2 +; NO-FAST-P9-NEXT: # %bb.1: # %entry +; NO-FAST-P9-NEXT: fmr f3, f4 +; NO-FAST-P9-NEXT: .LBB1_2: # %entry +; NO-FAST-P9-NEXT: fmr f1, f3 +; NO-FAST-P9-NEXT: blr +entry: + %cmp = fcmp nsz oeq float %a, %b + %cond = select i1 %cmp, float %c, float %d + ret float %cond +} + define double @select_oeq_double(double %a, double %b, double %c, double %d) { ; FAST-P8-LABEL: select_oeq_double: ; FAST-P8: # %bb.0: # %entry @@ -79,20 +121,20 @@ define double @select_oeq_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8-LABEL: select_oeq_double: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: beq cr0, .LBB1_2 +; NO-FAST-P8-NEXT: beq cr0, .LBB2_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB1_2: # %entry +; NO-FAST-P8-NEXT: .LBB2_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_oeq_double: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P9-NEXT: beq cr0, .LBB1_2 +; NO-FAST-P9-NEXT: beq cr0, .LBB2_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB1_2: # %entry +; NO-FAST-P9-NEXT: .LBB2_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -182,13 +224,57 @@ entry: define float @select_one_float(float %a, float %b, float %c, float %d) { ; FAST-P8-LABEL: select_one_float: ; FAST-P8: # %bb.0: # %entry +; FAST-P8-NEXT: xssubsp f0, f1, f2 +; FAST-P8-NEXT: xsnegdp f1, f0 +; FAST-P8-NEXT: fsel f0, f0, f4, f3 +; FAST-P8-NEXT: fsel f1, f1, f0, f3 +; FAST-P8-NEXT: blr +; +; FAST-P9-LABEL: select_one_float: +; FAST-P9: # %bb.0: # %entry +; FAST-P9-NEXT: xssubsp f0, f1, f2 +; FAST-P9-NEXT: xsnegdp f1, f0 +; 
FAST-P9-NEXT: fsel f0, f0, f4, f3 +; FAST-P9-NEXT: fsel f1, f1, f0, f3 +; FAST-P9-NEXT: blr +; +; NO-FAST-P8-LABEL: select_one_float: +; NO-FAST-P8: # %bb.0: # %entry +; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 +; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2 +; NO-FAST-P8-NEXT: # %bb.1: # %entry +; NO-FAST-P8-NEXT: fmr f3, f4 +; NO-FAST-P8-NEXT: .LBB5_2: # %entry +; NO-FAST-P8-NEXT: fmr f1, f3 +; NO-FAST-P8-NEXT: blr +; +; NO-FAST-P9-LABEL: select_one_float: +; NO-FAST-P9: # %bb.0: # %entry +; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 +; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2 +; NO-FAST-P9-NEXT: # %bb.1: # %entry +; NO-FAST-P9-NEXT: fmr f3, f4 +; NO-FAST-P9-NEXT: .LBB5_2: # %entry +; NO-FAST-P9-NEXT: fmr f1, f3 +; NO-FAST-P9-NEXT: blr +entry: + %cmp = fcmp one float %a, %b + %cond = select i1 %cmp, float %c, float %d + ret float %cond +} + +define float @select_one_float_nsz(float %a, float %b, float %c, float %d) { +; FAST-P8-LABEL: select_one_float_nsz: +; FAST-P8: # %bb.0: # %entry ; FAST-P8-NEXT: xssubsp f0, f2, f1 ; FAST-P8-NEXT: xssubsp f1, f1, f2 ; FAST-P8-NEXT: fsel f1, f1, f4, f3 ; FAST-P8-NEXT: fsel f1, f0, f1, f3 ; FAST-P8-NEXT: blr ; -; FAST-P9-LABEL: select_one_float: +; FAST-P9-LABEL: select_one_float_nsz: ; FAST-P9: # %bb.0: # %entry ; FAST-P9-NEXT: xssubsp f0, f2, f1 ; FAST-P9-NEXT: xssubsp f1, f1, f2 @@ -196,29 +282,29 @@ define float @select_one_float(float %a, float %b, float %c, float %d) { ; FAST-P9-NEXT: fsel f1, f0, f1, f3 ; FAST-P9-NEXT: blr ; -; NO-FAST-P8-LABEL: select_one_float: +; NO-FAST-P8-LABEL: select_one_float_nsz: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB4_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB6_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB4_2: # %entry +; NO-FAST-P8-NEXT: .LBB6_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; -; NO-FAST-P9-LABEL: select_one_float: +; NO-FAST-P9-LABEL: select_one_float_nsz: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB4_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB6_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB4_2: # %entry +; NO-FAST-P9-NEXT: .LBB6_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: - %cmp = fcmp one float %a, %b + %cmp = fcmp nsz one float %a, %b %cond = select i1 %cmp, float %c, float %d ret float %cond } @@ -244,10 +330,10 @@ define double @select_one_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB7_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB5_2: # %entry +; NO-FAST-P8-NEXT: .LBB7_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -255,10 +341,10 @@ define double @select_one_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB7_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: 
.LBB5_2: # %entry +; NO-FAST-P9-NEXT: .LBB7_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -362,10 +448,10 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB8_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB10_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB8_2: # %entry +; NO-FAST-P8-NEXT: .LBB10_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -373,10 +459,10 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB8_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB10_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB8_2: # %entry +; NO-FAST-P9-NEXT: .LBB10_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -402,10 +488,10 @@ define double @select_oge_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB9_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB11_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB9_2: # %entry +; NO-FAST-P8-NEXT: .LBB11_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -413,10 +499,10 @@ define double @select_oge_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB9_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB11_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB9_2: # %entry +; NO-FAST-P9-NEXT: .LBB11_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -503,20 +589,20 @@ define float @select_olt_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8-LABEL: select_olt_float: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P8-NEXT: blt cr0, .LBB12_2 +; NO-FAST-P8-NEXT: blt cr0, .LBB14_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB12_2: # %entry +; NO-FAST-P8-NEXT: .LBB14_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_olt_float: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P9-NEXT: blt cr0, .LBB12_2 +; NO-FAST-P9-NEXT: blt cr0, .LBB14_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB12_2: # %entry +; NO-FAST-P9-NEXT: .LBB14_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -541,20 +627,20 @@ define double @select_olt_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8-LABEL: select_olt_double: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: blt cr0, .LBB13_2 +; NO-FAST-P8-NEXT: blt cr0, .LBB15_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB13_2: # %entry +; NO-FAST-P8-NEXT: .LBB15_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_olt_double: ; NO-FAST-P9: # %bb.0: # %entry ; 
NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P9-NEXT: blt cr0, .LBB13_2 +; NO-FAST-P9-NEXT: blt cr0, .LBB15_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB13_2: # %entry +; NO-FAST-P9-NEXT: .LBB15_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -641,20 +727,20 @@ define float @select_ogt_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8-LABEL: select_ogt_float: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P8-NEXT: bgt cr0, .LBB16_2 +; NO-FAST-P8-NEXT: bgt cr0, .LBB18_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB16_2: # %entry +; NO-FAST-P8-NEXT: .LBB18_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_ogt_float: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P9-NEXT: bgt cr0, .LBB16_2 +; NO-FAST-P9-NEXT: bgt cr0, .LBB18_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB16_2: # %entry +; NO-FAST-P9-NEXT: .LBB18_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -679,20 +765,20 @@ define double @select_ogt_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8-LABEL: select_ogt_double: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: bgt cr0, .LBB17_2 +; NO-FAST-P8-NEXT: bgt cr0, .LBB19_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB17_2: # %entry +; NO-FAST-P8-NEXT: .LBB19_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_ogt_double: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P9-NEXT: bgt cr0, .LBB17_2 +; NO-FAST-P9-NEXT: bgt cr0, .LBB19_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB17_2: # %entry +; NO-FAST-P9-NEXT: .LBB19_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -780,10 +866,10 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB20_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB22_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB20_2: # %entry +; NO-FAST-P8-NEXT: .LBB22_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -791,10 +877,10 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB20_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB22_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB20_2: # %entry +; NO-FAST-P9-NEXT: .LBB22_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -820,10 +906,10 @@ define double @select_ole_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB21_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB23_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB21_2: # %entry +; NO-FAST-P8-NEXT: .LBB23_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; 
NO-FAST-P8-NEXT: blr ; @@ -831,10 +917,10 @@ define double @select_ole_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB21_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB23_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB21_2: # %entry +; NO-FAST-P9-NEXT: .LBB23_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -926,13 +1012,13 @@ define double @onecmp1(double %a, double %y, double %z) { ; NO-FAST-P8-NEXT: vspltisw v2, 1 ; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f0 -; NO-FAST-P8-NEXT: bc 12, lt, .LBB24_3 +; NO-FAST-P8-NEXT: bc 12, lt, .LBB26_3 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f1 -; NO-FAST-P8-NEXT: bc 12, un, .LBB24_3 +; NO-FAST-P8-NEXT: bc 12, un, .LBB26_3 ; NO-FAST-P8-NEXT: # %bb.2: # %entry ; NO-FAST-P8-NEXT: fmr f3, f2 -; NO-FAST-P8-NEXT: .LBB24_3: # %entry +; NO-FAST-P8-NEXT: .LBB26_3: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -941,13 +1027,13 @@ define double @onecmp1(double %a, double %y, double %z) { ; NO-FAST-P9-NEXT: vspltisw v2, 1 ; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f0 -; NO-FAST-P9-NEXT: bc 12, lt, .LBB24_3 +; NO-FAST-P9-NEXT: bc 12, lt, .LBB26_3 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f1 -; NO-FAST-P9-NEXT: bc 12, un, .LBB24_3 +; NO-FAST-P9-NEXT: bc 12, un, .LBB26_3 ; NO-FAST-P9-NEXT: # %bb.2: # %entry ; NO-FAST-P9-NEXT: fmr f3, f2 -; NO-FAST-P9-NEXT: .LBB24_3: # %entry +; NO-FAST-P9-NEXT: .LBB26_3: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -978,10 +1064,10 @@ define double @onecmp2(double %a, double %y, double %z) { ; NO-FAST-P8-NEXT: vspltisw v2, 1 ; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P8-NEXT: bgt cr0, .LBB25_2 +; NO-FAST-P8-NEXT: bgt cr0, .LBB27_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f2, f3 -; NO-FAST-P8-NEXT: .LBB25_2: # %entry +; NO-FAST-P8-NEXT: .LBB27_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f2 ; NO-FAST-P8-NEXT: blr ; @@ -990,10 +1076,10 @@ define double @onecmp2(double %a, double %y, double %z) { ; NO-FAST-P9-NEXT: vspltisw v2, 1 ; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P9-NEXT: bgt cr0, .LBB25_2 +; NO-FAST-P9-NEXT: bgt cr0, .LBB27_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f2, f3 -; NO-FAST-P9-NEXT: .LBB25_2: # %entry +; NO-FAST-P9-NEXT: .LBB27_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f2 ; NO-FAST-P9-NEXT: blr entry: @@ -1028,10 +1114,10 @@ define double @onecmp3(double %a, double %y, double %z) { ; NO-FAST-P8-NEXT: vspltisw v2, 1 ; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P8-NEXT: beq cr0, .LBB26_2 +; NO-FAST-P8-NEXT: beq cr0, .LBB28_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f2, f3 -; NO-FAST-P8-NEXT: .LBB26_2: # %entry +; NO-FAST-P8-NEXT: .LBB28_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f2 ; NO-FAST-P8-NEXT: blr ; @@ -1040,10 +1126,10 @@ define double @onecmp3(double %a, double %y, double %z) { ; NO-FAST-P9-NEXT: vspltisw v2, 1 ; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P9-NEXT: beq cr0, .LBB26_2 +; NO-FAST-P9-NEXT: beq cr0, .LBB28_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f2, f3 -; 
NO-FAST-P9-NEXT: .LBB26_2: # %entry +; NO-FAST-P9-NEXT: .LBB28_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f2 ; NO-FAST-P9-NEXT: blr entry: diff --git a/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll b/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll index 55b0d1f..2a46a59 100644 --- a/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll +++ b/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll @@ -155,3 +155,109 @@ define i1 @test9(i64 %x) { %b = icmp eq i64 %a, u0x08000000 ret i1 %b } + +; Make sure the and constant doesn't get converted to an opaque constant by +; ConstantHoisting. If it's an opaque constant, we'll have addi -16 and addi 15. +define i64 @test10(i64 %0) #0 { +; RV32-LABEL: test10: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: andi a0, a0, -16 +; RV32-NEXT: snez a0, a0 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: ret +; +; RV64-LABEL: test10: +; RV64: # %bb.0: # %entry +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: sraiw a0, a0, 4 +; RV64-NEXT: snez a0, a0 +; RV64-NEXT: ret +entry: + %1 = add nuw nsw i64 %0, u0xffffffff + %2 = and i64 %1, u0xfffffff0 + %3 = icmp ne i64 %2, 0 + %4 = zext i1 %3 to i64 + ret i64 %4 +} + +; Make sure the and constant doesn't get converted to an opaque constant by +; ConstantHoisting. If it's an opaque constant, we'll have addi -16 and addi 15. +define i64 @test11(i64 %0) #0 { +; RV32-LABEL: test11: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: srai a0, a0, 4 +; RV32-NEXT: addi a0, a0, 1621 +; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: ret +; +; RV64-LABEL: test11: +; RV64: # %bb.0: # %entry +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: sraiw a0, a0, 4 +; RV64-NEXT: addi a0, a0, 1621 +; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: ret +entry: + %1 = add nuw nsw i64 %0, u0xffffffff + %2 = and i64 %1, u0xfffffff0 + %3 = icmp eq i64 %2, u0xffff9ab0 + %4 = zext i1 %3 to i64 + ret i64 %4 +} + +; Make sure the and constant doesn't get converted to an opaque constant by +; ConstantHoisting. If it's an opaque constant we'll end up with constant +; materialization sequences on RV64. +define i64 @test12(i64 %0) #0 { +; RV32-LABEL: test12: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi a0, a0, -3 +; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: ret +; +; RV64-LABEL: test12: +; RV64: # %bb.0: # %entry +; RV64-NEXT: addiw a0, a0, -16 +; RV64-NEXT: addi a0, a0, 13 +; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: ret +entry: + %1 = add nuw nsw i64 %0, u0xfffffff0 + %2 = and i64 %1, u0xffffffff + %3 = icmp eq i64 %2, u0xfffffff3 + %4 = zext i1 %3 to i64 + ret i64 %4 +} + +; Make sure the and constant doesn't get converted to an opaque constant by +; ConstantHoisting. 
+define i64 @test13(i64 %0) #0 { +; RV32-LABEL: test13: +; RV32: # %bb.0: # %entry +; RV32-NEXT: lui a1, 524288 +; RV32-NEXT: addi a1, a1, 15 +; RV32-NEXT: add a0, a0, a1 +; RV32-NEXT: srli a0, a0, 31 +; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: ret +; +; RV64-LABEL: test13: +; RV64: # %bb.0: # %entry +; RV64-NEXT: lui a1, 524288 +; RV64-NEXT: addi a1, a1, -15 +; RV64-NEXT: sub a0, a0, a1 +; RV64-NEXT: sraiw a0, a0, 31 +; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: ret +entry: + %1 = add nuw nsw i64 %0, u0x8000000f + %2 = and i64 %1, u0x80000000 + %3 = icmp eq i64 %2, 0 + %4 = zext i1 %3 to i64 + ret i64 %4 +} diff --git a/llvm/test/CodeGen/RISCV/select-bare.ll b/llvm/test/CodeGen/RISCV/select-bare.ll index 796121a..44028a7 100644 --- a/llvm/test/CodeGen/RISCV/select-bare.ll +++ b/llvm/test/CodeGen/RISCV/select-bare.ll @@ -26,8 +26,8 @@ define i32 @bare_select(i1 %a, i32 %b, i32 %c) nounwind { ; RV32IXQCI-LABEL: bare_select: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %1 = select i1 %a, i32 %b, i32 %c ret i32 %1 @@ -53,8 +53,8 @@ define float @bare_select_float(i1 %a, float %b, float %c) nounwind { ; RV32IXQCI-LABEL: bare_select_float: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %1 = select i1 %a, float %b, float %c ret float %1 diff --git a/llvm/test/CodeGen/RISCV/select-cc.ll b/llvm/test/CodeGen/RISCV/select-cc.ll index 14055df..b57f625 100644 --- a/llvm/test/CodeGen/RISCV/select-cc.ll +++ b/llvm/test/CodeGen/RISCV/select-cc.ll @@ -87,40 +87,40 @@ define signext i32 @foo(i32 signext %a, ptr %b) nounwind { ; ; RV32IXQCI-LABEL: foo: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: lw a5, 0(a1) ; RV32IXQCI-NEXT: lw a2, 0(a1) ; RV32IXQCI-NEXT: lw a4, 0(a1) ; RV32IXQCI-NEXT: lw t5, 0(a1) ; RV32IXQCI-NEXT: lw t4, 0(a1) +; RV32IXQCI-NEXT: lw t3, 0(a1) ; RV32IXQCI-NEXT: lw t2, 0(a1) -; RV32IXQCI-NEXT: lw t1, 0(a1) ; RV32IXQCI-NEXT: lw t0, 0(a1) ; RV32IXQCI-NEXT: lw a7, 0(a1) ; RV32IXQCI-NEXT: lw a6, 0(a1) -; RV32IXQCI-NEXT: lw t3, 0(a1) ; RV32IXQCI-NEXT: lw a3, 0(a1) -; RV32IXQCI-NEXT: bltz t3, .LBB0_2 +; RV32IXQCI-NEXT: lw t1, 0(a1) +; RV32IXQCI-NEXT: lw a5, 0(a1) +; RV32IXQCI-NEXT: bltz t1, .LBB0_2 ; RV32IXQCI-NEXT: # %bb.1: -; RV32IXQCI-NEXT: li t6, 0 -; RV32IXQCI-NEXT: qc.mveq a5, a0, a5, a0 -; RV32IXQCI-NEXT: qc.mvne a2, a5, a2, a5 -; RV32IXQCI-NEXT: qc.mvltu a4, a4, a2, a2 -; RV32IXQCI-NEXT: qc.mvgeu t5, a4, t5, a4 -; RV32IXQCI-NEXT: qc.mvltu t4, t5, t4, t5 -; RV32IXQCI-NEXT: qc.mvgeu t2, t2, t4, t4 -; RV32IXQCI-NEXT: qc.mvlt t1, t1, t2, t2 -; RV32IXQCI-NEXT: qc.mvge t0, t1, t0, t1 -; RV32IXQCI-NEXT: qc.mvlt a7, t0, a7, t0 -; RV32IXQCI-NEXT: qc.mvge a6, a6, a7, a7 -; RV32IXQCI-NEXT: mv a3, t3 -; RV32IXQCI-NEXT: qc.mvge a3, t6, t3, a6 +; RV32IXQCI-NEXT: li a5, 0 +; RV32IXQCI-NEXT: qc.mveq a2, a0, a2, a0 +; RV32IXQCI-NEXT: qc.mvne a4, a2, a4, a2 +; RV32IXQCI-NEXT: qc.mvltu t5, t5, a4, a4 +; RV32IXQCI-NEXT: qc.mvgeu t4, t5, t4, t5 +; RV32IXQCI-NEXT: qc.mvltu t3, t4, t3, t4 +; RV32IXQCI-NEXT: qc.mvgeu t2, t2, t3, t3 +; RV32IXQCI-NEXT: qc.mvlt t0, t0, t2, t2 +; RV32IXQCI-NEXT: qc.mvge a7, t0, a7, t0 +; RV32IXQCI-NEXT: qc.mvlt a6, a7, a6, a7 +; RV32IXQCI-NEXT: qc.mvge a3, a3, a6, a6 +; RV32IXQCI-NEXT: qc.mvlt a3, a5, t1, t1 +; RV32IXQCI-NEXT: mv a5, 
a3 ; RV32IXQCI-NEXT: .LBB0_2: ; RV32IXQCI-NEXT: lw a2, 0(a1) ; RV32IXQCI-NEXT: lw a0, 0(a1) ; RV32IXQCI-NEXT: li a1, 1024 -; RV32IXQCI-NEXT: qc.mvlt a2, a1, a2, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a1, a2, a5 ; RV32IXQCI-NEXT: li a1, 2046 -; RV32IXQCI-NEXT: qc.mvltu a0, a1, t3, a2 +; RV32IXQCI-NEXT: qc.mvltu a0, a1, t1, a2 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: foo: @@ -417,8 +417,8 @@ define i32 @select_sge_int16min(i32 signext %x, i32 signext %y, i32 signext %z) ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: lui a3, 1048560 ; RV32IXQCI-NEXT: addi a3, a3, -1 -; RV32IXQCI-NEXT: qc.mvlt a2, a3, a0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mvge a1, a3, a0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: select_sge_int16min: @@ -471,10 +471,10 @@ define i64 @select_sge_int32min(i64 %x, i64 %y, i64 %z) { ; RV32IXQCI-NEXT: srli a0, a1, 31 ; RV32IXQCI-NEXT: xori a0, a0, 1 ; RV32IXQCI-NEXT: qc.mveqi a0, a1, -1, a6 -; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32IXQCI-NEXT: qc.mvnei a5, a0, 0, a3 -; RV32IXQCI-NEXT: mv a0, a4 -; RV32IXQCI-NEXT: mv a1, a5 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32IXQCI-NEXT: qc.mveqi a3, a0, 0, a5 +; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: mv a1, a3 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: select_sge_int32min: diff --git a/llvm/test/CodeGen/RISCV/select-cond.ll b/llvm/test/CodeGen/RISCV/select-cond.ll index b88fe9a..3ca0f46 100644 --- a/llvm/test/CodeGen/RISCV/select-cond.ll +++ b/llvm/test/CodeGen/RISCV/select-cond.ll @@ -35,8 +35,8 @@ define signext i32 @select_i32_trunc(i32 signext %cond, i32 signext %x, i32 sign ; RV32-XQCICM-LABEL: select_i32_trunc: ; RV32-XQCICM: # %bb.0: ; RV32-XQCICM-NEXT: andi a0, a0, 1 -; RV32-XQCICM-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32-XQCICM-NEXT: mv a0, a2 +; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32-XQCICM-NEXT: mv a0, a1 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_trunc: @@ -48,8 +48,8 @@ define signext i32 @select_i32_trunc(i32 signext %cond, i32 signext %x, i32 sign ; RV32IXQCI-LABEL: select_i32_trunc: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_trunc: @@ -93,8 +93,8 @@ define signext i32 @select_i32_param(i1 signext %cond, i32 signext %x, i32 signe ; RV32-XQCICM-LABEL: select_i32_param: ; RV32-XQCICM: # %bb.0: ; RV32-XQCICM-NEXT: andi a0, a0, 1 -; RV32-XQCICM-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32-XQCICM-NEXT: mv a0, a2 +; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32-XQCICM-NEXT: mv a0, a1 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_param: @@ -106,8 +106,8 @@ define signext i32 @select_i32_param(i1 signext %cond, i32 signext %x, i32 signe ; RV32IXQCI-LABEL: select_i32_param: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_param: @@ -148,8 +148,8 @@ define signext i32 @select_i32_eq(i32 signext %a, i32 signext %b, i32 signext %x ; ; RV32-XQCICM-LABEL: select_i32_eq: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mveq a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvne a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_eq: @@ -163,8 +163,8 @@ define signext i32 @select_i32_eq(i32 signext %a, 
i32 signext %b, i32 signext %x ; ; RV32IXQCI-LABEL: select_i32_eq: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mveq a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvne a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_eq: @@ -205,8 +205,8 @@ define signext i32 @select_i32_ne(i32 signext %a, i32 signext %b, i32 signext %x ; ; RV32-XQCICM-LABEL: select_i32_ne: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvne a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mveq a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_ne: @@ -220,8 +220,8 @@ define signext i32 @select_i32_ne(i32 signext %a, i32 signext %b, i32 signext %x ; ; RV32IXQCI-LABEL: select_i32_ne: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvne a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveq a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_ne: @@ -262,8 +262,8 @@ define signext i32 @select_i32_ugt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_ugt: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvltu a3, a1, a0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvgeu a2, a1, a0, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_ugt: @@ -277,8 +277,8 @@ define signext i32 @select_i32_ugt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_ugt: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvltu a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_ugt: @@ -319,8 +319,8 @@ define signext i32 @select_i32_uge(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_uge: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvgeu a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvltu a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_uge: @@ -334,8 +334,8 @@ define signext i32 @select_i32_uge(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_uge: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvgeu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_uge: @@ -376,8 +376,8 @@ define signext i32 @select_i32_ult(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_ult: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_ult: @@ -391,8 +391,8 @@ define signext i32 @select_i32_ult(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_ult: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_ult: @@ -433,8 +433,8 @@ define signext i32 @select_i32_ule(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_ule: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvgeu a3, a1, a0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvltu a2, a1, a0, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; 
RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_ule: @@ -448,8 +448,8 @@ define signext i32 @select_i32_ule(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_ule: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvgeu a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltu a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_ule: @@ -490,8 +490,8 @@ define signext i32 @select_i32_sgt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_sgt: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvlt a3, a1, a0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvge a2, a1, a0, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_sgt: @@ -505,8 +505,8 @@ define signext i32 @select_i32_sgt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_sgt: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvlt a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvge a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_sgt: @@ -547,8 +547,8 @@ define signext i32 @select_i32_sge(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_sge: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvge a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvlt a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_sge: @@ -562,8 +562,8 @@ define signext i32 @select_i32_sge(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_sge: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvge a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_sge: @@ -604,8 +604,8 @@ define signext i32 @select_i32_slt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_slt: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvlt a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvge a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_slt: @@ -619,8 +619,8 @@ define signext i32 @select_i32_slt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_slt: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvlt a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvge a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_slt: @@ -661,8 +661,8 @@ define signext i32 @select_i32_sle(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_sle: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvge a3, a1, a0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvlt a2, a1, a0, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_sle: @@ -676,8 +676,8 @@ define signext i32 @select_i32_sle(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_sle: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvge a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_sle: @@ -723,11 +723,11 @@ define i64 @select_i64_trunc(i64 %cond, i64 %x, i64 %y) nounwind { ; ; RV32-XQCICM-LABEL: select_i64_trunc: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: mv a1, a5 +; 
RV32-XQCICM-NEXT: mv a1, a3 ; RV32-XQCICM-NEXT: andi a0, a0, 1 -; RV32-XQCICM-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32-XQCICM-NEXT: qc.mvnei a1, a0, 0, a3 -; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a5 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_trunc: @@ -740,11 +740,11 @@ define i64 @select_i64_trunc(i64 %cond, i64 %x, i64 %y) nounwind { ; ; RV32IXQCI-LABEL: select_i64_trunc: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: mv a1, a5 +; RV32IXQCI-NEXT: mv a1, a3 ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a3 -; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a5 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_trunc: @@ -792,10 +792,10 @@ define i64 @select_i64_param(i1 %cond, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-LABEL: select_i64_param: ; RV32-XQCICM: # %bb.0: ; RV32-XQCICM-NEXT: andi a0, a0, 1 -; RV32-XQCICM-NEXT: qc.mvnei a3, a0, 0, a1 -; RV32-XQCICM-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 -; RV32-XQCICM-NEXT: mv a1, a4 +; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a3 +; RV32-XQCICM-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32-XQCICM-NEXT: mv a0, a1 +; RV32-XQCICM-NEXT: mv a1, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_param: @@ -810,10 +810,10 @@ define i64 @select_i64_param(i1 %cond, i64 %x, i64 %y) nounwind { ; RV32IXQCI-LABEL: select_i64_param: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 0, a1 -; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32IXQCI-NEXT: mv a0, a3 -; RV32IXQCI-NEXT: mv a1, a4 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32IXQCI-NEXT: mv a0, a1 +; RV32IXQCI-NEXT: mv a1, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_param: @@ -866,10 +866,10 @@ define i64 @select_i64_eq(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: xor a1, a1, a3 ; RV32-XQCICM-NEXT: xor a0, a0, a2 ; RV32-XQCICM-NEXT: or a0, a0, a1 -; RV32-XQCICM-NEXT: qc.mveqi a6, a0, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a0, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a0, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a0, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_eq: @@ -887,10 +887,10 @@ define i64 @select_i64_eq(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: xor a1, a1, a3 ; RV32IXQCI-NEXT: xor a0, a0, a2 ; RV32IXQCI-NEXT: or a0, a0, a1 -; RV32IXQCI-NEXT: qc.mveqi a6, a0, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a0, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a0, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_eq: @@ -943,10 +943,10 @@ define i64 @select_i64_ne(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: xor a1, a1, a3 ; RV32-XQCICM-NEXT: xor a0, a0, a2 ; RV32-XQCICM-NEXT: or a0, a0, a1 -; RV32-XQCICM-NEXT: qc.mvnei a6, a0, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a0, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a0, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a0, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; 
RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_ne: @@ -964,10 +964,10 @@ define i64 @select_i64_ne(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: xor a1, a1, a3 ; RV32IXQCI-NEXT: xor a0, a0, a2 ; RV32IXQCI-NEXT: or a0, a0, a1 -; RV32IXQCI-NEXT: qc.mvnei a6, a0, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a0, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a0, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a0, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_ne: @@ -1025,10 +1025,10 @@ define i64 @select_i64_ugt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a2, a0 ; RV32-XQCICM-NEXT: sltu a2, a3, a1 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_ugt: @@ -1050,10 +1050,10 @@ define i64 @select_i64_ugt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a2, a0 ; RV32IXQCI-NEXT: sltu a2, a3, a1 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_ugt: @@ -1111,10 +1111,10 @@ define i64 @select_i64_uge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a0, a2 ; RV32-XQCICM-NEXT: sltu a2, a1, a3 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_uge: @@ -1136,10 +1136,10 @@ define i64 @select_i64_uge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a0, a2 ; RV32IXQCI-NEXT: sltu a2, a1, a3 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_uge: @@ -1197,10 +1197,10 @@ define i64 @select_i64_ult(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a0, a2 ; RV32-XQCICM-NEXT: sltu a2, a1, a3 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_ult: @@ -1222,10 +1222,10 @@ define i64 @select_i64_ult(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a0, a2 ; RV32IXQCI-NEXT: sltu a2, a1, a3 ; 
RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_ult: @@ -1283,10 +1283,10 @@ define i64 @select_i64_ule(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a2, a0 ; RV32-XQCICM-NEXT: sltu a2, a3, a1 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_ule: @@ -1308,10 +1308,10 @@ define i64 @select_i64_ule(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a2, a0 ; RV32IXQCI-NEXT: sltu a2, a3, a1 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_ule: @@ -1369,10 +1369,10 @@ define i64 @select_i64_sgt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a2, a0 ; RV32-XQCICM-NEXT: slt a2, a3, a1 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_sgt: @@ -1394,10 +1394,10 @@ define i64 @select_i64_sgt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a2, a0 ; RV32IXQCI-NEXT: slt a2, a3, a1 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_sgt: @@ -1455,10 +1455,10 @@ define i64 @select_i64_sge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a0, a2 ; RV32-XQCICM-NEXT: slt a2, a1, a3 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_sge: @@ -1480,10 +1480,10 @@ define i64 @select_i64_sge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a0, a2 ; RV32IXQCI-NEXT: slt a2, a1, a3 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, 
a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_sge: @@ -1541,10 +1541,10 @@ define i64 @select_i64_slt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a0, a2 ; RV32-XQCICM-NEXT: slt a2, a1, a3 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_slt: @@ -1566,10 +1566,10 @@ define i64 @select_i64_slt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a0, a2 ; RV32IXQCI-NEXT: slt a2, a1, a3 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_slt: @@ -1627,10 +1627,10 @@ define i64 @select_i64_sle(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a2, a0 ; RV32-XQCICM-NEXT: slt a2, a3, a1 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_sle: @@ -1652,10 +1652,10 @@ define i64 @select_i64_sle(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a2, a0 ; RV32IXQCI-NEXT: slt a2, a3, a1 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_sle: diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll index 19fade6..8273c65 100644 --- a/llvm/test/CodeGen/RISCV/select.ll +++ b/llvm/test/CodeGen/RISCV/select.ll @@ -1153,8 +1153,8 @@ define i32 @select_sub_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_sub_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: sub a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = sub i32 %a, %b @@ -1301,9 +1301,9 @@ define i32 @select_sub_4(i1 zeroext %cond, i32 %x) { ; ; RV32IXQCI-LABEL: select_sub_4: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a1, a1, -128 -; RV32IXQCI-NEXT: li a2, 128 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2 +; RV32IXQCI-NEXT: addi a2, a1, -128 +; RV32IXQCI-NEXT: li a1, 128 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 ; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %add = sub i32 %x, 128 @@ -1348,8 +1348,8 @@ define i32 @select_and_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_and_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: and a1, a1, a2 -; 
RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = and i32 %a, %b @@ -1493,8 +1493,8 @@ define i32 @select_udiv_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_udiv_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: divu a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = udiv i32 %a, %b @@ -1682,8 +1682,8 @@ define i32 @select_shl_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_shl_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: sll a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = shl i32 %a, %b @@ -1798,8 +1798,8 @@ define i32 @select_ashr_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_ashr_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: sra a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = ashr i32 %a, %b @@ -1914,8 +1914,8 @@ define i32 @select_lshr_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_lshr_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: srl a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = lshr i32 %a, %b @@ -2371,9 +2371,9 @@ define i32 @select_cst5(i1 zeroext %cond) { ; RV32IXQCI-LABEL: select_cst5: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: lui a1, 1 -; RV32IXQCI-NEXT: addi a1, a1, -2047 -; RV32IXQCI-NEXT: li a2, 2047 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2 +; RV32IXQCI-NEXT: addi a2, a1, -2047 +; RV32IXQCI-NEXT: li a1, 2047 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 ; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %ret = select i1 %cond, i32 2047, i32 2049 @@ -2870,8 +2870,8 @@ define void @select_redundant_czero_eqz1(ptr %0, ptr %1) { ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: lui a2, %hi(select_redundant_czero_eqz_data) ; RV32IXQCI-NEXT: addi a2, a2, %lo(select_redundant_czero_eqz_data) -; RV32IXQCI-NEXT: qc.mveqi a0, a0, 0, a2 -; RV32IXQCI-NEXT: sw a0, 0(a1) +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a0 +; RV32IXQCI-NEXT: sw a2, 0(a1) ; RV32IXQCI-NEXT: ret entry: %3 = icmp eq ptr %0, null diff --git a/llvm/test/CodeGen/RISCV/xqcicm.ll b/llvm/test/CodeGen/RISCV/xqcicm.ll index 1741be7..fb48301 100644 --- a/llvm/test/CodeGen/RISCV/xqcicm.ll +++ b/llvm/test/CodeGen/RISCV/xqcicm.ll @@ -23,15 +23,15 @@ define i32 @select_example(i32 %cond, i32 %x, i32 %y) { ; RV32IXQCICM-LABEL: select_example: ; RV32IXQCICM: # %bb.0: # %entry ; RV32IXQCICM-NEXT: andi a0, a0, 1 -; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCICM-NEXT: mv a0, a2 +; RV32IXQCICM-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCICM-NEXT: mv a0, a1 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_example: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %cond_trunc = trunc i32 %cond to i1 @@ -52,14 +52,14 @@ define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; 
RV32IXQCICM-LABEL: select_cc_example_eq: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 %a, 11 @@ -80,14 +80,14 @@ define i32 @select_cc_example_eq1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 11, %a @@ -108,14 +108,14 @@ define i32 @select_cc_example_ne(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 %a, 11 @@ -136,14 +136,14 @@ define i32 @select_cc_example_ne1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 11, %a @@ -164,14 +164,14 @@ define i32 @select_cc_example_slt(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_slt: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_slt: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp slt i32 %a, 11 @@ -192,14 +192,14 @@ define i32 @select_cc_example_slt1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_slt1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_slt1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv 
a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp slt i32 11, %a @@ -220,14 +220,14 @@ define i32 @select_cc_example_sle(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sle: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sle: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sle i32 %a, 11 @@ -248,14 +248,14 @@ define i32 @select_cc_example_sle1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sle1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sle1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sle i32 11, %a @@ -276,14 +276,14 @@ define i32 @select_cc_example_sgt(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sgt: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sgt: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sgt i32 %a, 11 @@ -304,14 +304,14 @@ define i32 @select_cc_example_sgt1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sgt1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sgt1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sgt i32 11, %a @@ -332,14 +332,14 @@ define i32 @select_cc_example_sge(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sge: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sge: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sge i32 %a, 11 @@ -360,14 +360,14 @@ define i32 @select_cc_example_sge1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sge1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: 
select_cc_example_sge1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sge i32 11, %a @@ -388,14 +388,14 @@ define i32 @select_cc_example_ule(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ule: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ule: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltui a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ule i32 %a, 11 @@ -416,14 +416,14 @@ define i32 @select_cc_example_ule1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ule1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ule1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltui a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ule i32 11, %a @@ -444,14 +444,14 @@ define i32 @select_cc_example_ugt(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ugt: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ugt: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltui a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ugt i32 %a, 11 @@ -472,14 +472,14 @@ define i32 @select_cc_example_ugt1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ugt1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ugt1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltui a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ugt i32 11, %a @@ -500,14 +500,14 @@ define i32 @select_cc_example_ult(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ult: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ult: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltui a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ult i32 %a, 11 @@ -528,14 +528,14 @@ define i32 @select_cc_example_ult1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ult1: ; RV32IXQCICM: # %bb.0: # %entry -; 
RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ult1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltui a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ult i32 11, %a @@ -556,14 +556,14 @@ define i32 @select_cc_example_uge(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_uge: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_uge: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltui a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp uge i32 %a, 11 @@ -584,14 +584,14 @@ define i32 @select_cc_example_uge1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_uge1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_uge1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltui a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp uge i32 11, %a @@ -611,14 +611,14 @@ define i32 @select_cc_example_eq_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveq a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvne a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveq a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvne a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 %a, %b @@ -638,14 +638,14 @@ define i32 @select_cc_example_ne_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvne a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveq a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvne a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveq a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 %a, %b @@ -665,14 +665,14 @@ define i32 @select_cc_example_slt_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_slt_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlt a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvge a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_slt_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlt a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvge a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: 
%cmp = icmp slt i32 %a, %b @@ -692,14 +692,14 @@ define i32 @select_cc_example_sge_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sge_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvge a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlt a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sge_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvge a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sge i32 %a, %b @@ -719,14 +719,14 @@ define i32 @select_cc_example_sgt_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sgt_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlt a3, a1, a0, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvge a2, a1, a0, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sgt_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlt a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvge a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sgt i32 %a, %b @@ -746,14 +746,14 @@ define i32 @select_cc_example_sle_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sle_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvge a3, a1, a0, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlt a2, a1, a0, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sle_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvge a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sle i32 %a, %b @@ -773,14 +773,14 @@ define i32 @select_cc_example_ugt_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ugt_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltu a3, a1, a0, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeu a2, a1, a0, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ugt_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltu a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ugt i32 %a, %b @@ -800,14 +800,14 @@ define i32 @select_cc_example_ult_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ult_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ult_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ult i32 %a, %b @@ -827,14 +827,14 @@ define i32 @select_cc_example_uge_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_uge_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeu a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltu a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: 
select_cc_example_uge_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp uge i32 %a, %b @@ -854,14 +854,14 @@ define i32 @select_cc_example_ule_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ule_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeu a3, a1, a0, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltu a2, a1, a0, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ule_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeu a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltu a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ule i32 %a, %b @@ -883,18 +883,263 @@ define i32 @select_cc_example_ule_neg(i32 %a, i32 %b, i32 %x, i32 %y) { ; RV32IXQCICM-LABEL: select_cc_example_ule_neg: ; RV32IXQCICM: # %bb.0: # %entry ; RV32IXQCICM-NEXT: li a1, -10 -; RV32IXQCICM-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ule_neg: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: li a1, -10 -; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ule i32 %a, -11 %sel = select i1 %cmp, i32 %x, i32 %y ret i32 %sel } + +define i32 @select_cc_example_eq_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_eq_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: beq a2, a1, .LBB32_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB32_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_eq_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvne a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_eq_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvne a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp eq i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_lt_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_lt_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: blt a2, a1, .LBB33_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB33_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_lt_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvge a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_lt_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvge a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp slt i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_ge_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ge_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: bge a2, a1, .LBB34_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB34_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ge_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvlt a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ge_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvlt a0, a2, 
a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp sge i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_ult_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ult_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: bltu a2, a1, .LBB35_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB35_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ult_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvgeu a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ult_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvgeu a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp ult i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_uge_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_uge_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: bgeu a2, a1, .LBB36_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB36_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_uge_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvltu a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_uge_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvltu a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp uge i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_eq_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_eq_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 11 +; RV32I-NEXT: beq a2, a1, .LBB37_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB37_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_eq_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvnei a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_eq_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvnei a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp eq i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_lt_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_lt_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 11 +; RV32I-NEXT: blt a2, a1, .LBB38_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB38_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_lt_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvgei a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_lt_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvgei a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp slt i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_ge_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ge_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 10 +; RV32I-NEXT: blt a1, a2, .LBB39_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB39_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ge_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvlti a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ge_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvlti a0, a2, 
11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp sge i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_ult_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ult_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 11 +; RV32I-NEXT: bltu a2, a1, .LBB40_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB40_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ult_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvgeui a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ult_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvgeui a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp ult i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_uge_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_uge_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 10 +; RV32I-NEXT: bltu a1, a2, .LBB41_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB41_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_uge_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvltui a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_uge_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvltui a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp uge i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} diff --git a/llvm/test/CodeGen/RISCV/xqcics.ll b/llvm/test/CodeGen/RISCV/xqcics.ll index 38de8fb..5b7ca9e7 100644 --- a/llvm/test/CodeGen/RISCV/xqcics.ll +++ b/llvm/test/CodeGen/RISCV/xqcics.ll @@ -134,14 +134,14 @@ define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 %a, 11 @@ -167,14 +167,14 @@ define i32 @select_cc_example_eq_c(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq_c: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq_c: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 11, %a @@ -200,14 +200,14 @@ define i32 @select_cc_example_ne(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 
+; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 %a, 11 @@ -233,14 +233,14 @@ define i32 @select_cc_example_ne_c(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne_c: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne_c: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 11, %a diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll new file mode 100644 index 0000000..093d172 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll @@ -0,0 +1,24 @@ +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_relaxed_printf_string_address_space %s -o - | FileCheck %s +; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR + +; CHECK: OpExtension "SPV_EXT_relaxed_printf_string_address_space" +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] printf + +; CHECK-ERROR: LLVM ERROR: SPV_EXT_relaxed_printf_string_address_space is required because printf uses a format string not in constant address space. + +@.str = private unnamed_addr addrspace(1) constant [4 x i8] c"%d\0A\00", align 1 + +declare spir_func i32 @printf(ptr addrspace(4), ...) + +define spir_kernel void @test_kernel() { +entry: + ; Format string in addrspace(1) → cast to addrspace(4) + %format = addrspacecast ptr addrspace(1) @.str to ptr addrspace(4) + %val = alloca i32, align 4 + store i32 123, ptr %val, align 4 + %loaded = load i32, ptr %val, align 4 + + ; Call printf with non-constant format string + %call = call spir_func i32 (ptr addrspace(4), ...) 
@printf(ptr addrspace(4) %format, i32 %loaded) + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll new file mode 100644 index 0000000..b54d59b --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll @@ -0,0 +1,48 @@ +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_relaxed_printf_string_address_space %s -o - | FileCheck %s +; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR + +; CHECK: OpExtension "SPV_EXT_relaxed_printf_string_address_space" +; CHECK: %[[#ExtInstSetId:]] = OpExtInstImport "OpenCL.std" +; CHECK-DAG: %[[#TypeInt32Id:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#TypeInt8Id:]] = OpTypeInt 8 0 +; CHECK-DAG: %[[#TypeInt64Id:]] = OpTypeInt 64 0 +; CHECK-DAG: %[[#TypeArrayId:]] = OpTypeArray %[[#TypeInt8Id]] %[[#]] +; CHECK-DAG: %[[#ConstantStorClassGlobalPtrTy:]] = OpTypePointer UniformConstant %[[#TypeArrayId]] +; CHECK-DAG: %[[#WGStorClassGlobalPtrTy:]] = OpTypePointer Workgroup %[[#TypeArrayId]] +; CHECK-DAG: %[[#CrossWFStorClassGlobalPtrTy:]] = OpTypePointer CrossWorkgroup %[[#TypeArrayId]] +; CHECK-DAG: %[[#FunctionStorClassPtrTy:]] = OpTypePointer Function %[[#TypeInt8Id]] +; CHECK-DAG: %[[#WGStorClassPtrTy:]] = OpTypePointer Workgroup %[[#TypeInt8Id]] +; CHECK-DAG: %[[#CrossWFStorClassPtrTy:]] = OpTypePointer CrossWorkgroup %[[#TypeInt8Id]] +; CHECK: %[[#ConstantCompositeId:]] = OpConstantComposite %[[#TypeArrayId]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] +; CHECK: %[[#]] = OpVariable %[[#ConstantStorClassGlobalPtrTy]] UniformConstant %[[#ConstantCompositeId]] +; CHECK: %[[#]] = OpVariable %[[#CrossWFStorClassGlobalPtrTy]] CrossWorkgroup %[[#ConstantCompositeId]] +; CHECK: %[[#]] = OpVariable %[[#WGStorClassGlobalPtrTy]] Workgroup %[[#ConstantCompositeId]] +; CHECK: %[[#GEP1:]] = OpInBoundsPtrAccessChain %[[#FunctionStorClassPtrTy]] %[[#]] %[[#]] %[[#]] +; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP1]] +; CHECK: %[[#GEP2:]] = OpInBoundsPtrAccessChain %[[#CrossWFStorClassPtrTy]] %[[#]] %[[#]] %[[#]] +; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP2]] +; CHECK: %[[#GEP3:]] = OpInBoundsPtrAccessChain %[[#WGStorClassPtrTy]] %[[#]] %[[#]] %[[#]] +; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP3]] + +; CHECK-ERROR: LLVM ERROR: SPV_EXT_relaxed_printf_string_address_space is required because printf uses a format string not in constant address space. 
+ +@0 = internal unnamed_addr addrspace(2) constant [6 x i8] c"Test\0A\00", align 1 +@1 = internal unnamed_addr addrspace(1) constant [6 x i8] c"Test\0A\00", align 1 +@2 = internal unnamed_addr addrspace(3) constant [6 x i8] c"Test\0A\00", align 1 + +define spir_kernel void @test() { + %tmp1 = alloca [6 x i8], align 1 + call void @llvm.memcpy.p0.p2.i64(ptr align 1 %tmp1, ptr addrspace(2) align 1 @0, i64 6, i1 false) + %1 = getelementptr inbounds [6 x i8], ptr %tmp1, i32 0, i32 0 + %2 = call spir_func i32 @_Z18__spirv_ocl_printfPc(ptr %1) + %3 = getelementptr inbounds [6 x i8], ptr addrspace(1) @1, i32 0, i32 0 + %4 = call spir_func i32 @_Z18__spirv_ocl_printfPU3AS1c(ptr addrspace(1) %3) + %5 = getelementptr inbounds [6 x i8], ptr addrspace(3) @2, i32 0, i32 0 + %6 = call spir_func i32 @_Z18__spirv_ocl_printfPU3AS3c(ptr addrspace(3) %5) + ret void +} + +declare spir_func i32 @_Z18__spirv_ocl_printfPc(ptr) +declare spir_func i32 @_Z18__spirv_ocl_printfPU3AS1c(ptr addrspace(1)) +declare spir_func i32 @_Z18__spirv_ocl_printfPU3AS3c(ptr addrspace(3)) +declare void @llvm.memcpy.p0.p2.i64(ptr captures(none), ptr addrspace(2) captures(none) readonly, i64, i1) diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll new file mode 100644 index 0000000..3624f14 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll @@ -0,0 +1,19 @@ +; RUN: not llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_bindless_images %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR + +; CHECK-ERROR: LLVM ERROR: Parameter value must be a 32-bit scalar in case of Physical32 addressing model or a 64-bit scalar in case of Physical64 addressing model + +target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64" +target triple = "spir64-unknown-unknown" + +define spir_func void @foo(i32 %in) { + %img = call spir_func target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) @_Z33__spirv_ConvertHandleToImageINTELi(i32 %in) + %samp = call spir_func target("spirv.Sampler") @_Z35__spirv_ConvertHandleToSamplerINTELl(i64 42) + %sampImage = call spir_func target("spirv.SampledImage", i64, 1, 0, 0, 0, 0, 0, 0) @_Z40__spirv_ConvertHandleToSampledImageINTELl(i64 43) + ret void +} + +declare spir_func target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) @_Z33__spirv_ConvertHandleToImageINTELi(i32) + +declare spir_func target("spirv.Sampler") @_Z35__spirv_ConvertHandleToSamplerINTELl(i64) + +declare spir_func target("spirv.SampledImage", i64, 1, 0, 0, 0, 0, 0, 0) @_Z40__spirv_ConvertHandleToSampledImageINTELl(i64) diff --git a/llvm/test/CodeGen/SPIRV/image_store.ll b/llvm/test/CodeGen/SPIRV/image_store.ll new file mode 100644 index 0000000..a70651c --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/image_store.ll @@ -0,0 +1,22 @@ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; Image types may be represented in two ways while translating to SPIR-V: +; - OpenCL form, for example, '%opencl.image2d_ro_t', +; - SPIR-V form, for example, '%spirv.Image._void_1_0_0_0_0_0_0', +; but it is still one type which should be translated to one SPIR-V type. 
+; +; The test checks that the code below is successfully translated and only one +; SPIR-V type for images is generated (no duplicate OpTypeImage instructions). + +; CHECK: %[[#]] = OpTypeImage %[[#]] 2D +; CHECK-NOT: %[[#]] = OpTypeImage %[[#]] 2D + +declare spir_func <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_ff(ptr addrspace(1), ptr addrspace(2), <2 x float>, float) + +define spir_kernel void @read_image(ptr addrspace(1) %srcimg, ptr addrspace(2) %sampler){ +entry: + %spirvimg.addr = alloca target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0), align 8 + %val = call <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_ff(ptr addrspace(1) %srcimg, ptr addrspace(2) %sampler, <2 x float> zeroinitializer, float 0.0) + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll new file mode 100644 index 0000000..b788f34b --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll @@ -0,0 +1,28 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-LABEL: Begin function original_testcase +define fastcc void @original_testcase() { +top: + ; CHECK: OpCompositeInsert + %0 = insertvalue [1 x ptr] zeroinitializer, ptr poison, 0 + ret void +} + +; CHECK-LABEL: Begin function additional_testcases +define fastcc void @additional_testcases() { +top: + ; Test with different pointer types + ; CHECK: OpCompositeInsert + %1 = insertvalue [1 x ptr] zeroinitializer, ptr undef, 0 + ; CHECK-NEXT: OpCompositeInsert + %2 = insertvalue {ptr, i32} zeroinitializer, ptr poison, 0 + ; CHECK-NEXT: OpCompositeInsert + %3 = insertvalue {ptr, ptr} undef, ptr null, 0 + + ; Test with undef aggregate + ; CHECK-NEXT: OpCompositeInsert + %4 = insertvalue [1 x ptr] undef, ptr undef, 0 + + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll new file mode 100644 index 0000000..49bb8ea --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll @@ -0,0 +1,56 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: OpFOrdEqual +; CHECK-DAG: OpFOrdGreaterThan +; CHECK-DAG: OpFOrdGreaterThanEqual +; CHECK-DAG: OpFOrdLessThan +; CHECK-DAG: OpFOrdLessThanEqual +; CHECK-DAG: OpFOrdNotEqual +; CHECK-DAG: OpOrdered +; CHECK-DAG: OpFUnordEqual +; CHECK-DAG: OpFUnordGreaterThan +; CHECK-DAG: OpFUnordGreaterThanEqual +; CHECK-DAG: OpFUnordLessThan +; CHECK-DAG: OpFUnordLessThanEqual +; CHECK-DAG: OpFUnordNotEqual +; CHECK-DAG: OpUnordered + +define dso_local spir_kernel void @test(float %a){ +entry: + %cmp = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"oeq", metadata !"fpexcept.strict") + %cmp1 = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"ogt", metadata !"fpexcept.strict") + %cmp2 = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"oge", metadata !"fpexcept.strict") + %cmp3 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"olt", metadata !"fpexcept.strict") + %cmp4 = tail call i1 
@llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ole", metadata !"fpexcept.strict") + %cmp5 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"one", metadata !"fpexcept.strict") + %cmp6 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ord", metadata !"fpexcept.strict") + %cmp7 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ueq", metadata !"fpexcept.strict") + %cmp8 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ugt", metadata !"fpexcept.strict") + %cmp9 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"uge", metadata !"fpexcept.strict") + %cmp10 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ult", metadata !"fpexcept.strict") + %cmp11 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ule", metadata !"fpexcept.strict") + %cmp12 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"une", metadata !"fpexcept.strict") + %cmp13 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"uno", metadata !"fpexcept.strict") + + %or1 = or i1 %cmp, %cmp1 + %or2 = or i1 %or1, %cmp2 + %or3 = or i1 %or2, %cmp3 + %or4 = or i1 %or3, %cmp4 + %or5 = or i1 %or4, %cmp5 + %or6 = or i1 %or5, %cmp6 + %or7 = or i1 %or6, %cmp7 + %or8 = or i1 %or7, %cmp8 + %or9 = or i1 %or8, %cmp9 + %or10 = or i1 %or9, %cmp10 + %or11 = or i1 %or10, %cmp11 + %or12 = or i1 %or11, %cmp12 + %or13 = or i1 %or12, %cmp13 + br i1 %or13, label %true_block, label %false_block +true_block: + ret void +false_block: + ret void +} +declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata) +declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll new file mode 100644 index 0000000..fd8cb9d --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll @@ -0,0 +1,14 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s + +; CHECK: OpNop +; CHECK-NEXT: OpReturn + +declare void @llvm.debugtrap() + +define spir_kernel void @foo(ptr addrspace(1) %a){ +entry: + %a.addr = alloca ptr addrspace(1), align 4 + store ptr addrspace(1) %a, ptr %a.addr, align 4 + call void @llvm.debugtrap() + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll new file mode 100644 index 0000000..f6434e9 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll @@ -0,0 +1,114 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: %[[#extinst_id:]] = OpExtInstImport "OpenCL.std" +; CHECK-DAG: %[[#float_32_type:]] = OpTypeFloat 32 +; CHECK-DAG: %[[#int_32_type:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#fn_ptr_type_i32:]] = OpTypePointer Function %[[#int_32_type]] +; CHECK-DAG: %[[#const_negzero:]] = OpConstant %[[#float_32_type]] -0 +; CHECK-DAG: %[[#vec2_float_type:]] = OpTypeVector %[[#float_32_type]] 2 +; CHECK-DAG: %[[#vec2_int_type:]] = OpTypeVector %[[#int_32_type]] 2 +; CHECK-DAG: %[[#fn_ptr_type_vec2_i32:]] = OpTypePointer Function %[[#vec2_int_type]] +; CHECK-DAG: 
%[[#vec2_null:]] = OpConstantNull %[[#vec2_float_type]] +; CHECK-DAG: %[[#scalar_null:]] = OpConstantNull %[[#float_32_type]] +; CHECK-DAG: %[[#const_composite1:]] = OpConstantComposite %[[#vec2_float_type]] %[[#scalar_null]] %[[#const_negzero]] +; CHECK-DAG: %[[#vec4_float_type:]] = OpTypeVector %[[#float_32_type]] 4 +; CHECK-DAG: %[[#vec4_int_type:]] = OpTypeVector %[[#int_32_type]] 4 +; CHECK-DAG: %[[#fn_ptr_type_vec4_i32:]] = OpTypePointer Function %[[#vec4_int_type]] +; CHECK-DAG: %[[#const_composite2:]] = OpConstantComposite %[[#vec4_float_type]] %[[#const_16:]] %[[#const_neg32:]] %[[#const_0:]] %[[#const_9999:]] +; CHECK-DAG: %[[#float_64_type:]] = OpTypeFloat 64 +; CHECK-DAG: %[[#vec2_double_type:]] = OpTypeVector %[[#float_64_type]] 2 + +; CHECK: %[[#]] = OpFunctionParameter %[[#float_32_type]] +; CHECK: %[[#var1:]] = OpVariable %[[#fn_ptr_type_i32]] Function +; CHECK: %[[#extinst1:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#const_negzero]] %[[#var1]] +; CHECK: %[[#exp_part_var:]] = OpLoad %[[#int_32_type]] %[[#var1]] +; CHECK: OpReturnValue %[[#exp_part_var]] +define i32 @frexp_negzero(float %x) { + %ret = call { float, i32 } @llvm.frexp.f32.i32(float -0.0) + %f_part = extractvalue { float, i32 } %ret, 0 + %exp_part = extractvalue { float, i32 } %ret, 1 + ret i32 %exp_part +} + +; CHECK: %[[#x_var4:]] = OpFunctionParameter %[[#float_32_type]] +; CHECK: %[[#var10:]] = OpVariable %[[#fn_ptr_type_i32]] Function +; CHECK: %[[#extinst10:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#x_var4]] %[[#var10]] +; CHECK: %[[#exp_part_var2:]] = OpLoad %[[#int_32_type]] %[[#var10]] +; CHECK: OpReturnValue %[[#exp_part_var2]] +define i32 @frexp_frexp_get_int(float %x) { + %frexp0 = call { float, i32 } @llvm.frexp.f32.i32(float %x) + %f_part = extractvalue { float, i32 } %frexp0, 0 + %exp_part = extractvalue { float, i32 } %frexp0, 1 + ret i32 %exp_part +} + +; CHECK: %[[#var3:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function +; CHECK: %[[#extinst3:]] = OpExtInst %[[#vec2_float_type]] %[[#extinst_id]] frexp %[[#vec2_null]] %[[#var3]] +; CHECK: %[[#f_part_var2:]] = OpLoad %[[#vec2_int_type]] %[[#var3]] +; CHECK: OpReturnValue %[[#extinst3]] +define <2 x float> @frexp_zero_vector() { + %ret = call { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float> zeroinitializer) + %f_part = extractvalue { <2 x float>, <2 x i32> } %ret, 0 + %exp_part = extractvalue { <2 x float>, <2 x i32> } %ret, 1 + ret <2 x float> %f_part +} + +; CHECK: %[[#var4:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function +; CHECK: %[[#extinst4:]] = OpExtInst %[[#vec2_float_type]] %[[#extinst_id]] frexp %[[#const_composite1]] %[[#var4]] +; CHECK: %[[#f_part_var3:]] = OpLoad %[[#vec2_int_type]] %[[#var4]] +; CHECK: OpReturnValue %[[#extinst4]] +define <2 x float> @frexp_zero_negzero_vector() { + %ret = call { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float> <float 0.0, float -0.0>) + %f_part = extractvalue { <2 x float>, <2 x i32> } %ret, 0 + %exp_part = extractvalue { <2 x float>, <2 x i32> } %ret, 1 + ret <2 x float> %f_part +} + +; CHECK: %[[#var5:]] = OpVariable %[[#fn_ptr_type_vec4_i32]] Function +; CHECK: %[[#extinst5:]] = OpExtInst %[[#vec4_float_type]] %[[#extinst_id]] frexp %[[#const_composite2]] %[[#var5]] +; CHECK: %[[#f_part_var4:]] = OpLoad %[[#vec4_int_type]] %[[#var5]] +; CHECK: OpReturnValue %[[#extinst5]] +define <4 x float> @frexp_nonsplat_vector() { + %ret = call { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float> <float 16.0, float -32.0, float 0.0, 
float 9999.0>) + %f_part = extractvalue { <4 x float>, <4 x i32> } %ret, 0 + %exp_part = extractvalue { <4 x float>, <4 x i32> } %ret, 1 + ret <4 x float> %f_part +} + +; CHECK: %[[#x_var2:]] = OpFunctionParameter %[[#float_32_type]] +; CHECK: %[[#var6:]] = OpVariable %[[#fn_ptr_type_i32]] Function +; CHECK: %[[#var7:]] = OpVariable %[[#fn_ptr_type_i32]] Function +; CHECK: %[[#extinst6:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#x_var2]] %[[#var6]] +; CHECK: %[[#load1:]] = OpLoad %[[#int_32_type]] %[[#var6]] +; CHECK: %[[#extinst7:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#extinst6]] %[[#var7]] +; CHECK: %[[#f_part_var5:]] = OpLoad %[[#int_32_type]] %[[#var7]] +; CHECK: OpReturnValue %[[#extinst7]] +define float @frexp_frexp(float %x) { + %frexp0 = call { float, i32 } @llvm.frexp.f32.i32(float %x) + %frexp0_f_part = extractvalue { float, i32 } %frexp0, 0 + %frexp0_exp_part = extractvalue { float, i32 } %frexp0, 1 + %frexp1 = call { float, i32 } @llvm.frexp.f32.i32(float %frexp0_f_part) + %frexp1_f_part = extractvalue { float, i32 } %frexp1, 0 + %frexp1_exp_part = extractvalue { float, i32 } %frexp1, 1 + ret float %frexp1_f_part +} + +; CHECK: %[[#x_var3:]] = OpFunctionParameter %[[#vec2_double_type]] +; CHECK: %[[#var9:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function +; CHECK: %[[#extinst9:]] = OpExtInst %[[#vec2_double_type]] %[[#extinst_id]] frexp %[[#x_var3]] %[[#var9]] +; CHECK: %[[#f_part_var6:]] = OpLoad %[[#vec2_int_type]] %[[#var9]] +; CHECK: OpReturnValue %[[#extinst9]] +define <2 x double> @frexp_frexp_vector(<2 x double> %x) { + %frexp0 = call { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double> %x) + %f_part = extractvalue { <2 x double>, <2 x i32> } %frexp0, 0 + %exp_part = extractvalue { <2 x double>, <2 x i32> } %frexp0, 1 + ret <2 x double> %f_part +} + +declare { float, i32 } @llvm.frexp.f32.i32(float) +declare { double, i32 } @llvm.frexp.f64.i32(double) +declare { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float>) +declare { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float>) +declare { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double>) +declare { float, i8 } @llvm.frexp.f32.i8(float) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll index a15a807..b3ef6d6 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll @@ -11,7 +11,6 @@ define spir_kernel void @foo(ptr %p) { entry: call void @llvm.trap() - call void @llvm.debugtrap() call void @llvm.ubsantrap(i8 100) %r1 = call ptr @llvm.invariant.start.p0(i64 1024, ptr %p) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll new file mode 100644 index 0000000..51b7664 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll @@ -0,0 +1,86 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-SPIRV-NOT: llvm.memmove + +; CHECK-DAG: %[[#Int8:]] = OpTypeInt 8 0 +; CHECK-DAG: %[[#Int32:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#Int64:]] = OpTypeInt 64 0 +; CHECK-DAG: %[[#Ptr_CrossWG_8:]] = OpTypePointer CrossWorkgroup %[[#Int8]] +; CHECK-DAG: %[[#Ptr_Generic_32:]] = OpTypePointer Generic %[[#Int32]] +; CHECK-DAG: %[[#Const_64:]] = 
OpConstant %[[#Int32]] 64 +; CHECK-DAG: %[[#Const_36:]] = OpConstant %[[#Int32]] 36 +; CHECK-DAG: %[[#Const_30:]] = OpConstant %[[#Int32]] 30 +; CHECK-DAG: %[[#Const_32_64:]] = OpConstant %[[#Int64]] 32 + +; CHECK: %[[#Param1:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#Param2:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#Size1:]] = OpUConvert %[[#Int64]] %[[#Const_64]] +; CHECK: OpCopyMemorySized %[[#Param2]] %[[#Param1]] %[[#Size1]] Aligned 64 + +; CHECK: %[[#Src:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#CastDst2:]] = OpGenericCastToPtr %[[#Ptr_CrossWG_8]] %[[#GenPtr:]] +; CHECK: %[[#Size2:]] = OpUConvert %[[#Int64]] %[[#Const_36]] +; CHECK: OpCopyMemorySized %[[#CastDst2]] %[[#Src]] %[[#Size2]] Aligned 64 + +; CHECK: %[[#Param1:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#Param2:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#Size3:]] = OpUConvert %[[#Int64]] %[[#Const_30]] +; CHECK: OpCopyMemorySized %[[#Param2]] %[[#Param1]] %[[#Size3]] Aligned 1 + +; CHECK: %[[#Phi:]] = OpPhi %[[#Ptr_Generic_32]] %[[#Op1:]] %[[#Lbl1:]] %[[#Op2:]] %[[#Lbl2:]] +; CHECK: %[[#Cast:]] = OpPtrCastToGeneric %[[#]] %[[#]] +; CHECK: OpCopyMemorySized %[[#Cast]] %[[#Phi]] %[[#Const_32_64]] Aligned 8 + +%struct.SomeStruct = type { <16 x float>, i32, [60 x i8] } +%class.kfunc = type <{ i32, i32, i32, [4 x i8] }> + +@InvocIndex = external local_unnamed_addr addrspace(1) constant i64, align 8 +@"func_object1" = internal addrspace(3) global %class.kfunc zeroinitializer, align 8 + +define spir_kernel void @test_full_move(%struct.SomeStruct addrspace(1)* captures(none) readonly %in, %struct.SomeStruct addrspace(1)* captures(none) %out) { + %1 = bitcast %struct.SomeStruct addrspace(1)* %in to i8 addrspace(1)* + %2 = bitcast %struct.SomeStruct addrspace(1)* %out to i8 addrspace(1)* + call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* align 64 %2, i8 addrspace(1)* align 64 %1, i32 64, i1 false) + ret void +} + +define spir_kernel void @test_partial_move(%struct.SomeStruct addrspace(1)* captures(none) readonly %in, %struct.SomeStruct addrspace(4)* captures(none) %out) { + %1 = bitcast %struct.SomeStruct addrspace(1)* %in to i8 addrspace(1)* + %2 = bitcast %struct.SomeStruct addrspace(4)* %out to i8 addrspace(4)* + %3 = addrspacecast i8 addrspace(4)* %2 to i8 addrspace(1)* + call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* align 64 %3, i8 addrspace(1)* align 64 %1, i32 36, i1 false) + ret void +} + +define spir_kernel void @test_array(i8 addrspace(1)* %in, i8 addrspace(1)* %out) { + call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i32 30, i1 false) + ret void +} + +define weak_odr dso_local spir_kernel void @test_phi() local_unnamed_addr { +entry: + %0 = alloca i32, align 8 + %1 = addrspacecast i32* %0 to i32 addrspace(4)* + %2 = load i64, i64 addrspace(1)* @InvocIndex, align 8 + %cmp = icmp eq i64 %2, 0 + br i1 %cmp, label %leader, label %entry.merge_crit_edge + +entry.merge_crit_edge: ; preds = %entry + %3 = bitcast i32 addrspace(4)* %1 to i8 addrspace(4)* + br label %merge + +leader: ; preds = %entry + %4 = bitcast i32 addrspace(4)* %1 to i8 addrspace(4)* + br label %merge + +merge: ; preds = %entry.merge_crit_edge, %leader + %phi = phi i8 addrspace(4)* [ %3, %entry.merge_crit_edge ], [ %4, %leader ] + %5 = addrspacecast i8 addrspace(3)* bitcast (%class.kfunc addrspace(3)* @"func_object1" to i8 addrspace(3)*) to i8 addrspace(4)* + call void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* align 8 
dereferenceable(32) %5, i8 addrspace(4)* align 8 dereferenceable(32) %phi, i64 32, i1 false) + ret void +} + +declare void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* captures(none) writeonly, i8 addrspace(4)* captures(none) readonly, i64, i1 immarg) + +declare void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* captures(none), i8 addrspace(1)* captures(none) readonly, i32, i1) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll new file mode 100644 index 0000000..52f939f --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll @@ -0,0 +1,30 @@ +; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -filetype=obj -o - | spirv-val %} +; XFAIL: * +; @llvm.sadd.with.overflow and @llvm.ssub.with.overflow have not been implemented. + +define spir_func void @test_sadd_overflow(ptr %out_result, ptr %out_overflow, i32 %a, i32 %b) { +entry: + %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) + %val = extractvalue { i32, i1 } %res, 0 + %ofl = extractvalue { i32, i1 } %res, 1 + store i32 %val, ptr %out_result + %zext_ofl = zext i1 %ofl to i8 + store i8 %zext_ofl, ptr %out_overflow + ret void +} + +declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) + +define spir_func void @test_ssub_overflow(ptr %out_result, ptr %out_overflow, i32 %a, i32 %b) { +entry: + %res = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) + %val = extractvalue { i32, i1 } %res, 0 + %ofl = extractvalue { i32, i1 } %res, 1 + store i32 %val, ptr %out_result + %zext_ofl = zext i1 %ofl to i8 + store i8 %zext_ofl, ptr %out_overflow + ret void +} + +declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) diff --git a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll index e405ef0..5e66b8b6 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll @@ -7,10 +7,11 @@ ;; ;; Positive tests: ;; -; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-NEGATIVE +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV ;; ;; Negative tests: ;; +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV-NEGATIVE ;; Check that backend is able to skip nsw/nuw attributes if extension is ;; disabled implicitly or explicitly and if max SPIR-V version is lower then 1.4 diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll new file mode 100644 index 0000000..c8953c7 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll @@ -0,0 +1,11 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-SPIRV: [[#PtrT:]] = OpTypePointer Workgroup %[[#]] +; CHECK-SPIRV: %[[#]] = OpVariable %[[#PtrT]] Workgroup + +@test_atomic_fn.L = internal addrspace(3) global [64 x i32] zeroinitializer, align 4 + +define spir_kernel void 
@test_atomic_fn() { + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll new file mode 100644 index 0000000..607997d --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll @@ -0,0 +1,140 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpCapability Kernel +; CHECK: OpCapability Addresses +; CHECK: OpCapability Pipes +; CHECK: OpCapability Int8 +; CHECK: OpCapability GenericPointer + +; CHECK-DAG: %[[#PipeWriteTy:]] = OpTypePipe WriteOnly +; CHECK-DAG: %[[#PipeReadTy:]] = OpTypePipe ReadOnly +; CHECK-DAG: %[[#ReserveIdTy:]] = OpTypeReserveId +; CHECK-DAG: %[[#BoolTy:]] = OpTypeBool +; CHECK-DAG: %[[#Int32Ty:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#Uint1:]] = OpConstant %[[#Int32Ty]] 1 +; CHECK-DAG: %[[#Uint2:]] = OpConstant %[[#Int32Ty]] 2 +; CHECK-DAG: %[[#Uint3:]] = OpConstant %[[#Int32Ty]] 3 +; CHECK-DAG: %[[#Uint4:]] = OpConstant %[[#Int32Ty]] 4 +; CHECK-DAG: %[[#NullUint:]] = OpConstantNull %[[#Int32Ty]] + +; CHECK: OpFunction +; CHECK: %[[#FuncParam1:]] = OpFunctionParameter %[[#PipeWriteTy]] +; CHECK: %[[#FuncParam2:]] = OpFunctionParameter %[[#PipeReadTy]] + +; CHECK: %[[#BasicWriteReserve:]] = OpReserveWritePipePackets %[[#ReserveIdTy]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpWritePipe %[[#Int32Ty]] %[[#FuncParam1]] %[[#]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpCommitWritePipe %[[#FuncParam1]] %[[#BasicWriteReserve]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#BasicReadReserve:]] = OpReserveReadPipePackets %[[#ReserveIdTy]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpReadPipe %[[#Int32Ty]] %[[#FuncParam2]] %[[#]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpCommitReadPipe %[[#FuncParam2]] %[[#BasicReadReserve]] %[[#Uint4]] %[[#Uint4]] + +; --- Reserved pipe operations --- +; CHECK: %[[#ReservedWriteReserve:]] = OpReserveWritePipePackets %[[#ReserveIdTy]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#ReservedWrite:]] = OpReservedWritePipe %[[#Int32Ty]] %[[#FuncParam1]] %[[#ReservedWriteReserve]] %[[#NullUint]] %[[#]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#IsValidWrite:]] = OpIsValidReserveId %[[#BoolTy]] %[[#ReservedWriteReserve]] +; CHECK: OpCommitWritePipe %[[#FuncParam1]] %[[#ReservedWriteReserve]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#ReservedReadReserve:]] = OpReserveReadPipePackets %[[#ReserveIdTy]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#ReservedRead:]] = OpReservedReadPipe %[[#Int32Ty]] %[[#FuncParam2]] %[[#ReservedReadReserve]] %[[#NullUint]] %[[#]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#IsValidRead:]] = OpIsValidReserveId %[[#BoolTy]] %[[#ReservedReadReserve]] +; CHECK: OpCommitReadPipe %[[#FuncParam2]] %[[#ReservedReadReserve]] %[[#Uint4]] %[[#Uint4]] + +; --- Pipe packet queries --- +; CHECK: %[[#MaxPacketsWO:]] = OpGetMaxPipePackets %[[#Int32Ty]] %[[#FuncParam1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpStore %[[#]] %[[#MaxPacketsWO]] Aligned 4 +; CHECK: %[[#NumPacketsWO:]] = OpGetNumPipePackets %[[#Int32Ty]] %[[#FuncParam1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpStore %[[#]] %[[#NumPacketsWO]] Aligned 4 +; CHECK: %[[#MaxPacketsRO:]] = OpGetMaxPipePackets %[[#Int32Ty]] %[[#FuncParam2]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpStore %[[#]] %[[#MaxPacketsRO]] Aligned 4 +; CHECK: %[[#NumPacketsRO:]] = OpGetNumPipePackets %[[#Int32Ty]] %[[#FuncParam2]] 
%[[#Uint4]] %[[#Uint4]] +; CHECK: OpStore %[[#]] %[[#NumPacketsRO]] Aligned 4 + +; --- Workgroup operations --- +; CHECK: %[[#WorkgroupWriteReserve:]] = OpGroupReserveWritePipePackets %[[#ReserveIdTy]] %[[#Uint2]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint1]] %[[#Uint1]] +; CHECK: OpGroupCommitWritePipe %[[#Uint2]] %[[#FuncParam1]] %[[#WorkgroupWriteReserve]] %[[#Uint1]] %[[#Uint1]] +; CHECK: %[[#WorkgroupReadReserve:]] = OpGroupReserveReadPipePackets %[[#ReserveIdTy]] %[[#Uint2]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint1]] %[[#Uint1]] +; CHECK: OpGroupCommitReadPipe %[[#Uint2]] %[[#FuncParam2]] %[[#WorkgroupReadReserve]] %[[#Uint1]] %[[#Uint1]] + +; --- Subgroup operations --- +; CHECK: %[[#SubgroupWriteReserve:]] = OpGroupReserveWritePipePackets %[[#ReserveIdTy]] %[[#Uint3]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpGroupCommitWritePipe %[[#Uint3]] %[[#FuncParam1]] %[[#SubgroupWriteReserve]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#SubgroupReadReserve:]] = OpGroupReserveReadPipePackets %[[#ReserveIdTy]] %[[#Uint3]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpGroupCommitReadPipe %[[#Uint3]] %[[#FuncParam2]] %[[#SubgroupReadReserve]] %[[#Uint4]] %[[#Uint4]] + +define spir_kernel void @test_pipe_builtins( + target("spirv.Pipe", 1) %out_pipe, + target("spirv.Pipe", 0) %in_pipe, + ptr addrspace(4) %src, + ptr addrspace(4) %dst, + ptr addrspace(1) %max_packets_wo, + ptr addrspace(1) %num_packets_wo, + ptr addrspace(1) %max_packets_ro, + ptr addrspace(1) %num_packets_ro +) { +entry: + ; Basic pipe operations + %0 = call spir_func target("spirv.ReserveId") @__reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 4, i32 4) + %1 = call spir_func i32 @__write_pipe_2(target("spirv.Pipe", 1) %out_pipe, ptr addrspace(4) %src, i32 4, i32 4) + call spir_func void @__commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %0, i32 4, i32 4) + + %2 = call spir_func target("spirv.ReserveId") @__reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 4, i32 4) + %3 = call spir_func i32 @__read_pipe_2(target("spirv.Pipe", 0) %in_pipe, ptr addrspace(4) %dst, i32 4, i32 4) + call spir_func void @__commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %2, i32 4, i32 4) + + ; Reserved pipe operations + %4 = call spir_func target("spirv.ReserveId") @__reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 4, i32 4) + %5 = call spir_func i32 @__write_pipe_4(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %4, i32 0, ptr addrspace(4) %src, i32 4, i32 4) + %6 = call spir_func i1 @_Z19is_valid_reserve_id13ocl_reserveid(target("spirv.ReserveId") %4) + call spir_func void @__commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %4, i32 4, i32 4) + + %7 = call spir_func target("spirv.ReserveId") @__reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 4, i32 4) + %8 = call spir_func i32 @__read_pipe_4(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %7, i32 0, ptr addrspace(4) %dst, i32 4, i32 4) + %9 = call spir_func i1 @_Z19is_valid_reserve_id13ocl_reserveid(target("spirv.ReserveId") %7) + call spir_func void @__commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %7, i32 4, i32 4) + + ; Pipe packet queries + %10 = call spir_func i32 @__get_pipe_max_packets_wo(target("spirv.Pipe", 1) %out_pipe, i32 4, i32 4) + store i32 %10, ptr addrspace(1) %max_packets_wo, align 4 + %11 = call spir_func i32 
@__get_pipe_num_packets_wo(target("spirv.Pipe", 1) %out_pipe, i32 4, i32 4) + store i32 %11, ptr addrspace(1) %num_packets_wo, align 4 + %12 = call spir_func i32 @__get_pipe_max_packets_ro(target("spirv.Pipe", 0) %in_pipe, i32 4, i32 4) + store i32 %12, ptr addrspace(1) %max_packets_ro, align 4 + %13 = call spir_func i32 @__get_pipe_num_packets_ro(target("spirv.Pipe", 0) %in_pipe, i32 4, i32 4) + store i32 %13, ptr addrspace(1) %num_packets_ro, align 4 + + ; Workgroup operations + %14 = call spir_func target("spirv.ReserveId") @__work_group_reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 1, i32 1) + call spir_func void @__work_group_commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %14, i32 1, i32 1) + %15 = call spir_func target("spirv.ReserveId") @__work_group_reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 1, i32 1) + call spir_func void @__work_group_commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %15, i32 1, i32 1) + + ; Subgroup operations + %16 = call spir_func target("spirv.ReserveId") @__sub_group_reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 4, i32 4) + call spir_func void @__sub_group_commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %16, i32 4, i32 4) + %17 = call spir_func target("spirv.ReserveId") @__sub_group_reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 4, i32 4) + call spir_func void @__sub_group_commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %17, i32 4, i32 4) + + ret void +} + +declare spir_func target("spirv.ReserveId") @__reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32) +declare spir_func target("spirv.ReserveId") @__reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32) +declare spir_func i32 @__write_pipe_2(target("spirv.Pipe", 1), ptr addrspace(4), i32, i32) +declare spir_func i32 @__read_pipe_2(target("spirv.Pipe", 0), ptr addrspace(4), i32, i32) +declare spir_func i32 @__write_pipe_4(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, ptr addrspace(4), i32, i32) +declare spir_func i32 @__read_pipe_4(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, ptr addrspace(4), i32, i32) +declare spir_func void @__commit_write_pipe(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, i32) +declare spir_func void @__commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32) +declare spir_func i1 @_Z19is_valid_reserve_id13ocl_reserveid(target("spirv.ReserveId")) +declare spir_func i32 @__get_pipe_max_packets_wo(target("spirv.Pipe", 1), i32, i32) +declare spir_func i32 @__get_pipe_num_packets_wo(target("spirv.Pipe", 1), i32, i32) +declare spir_func i32 @__get_pipe_max_packets_ro(target("spirv.Pipe", 0), i32, i32) +declare spir_func i32 @__get_pipe_num_packets_ro(target("spirv.Pipe", 0), i32, i32) +declare spir_func target("spirv.ReserveId") @__work_group_reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32) +declare spir_func void @__work_group_commit_write_pipe(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, i32) +declare spir_func target("spirv.ReserveId") @__work_group_reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32) +declare spir_func void @__work_group_commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32) +declare spir_func target("spirv.ReserveId") @__sub_group_reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32) +declare spir_func void @__sub_group_commit_write_pipe(target("spirv.Pipe", 
1), target("spirv.ReserveId"), i32, i32) +declare spir_func target("spirv.ReserveId") @__sub_group_reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32) +declare spir_func void @__sub_group_commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32) diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll new file mode 100644 index 0000000..4c64a12 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll @@ -0,0 +1,16 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpDecorate %[[#Id:]] BuiltIn GlobalInvocationId +; CHECK: %[[#Id]] = OpVariable %[[#]] CrossWorkgroup + +@__spirv_BuiltInGlobalInvocationId = external dso_local local_unnamed_addr addrspace(1) constant <3 x i64>, align 32 + +define spir_kernel void @f() { +entry: + %0 = load i64, ptr addrspace(1) @__spirv_BuiltInGlobalInvocationId, align 32 + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll b/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll new file mode 100644 index 0000000..74ce26b --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll @@ -0,0 +1,30 @@ +; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; Check saturation conversion is translated when there is forward declaration +; of SPIRV entry. 
+ +; CHECK: OpDecorate %[[#SAT:]] SaturatedConversion +; CHECK: %[[#SAT]] = OpConvertFToU %[[#]] %[[#]] + +declare spir_func zeroext i8 @_Z30__spirv_ConvertFToU_Ruchar_satf(float) + +define spir_func void @forward(float %val, i8 %initval, ptr addrspace(1) %dst) { +entry: + br label %for.cond + +for.cond: ; preds = %for.body, %entry + %new_val.0 = phi i8 [ %initval, %entry ], [ %call1, %for.body ] + %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ] + %cmp = icmp ult i32 %i.0, 1 + br i1 %cmp, label %for.body, label %for.end + +for.body: ; preds = %for.cond + %call1 = call spir_func zeroext i8 @_Z30__spirv_ConvertFToU_Ruchar_satf(float noundef %val) + %inc = add i32 %i.0, 1 + br label %for.cond + +for.end: ; preds = %for.cond + store i8 %new_val.0, ptr addrspace(1) %dst, align 1 + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/transcoding/float16.ll b/llvm/test/CodeGen/SPIRV/transcoding/float16.ll new file mode 100644 index 0000000..0018dba --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/float16.ll @@ -0,0 +1,25 @@ +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-SPIRV: %[[#HALF:]] = OpTypeFloat 16 +; CHECK-SPIRV: %[[#HALFPTR:]] = OpTypePointer Function %[[#HALF]] +; CHECK-SPIRV: %[[#HALFV2:]] = OpTypeVector %[[#HALF]] 2 +; CHECK-SPIRV: %[[#HALFV2PTR:]] = OpTypePointer Function %[[#HALFV2]] +; CHECK-SPIRV: %[[#CONST:]] = OpConstant %[[#HALF]] 14788 +; CHECK-SPIRV: %[[#ADDR:]] = OpVariable %[[#HALFPTR]] Function +; CHECK-SPIRV: %[[#ADDR2:]] = OpVariable %[[#HALFV2PTR]] Function +; CHECK-SPIRV: %[[#]] = OpExtInst %[[#HALF]] %[[#]] fract %[[#CONST]] %[[#ADDR]] +; CHECK-SPIRV: %[[#]] = OpExtInst %[[#HALFV2]] %[[#]] fract %[[#]] %[[#ADDR2]] + +define spir_kernel void @test() { +entry: + %addr = alloca half + %addr2 = alloca <2 x half> + %res = call spir_func noundef half @_Z17__spirv_ocl_fractDF16_PU3AS0DF16_(half noundef 0xH39C4, ptr noundef %addr) + %res2 = call spir_func noundef <2 x half> @_Z17__spirv_ocl_fractDv2_DF16_PU3AS0S_(<2 x half> noundef <half 0xH39C4, half 0xH0000>, ptr noundef %addr2) + ret void +} + +declare spir_func noundef half @_Z17__spirv_ocl_fractDF16_PU3AS0DF16_(half noundef, ptr noundef) local_unnamed_addr + +declare spir_func noundef <2 x half> @_Z17__spirv_ocl_fractDv2_DF16_PU3AS0S_(<2 x half> noundef, ptr noundef) local_unnamed_addr diff --git a/llvm/test/CodeGen/VE/Scalar/max.ll b/llvm/test/CodeGen/VE/Scalar/max.ll index 51da557..7950842 100644 --- a/llvm/test/CodeGen/VE/Scalar/max.ll +++ b/llvm/test/CodeGen/VE/Scalar/max.ll @@ -1,7 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s -; RUN: llc < %s -mtriple=ve-unknown-unknown -enable-no-signed-zeros-fp-math \ -; RUN: -enable-no-nans-fp-math | FileCheck %s -check-prefix=OPT define double @maxf64(double, double) { ; CHECK-LABEL: maxf64: @@ -10,16 +8,21 @@ define double @maxf64(double, double) { ; CHECK-NEXT: cmov.d.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxf64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ogt double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @maxf64_fast(double, double) { +; CHECK-LABEL: maxf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ogt double %0, 
%1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @max2f64(double, double) { ; CHECK-LABEL: max2f64: ; CHECK: # %bb.0: @@ -27,16 +30,21 @@ define double @max2f64(double, double) { ; CHECK-NEXT: cmov.d.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2f64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp oge double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @max2f64_fast(double, double) { +; CHECK-LABEL: max2f64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp oge double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + ; VE has no max for unordered comparison define double @maxuf64(double, double) { ; CHECK-LABEL: maxuf64: @@ -45,16 +53,21 @@ define double @maxuf64(double, double) { ; CHECK-NEXT: cmov.d.gtnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxuf64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ugt double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @maxuf64_fast(double, double) { +; CHECK-LABEL: maxuf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ugt double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + ; VE has no max for unordered comparison define double @max2uf64(double, double) { ; CHECK-LABEL: max2uf64: @@ -63,16 +76,21 @@ define double @max2uf64(double, double) { ; CHECK-NEXT: cmov.d.genan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2uf64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp uge double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @max2uf64_fast(double, double) { +; CHECK-LABEL: max2uf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp uge double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define float @maxf32(float, float) { ; CHECK-LABEL: maxf32: ; CHECK: # %bb.0: @@ -80,16 +98,21 @@ define float @maxf32(float, float) { ; CHECK-NEXT: cmov.s.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxf32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ogt float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @maxf32_fast(float, float) { +; CHECK-LABEL: maxf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ogt float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @max2f32(float, float) { ; CHECK-LABEL: max2f32: ; CHECK: # %bb.0: @@ -97,16 +120,21 @@ define float @max2f32(float, float) { ; CHECK-NEXT: cmov.s.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2f32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp oge float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @max2f32_fast(float, float) { +; CHECK-LABEL: max2f32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp oge float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 
+} + define float @maxuf32(float, float) { ; CHECK-LABEL: maxuf32: ; CHECK: # %bb.0: @@ -114,16 +142,21 @@ define float @maxuf32(float, float) { ; CHECK-NEXT: cmov.s.gtnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxuf32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ugt float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @maxuf32_fast(float, float) { +; CHECK-LABEL: maxuf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ugt float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @max2uf32(float, float) { ; CHECK-LABEL: max2uf32: ; CHECK: # %bb.0: @@ -131,26 +164,26 @@ define float @max2uf32(float, float) { ; CHECK-NEXT: cmov.s.genan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2uf32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp uge float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @max2uf32_fast(float, float) { +; CHECK-LABEL: max2uf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp uge float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define i64 @maxi64(i64, i64) { ; CHECK-LABEL: maxi64: ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxi64: -; OPT: # %bb.0: -; OPT-NEXT: maxs.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sgt i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -161,11 +194,6 @@ define i64 @max2i64(i64, i64) { ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2i64: -; OPT: # %bb.0: -; OPT-NEXT: maxs.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sge i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -178,13 +206,6 @@ define i64 @maxu64(i64, i64) { ; CHECK-NEXT: cmov.l.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxu64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.gt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ugt i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -197,13 +218,6 @@ define i64 @max2u64(i64, i64) { ; CHECK-NEXT: cmov.l.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2u64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.ge %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp uge i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -214,11 +228,6 @@ define i32 @maxi32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxi32: -; OPT: # %bb.0: -; OPT-NEXT: maxs.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sgt i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -229,11 +238,6 @@ define i32 @max2i32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2i32: -; OPT: # %bb.0: -; OPT-NEXT: maxs.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sge i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -246,13 +250,6 @@ define i32 @maxu32(i32, i32) { ; CHECK-NEXT: cmov.w.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; 
OPT-LABEL: maxu32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.gt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ugt i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -265,13 +262,6 @@ define i32 @max2u32(i32, i32) { ; CHECK-NEXT: cmov.w.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2u32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.ge %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp uge i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -283,12 +273,6 @@ define zeroext i1 @maxi1(i1 zeroext, i1 zeroext) { ; CHECK-NEXT: or %s0, %s0, %s1 ; CHECK-NEXT: and %s0, 1, %s0 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxi1: -; OPT: # %bb.0: -; OPT-NEXT: or %s0, %s0, %s1 -; OPT-NEXT: and %s0, 1, %s0 -; OPT-NEXT: b.l.t (, %s10) %3 = xor i1 %1, true %4 = and i1 %3, %0 %5 = select i1 %4, i1 %0, i1 %1 diff --git a/llvm/test/CodeGen/VE/Scalar/min.ll b/llvm/test/CodeGen/VE/Scalar/min.ll index e8f4939..36a2e06 100644 --- a/llvm/test/CodeGen/VE/Scalar/min.ll +++ b/llvm/test/CodeGen/VE/Scalar/min.ll @@ -1,7 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s -; RUN: llc < %s -mtriple=ve-unknown-unknown -enable-no-signed-zeros-fp-math \ -; RUN: -enable-no-nans-fp-math | FileCheck %s -check-prefix=OPT define double @minf64(double, double) { ; CHECK-LABEL: minf64: @@ -10,16 +8,21 @@ define double @minf64(double, double) { ; CHECK-NEXT: cmov.d.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minf64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp olt double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @minf64_fast(double, double) { +; CHECK-LABEL: minf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp olt double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @min2f64(double, double) { ; CHECK-LABEL: min2f64: ; CHECK: # %bb.0: @@ -27,16 +30,21 @@ define double @min2f64(double, double) { ; CHECK-NEXT: cmov.d.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2f64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ole double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @min2f64_fast(double, double) { +; CHECK-LABEL: min2f64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ole double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @minuf64(double, double) { ; CHECK-LABEL: minuf64: ; CHECK: # %bb.0: @@ -44,16 +52,21 @@ define double @minuf64(double, double) { ; CHECK-NEXT: cmov.d.ltnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minuf64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ult double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @minuf64_fast(double, double) { +; CHECK-LABEL: minuf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ult double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double 
@min2uf64(double, double) { ; CHECK-LABEL: min2uf64: ; CHECK: # %bb.0: @@ -61,16 +74,21 @@ define double @min2uf64(double, double) { ; CHECK-NEXT: cmov.d.lenan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2uf64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ule double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @min2uf64_fast(double, double) { +; CHECK-LABEL: min2uf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ule double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define float @minf32(float, float) { ; CHECK-LABEL: minf32: ; CHECK: # %bb.0: @@ -78,16 +96,21 @@ define float @minf32(float, float) { ; CHECK-NEXT: cmov.s.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minf32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp olt float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @minf32_fast(float, float) { +; CHECK-LABEL: minf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp olt float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @min2f32(float, float) { ; CHECK-LABEL: min2f32: ; CHECK: # %bb.0: @@ -95,16 +118,21 @@ define float @min2f32(float, float) { ; CHECK-NEXT: cmov.s.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2f32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ole float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @min2f32_fast(float, float) { +; CHECK-LABEL: min2f32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ole float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @minuf32(float, float) { ; CHECK-LABEL: minuf32: ; CHECK: # %bb.0: @@ -112,16 +140,21 @@ define float @minuf32(float, float) { ; CHECK-NEXT: cmov.s.ltnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minuf32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ult float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @minuf32_fast(float, float) { +; CHECK-LABEL: minuf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ult float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @min2uf32(float, float) { ; CHECK-LABEL: min2uf32: ; CHECK: # %bb.0: @@ -129,26 +162,26 @@ define float @min2uf32(float, float) { ; CHECK-NEXT: cmov.s.lenan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2uf32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ule float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @min2uf32_fast(float, float) { +; CHECK-LABEL: min2uf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ule float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define i64 @mini64(i64, i64) { ; CHECK-LABEL: mini64: ; CHECK: # %bb.0: ; CHECK-NEXT: mins.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; 
OPT-LABEL: mini64: -; OPT: # %bb.0: -; OPT-NEXT: mins.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp slt i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -159,11 +192,6 @@ define i64 @min2i64(i64, i64) { ; CHECK: # %bb.0: ; CHECK-NEXT: mins.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2i64: -; OPT: # %bb.0: -; OPT-NEXT: mins.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sle i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -176,13 +204,6 @@ define i64 @minu64(i64, i64) { ; CHECK-NEXT: cmov.l.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minu64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.lt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ult i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -195,13 +216,6 @@ define i64 @min2u64(i64, i64) { ; CHECK-NEXT: cmov.l.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2u64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.le %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ule i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -212,11 +226,6 @@ define i32 @mini32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: mins.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: mini32: -; OPT: # %bb.0: -; OPT-NEXT: mins.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp slt i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -227,11 +236,6 @@ define i32 @min2i32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: mins.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2i32: -; OPT: # %bb.0: -; OPT-NEXT: mins.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sle i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -244,13 +248,6 @@ define i32 @minu32(i32, i32) { ; CHECK-NEXT: cmov.w.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minu32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.lt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ult i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -263,13 +260,6 @@ define i32 @min2u32(i32, i32) { ; CHECK-NEXT: cmov.w.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2u32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.le %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ule i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -283,14 +273,6 @@ define zeroext i1 @mini1(i1 zeroext, i1 zeroext) { ; CHECK-NEXT: cmov.w.ne %s0, %s1, %s2 ; CHECK-NEXT: adds.w.zx %s0, %s0, (0)1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: mini1: -; OPT: # %bb.0: -; OPT-NEXT: and %s2, 1, %s0 -; OPT-NEXT: and %s0, %s1, %s0 -; OPT-NEXT: cmov.w.ne %s0, %s1, %s2 -; OPT-NEXT: adds.w.zx %s0, %s0, (0)1 -; OPT-NEXT: b.l.t (, %s10) %3 = xor i1 %0, true %4 = and i1 %3, %1 %5 = select i1 %4, i1 %0, i1 %1 diff --git a/llvm/test/CodeGen/WebAssembly/partial-reduce-accumulate.ll b/llvm/test/CodeGen/WebAssembly/partial-reduce-accumulate.ll index 47ea762..a599f46 100644 --- a/llvm/test/CodeGen/WebAssembly/partial-reduce-accumulate.ll +++ b/llvm/test/CodeGen/WebAssembly/partial-reduce-accumulate.ll @@ -19,11 +19,11 @@ define hidden i32 @accumulate_add_u8_u8(ptr noundef readonly %a, ptr noundef re ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: 
i16x8.extadd_pairwise_i8x16_u ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u -; MAX-BANDWIDTH: i32x4.add ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_u ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u ; MAX-BANDWIDTH: i32x4.add +; MAX-BANDWIDTH: i32x4.add entry: %cmp8.not = icmp eq i32 %N, 0 @@ -65,11 +65,11 @@ define hidden i32 @accumulate_add_s8_s8(ptr noundef readonly %a, ptr noundef re ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_s ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s -; MAX-BANDWIDTH: i32x4.add ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_s ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s ; MAX-BANDWIDTH: i32x4.add +; MAX-BANDWIDTH: i32x4.add entry: %cmp8.not = icmp eq i32 %N, 0 br i1 %cmp8.not, label %for.cond.cleanup, label %for.body @@ -108,12 +108,11 @@ define hidden i32 @accumulate_add_s8_u8(ptr noundef readonly %a, ptr noundef re ; MAX-BANDWIDTH: loop ; MAX-BANDWIDTH: v128.load -; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_s -; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s -; MAX-BANDWIDTH: i32x4.add -; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_u ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u +; MAX-BANDWIDTH: v128.load +; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_s +; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s ; MAX-BANDWIDTH: i32x4.add entry: %cmp8.not = icmp eq i32 %N, 0 @@ -363,10 +362,10 @@ define hidden i32 @accumulate_add_u16_u16(ptr noundef readonly %a, ptr noundef ; MAX-BANDWIDTH: loop ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u -; MAX-BANDWIDTH: i32x4.add ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u ; MAX-BANDWIDTH: i32x4.add +; MAX-BANDWIDTH: i32x4.add entry: %cmp8.not = icmp eq i32 %N, 0 br i1 %cmp8.not, label %for.cond.cleanup, label %for.body @@ -402,10 +401,10 @@ define hidden i32 @accumulate_add_s16_s16(ptr noundef readonly %a, ptr noundef ; MAX-BANDWIDTH: loop ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s -; MAX-BANDWIDTH: i32x4.add ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s ; MAX-BANDWIDTH: i32x4.add +; MAX-BANDWIDTH: i32x4.add entry: %cmp8.not = icmp eq i32 %N, 0 br i1 %cmp8.not, label %for.cond.cleanup, label %for.body diff --git a/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll b/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll index 6679b5f5..41fa346 100644 --- a/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll +++ b/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll @@ -8,7 +8,7 @@ define void @neg_8bit_1(i1 %cmp) { ; NDD-NEXT: andb $1, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x80,0xe7,0x01] ; NDD-NEXT: movzbl 0, %ecx # encoding: [0x0f,0xb6,0x0c,0x25,0x00,0x00,0x00,0x00] ; NDD-NEXT: negb %al, %al # encoding: [0x62,0xf4,0x7c,0x18,0xf6,0xd8] -; NDD-NEXT: leab 2(%rcx,%rax), %al # encoding: [0x66,0x8d,0x44,0x01,0x02] +; NDD-NEXT: leal 2(%rcx,%rax), %eax # encoding: [0x8d,0x44,0x01,0x02] ; NDD-NEXT: movb %al, 0 # encoding: [0x88,0x04,0x25,0x00,0x00,0x00,0x00] ; NDD-NEXT: retq # encoding: [0xc3] entry: @@ -25,7 +25,8 @@ define void @neg_8bit_2(i8 %int8) { ; NDD-NEXT: # kill: def $edi killed $edi def $rdi ; NDD-NEXT: addb %dil, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x00,0xff] ; NDD-NEXT: negb %al, %al # encoding: [0x62,0xf4,0x7c,0x18,0xf6,0xd8] -; NDD-NEXT: leab 1(%rdi,%rax), %al # encoding: [0x66,0x8d,0x44,0x07,0x01] +; NDD-NEXT: leal 1(%rdi,%rax), %eax # encoding: [0x8d,0x44,0x07,0x01] +; NDD-NEXT: # kill: def $al killed $al 
killed $eax ; NDD-NEXT: mulb %dil # encoding: [0x40,0xf6,0xe7] ; NDD-NEXT: testb %al, %al # encoding: [0x84,0xc0] ; NDD-NEXT: retq # encoding: [0xc3] @@ -55,7 +56,7 @@ define i32 @neg_16bit(i16 %0) { ; NDD-NEXT: cmovsl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x48,0xc1] ; NDD-NEXT: andw $-256, %ax # EVEX TO LEGACY Compression encoding: [0x66,0x25,0x00,0xff] ; NDD-NEXT: negw %ax, %ax # encoding: [0x62,0xf4,0x7d,0x18,0xf7,0xd8] -; NDD-NEXT: leaw 1(%rdi,%rax), %ax # encoding: [0x66,0x8d,0x44,0x07,0x01] +; NDD-NEXT: leal 1(%rdi,%rax), %eax # encoding: [0x8d,0x44,0x07,0x01] ; NDD-NEXT: movzwl %ax, %eax # encoding: [0x0f,0xb7,0xc0] ; NDD-NEXT: movq %rax, 0 # encoding: [0x48,0x89,0x04,0x25,0x00,0x00,0x00,0x00] ; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] diff --git a/llvm/test/CodeGen/X86/avx10.2-intrinsic-upgrade.ll b/llvm/test/CodeGen/X86/avx10.2-intrinsic-upgrade.ll new file mode 100644 index 0000000..76d84c1 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx10.2-intrinsic-upgrade.ll @@ -0,0 +1,99 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=X64 + +declare <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32>, <16 x i32>, <16 x i32>) + +define <16 x i32>@test_int_x86_avx10_vpdpbssd_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) { +; X86-LABEL: test_int_x86_avx10_vpdpbssd_512: +; X86: # %bb.0: +; X86-NEXT: vpdpbssd %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx10_vpdpbssd_512: +; X64: # %bb.0: +; X64-NEXT: vpdpbssd %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) + ret <16 x i32> %res +} + +declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <16 x i32>, <16 x i32>) + +define <16 x i32>@test_int_x86_avx10_vpdpbssds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) { +; X86-LABEL: test_int_x86_avx10_vpdpbssds_512: +; X86: # %bb.0: +; X86-NEXT: vpdpbssds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx10_vpdpbssds_512: +; X64: # %bb.0: +; X64-NEXT: vpdpbssds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) + ret <16 x i32> %res +} + +declare <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32>, <16 x i32>, <16 x i32>) + +define <16 x i32>@test_int_x86_avx10_vpdpbsud_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) { +; X86-LABEL: test_int_x86_avx10_vpdpbsud_512: +; X86: # %bb.0: +; X86-NEXT: vpdpbsud %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx10_vpdpbsud_512: +; X64: # %bb.0: +; X64-NEXT: vpdpbsud %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) + ret <16 x i32> %res +} + +declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <16 x i32>, <16 x i32>) + +define 
<16 x i32>@test_int_x86_avx10_vpdpbsuds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) { +; X86-LABEL: test_int_x86_avx10_vpdpbsuds_512: +; X86: # %bb.0: +; X86-NEXT: vpdpbsuds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx10_vpdpbsuds_512: +; X64: # %bb.0: +; X64-NEXT: vpdpbsuds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) + ret <16 x i32> %res +} + +declare <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32>, <16 x i32>, <16 x i32>) + +define <16 x i32>@test_int_x86_avx10_vpdpbuud_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) { +; X86-LABEL: test_int_x86_avx10_vpdpbuud_512: +; X86: # %bb.0: +; X86-NEXT: vpdpbuud %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx10_vpdpbuud_512: +; X64: # %bb.0: +; X64-NEXT: vpdpbuud %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) + ret <16 x i32> %res +} + +declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <16 x i32>, <16 x i32>) + +define <16 x i32>@test_int_x86_avx10_vpdpbuuds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) { +; X86-LABEL: test_int_x86_avx10_vpdpbuuds_512: +; X86: # %bb.0: +; X86-NEXT: vpdpbuuds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx10_vpdpbuuds_512: +; X64: # %bb.0: +; X64-NEXT: vpdpbuuds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) + ret <16 x i32> %res +} diff --git a/llvm/test/CodeGen/X86/avx10_2_512ni-intrinsics.ll b/llvm/test/CodeGen/X86/avx10_2_512ni-intrinsics.ll index 09eb53f..a2aad60 100644 --- a/llvm/test/CodeGen/X86/avx10_2_512ni-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avx10_2_512ni-intrinsics.ll @@ -53,7 +53,7 @@ declare <16 x float> @llvm.x86.avx10.vdpphps.512(<16 x float>, <32 x half>, <32 ; VNNI INT8 -define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) { +define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) { ; X86-LABEL: test_mm512_dpbssd_epi32: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -64,12 +64,12 @@ define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr ; X64: # %bb.0: ; X64-NEXT: vpdpbssd (%rdi), %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x50,0x07] ; X64-NEXT: retq # encoding: [0xc3] - %__B = load <16 x i32>, ptr %pB - %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %__B = load <64 x i8>, ptr %pB + %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) ret <16 x i32> %res } -define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) { +define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) { ; X86-LABEL: test_mm512_mask_dpbssds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), 
%k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] @@ -81,13 +81,13 @@ define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %_ ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbssds %zmm2, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf2,0x77,0x49,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W ret <16 x i32> %res } -define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) { +define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) { ; X86-LABEL: test_mm512_maskz_dpbssd_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] @@ -99,16 +99,16 @@ define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %_ ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbssd %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x77,0xc9,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer ret <16 x i32> %res } -declare <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32>, <16 x i32>, <16 x i32>) -declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <16 x i32>, <16 x i32>) +declare <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32>, <64 x i8>, <64 x i8>) +declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <64 x i8>, <64 x i8>) -define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) { +define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) { ; X86-LABEL: test_mm512_dpbsud_epi32: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -119,12 +119,12 @@ define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr ; X64: # %bb.0: ; X64-NEXT: vpdpbsud (%rdi), %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x50,0x07] ; X64-NEXT: retq # encoding: [0xc3] - %__B = load <16 x i32>, ptr %pB - %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %__B = load <64 x i8>, ptr %pB + %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) ret <16 x i32> %res } -define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) { +define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) { ; X86-LABEL: test_mm512_mask_dpbsuds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] @@ -136,13 +136,13 @@ define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %_ ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbsuds %zmm2, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf2,0x76,0x49,0x51,0xc2] ; X64-NEXT: 
retq # encoding: [0xc3] - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W ret <16 x i32> %res } -define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) { +define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) { ; X86-LABEL: test_mm512_maskz_dpbsud_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] @@ -154,16 +154,16 @@ define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %_ ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbsud %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x76,0xc9,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer ret <16 x i32> %res } -declare <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32>, <16 x i32>, <16 x i32>) -declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <16 x i32>, <16 x i32>) +declare <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32>, <64 x i8>, <64 x i8>) +declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <64 x i8>, <64 x i8>) -define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) { +define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) { ; X86-LABEL: test_mm512_dpbuud_epi32: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -174,12 +174,12 @@ define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr ; X64: # %bb.0: ; X64-NEXT: vpdpbuud (%rdi), %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x50,0x07] ; X64-NEXT: retq # encoding: [0xc3] - %__B = load <16 x i32>, ptr %pB - %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %__B = load <64 x i8>, ptr %pB + %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) ret <16 x i32> %res } -define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) { +define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) { ; X86-LABEL: test_mm512_mask_dpbuuds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] @@ -191,13 +191,13 @@ define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %_ ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbuuds %zmm2, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf2,0x74,0x49,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, 
<16 x i32> %dpi, <16 x i32> %__W ret <16 x i32> %res } -define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) { +define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) { ; X86-LABEL: test_mm512_maskz_dpbuud_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] @@ -209,14 +209,14 @@ define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %_ ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbuud %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x74,0xc9,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer ret <16 x i32> %res } -declare <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32>, <16 x i32>, <16 x i32>) -declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <16 x i32>, <16 x i32>) +declare <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32>, <64 x i8>, <64 x i8>) +declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <64 x i8>, <64 x i8>) ; VNNI INT16 diff --git a/llvm/test/CodeGen/X86/avx10_2ni-intrinsics.ll b/llvm/test/CodeGen/X86/avx10_2ni-intrinsics.ll index 0c5fd3b..1f270d5 100644 --- a/llvm/test/CodeGen/X86/avx10_2ni-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avx10_2ni-intrinsics.ll @@ -101,7 +101,7 @@ declare <8 x float> @llvm.x86.avx10.vdpphps.256(<8 x float>, <16 x half>, <16 x ; VNNI INT8 -define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) { +define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) { ; X86-LABEL: test_mm_mask_dpbssd_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -113,13 +113,13 @@ define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf2,0x77,0x09,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W ret <4 x i32> %res } -define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) { +define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) { ; X86-LABEL: test_mm_maskz_dpbssds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -131,13 +131,13 @@ define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, < ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x77,0x89,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = 
tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer ret <4 x i32> %res } -define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) { +define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) { ; X86-LABEL: test_mm256_maskz_dpbssds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -149,13 +149,13 @@ define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf2,0x77,0x29,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W ret <8 x i32> %res } -define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) { +define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) { ; X86-LABEL: test_mm256_mask_dpbssd_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -167,18 +167,18 @@ define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x77,0xa9,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) { +define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) { ; X86-LABEL: test_mm_mask_dpbsud_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -190,13 +190,13 @@ define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf2,0x76,0x09,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <4 x 
i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W ret <4 x i32> %res } -define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) { +define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) { ; X86-LABEL: test_mm_maskz_dpbsuds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -208,13 +208,13 @@ define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, < ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x76,0x89,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer ret <4 x i32> %res } -define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) { +define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) { ; X86-LABEL: test_mm256_maskz_dpbsuds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -226,13 +226,13 @@ define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf2,0x76,0x29,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W ret <8 x i32> %res } -define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) { +define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) { ; X86-LABEL: test_mm256_mask_dpbsud_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -244,18 +244,18 @@ define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x76,0xa9,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>) 
-declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) { +define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) { ; X86-LABEL: test_mm_mask_dpbuud_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -267,13 +267,13 @@ define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf2,0x74,0x09,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W ret <4 x i32> %res } -define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) { +define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) { ; X86-LABEL: test_mm_maskz_dpbuuds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -285,13 +285,13 @@ define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, < ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x74,0x89,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer ret <4 x i32> %res } -define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) { +define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) { ; X86-LABEL: test_mm256_maskz_dpbuuds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -303,13 +303,13 @@ define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf2,0x74,0x29,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W ret <8 x i32> %res } -define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) { +define <8 x i32> 
@test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) { ; X86-LABEL: test_mm256_mask_dpbuud_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -321,16 +321,16 @@ define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x74,0xa9,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>) ; VNNI INT16 diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll index 8aa898f..da0cef0 100644 --- a/llvm/test/CodeGen/X86/avx512-mask-op.ll +++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll @@ -2119,8 +2119,7 @@ define void @ktest_1(<8 x double> %in, ptr %base) { ; KNL-LABEL: ktest_1: ; KNL: ## %bb.0: ; KNL-NEXT: vcmpgtpd (%rdi), %zmm0, %k1 -; KNL-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z} -; KNL-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1} +; KNL-NEXT: vcmpltpd 8(%rdi), %zmm0, %k0 {%k1} ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb %al, %al ; KNL-NEXT: je LBB44_2 @@ -2152,8 +2151,7 @@ define void @ktest_1(<8 x double> %in, ptr %base) { ; AVX512BW-LABEL: ktest_1: ; AVX512BW: ## %bb.0: ; AVX512BW-NEXT: vcmpgtpd (%rdi), %zmm0, %k1 -; AVX512BW-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z} -; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1} +; AVX512BW-NEXT: vcmpltpd 8(%rdi), %zmm0, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax ; AVX512BW-NEXT: testb %al, %al ; AVX512BW-NEXT: je LBB44_2 diff --git a/llvm/test/CodeGen/X86/avxvnniint8-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics-upgrade.ll new file mode 100644 index 0000000..ce9a0fb --- /dev/null +++ b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics-upgrade.ll @@ -0,0 +1,318 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avxvnniint8 --show-mc-encoding | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnniint8 --show-mc-encoding | FileCheck %s --check-prefixes=X64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=AVX10-X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=AVX10-X64 + +declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>) + +define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> 
%x1, <4 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbssd_128: +; X86: # %bb.0: +; X86-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbssd_128: +; X64: # %bb.0: +; X64-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbssd_128: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x50,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbssd_128: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x50,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) + ret <4 x i32> %res +} + +declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>) + +define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbssds_128: +; X86: # %bb.0: +; X86-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbssds_128: +; X64: # %bb.0: +; X64-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbssds_128: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x51,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbssds_128: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x51,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) + ret <4 x i32> %res +} + +declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>) + +define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbssd_256: +; X86: # %bb.0: +; X86-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbssd_256: +; X64: # %bb.0: +; X64-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbssd_256: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x50,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbssd_256: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x50,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) + ret <8 x i32> %res +} + +declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>) + +define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbssds_256: +; X86: # %bb.0: +; X86-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbssds_256: +; X64: # 
%bb.0: +; X64-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbssds_256: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x51,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbssds_256: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x51,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) + ret <8 x i32> %res +} + +declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>) + +define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbsud_128: +; X86: # %bb.0: +; X86-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbsud_128: +; X64: # %bb.0: +; X64-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbsud_128: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x50,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbsud_128: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x50,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) + ret <4 x i32> %res +} + +declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>) + +define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbsuds_128: +; X86: # %bb.0: +; X86-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbsuds_128: +; X64: # %bb.0: +; X64-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbsuds_128: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x51,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbsuds_128: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x51,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) + ret <4 x i32> %res +} + +declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>) + +define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbsud_256: +; X86: # %bb.0: +; X86-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbsud_256: +; X64: # %bb.0: +; X64-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbsud_256: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # 
encoding: [0x62,0xf2,0x76,0x28,0x50,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbsud_256: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x50,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) + ret <8 x i32> %res +} + +declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>) + +define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbsuds_256: +; X86: # %bb.0: +; X86-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbsuds_256: +; X64: # %bb.0: +; X64-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbsuds_256: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x51,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbsuds_256: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x51,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) + ret <8 x i32> %res +} + +declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>) + +define <4 x i32>@test_int_x86_avx2_vpdpbuud(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbuud: +; X86: # %bb.0: +; X86-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbuud: +; X64: # %bb.0: +; X64-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbuud: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x50,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbuud: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x50,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) + ret <4 x i32> %res +} + +declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>) + +define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbuuds_128: +; X86: # %bb.0: +; X86-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbuuds_128: +; X64: # %bb.0: +; X64-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbuuds_128: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x51,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbuuds_128: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x51,0xc2] +; AVX10-X64-NEXT: 
retq # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) + ret <4 x i32> %res +} + +declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>) + +define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbuud_256: +; X86: # %bb.0: +; X86-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbuud_256: +; X64: # %bb.0: +; X64-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbuud_256: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x50,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbuud_256: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x50,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) + ret <8 x i32> %res +} + +declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>) + +define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbuuds_256: +; X86: # %bb.0: +; X86-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbuuds_256: +; X64: # %bb.0: +; X64-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbuuds_256: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x51,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbuuds_256: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x51,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) + ret <8 x i32> %res +} + diff --git a/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll index 0ddd017..6c3d90aa 100644 --- a/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll @@ -5,9 +5,9 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=AVX10-X64 -declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) { +define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbssd_128: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -41,16 +41,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, pt ; AVX10-X64-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x50,0xc2] ; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0] ; AVX10-X64-NEXT: 
retq # encoding: [0xc3] - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) { +define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbssds_128: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -84,16 +84,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, p ; AVX10-X64-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x51,0xc2] ; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) { +define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbssd_256: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -127,16 +127,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, pt ; AVX10-X64-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x50,0xc2] ; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) { +define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <32 x i8> 
%x1, ptr %x2p, <32 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbssds_256: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -170,16 +170,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, p ; AVX10-X64-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x51,0xc2] ; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) { +define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbsud_128: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -213,16 +213,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, pt ; AVX10-X64-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x50,0xc2] ; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) { +define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbsuds_128: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -256,16 +256,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, p ; AVX10-X64-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x51,0xc2] ; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) 
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) { +define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbsud_256: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -299,16 +299,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, pt ; AVX10-X64-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x50,0xc2] ; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) { +define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbsuds_256: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -342,16 +342,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, p ; AVX10-X64-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x51,0xc2] ; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) { +define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbuud_128: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -385,16 +385,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, pt ; AVX10-X64-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x50,0xc2] ; 
AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) { +define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbuuds_128: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -428,16 +428,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, p ; AVX10-X64-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x51,0xc2] ; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) { +define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbuud_256: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -471,16 +471,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, pt ; AVX10-X64-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x50,0xc2] ; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> 
%x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) { +define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbuuds_256: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -514,9 +514,9 @@ define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, p ; AVX10-X64-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x51,0xc2] ; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } diff --git a/llvm/test/CodeGen/X86/combine-add.ll b/llvm/test/CodeGen/X86/combine-add.ll index ff9f995..51a8bf5 100644 --- a/llvm/test/CodeGen/X86/combine-add.ll +++ b/llvm/test/CodeGen/X86/combine-add.ll @@ -235,10 +235,10 @@ define void @PR52039(ptr %pa, ptr %pb) { ; SSE-NEXT: psubd %xmm1, %xmm3 ; SSE-NEXT: psubd %xmm0, %xmm2 ; SSE-NEXT: movdqa %xmm2, %xmm0 -; SSE-NEXT: paddd %xmm2, %xmm0 +; SSE-NEXT: paddd %xmm0, %xmm0 ; SSE-NEXT: paddd %xmm2, %xmm0 ; SSE-NEXT: movdqa %xmm3, %xmm1 -; SSE-NEXT: paddd %xmm3, %xmm1 +; SSE-NEXT: paddd %xmm1, %xmm1 ; SSE-NEXT: paddd %xmm3, %xmm1 ; SSE-NEXT: movdqu %xmm3, 16(%rsi) ; SSE-NEXT: movdqu %xmm2, (%rsi) diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll index 8e4a50e..ae4d24f 100644 --- a/llvm/test/CodeGen/X86/combine-mul.ll +++ b/llvm/test/CodeGen/X86/combine-mul.ll @@ -81,7 +81,7 @@ define <4 x i64> @combine_vec_mul_pow2c(<4 x i64> %x) { ; SSE-LABEL: combine_vec_mul_pow2c: ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: paddq %xmm0, %xmm2 +; SSE-NEXT: paddq %xmm2, %xmm2 ; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7] ; SSE-NEXT: movdqa %xmm1, %xmm2 ; SSE-NEXT: psllq $4, %xmm2 diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll index 98187d6..6bcbfe1 100644 --- a/llvm/test/CodeGen/X86/combine-sdiv.ll +++ b/llvm/test/CodeGen/X86/combine-sdiv.ll @@ -2187,13 +2187,13 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; SSE41-NEXT: pxor %xmm0, %xmm0 ; SSE41-NEXT: pxor %xmm3, %xmm3 ; SSE41-NEXT: pcmpgtb %xmm1, %xmm3 -; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero ; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] ; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [256,2,2,2,2,128,2,128] ; SSE41-NEXT: psrlw $8, %xmm3 -; SSE41-NEXT: paddw %xmm4, %xmm4 -; SSE41-NEXT: pmovsxbw %xmm1, %xmm2 -; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4,5],xmm4[6],xmm2[7] +; SSE41-NEXT: pmovsxbw %xmm1, %xmm0 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; SSE41-NEXT: paddw %xmm2, %xmm2 +; SSE41-NEXT: 
pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2],xmm0[3,4,5],xmm2[6],xmm0[7] ; SSE41-NEXT: psrlw $8, %xmm2 ; SSE41-NEXT: packuswb %xmm3, %xmm2 ; SSE41-NEXT: paddb %xmm1, %xmm2 @@ -2201,15 +2201,14 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] ; SSE41-NEXT: psraw $8, %xmm0 ; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: paddw %xmm0, %xmm3 -; SSE41-NEXT: psllw $7, %xmm0 -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5],xmm0[6],xmm3[7] -; SSE41-NEXT: psrlw $8, %xmm0 +; SSE41-NEXT: psllw $7, %xmm3 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm0[5],xmm3[6],xmm0[7] +; SSE41-NEXT: psrlw $8, %xmm3 ; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE41-NEXT: psraw $8, %xmm2 ; SSE41-NEXT: psllw $7, %xmm2 ; SSE41-NEXT: psrlw $8, %xmm2 -; SSE41-NEXT: packuswb %xmm0, %xmm2 +; SSE41-NEXT: packuswb %xmm3, %xmm2 ; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255] ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,255,255,255,0,255,255,0,0,0,0,255,0,255] @@ -2225,18 +2224,17 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [256,2,2,2,2,128,2,128] ; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpmovsxbw %xmm0, %xmm3 -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3,4,5],xmm2[6],xmm3[7] +; AVX1-NEXT: vpmovsxbw %xmm0, %xmm2 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4,5],xmm3[6],xmm2[7] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm1 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; AVX1-NEXT: vpsraw $8, %xmm2, %xmm2 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 -; AVX1-NEXT: vpsllw $7, %xmm2, %xmm2 -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5],xmm2[6],xmm3[7] +; AVX1-NEXT: vpsllw $7, %xmm2, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5],xmm3[6],xmm2[7] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1 diff --git a/llvm/test/CodeGen/X86/dbg-distringtype-uint.ll b/llvm/test/CodeGen/X86/dbg-distringtype-uint.ll index 638a65d..7542c1b 100644 --- a/llvm/test/CodeGen/X86/dbg-distringtype-uint.ll +++ b/llvm/test/CodeGen/X86/dbg-distringtype-uint.ll @@ -1,5 +1,13 @@ ; RUN: llc -mtriple=x86_64 -filetype=obj < %s | llvm-dwarfdump -debug-info - | FileCheck %s -; + +; Ensure that static local variable elemnt is placed in abstract subprogram DIE. 
+; CHECK: DW_TAG_subprogram +; CHECK-NOT: DW_TAG +; CHECK: DW_AT_inline (DW_INL_inlined) +; CHECK-EMPTY: +; CHECK-NEXT: DW_TAG_variable +; CHECK-NEXT: DW_AT_name ("elemnt") + ; CHECK: [[SYM:[a-z0-9]+]]: DW_TAG_formal_parameter ; CHECK: DW_AT_name ("esym") ; CHECK: DW_AT_type ([[TYPE:[a-z0-9]+]] "CHARACTER_1") diff --git a/llvm/test/CodeGen/X86/dpbusd.ll b/llvm/test/CodeGen/X86/dpbusd.ll index 3aa77c3..7bd22d5 100644 --- a/llvm/test/CodeGen/X86/dpbusd.ll +++ b/llvm/test/CodeGen/X86/dpbusd.ll @@ -1,40 +1,25 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=AVXVNNI -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=AVX512,AVX512VNNI -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVNNI +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX512 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VNNI +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VLVNNI define i32 @no_dpbusd(ptr%a, ptr%b, i32 %c, i32 %n) { -; AVXVNNI-LABEL: no_dpbusd: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVXVNNI-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vmovd %xmm0, %eax -; AVXVNNI-NEXT: addl %edx, %eax -; AVXVNNI-NEXT: vzeroupper -; AVXVNNI-NEXT: retq -; -; AVX512-LABEL: no_dpbusd: -; AVX512: # %bb.0: # %entry -; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVX512-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0 -; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: vmovd %xmm0, %eax -; AVX512-NEXT: addl %edx, %eax -; AVX512-NEXT: vzeroupper -; AVX512-NEXT: retq +; CHECK-LABEL: no_dpbusd: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; CHECK-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vmovd %xmm0, %eax +; CHECK-NEXT: addl %edx, %eax +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq entry: %0 = load <16 x i8>, ptr %a, align 16 %1 = zext <16 x i8> %0 to <16 x i32> @@ -99,25 +84,44 @@ entry: } define i32 @mul_zext(ptr%a, ptr%b, i32 %c, i32 %n) { -; AVXVNNI-LABEL: mul_zext: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVXVNNI-NEXT: vpmovsxbw (%rsi), %ymm1 -; AVXVNNI-NEXT: vpmullw %ymm0, %ymm1, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; AVXVNNI-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVXVNNI-NEXT: vpaddd %ymm1, %ymm0, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vmovd %xmm0, %eax -; AVXVNNI-NEXT: addl %edx, %eax -; AVXVNNI-NEXT: vzeroupper -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_zext: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVXVNNI-AVX-NEXT: vpmovsxbw (%rsi), %ymm1 +; AVXVNNI-AVX-NEXT: vpmullw %ymm0, %ymm1, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVXVNNI-AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVXVNNI-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX-NEXT: addl %edx, %eax +; AVXVNNI-AVX-NEXT: vzeroupper +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_zext: +; 
AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVXVNNI-AVX512-NEXT: vpmovsxbw (%rsi), %ymm1 +; AVXVNNI-AVX512-NEXT: vpmullw %ymm0, %ymm1, %ymm0 +; AVXVNNI-AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVXVNNI-AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX512-NEXT: addl %edx, %eax +; AVXVNNI-AVX512-NEXT: vzeroupper +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512-LABEL: mul_zext: ; AVX512: # %bb.0: # %entry @@ -153,25 +157,44 @@ entry: } define i32 @mul_sext(ptr%a, ptr%b, i32 %c, i32 %n) { -; AVXVNNI-LABEL: mul_sext: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVXVNNI-NEXT: vpmovsxbw (%rsi), %ymm1 -; AVXVNNI-NEXT: vpmullw %ymm0, %ymm1, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpmovsxwd %xmm1, %ymm1 -; AVXVNNI-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVXVNNI-NEXT: vpaddd %ymm1, %ymm0, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vmovd %xmm0, %eax -; AVXVNNI-NEXT: addl %edx, %eax -; AVXVNNI-NEXT: vzeroupper -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_sext: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVXVNNI-AVX-NEXT: vpmovsxbw (%rsi), %ymm1 +; AVXVNNI-AVX-NEXT: vpmullw %ymm0, %ymm1, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpmovsxwd %xmm1, %ymm1 +; AVXVNNI-AVX-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVXVNNI-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX-NEXT: addl %edx, %eax +; AVXVNNI-AVX-NEXT: vzeroupper +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_sext: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = 
mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVXVNNI-AVX512-NEXT: vpmovsxbw (%rsi), %ymm1 +; AVXVNNI-AVX512-NEXT: vpmullw %ymm0, %ymm1, %ymm0 +; AVXVNNI-AVX512-NEXT: vpmovsxwd %ymm0, %zmm0 +; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVXVNNI-AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX512-NEXT: addl %edx, %eax +; AVXVNNI-AVX512-NEXT: vzeroupper +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512-LABEL: mul_sext: ; AVX512: # %bb.0: # %entry @@ -312,17 +335,30 @@ entry: declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>) define i32 @vpdpbusd_128(ptr%a, ptr%b, i32 %c, i32 %n) { -; AVXVNNI-LABEL: vpdpbusd_128: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; AVXVNNI-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVXVNNI-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3] -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3] -; AVXVNNI-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2 -; AVXVNNI-NEXT: vmovd %xmm2, %eax -; AVXVNNI-NEXT: addl %edx, %eax -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: vpdpbusd_128: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVXVNNI-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVXVNNI-AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3] +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3] +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2 +; AVXVNNI-AVX-NEXT: vmovd %xmm2, %eax +; AVXVNNI-AVX-NEXT: addl %edx, %eax +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: vpdpbusd_128: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVXVNNI-AVX512-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3] +; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3] +; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2 +; AVXVNNI-AVX512-NEXT: vmovd %xmm2, %eax +; AVXVNNI-AVX512-NEXT: addl %edx, %eax +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512VNNI-LABEL: vpdpbusd_128: ; AVX512VNNI: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/X86/dpbusd_const.ll b/llvm/test/CodeGen/X86/dpbusd_const.ll index 456e6e8..bb47df5 100644 --- a/llvm/test/CodeGen/X86/dpbusd_const.ll +++ b/llvm/test/CodeGen/X86/dpbusd_const.ll @@ -1,20 +1,21 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=ALL,AVXVNNI -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VNNI -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VLVNNI +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s 
--check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX512 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VNNI +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VLVNNI define i32 @mul_4xi8_zc_exceed(<4 x i8> %a, i32 %c) { -; ALL-LABEL: mul_4xi8_zc_exceed: -; ALL: # %bb.0: # %entry -; ALL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; ALL-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,128,0] -; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; ALL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] -; ALL-NEXT: vpaddd %xmm0, %xmm1, %xmm0 -; ALL-NEXT: vmovd %xmm0, %eax -; ALL-NEXT: addl %edi, %eax -; ALL-NEXT: retq +; CHECK-LABEL: mul_4xi8_zc_exceed: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; CHECK-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,128,0] +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] +; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 +; CHECK-NEXT: vmovd %xmm0, %eax +; CHECK-NEXT: addl %edi, %eax +; CHECK-NEXT: retq entry: %0 = zext <4 x i8> %a to <4 x i32> %1 = mul nsw <4 x i32> %0, <i32 0, i32 1, i32 2, i32 128> @@ -24,14 +25,24 @@ entry: } define i32 @mul_4xi8_zc(<4 x i8> %a, i32 %c) { -; AVXVNNI-LABEL: mul_4xi8_zc: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] -; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 -; AVXVNNI-NEXT: vmovd %xmm1, %eax -; AVXVNNI-NEXT: addl %edi, %eax -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_4xi8_zc: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX-NEXT: addl %edi, %eax +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_4xi8_zc: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX512-NEXT: addl %edi, %eax +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512VNNI-LABEL: mul_4xi8_zc: ; AVX512VNNI: # %bb.0: # %entry @@ -62,16 +73,26 @@ entry: } define i32 @mul_4xi4_cz(<4 x i4> %a, i32 %c) { -; AVXVNNI-LABEL: mul_4xi4_cz: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] -; AVXVNNI-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] -; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 -; AVXVNNI-NEXT: vmovd %xmm1, %eax -; AVXVNNI-NEXT: addl %edi, %eax -; AVXVNNI-NEXT: retq +; 
AVXVNNI-AVX-LABEL: mul_4xi4_cz: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVXVNNI-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX-NEXT: addl %edi, %eax +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_4xi4_cz: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpmovdb %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX512-NEXT: addl %edi, %eax +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512VNNI-LABEL: mul_4xi4_cz: ; AVX512VNNI: # %bb.0: # %entry @@ -104,15 +125,26 @@ entry: } define i32 @mul_4xi8_cs(<4 x i8> %a, i32 %c) { -; AVXVNNI-LABEL: mul_4xi8_cs: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] -; AVXVNNI-NEXT: vmovd {{.*#+}} xmm2 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0] -; AVXVNNI-NEXT: {vex} vpdpbusd %xmm0, %xmm2, %xmm1 -; AVXVNNI-NEXT: vmovd %xmm1, %eax -; AVXVNNI-NEXT: addl %edi, %eax -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_4xi8_cs: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX-NEXT: vmovd {{.*#+}} xmm2 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0] +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %xmm0, %xmm2, %xmm1 +; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX-NEXT: addl %edi, %eax +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_4xi8_cs: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX512-NEXT: vmovd {{.*#+}} xmm1 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0] +; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %xmm0, %xmm1, %xmm2 +; AVXVNNI-AVX512-NEXT: vmovd %xmm2, %eax +; AVXVNNI-AVX512-NEXT: addl %edi, %eax +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512VNNI-LABEL: mul_4xi8_cs: ; AVX512VNNI: # %bb.0: # %entry @@ -145,17 +177,17 @@ entry: } define i32 @mul_4xi8_cs_exceed(<4 x i8> %a, i32 %c) { -; ALL-LABEL: mul_4xi8_cs_exceed: -; ALL: # %bb.0: # %entry -; ALL-NEXT: vpmovsxbd %xmm0, %xmm0 -; ALL-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,256,0] -; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; ALL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] -; ALL-NEXT: vpaddd %xmm0, %xmm1, %xmm0 -; ALL-NEXT: vmovd %xmm0, %eax -; ALL-NEXT: addl %edi, %eax -; ALL-NEXT: retq +; CHECK-LABEL: mul_4xi8_cs_exceed: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vpmovsxbd %xmm0, %xmm0 +; CHECK-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,256,0] +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] +; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 +; CHECK-NEXT: vmovd %xmm0, %eax +; CHECK-NEXT: addl %edi, %eax +; CHECK-NEXT: retq entry: %0 = sext <4 x i8> %a to <4 x i32> %1 = mul nsw <4 x i32> <i32 0, 
i32 1, i32 2, i32 256>, %0 @@ -265,24 +297,44 @@ entry: } define i32 @mul_64xi8_zc(<64 x i8> %a, i32 %c) { -; AVXVNNI-LABEL: mul_64xi8_zc: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64] -; AVXVNNI-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; AVXVNNI-NEXT: vpxor %xmm4, %xmm4, %xmm4 -; AVXVNNI-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4 -; AVXVNNI-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3 -; AVXVNNI-NEXT: vpaddd %ymm4, %ymm3, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vmovd %xmm0, %eax -; AVXVNNI-NEXT: addl %edi, %eax -; AVXVNNI-NEXT: vzeroupper -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_64xi8_zc: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64] +; AVXVNNI-AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVXVNNI-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4 +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3 +; AVXVNNI-AVX-NEXT: vpaddd %ymm4, %ymm3, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX-NEXT: addl %edi, %eax +; AVXVNNI-AVX-NEXT: vzeroupper +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_64xi8_zc: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVXVNNI-AVX512-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64] +; AVXVNNI-AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVXVNNI-AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3 +; AVXVNNI-AVX512-NEXT: vpaddd %ymm4, %ymm3, %ymm0 +; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX512-NEXT: addl %edi, %eax +; AVXVNNI-AVX512-NEXT: vzeroupper +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512-LABEL: mul_64xi8_zc: ; AVX512: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/X86/ftrunc.ll b/llvm/test/CodeGen/X86/ftrunc.ll index 3ed9858..9095fb1 100644 --- a/llvm/test/CodeGen/X86/ftrunc.ll +++ b/llvm/test/CodeGen/X86/ftrunc.ll @@ -243,7 +243,7 @@ define <4 x double> @trunc_unsigned_v4f64(<4 x double> %x) #0 { ret <4 x double> %r } -define float @trunc_signed_f32_no_fast_math(float %x) { +define float @trunc_signed_f32_no_fast_math(float %x) nounwind { ; SSE-LABEL: trunc_signed_f32_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttps2dq %xmm0, %xmm0 @@ -259,14 +259,12 @@ define float @trunc_signed_f32_no_fast_math(float %x) { ; X86-AVX1-LABEL: trunc_signed_f32_no_fast_math: ; X86-AVX1: # %bb.0: ; 
X86-AVX1-NEXT: pushl %eax -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 ; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0 ; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX1-NEXT: vmovss %xmm0, (%esp) ; X86-AVX1-NEXT: flds (%esp) ; X86-AVX1-NEXT: popl %eax -; X86-AVX1-NEXT: .cfi_def_cfa_offset 4 ; X86-AVX1-NEXT: retl %i = fptosi float %x to i32 %r = sitofp i32 %i to float @@ -306,7 +304,7 @@ define float @trunc_signed_f32_nsz(float %x) #0 { ret float %r } -define double @trunc_signed32_f64_no_fast_math(double %x) { +define double @trunc_signed32_f64_no_fast_math(double %x) nounwind { ; SSE-LABEL: trunc_signed32_f64_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttpd2dq %xmm0, %xmm0 @@ -322,10 +320,7 @@ define double @trunc_signed32_f64_no_fast_math(double %x) { ; X86-AVX1-LABEL: trunc_signed32_f64_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 -; X86-AVX1-NEXT: .cfi_offset %ebp, -8 ; X86-AVX1-NEXT: movl %esp, %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp ; X86-AVX1-NEXT: andl $-8, %esp ; X86-AVX1-NEXT: subl $8, %esp ; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero @@ -335,7 +330,6 @@ define double @trunc_signed32_f64_no_fast_math(double %x) { ; X86-AVX1-NEXT: fldl (%esp) ; X86-AVX1-NEXT: movl %ebp, %esp ; X86-AVX1-NEXT: popl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4 ; X86-AVX1-NEXT: retl %i = fptosi double %x to i32 %r = sitofp i32 %i to double @@ -377,7 +371,7 @@ define double @trunc_signed32_f64_nsz(double %x) #0 { ret double %r } -define double @trunc_f32_signed32_f64_no_fast_math(float %x) { +define double @trunc_f32_signed32_f64_no_fast_math(float %x) nounwind { ; SSE-LABEL: trunc_f32_signed32_f64_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttps2dq %xmm0, %xmm0 @@ -393,10 +387,7 @@ define double @trunc_f32_signed32_f64_no_fast_math(float %x) { ; X86-AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 -; X86-AVX1-NEXT: .cfi_offset %ebp, -8 ; X86-AVX1-NEXT: movl %esp, %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp ; X86-AVX1-NEXT: andl $-8, %esp ; X86-AVX1-NEXT: subl $8, %esp ; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero @@ -406,7 +397,6 @@ define double @trunc_f32_signed32_f64_no_fast_math(float %x) { ; X86-AVX1-NEXT: fldl (%esp) ; X86-AVX1-NEXT: movl %ebp, %esp ; X86-AVX1-NEXT: popl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4 ; X86-AVX1-NEXT: retl %i = fptosi float %x to i32 %r = sitofp i32 %i to double @@ -445,7 +435,7 @@ define double @trunc_f32_signed32_f64_nsz(float %x) #0 { ret double %r } -define float @trunc_f64_signed32_f32_no_fast_math(double %x) { +define float @trunc_f64_signed32_f32_no_fast_math(double %x) nounwind { ; SSE-LABEL: trunc_f64_signed32_f32_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttpd2dq %xmm0, %xmm0 @@ -461,14 +451,12 @@ define float @trunc_f64_signed32_f32_no_fast_math(double %x) { ; X86-AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %eax -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 ; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; X86-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0 ; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX1-NEXT: vmovss %xmm0, (%esp) ; X86-AVX1-NEXT: flds (%esp) ; X86-AVX1-NEXT: popl %eax -; X86-AVX1-NEXT: .cfi_def_cfa_offset 4 ; X86-AVX1-NEXT: retl %i = fptosi double %x to i32 %r = sitofp i32 %i to float @@ -503,7 +491,7 @@ define float @trunc_f64_signed32_f32_nsz(double %x) #0 { ret 
float %r } -define double @trunc_signed_f64_no_fast_math(double %x) { +define double @trunc_signed_f64_no_fast_math(double %x) nounwind { ; SSE-LABEL: trunc_signed_f64_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttsd2si %xmm0, %rax @@ -520,10 +508,7 @@ define double @trunc_signed_f64_no_fast_math(double %x) { ; X86-AVX1-LABEL: trunc_signed_f64_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 -; X86-AVX1-NEXT: .cfi_offset %ebp, -8 ; X86-AVX1-NEXT: movl %esp, %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp ; X86-AVX1-NEXT: andl $-8, %esp ; X86-AVX1-NEXT: subl $24, %esp ; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero @@ -537,7 +522,6 @@ define double @trunc_signed_f64_no_fast_math(double %x) { ; X86-AVX1-NEXT: fldl {{[0-9]+}}(%esp) ; X86-AVX1-NEXT: movl %ebp, %esp ; X86-AVX1-NEXT: popl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4 ; X86-AVX1-NEXT: retl %i = fptosi double %x to i64 %r = sitofp i64 %i to double diff --git a/llvm/test/CodeGen/X86/isint.ll b/llvm/test/CodeGen/X86/isint.ll index 8a56f49..8c11fe1 100644 --- a/llvm/test/CodeGen/X86/isint.ll +++ b/llvm/test/CodeGen/X86/isint.ll @@ -1,29 +1,29 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=CHECK64 %s -; RUN: llc < %s -mtriple=i686-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=CHECK32 %s +; RUN: llc < %s -mtriple=x86_64-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=X64 %s +; RUN: llc < %s -mtriple=i686-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=X86 %s ; PR19059 define i32 @isint_return(double %d) nounwind { -; CHECK64-LABEL: isint_return: -; CHECK64: # %bb.0: -; CHECK64-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK64-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK64-NEXT: cmpeqsd %xmm0, %xmm1 -; CHECK64-NEXT: movq %xmm1, %rax -; CHECK64-NEXT: andl $1, %eax -; CHECK64-NEXT: # kill: def $eax killed $eax killed $rax -; CHECK64-NEXT: retq +; X64-LABEL: isint_return: +; X64: # %bb.0: +; X64-NEXT: cvttpd2dq %xmm0, %xmm1 +; X64-NEXT: cvtdq2pd %xmm1, %xmm1 +; X64-NEXT: cmpeqsd %xmm0, %xmm1 +; X64-NEXT: movq %xmm1, %rax +; X64-NEXT: andl $1, %eax +; X64-NEXT: # kill: def $eax killed $eax killed $rax +; X64-NEXT: retq ; -; CHECK32-LABEL: isint_return: -; CHECK32: # %bb.0: -; CHECK32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK32-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK32-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK32-NEXT: cmpeqsd %xmm0, %xmm1 -; CHECK32-NEXT: movd %xmm1, %eax -; CHECK32-NEXT: andl $1, %eax -; CHECK32-NEXT: retl +; X86-LABEL: isint_return: +; X86: # %bb.0: +; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; X86-NEXT: cvttpd2dq %xmm0, %xmm1 +; X86-NEXT: cvtdq2pd %xmm1, %xmm1 +; X86-NEXT: cmpeqsd %xmm0, %xmm1 +; X86-NEXT: movd %xmm1, %eax +; X86-NEXT: andl $1, %eax +; X86-NEXT: retl %i = fptosi double %d to i32 %e = sitofp i32 %i to double %c = fcmp oeq double %d, %e @@ -32,24 +32,24 @@ define i32 @isint_return(double %d) nounwind { } define i32 @isint_float_return(float %f) nounwind { -; CHECK64-LABEL: isint_float_return: -; CHECK64: # %bb.0: -; CHECK64-NEXT: cvttps2dq %xmm0, %xmm1 -; CHECK64-NEXT: cvtdq2ps %xmm1, %xmm1 -; CHECK64-NEXT: cmpeqss %xmm0, %xmm1 -; CHECK64-NEXT: movd %xmm1, %eax -; CHECK64-NEXT: andl $1, %eax -; CHECK64-NEXT: retq +; X64-LABEL: isint_float_return: +; X64: # %bb.0: +; X64-NEXT: cvttps2dq %xmm0, %xmm1 +; X64-NEXT: cvtdq2ps %xmm1, %xmm1 +; X64-NEXT: cmpeqss %xmm0, %xmm1 +; X64-NEXT: movd %xmm1, %eax +; X64-NEXT: andl $1, %eax +; X64-NEXT: retq ; 
-; CHECK32-LABEL: isint_float_return: -; CHECK32: # %bb.0: -; CHECK32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; CHECK32-NEXT: cvttps2dq %xmm0, %xmm1 -; CHECK32-NEXT: cvtdq2ps %xmm1, %xmm1 -; CHECK32-NEXT: cmpeqss %xmm0, %xmm1 -; CHECK32-NEXT: movd %xmm1, %eax -; CHECK32-NEXT: andl $1, %eax -; CHECK32-NEXT: retl +; X86-LABEL: isint_float_return: +; X86: # %bb.0: +; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; X86-NEXT: cvttps2dq %xmm0, %xmm1 +; X86-NEXT: cvtdq2ps %xmm1, %xmm1 +; X86-NEXT: cmpeqss %xmm0, %xmm1 +; X86-NEXT: movd %xmm1, %eax +; X86-NEXT: andl $1, %eax +; X86-NEXT: retl %i = fptosi float %f to i32 %g = sitofp i32 %i to float %c = fcmp oeq float %f, %g @@ -60,32 +60,32 @@ define i32 @isint_float_return(float %f) nounwind { declare void @foo() define void @isint_branch(double %d) nounwind { -; CHECK64-LABEL: isint_branch: -; CHECK64: # %bb.0: -; CHECK64-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK64-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK64-NEXT: ucomisd %xmm1, %xmm0 -; CHECK64-NEXT: jne .LBB2_2 -; CHECK64-NEXT: jp .LBB2_2 -; CHECK64-NEXT: # %bb.1: # %true -; CHECK64-NEXT: pushq %rax -; CHECK64-NEXT: callq foo@PLT -; CHECK64-NEXT: popq %rax -; CHECK64-NEXT: .LBB2_2: # %false -; CHECK64-NEXT: retq +; X64-LABEL: isint_branch: +; X64: # %bb.0: +; X64-NEXT: cvttpd2dq %xmm0, %xmm1 +; X64-NEXT: cvtdq2pd %xmm1, %xmm1 +; X64-NEXT: ucomisd %xmm1, %xmm0 +; X64-NEXT: jne .LBB2_2 +; X64-NEXT: jp .LBB2_2 +; X64-NEXT: # %bb.1: # %true +; X64-NEXT: pushq %rax +; X64-NEXT: callq foo@PLT +; X64-NEXT: popq %rax +; X64-NEXT: .LBB2_2: # %false +; X64-NEXT: retq ; -; CHECK32-LABEL: isint_branch: -; CHECK32: # %bb.0: -; CHECK32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK32-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK32-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK32-NEXT: ucomisd %xmm1, %xmm0 -; CHECK32-NEXT: jne .LBB2_2 -; CHECK32-NEXT: jp .LBB2_2 -; CHECK32-NEXT: # %bb.1: # %true -; CHECK32-NEXT: calll foo@PLT -; CHECK32-NEXT: .LBB2_2: # %false -; CHECK32-NEXT: retl +; X86-LABEL: isint_branch: +; X86: # %bb.0: +; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; X86-NEXT: cvttpd2dq %xmm0, %xmm1 +; X86-NEXT: cvtdq2pd %xmm1, %xmm1 +; X86-NEXT: ucomisd %xmm1, %xmm0 +; X86-NEXT: jne .LBB2_2 +; X86-NEXT: jp .LBB2_2 +; X86-NEXT: # %bb.1: # %true +; X86-NEXT: calll foo@PLT +; X86-NEXT: .LBB2_2: # %false +; X86-NEXT: retl %i = fptosi double %d to i32 %e = sitofp i32 %i to double %c = fcmp oeq double %d, %e diff --git a/llvm/test/CodeGen/X86/known-signbits-shl.ll b/llvm/test/CodeGen/X86/known-signbits-shl.ll index 473fecc..57d557d 100644 --- a/llvm/test/CodeGen/X86/known-signbits-shl.ll +++ b/llvm/test/CodeGen/X86/known-signbits-shl.ll @@ -137,7 +137,7 @@ define void @computeNumSignBits_shl_zext_vec_3(<2 x i8> %x, ptr %p) nounwind { ; X64-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; X64-NEXT: por %xmm2, %xmm1 ; X64-NEXT: movdqa %xmm0, %xmm2 -; X64-NEXT: paddw %xmm0, %xmm2 +; X64-NEXT: paddw %xmm2, %xmm2 ; X64-NEXT: movdqa %xmm2, %xmm3 ; X64-NEXT: psraw $1, %xmm3 ; X64-NEXT: pcmpeqw %xmm0, %xmm3 diff --git a/llvm/test/CodeGen/X86/lea-16bit.ll b/llvm/test/CodeGen/X86/lea-16bit.ll index cec29ab..40da01d 100644 --- a/llvm/test/CodeGen/X86/lea-16bit.ll +++ b/llvm/test/CodeGen/X86/lea-16bit.ll @@ -13,7 +13,8 @@ define i16 @lea16bit(i16 %in) { ; NDD-LABEL: lea16bit: ; NDD: # %bb.0: ; NDD-NEXT: # kill: def $edi killed $edi def $rdi -; NDD-NEXT: leaw 1(%rdi,%rdi), %ax +; NDD-NEXT: leal 1(%rdi,%rdi), %eax +; NDD-NEXT: # kill: def $ax killed $ax killed $eax ; NDD-NEXT: retq %shl = shl i16 %in, 1 %or = or i16 
%shl, 1 diff --git a/llvm/test/CodeGen/X86/lea-8bit.ll b/llvm/test/CodeGen/X86/lea-8bit.ll index 98222df..fc295f7 100644 --- a/llvm/test/CodeGen/X86/lea-8bit.ll +++ b/llvm/test/CodeGen/X86/lea-8bit.ll @@ -14,7 +14,8 @@ define i8 @lea8bit(i8 %in) { ; NDD-LABEL: lea8bit: ; NDD: # %bb.0: ; NDD-NEXT: # kill: def $edi killed $edi def $rdi -; NDD-NEXT: leab 1(%rdi,%rdi), %al +; NDD-NEXT: leal 1(%rdi,%rdi), %eax +; NDD-NEXT: # kill: def $al killed $al killed $eax ; NDD-NEXT: retq %shl = shl i8 %in, 1 %or = or i8 %shl, 1 diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll index 4e6f666..4cde581 100644 --- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll +++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll @@ -4806,9 +4806,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16 ; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 -; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-KNL-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1} ; X64-KNL-NEXT: vmovaps %zmm1, %zmm0 ; X64-KNL-NEXT: retq @@ -4830,9 +4829,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16 ; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-SMALL-NEXT: retq @@ -4842,10 +4840,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16 ; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax ; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0 -; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-LARGE-NEXT: retq @@ -4875,9 +4872,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a ; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 -; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-KNL-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1} ; X64-KNL-NEXT: vmovaps %zmm1, %zmm0 ; X64-KNL-NEXT: retq @@ -4899,9 +4895,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a ; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; 
X64-SKX-SMALL-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-SMALL-NEXT: retq @@ -4911,10 +4906,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a ; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax ; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0 -; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-LARGE-NEXT: retq @@ -4944,9 +4938,8 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair( ; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 -; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0 -; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm2 +; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0 +; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2 ; X64-KNL-NEXT: kmovw %k1, %k2 ; X64-KNL-NEXT: vmovaps %zmm1, %zmm0 ; X64-KNL-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2} @@ -4972,9 +4965,8 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair( ; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0 -; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm2 +; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0 +; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2 ; X64-SKX-SMALL-NEXT: kmovw %k1, %k2 ; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-SMALL-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2} @@ -4986,10 +4978,9 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair( ; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax -; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0 -; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm2 +; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm2 ; X64-SKX-LARGE-NEXT: kmovw %k1, %k2 ; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-LARGE-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2} diff --git a/llvm/test/CodeGen/X86/negative-sin.ll b/llvm/test/CodeGen/X86/negative-sin.ll index f24507d..4836da2 100644 --- a/llvm/test/CodeGen/X86/negative-sin.ll +++ b/llvm/test/CodeGen/X86/negative-sin.ll @@ -82,18 +82,13 @@ define double @semi_strict2(double %e) nounwind { ret double %h } -; FIXME: -; Auto-upgrade function attribute to IR-level fast-math-flags. 
- -define double @fn_attr(double %e) nounwind #0 { -; CHECK-LABEL: fn_attr: +define double @nsz_flag(double %e) nounwind { +; CHECK-LABEL: nsz_flag: ; CHECK: # %bb.0: ; CHECK-NEXT: jmp sin@PLT # TAILCALL - %f = fsub double 0.0, %e - %g = call double @sin(double %f) readonly - %h = fsub double 0.0, %g + %f = fsub nsz double 0.0, %e + %g = call nsz double @sin(double %f) readonly + %h = fsub nsz double 0.0, %g ret double %h } -attributes #0 = { "unsafe-fp-math"="true" "no-signed-zeros-fp-math"="true" } - diff --git a/llvm/test/CodeGen/X86/oddsubvector.ll b/llvm/test/CodeGen/X86/oddsubvector.ll index f539830..5df1867 100644 --- a/llvm/test/CodeGen/X86/oddsubvector.ll +++ b/llvm/test/CodeGen/X86/oddsubvector.ll @@ -155,10 +155,10 @@ define <16 x i32> @PR42819(ptr %a0) { define void @PR42833() { ; SSE2-LABEL: PR42833: ; SSE2: # %bb.0: +; SSE2-NEXT: movl b(%rip), %eax ; SSE2-NEXT: movdqa c+144(%rip), %xmm2 ; SSE2-NEXT: movdqa c+128(%rip), %xmm0 -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: addl b(%rip), %eax +; SSE2-NEXT: addl c+128(%rip), %eax ; SSE2-NEXT: movd %eax, %xmm1 ; SSE2-NEXT: movd %eax, %xmm3 ; SSE2-NEXT: paddd %xmm0, %xmm3 @@ -166,7 +166,7 @@ define void @PR42833() { ; SSE2-NEXT: psubd %xmm2, %xmm4 ; SSE2-NEXT: paddd %xmm2, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: paddd %xmm0, %xmm5 +; SSE2-NEXT: paddd %xmm5, %xmm5 ; SSE2-NEXT: movss {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3] ; SSE2-NEXT: movdqa %xmm2, c+144(%rip) ; SSE2-NEXT: movaps %xmm5, c+128(%rip) @@ -191,17 +191,17 @@ define void @PR42833() { ; ; SSE42-LABEL: PR42833: ; SSE42: # %bb.0: +; SSE42-NEXT: movl b(%rip), %eax ; SSE42-NEXT: movdqa c+144(%rip), %xmm1 ; SSE42-NEXT: movdqa c+128(%rip), %xmm0 -; SSE42-NEXT: movd %xmm0, %eax -; SSE42-NEXT: addl b(%rip), %eax +; SSE42-NEXT: addl c+128(%rip), %eax ; SSE42-NEXT: movd %eax, %xmm2 ; SSE42-NEXT: paddd %xmm0, %xmm2 ; SSE42-NEXT: movdqa d+144(%rip), %xmm3 ; SSE42-NEXT: psubd %xmm1, %xmm3 ; SSE42-NEXT: paddd %xmm1, %xmm1 ; SSE42-NEXT: movdqa %xmm0, %xmm4 -; SSE42-NEXT: paddd %xmm0, %xmm4 +; SSE42-NEXT: paddd %xmm4, %xmm4 ; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1],xmm4[2,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm1, c+144(%rip) ; SSE42-NEXT: movdqa %xmm4, c+128(%rip) diff --git a/llvm/test/CodeGen/X86/pr62286.ll b/llvm/test/CodeGen/X86/pr62286.ll index ce03f8f..161e965 100644 --- a/llvm/test/CodeGen/X86/pr62286.ll +++ b/llvm/test/CodeGen/X86/pr62286.ll @@ -26,27 +26,33 @@ define i64 @PR62286(i32 %a) { ; AVX1-LABEL: PR62286: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovd %edi, %xmm0 -; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,0,0] +; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6,7] ; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7] -; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] -; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1 -; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: 
vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] ; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: PR62286: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovd %edi, %xmm0 -; AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm1 -; AVX2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3] -; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0] +; AVX2-NEXT: vpaddd %ymm0, %ymm0, %ymm1 +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 @@ -59,12 +65,12 @@ define i64 @PR62286(i32 %a) { ; AVX512-LABEL: PR62286: ; AVX512: # %bb.0: ; AVX512-NEXT: vmovd %edi, %xmm0 -; AVX512-NEXT: movb $8, %al +; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0] +; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm1 +; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512-NEXT: movw $4369, %ax # imm = 0x1111 ; AVX512-NEXT: kmovd %eax, %k1 -; AVX512-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z} -; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm0 -; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] +; AVX512-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1} ; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0 ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0 diff --git a/llvm/test/CodeGen/X86/pr74736.ll b/llvm/test/CodeGen/X86/pr74736.ll index ceccee0..5895526 100644 --- a/llvm/test/CodeGen/X86/pr74736.ll +++ b/llvm/test/CodeGen/X86/pr74736.ll @@ -6,8 +6,8 @@ define void @main(<16 x i32> %0, i32 %1) { ; SSE-LABEL: main: ; SSE: # %bb.0: # %entry ; SSE-NEXT: movd %edi, %xmm4 -; SSE-NEXT: movss {{.*#+}} xmm0 = [1,0,0,0] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm4[1,0] +; SSE-NEXT: movsd {{.*#+}} xmm0 = [0,1,0,0] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,0] ; SSE-NEXT: paddd %xmm0, %xmm0 ; SSE-NEXT: paddd %xmm1, %xmm1 ; SSE-NEXT: paddd %xmm3, %xmm3 @@ -32,20 +32,20 @@ define void @main(<16 x i32> %0, i32 %1) { ; AVX-LABEL: main: ; AVX: # %bb.0: # %entry ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm0[1,2,3] ; AVX-NEXT: movl $1, %eax ; AVX-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2 ; AVX-NEXT: vpinsrd $3, %edi, %xmm2, %xmm2 -; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] -; AVX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 -; AVX-NEXT: vpaddd %ymm1, %ymm1, %ymm1 -; AVX-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,1,3,3,5,5,7] -; AVX-NEXT: vpermd %ymm0, %ymm2, %ymm2 +; AVX-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; AVX-NEXT: vpaddd %ymm2, %ymm2, %ymm2 +; AVX-NEXT: vpaddd %ymm1, %ymm1, %ymm3 ; AVX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] ; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,3,3,3,7,7,7,7] -; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,5,7] +; AVX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 +; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm3[0,1,1,3,4,5,5,7] ; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] -; AVX-NEXT: vpxor %ymm0, %ymm2, %ymm0 +; AVX-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,1,1,3,3,5,5,7] +; AVX-NEXT: vpermd %ymm2, %ymm1, 
%ymm1 +; AVX-NEXT: vpxor %ymm0, %ymm1, %ymm0 ; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] diff --git a/llvm/test/CodeGen/X86/setoeq.ll b/llvm/test/CodeGen/X86/setoeq.ll index f0addf4..131e279 100644 --- a/llvm/test/CodeGen/X86/setoeq.ll +++ b/llvm/test/CodeGen/X86/setoeq.ll @@ -1,40 +1,532 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s - -define zeroext i8 @t(double %x) nounwind readnone { -; CHECK-LABEL: t: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK-NEXT: cmpeqsd %xmm0, %xmm1 -; CHECK-NEXT: movd %xmm1, %eax -; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: # kill: def $al killed $al killed $eax -; CHECK-NEXT: retl -entry: - %0 = fptosi double %x to i32 ; <i32> [#uses=1] - %1 = sitofp i32 %0 to double ; <double> [#uses=1] - %2 = fcmp oeq double %1, %x ; <i1> [#uses=1] - %retval12 = zext i1 %2 to i8 ; <i8> [#uses=1] - ret i8 %retval12 -} - -define zeroext i8 @u(double %x) nounwind readnone { -; CHECK-LABEL: u: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK-NEXT: cmpneqsd %xmm0, %xmm1 -; CHECK-NEXT: movd %xmm1, %eax -; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: # kill: def $al killed $al killed $eax -; CHECK-NEXT: retl -entry: - %0 = fptosi double %x to i32 ; <i32> [#uses=1] - %1 = sitofp i32 %0 to double ; <double> [#uses=1] - %2 = fcmp une double %1, %x ; <i1> [#uses=1] - %retval12 = zext i1 %2 to i8 ; <i8> [#uses=1] +; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE +; RUN: llc < %s -mtriple=i686-- -mattr=+avx | FileCheck %s --check-prefixes=AVX +; RUN: llc < %s -mtriple=i686-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX512 + +define zeroext i8 @oeq_f64_i32(double %x) nounwind readnone { +; SSE-LABEL: oeq_f64_i32: +; SSE: # %bb.0: # %entry +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: cvttpd2dq %xmm0, %xmm1 +; SSE-NEXT: cvtdq2pd %xmm1, %xmm1 +; SSE-NEXT: cmpeqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: retl +; +; AVX-LABEL: oeq_f64_i32: +; AVX: # %bb.0: # %entry +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vcvttpd2dq %xmm0, %xmm1 +; AVX-NEXT: vcvtdq2pd %xmm1, %xmm1 +; AVX-NEXT: vcmpeqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: retl +; +; AVX512-LABEL: oeq_f64_i32: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2dq %xmm0, %xmm1 +; AVX512-NEXT: vcvtdq2pd %xmm1, %xmm1 +; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: retl +entry: + %0 = fptosi double %x to i32 + %1 = sitofp i32 %0 to double + %2 = fcmp oeq double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @oeq_f64_u32(double %x) nounwind readnone { +; SSE-LABEL: oeq_f64_u32: +; SSE: # %bb.0: # %entry +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: cvttsd2si %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: movapd %xmm0, %xmm1 +; SSE-NEXT: subsd 
{{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: cvttsd2si %xmm1, %edx +; SSE-NEXT: andl %ecx, %edx +; SSE-NEXT: orl %eax, %edx +; SSE-NEXT: movd %edx, %xmm1 +; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: cmpeqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: retl +; +; AVX-LABEL: oeq_f64_u32: +; AVX: # %bb.0: # %entry +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vcvttsd2si %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarl $31, %ecx +; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 +; AVX-NEXT: vcvttsd2si %xmm1, %edx +; AVX-NEXT: andl %ecx, %edx +; AVX-NEXT: orl %eax, %edx +; AVX-NEXT: vmovd %edx, %xmm1 +; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vcmpeqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: retl +; +; AVX512-LABEL: oeq_f64_u32: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttsd2usi %xmm0, %eax +; AVX512-NEXT: vcvtusi2sd %eax, %xmm7, %xmm1 +; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: retl +entry: + %0 = fptoui double %x to i32 + %1 = uitofp i32 %0 to double + %2 = fcmp oeq double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @oeq_f64_i64(double %x) nounwind readnone { +; SSE-LABEL: oeq_f64_i64: +; SSE: # %bb.0: # %entry +; SSE-NEXT: pushl %ebp +; SSE-NEXT: movl %esp, %ebp +; SSE-NEXT: andl $-8, %esp +; SSE-NEXT: subl $32, %esp +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp) +; SSE-NEXT: fldl {{[0-9]+}}(%esp) +; SSE-NEXT: fnstcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; SSE-NEXT: orl $3072, %eax # imm = 0xC00 +; SSE-NEXT: movw %ax, {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: fistpll {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: movlps %xmm1, {{[0-9]+}}(%esp) +; SSE-NEXT: fildll {{[0-9]+}}(%esp) +; SSE-NEXT: fstpl {{[0-9]+}}(%esp) +; SSE-NEXT: cmpeqsd {{[0-9]+}}(%esp), %xmm0 +; SSE-NEXT: movd %xmm0, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: movl %ebp, %esp +; SSE-NEXT: popl %ebp +; SSE-NEXT: retl +; +; AVX-LABEL: oeq_f64_i64: +; AVX: # %bb.0: # %entry +; AVX-NEXT: pushl %ebp +; AVX-NEXT: movl %esp, %ebp +; AVX-NEXT: andl $-8, %esp +; AVX-NEXT: subl $24, %esp +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovsd %xmm0, (%esp) +; AVX-NEXT: fldl (%esp) +; AVX-NEXT: fisttpll (%esp) +; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp) +; AVX-NEXT: fildll {{[0-9]+}}(%esp) +; AVX-NEXT: fstpl {{[0-9]+}}(%esp) +; AVX-NEXT: vcmpeqsd {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: movl %ebp, %esp +; AVX-NEXT: popl %ebp +; AVX-NEXT: retl +; +; AVX512-LABEL: oeq_f64_i64: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2qq %xmm0, %xmm1 +; AVX512-NEXT: vcvtqq2pd %ymm1, %ymm1 +; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd 
%k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retl +entry: + %0 = fptosi double %x to i64 + %1 = sitofp i64 %0 to double + %2 = fcmp oeq double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @oeq_f64_u64(double %x) nounwind readnone { +; SSE-LABEL: oeq_f64_u64: +; SSE: # %bb.0: # %entry +; SSE-NEXT: pushl %ebp +; SSE-NEXT: movl %esp, %ebp +; SSE-NEXT: andl $-8, %esp +; SSE-NEXT: subl $16, %esp +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0] +; SSE-NEXT: ucomisd %xmm0, %xmm1 +; SSE-NEXT: jbe .LBB3_2 +; SSE-NEXT: # %bb.1: # %entry +; SSE-NEXT: xorpd %xmm1, %xmm1 +; SSE-NEXT: .LBB3_2: # %entry +; SSE-NEXT: movapd %xmm0, %xmm2 +; SSE-NEXT: subsd %xmm1, %xmm2 +; SSE-NEXT: movsd %xmm2, {{[0-9]+}}(%esp) +; SSE-NEXT: setbe %al +; SSE-NEXT: fldl {{[0-9]+}}(%esp) +; SSE-NEXT: fnstcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; SSE-NEXT: orl $3072, %ecx # imm = 0xC00 +; SSE-NEXT: movw %cx, {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: fistpll {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzbl %al, %eax +; SSE-NEXT: shll $31, %eax +; SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax +; SSE-NEXT: movd %eax, %xmm1 +; SSE-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1] +; SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 +; SSE-NEXT: movapd %xmm2, %xmm1 +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] +; SSE-NEXT: addsd %xmm2, %xmm1 +; SSE-NEXT: cmpeqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: movl %ebp, %esp +; SSE-NEXT: popl %ebp +; SSE-NEXT: retl +; +; AVX-LABEL: oeq_f64_u64: +; AVX: # %bb.0: # %entry +; AVX-NEXT: pushl %ebp +; AVX-NEXT: movl %esp, %ebp +; AVX-NEXT: andl $-8, %esp +; AVX-NEXT: subl $8, %esp +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0] +; AVX-NEXT: vucomisd %xmm0, %xmm1 +; AVX-NEXT: jbe .LBB3_2 +; AVX-NEXT: # %bb.1: # %entry +; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: .LBB3_2: # %entry +; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vmovsd %xmm1, (%esp) +; AVX-NEXT: fldl (%esp) +; AVX-NEXT: fisttpll (%esp) +; AVX-NEXT: setbe %al +; AVX-NEXT: movzbl %al, %eax +; AVX-NEXT: shll $31, %eax +; AVX-NEXT: xorl {{[0-9]+}}(%esp), %eax +; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] +; AVX-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0] +; AVX-NEXT: vaddsd %xmm1, %xmm2, %xmm1 +; AVX-NEXT: vcmpeqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: movl %ebp, %esp +; AVX-NEXT: popl %ebp +; AVX-NEXT: retl +; +; AVX512-LABEL: oeq_f64_u64: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2uqq %xmm0, %xmm1 +; AVX512-NEXT: vcvtuqq2pd %ymm1, %ymm1 +; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retl +entry: + %0 = fptoui double %x to i64 + %1 
= uitofp i64 %0 to double + %2 = fcmp oeq double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @une_f64_i32(double %x) nounwind readnone { +; SSE-LABEL: une_f64_i32: +; SSE: # %bb.0: # %entry +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: cvttpd2dq %xmm0, %xmm1 +; SSE-NEXT: cvtdq2pd %xmm1, %xmm1 +; SSE-NEXT: cmpneqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: retl +; +; AVX-LABEL: une_f64_i32: +; AVX: # %bb.0: # %entry +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vcvttpd2dq %xmm0, %xmm1 +; AVX-NEXT: vcvtdq2pd %xmm1, %xmm1 +; AVX-NEXT: vcmpneqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: retl +; +; AVX512-LABEL: une_f64_i32: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2dq %xmm0, %xmm1 +; AVX512-NEXT: vcvtdq2pd %xmm1, %xmm1 +; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: retl +entry: + %0 = fptosi double %x to i32 + %1 = sitofp i32 %0 to double + %2 = fcmp une double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @une_f64_u32(double %x) nounwind readnone { +; SSE-LABEL: une_f64_u32: +; SSE: # %bb.0: # %entry +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: cvttsd2si %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: movapd %xmm0, %xmm1 +; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: cvttsd2si %xmm1, %edx +; SSE-NEXT: andl %ecx, %edx +; SSE-NEXT: orl %eax, %edx +; SSE-NEXT: movd %edx, %xmm1 +; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: cmpneqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: retl +; +; AVX-LABEL: une_f64_u32: +; AVX: # %bb.0: # %entry +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vcvttsd2si %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarl $31, %ecx +; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 +; AVX-NEXT: vcvttsd2si %xmm1, %edx +; AVX-NEXT: andl %ecx, %edx +; AVX-NEXT: orl %eax, %edx +; AVX-NEXT: vmovd %edx, %xmm1 +; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vcmpneqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: retl +; +; AVX512-LABEL: une_f64_u32: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttsd2usi %xmm0, %eax +; AVX512-NEXT: vcvtusi2sd %eax, %xmm7, %xmm1 +; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: retl +entry: + %0 = fptoui double %x to i32 + %1 = uitofp i32 %0 to double + %2 = fcmp une double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @une_f64_i64(double %x) nounwind readnone { +; SSE-LABEL: une_f64_i64: +; SSE: # %bb.0: # %entry +; SSE-NEXT: pushl %ebp +; SSE-NEXT: movl %esp, %ebp +; SSE-NEXT: andl $-8, %esp +; SSE-NEXT: subl $32, %esp +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd %xmm0, 
{{[0-9]+}}(%esp) +; SSE-NEXT: fldl {{[0-9]+}}(%esp) +; SSE-NEXT: fnstcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; SSE-NEXT: orl $3072, %eax # imm = 0xC00 +; SSE-NEXT: movw %ax, {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: fistpll {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: movlps %xmm1, {{[0-9]+}}(%esp) +; SSE-NEXT: fildll {{[0-9]+}}(%esp) +; SSE-NEXT: fstpl {{[0-9]+}}(%esp) +; SSE-NEXT: cmpneqsd {{[0-9]+}}(%esp), %xmm0 +; SSE-NEXT: movd %xmm0, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: movl %ebp, %esp +; SSE-NEXT: popl %ebp +; SSE-NEXT: retl +; +; AVX-LABEL: une_f64_i64: +; AVX: # %bb.0: # %entry +; AVX-NEXT: pushl %ebp +; AVX-NEXT: movl %esp, %ebp +; AVX-NEXT: andl $-8, %esp +; AVX-NEXT: subl $24, %esp +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovsd %xmm0, (%esp) +; AVX-NEXT: fldl (%esp) +; AVX-NEXT: fisttpll (%esp) +; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp) +; AVX-NEXT: fildll {{[0-9]+}}(%esp) +; AVX-NEXT: fstpl {{[0-9]+}}(%esp) +; AVX-NEXT: vcmpneqsd {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: movl %ebp, %esp +; AVX-NEXT: popl %ebp +; AVX-NEXT: retl +; +; AVX512-LABEL: une_f64_i64: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2qq %xmm0, %xmm1 +; AVX512-NEXT: vcvtqq2pd %ymm1, %ymm1 +; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retl +entry: + %0 = fptosi double %x to i64 + %1 = sitofp i64 %0 to double + %2 = fcmp une double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @une_f64_u64(double %x) nounwind readnone { +; SSE-LABEL: une_f64_u64: +; SSE: # %bb.0: # %entry +; SSE-NEXT: pushl %ebp +; SSE-NEXT: movl %esp, %ebp +; SSE-NEXT: andl $-8, %esp +; SSE-NEXT: subl $16, %esp +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0] +; SSE-NEXT: ucomisd %xmm0, %xmm1 +; SSE-NEXT: jbe .LBB7_2 +; SSE-NEXT: # %bb.1: # %entry +; SSE-NEXT: xorpd %xmm1, %xmm1 +; SSE-NEXT: .LBB7_2: # %entry +; SSE-NEXT: movapd %xmm0, %xmm2 +; SSE-NEXT: subsd %xmm1, %xmm2 +; SSE-NEXT: movsd %xmm2, {{[0-9]+}}(%esp) +; SSE-NEXT: setbe %al +; SSE-NEXT: fldl {{[0-9]+}}(%esp) +; SSE-NEXT: fnstcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; SSE-NEXT: orl $3072, %ecx # imm = 0xC00 +; SSE-NEXT: movw %cx, {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: fistpll {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzbl %al, %eax +; SSE-NEXT: shll $31, %eax +; SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax +; SSE-NEXT: movd %eax, %xmm1 +; SSE-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1] +; SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 +; SSE-NEXT: movapd %xmm2, %xmm1 +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] +; SSE-NEXT: addsd %xmm2, %xmm1 +; SSE-NEXT: cmpneqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: movl %ebp, %esp +; SSE-NEXT: 
popl %ebp +; SSE-NEXT: retl +; +; AVX-LABEL: une_f64_u64: +; AVX: # %bb.0: # %entry +; AVX-NEXT: pushl %ebp +; AVX-NEXT: movl %esp, %ebp +; AVX-NEXT: andl $-8, %esp +; AVX-NEXT: subl $8, %esp +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0] +; AVX-NEXT: vucomisd %xmm0, %xmm1 +; AVX-NEXT: jbe .LBB7_2 +; AVX-NEXT: # %bb.1: # %entry +; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: .LBB7_2: # %entry +; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vmovsd %xmm1, (%esp) +; AVX-NEXT: fldl (%esp) +; AVX-NEXT: fisttpll (%esp) +; AVX-NEXT: setbe %al +; AVX-NEXT: movzbl %al, %eax +; AVX-NEXT: shll $31, %eax +; AVX-NEXT: xorl {{[0-9]+}}(%esp), %eax +; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] +; AVX-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0] +; AVX-NEXT: vaddsd %xmm1, %xmm2, %xmm1 +; AVX-NEXT: vcmpneqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: movl %ebp, %esp +; AVX-NEXT: popl %ebp +; AVX-NEXT: retl +; +; AVX512-LABEL: une_f64_u64: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2uqq %xmm0, %xmm1 +; AVX512-NEXT: vcvtuqq2pd %ymm1, %ymm1 +; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retl +entry: + %0 = fptoui double %x to i64 + %1 = uitofp i64 %0 to double + %2 = fcmp une double %1, %x + %retval12 = zext i1 %2 to i8 ret i8 %retval12 } diff --git a/llvm/test/CodeGen/X86/shift-i512.ll b/llvm/test/CodeGen/X86/shift-i512.ll index 756019d..03b61d9 100644 --- a/llvm/test/CodeGen/X86/shift-i512.ll +++ b/llvm/test/CodeGen/X86/shift-i512.ll @@ -10,7 +10,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) { ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: valignq {{.*#+}} zmm1 = zmm0[3,4,5,6,7,0,1,2] ; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX512VL-NEXT: vpsllq $1, %xmm0, %xmm3 +; AVX512VL-NEXT: vpaddq %xmm0, %xmm0, %xmm3 ; AVX512VL-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3] ; AVX512VL-NEXT: vpsrlq $63, %xmm4, %xmm4 ; AVX512VL-NEXT: vpaddq %xmm2, %xmm2, %xmm2 @@ -34,7 +34,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) { ; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm2 ; AVX512VBMI-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3] ; AVX512VBMI-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3 -; AVX512VBMI-NEXT: vpsllq $1, %xmm0, %xmm4 +; AVX512VBMI-NEXT: vpaddq %xmm0, %xmm0, %xmm4 ; AVX512VBMI-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 ; AVX512VBMI-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; AVX512VBMI-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7] @@ -51,7 +51,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) { ; ZNVER4-NEXT: vextracti32x4 $2, %zmm0, %xmm1 ; ZNVER4-NEXT: vextracti128 $1, %ymm0, %xmm2 ; ZNVER4-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3] -; ZNVER4-NEXT: vpsllq $1, %xmm0, %xmm4 +; ZNVER4-NEXT: vpaddq %xmm0, %xmm0, %xmm4 ; ZNVER4-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; ZNVER4-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3 ; ZNVER4-NEXT: vextracti64x4 $1, %zmm0, %ymm2 diff --git a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll index 3f48b22..a48be03 100644 --- a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll +++ 
b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll @@ -5791,20 +5791,20 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone define <2 x i64> @test_mm_slli_epi16(<2 x i64> %a0) { ; SSE-LABEL: test_mm_slli_epi16: ; SSE: # %bb.0: -; SSE-NEXT: psllw $1, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x01] +; SSE-NEXT: psllw $2, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x02] ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX1-LABEL: test_mm_slli_epi16: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsllw $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x01] +; AVX1-NEXT: vpsllw $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x02] ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX512-LABEL: test_mm_slli_epi16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x01] +; AVX512-NEXT: vpsllw $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x02] ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3] %arg0 = bitcast <2 x i64> %a0 to <8 x i16> - %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 1) + %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 2) %bc = bitcast <8 x i16> %res to <2 x i64> ret <2 x i64> %bc } @@ -5813,20 +5813,20 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone define <2 x i64> @test_mm_slli_epi32(<2 x i64> %a0) { ; SSE-LABEL: test_mm_slli_epi32: ; SSE: # %bb.0: -; SSE-NEXT: pslld $1, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x01] +; SSE-NEXT: pslld $2, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x02] ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX1-LABEL: test_mm_slli_epi32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpslld $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x01] +; AVX1-NEXT: vpslld $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x02] ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX512-LABEL: test_mm_slli_epi32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpslld $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x01] +; AVX512-NEXT: vpslld $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x02] ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3] %arg0 = bitcast <2 x i64> %a0 to <4 x i32> - %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 1) + %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 2) %bc = bitcast <4 x i32> %res to <2 x i64> ret <2 x i64> %bc } @@ -5835,19 +5835,19 @@ declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone define <2 x i64> @test_mm_slli_epi64(<2 x i64> %a0) { ; SSE-LABEL: test_mm_slli_epi64: ; SSE: # %bb.0: -; SSE-NEXT: psllq $1, %xmm0 # encoding: [0x66,0x0f,0x73,0xf0,0x01] +; SSE-NEXT: psllq $2, %xmm0 # encoding: [0x66,0x0f,0x73,0xf0,0x02] ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX1-LABEL: test_mm_slli_epi64: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsllq $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf0,0x01] +; AVX1-NEXT: vpsllq $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf0,0x02] ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX512-LABEL: test_mm_slli_epi64: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsllq $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x01] +; AVX512-NEXT: vpsllq $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x02] ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3] - %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 1) + %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 2) ret <2 x i64> %res } declare <2 
x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone diff --git a/llvm/test/CodeGen/X86/stack-folding-int-avxvnniint8.ll b/llvm/test/CodeGen/X86/stack-folding-int-avxvnniint8.ll index fd988f7..a49d3a5 100644 --- a/llvm/test/CodeGen/X86/stack-folding-int-avxvnniint8.ll +++ b/llvm/test/CodeGen/X86/stack-folding-int-avxvnniint8.ll @@ -1,20 +1,20 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avxvnniint8 < %s | FileCheck %s -declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <4 x i32> @stack_fold_vpdpbssd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbssd(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssd: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -24,11 +24,11 @@ define <4 x i32> @stack_fold_vpdpbssd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a ; CHECK-NEXT: vpdpbssd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) ret <4 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbssd_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbssd_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; 
CHECK-LABEL: stack_fold_vpdpbssd_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -38,11 +38,11 @@ define <4 x i32> @stack_fold_vpdpbssd_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 ; CHECK-NEXT: vpdpbssd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1) ret <4 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbssd_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbssd_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssd_256: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -52,11 +52,11 @@ define <8 x i32> @stack_fold_vpdpbssd_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32 ; CHECK-NEXT: vpdpbssd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) ret <8 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbssd_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbssd_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssd_256_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -66,11 +66,11 @@ define <8 x i32> @stack_fold_vpdpbssd_256_commuted(<8 x i32> %a0, <8 x i32> %a1, ; CHECK-NEXT: vpdpbssd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1) ret <8 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbssds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbssds(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssds: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -80,11 +80,11 @@ define <4 x i32> @stack_fold_vpdpbssds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> % ; CHECK-NEXT: vpdpbssds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) ret <4 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbssds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> 
@stack_fold_vpdpbssds_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssds_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -94,11 +94,11 @@ define <4 x i32> @stack_fold_vpdpbssds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 ; CHECK-NEXT: vpdpbssds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1) ret <4 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbssds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbssds_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssds_256: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -108,11 +108,11 @@ define <8 x i32> @stack_fold_vpdpbssds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i3 ; CHECK-NEXT: vpdpbssds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) ret <8 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbssds_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbssds_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssds_256_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -122,11 +122,11 @@ define <8 x i32> @stack_fold_vpdpbssds_256_commuted(<8 x i32> %a0, <8 x i32> %a1 ; CHECK-NEXT: vpdpbssds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1) ret <8 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbsud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbsud(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsud: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -136,11 +136,11 @@ define <4 x i32> @stack_fold_vpdpbsud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a ; CHECK-NEXT: vpdpbsud {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) ret <4 x i32> %2 } -define <4 x i32> 
@stack_fold_vpdpbsud_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbsud_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsud_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -151,11 +151,11 @@ define <4 x i32> @stack_fold_vpdpbsud_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 ; CHECK-NEXT: vpdpbsud %xmm1, %xmm2, %xmm0 ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1) ret <4 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbsud_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbsud_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsud_256: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -165,11 +165,11 @@ define <8 x i32> @stack_fold_vpdpbsud_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32 ; CHECK-NEXT: vpdpbsud {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) ret <8 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbsud_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbsud_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsud_256_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -180,11 +180,11 @@ define <8 x i32> @stack_fold_vpdpbsud_256_commuted(<8 x i32> %a0, <8 x i32> %a1, ; CHECK-NEXT: vpdpbsud %ymm1, %ymm2, %ymm0 ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1) ret <8 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbsuds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbsuds(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsuds: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -194,11 +194,11 @@ define <4 x i32> @stack_fold_vpdpbsuds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> % ; CHECK-NEXT: vpdpbsuds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) ret <4 x i32> %2 } -define <4 x i32> 
@stack_fold_vpdpbsuds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbsuds_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsuds_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -209,11 +209,11 @@ define <4 x i32> @stack_fold_vpdpbsuds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 ; CHECK-NEXT: vpdpbsuds %xmm1, %xmm2, %xmm0 ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1) ret <4 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbsuds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbsuds_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsuds_256: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -223,11 +223,11 @@ define <8 x i32> @stack_fold_vpdpbsuds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i3 ; CHECK-NEXT: vpdpbsuds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) ret <8 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbsuds_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbsuds_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsuds_256_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -238,11 +238,11 @@ define <8 x i32> @stack_fold_vpdpbsuds_256_commuted(<8 x i32> %a0, <8 x i32> %a1 ; CHECK-NEXT: vpdpbsuds %ymm1, %ymm2, %ymm0 ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1) ret <8 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbuud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbuud(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuud: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -252,11 +252,11 @@ define <4 x i32> @stack_fold_vpdpbuud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a ; CHECK-NEXT: vpdpbuud {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) ret <4 x i32> %2 } -define <4 x i32> 
@stack_fold_vpdpbuud_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbuud_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuud_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -266,11 +266,11 @@ define <4 x i32> @stack_fold_vpdpbuud_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 ; CHECK-NEXT: vpdpbuud {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1) ret <4 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbuud_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbuud_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuud_256: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -280,11 +280,11 @@ define <8 x i32> @stack_fold_vpdpbuud_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32 ; CHECK-NEXT: vpdpbuud {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) ret <8 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbuud_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbuud_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuud_256_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -294,11 +294,11 @@ define <8 x i32> @stack_fold_vpdpbuud_256_commuted(<8 x i32> %a0, <8 x i32> %a1, ; CHECK-NEXT: vpdpbuud {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1) ret <8 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbuuds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbuuds(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuuds: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -308,11 +308,11 @@ define <4 x i32> @stack_fold_vpdpbuuds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> % ; CHECK-NEXT: vpdpbuuds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> 
%a0, <16 x i8> %a1, <16 x i8> %a2) ret <4 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbuuds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbuuds_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuuds_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -322,11 +322,11 @@ define <4 x i32> @stack_fold_vpdpbuuds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 ; CHECK-NEXT: vpdpbuuds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1) ret <4 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbuuds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbuuds_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuuds_256: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -336,11 +336,11 @@ define <8 x i32> @stack_fold_vpdpbuuds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i3 ; CHECK-NEXT: vpdpbuuds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) ret <8 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbuuds_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbuuds_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuuds_256_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -350,6 +350,6 @@ define <8 x i32> @stack_fold_vpdpbuuds_256_commuted(<8 x i32> %a0, <8 x i32> %a1 ; CHECK-NEXT: vpdpbuuds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1) ret <8 x i32> %2 } diff --git a/llvm/test/CodeGen/X86/vec_shift6.ll b/llvm/test/CodeGen/X86/vec_shift6.ll index 71e659c..219e32c 100644 --- a/llvm/test/CodeGen/X86/vec_shift6.ll +++ b/llvm/test/CodeGen/X86/vec_shift6.ll @@ -28,14 +28,14 @@ define <8 x i16> @test2(<8 x i16> %a) { ; SSE2-LABEL: test2: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddw %xmm0, %xmm1 +; SSE2-NEXT: paddw %xmm1, %xmm1 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] ; SSE2-NEXT: retq ; ; SSE41-LABEL: test2: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: paddw %xmm0, %xmm1 +; SSE41-NEXT: paddw %xmm1, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; SSE41-NEXT: retq ; @@ -56,7 +56,7 @@ define <4 x i32> @test3(<4 
x i32> %a) { ; SSE2-LABEL: test3: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: paddd %xmm1, %xmm1 ; SSE2-NEXT: pslld $2, %xmm0 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE2-NEXT: retq @@ -81,14 +81,14 @@ define <4 x i32> @test4(<4 x i32> %a) { ; SSE2-LABEL: test4: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: paddd %xmm1, %xmm1 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] ; SSE2-NEXT: retq ; ; SSE41-LABEL: test4: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: paddd %xmm0, %xmm1 +; SSE41-NEXT: paddd %xmm1, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; SSE41-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll b/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll index 23d22e7..3f92d2b 100644 --- a/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll +++ b/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -enable-unsafe-fp-math -enable-no-signed-zeros-fp-math -mtriple=x86_64-unknown-unknown | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s ; Make sure that vectors get the same benefits as scalars when using unsafe-fp-math. @@ -18,7 +18,7 @@ define <4 x float> @vec_fneg(<4 x float> %x) { ; CHECK: # %bb.0: ; CHECK-NEXT: xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %sub = fsub <4 x float> zeroinitializer, %x + %sub = fsub nsz <4 x float> zeroinitializer, %x ret <4 x float> %sub } diff --git a/llvm/test/CodeGen/X86/vector-gep.ll b/llvm/test/CodeGen/X86/vector-gep.ll index 5c48559..b4cffcd 100644 --- a/llvm/test/CodeGen/X86/vector-gep.ll +++ b/llvm/test/CodeGen/X86/vector-gep.ll @@ -122,91 +122,87 @@ define <64 x ptr> @AGEP9(ptr %param, <64 x i32> %off) nounwind { ; CHECK-NEXT: movl %esp, %ebp ; CHECK-NEXT: andl $-32, %esp ; CHECK-NEXT: subl $160, %esp -; CHECK-NEXT: vmovdqa %ymm2, %ymm5 -; CHECK-NEXT: vmovdqa %ymm1, %ymm3 -; CHECK-NEXT: vmovdqa %ymm0, %ymm1 -; CHECK-NEXT: vmovdqa 72(%ebp), %ymm0 -; CHECK-NEXT: vmovdqa 40(%ebp), %ymm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm4 -; CHECK-NEXT: vbroadcastss 12(%ebp), %xmm7 -; CHECK-NEXT: vpaddd %xmm4, %xmm7, %xmm4 -; CHECK-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm3 +; CHECK-NEXT: vbroadcastss 12(%ebp), %xmm5 +; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3 +; CHECK-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vmovdqa 104(%ebp), %ymm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, 
{{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vmovdqa 136(%ebp), %ymm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vmovdqa 168(%ebp), %ymm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, (%esp) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vmovdqa 40(%ebp), %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm2 -; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 -; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm1 -; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm1 -; CHECK-NEXT: vpaddd %xmm1, %xmm7, %xmm1 -; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm6 -; CHECK-NEXT: vpaddd %xmm6, %xmm7, %xmm6 -; CHECK-NEXT: vextractf128 $1, %ymm3, %xmm3 -; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3 -; CHECK-NEXT: vpaddd %xmm3, %xmm7, %xmm3 -; CHECK-NEXT: vmovdqa %ymm5, %ymm4 -; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm5 -; CHECK-NEXT: vpaddd %xmm5, %xmm7, %xmm5 -; CHECK-NEXT: vextractf128 $1, %ymm4, %xmm4 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vmovdqa 56(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vmovdqa 72(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, (%esp) # 16-byte Spill +; CHECK-NEXT: vmovdqa 88(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm2 +; CHECK-NEXT: vmovdqa 104(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm1 +; CHECK-NEXT: vmovdqa 120(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa 136(%ebp), %xmm6 +; CHECK-NEXT: vpaddd %xmm6, %xmm6, %xmm6 +; CHECK-NEXT: vpaddd %xmm6, %xmm5, %xmm6 +; CHECK-NEXT: vmovdqa 152(%ebp), %xmm7 +; CHECK-NEXT: vpaddd %xmm7, %xmm7, %xmm7 +; CHECK-NEXT: vpaddd %xmm7, %xmm5, %xmm7 +; CHECK-NEXT: vmovdqa 168(%ebp), %xmm4 ; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm4 -; CHECK-NEXT: vpaddd %xmm4, %xmm7, %xmm4 +; CHECK-NEXT: vpaddd %xmm4, %xmm5, %xmm4 +; CHECK-NEXT: vmovdqa 184(%ebp), %xmm3 +; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3 +; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3 ; CHECK-NEXT: movl 8(%ebp), %eax -; CHECK-NEXT: vmovdqa %xmm4, 80(%eax) -; CHECK-NEXT: vmovdqa %xmm5, 64(%eax) -; CHECK-NEXT: vmovdqa %xmm3, 48(%eax) -; CHECK-NEXT: vmovdqa %xmm6, 32(%eax) -; CHECK-NEXT: vmovdqa %xmm1, 16(%eax) -; CHECK-NEXT: vmovdqa %xmm0, (%eax) -; CHECK-NEXT: vmovdqa %xmm2, 240(%eax) +; 
CHECK-NEXT: vmovdqa %xmm3, 240(%eax) +; CHECK-NEXT: vmovdqa %xmm4, 224(%eax) +; CHECK-NEXT: vmovdqa %xmm7, 208(%eax) +; CHECK-NEXT: vmovdqa %xmm6, 192(%eax) +; CHECK-NEXT: vmovdqa %xmm0, 176(%eax) +; CHECK-NEXT: vmovdqa %xmm1, 160(%eax) +; CHECK-NEXT: vmovdqa %xmm2, 144(%eax) ; CHECK-NEXT: vmovaps (%esp), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 224(%eax) +; CHECK-NEXT: vmovaps %xmm0, 128(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 208(%eax) +; CHECK-NEXT: vmovaps %xmm0, 112(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 192(%eax) +; CHECK-NEXT: vmovaps %xmm0, 96(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 176(%eax) +; CHECK-NEXT: vmovaps %xmm0, 80(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 160(%eax) +; CHECK-NEXT: vmovaps %xmm0, 64(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 144(%eax) +; CHECK-NEXT: vmovaps %xmm0, 48(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 128(%eax) +; CHECK-NEXT: vmovaps %xmm0, 32(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 112(%eax) +; CHECK-NEXT: vmovaps %xmm0, 16(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 96(%eax) +; CHECK-NEXT: vmovaps %xmm0, (%eax) ; CHECK-NEXT: movl %ebp, %esp ; CHECK-NEXT: popl %ebp ; CHECK-NEXT: vzeroupper diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll index 13f7d68..33d80f6 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll @@ -652,7 +652,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE2-NEXT: paddb %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: psllw $1, %xmm2 +; SSE2-NEXT: paddw %xmm2, %xmm2 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 @@ -678,7 +678,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE41-NEXT: paddb %xmm3, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psllw $1, %xmm2 +; SSE41-NEXT: paddw %xmm2, %xmm2 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; SSE41-NEXT: psrlw $2, %xmm1 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 @@ -701,7 +701,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpsllw $1, %xmm1, %xmm2 +; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 @@ -720,7 +720,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; AVX2NOBW-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX2NOBW-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX2NOBW-NEXT: vpsllw $1, %xmm1, %xmm2 +; AVX2NOBW-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX2NOBW-NEXT: vpand 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX2NOBW-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 @@ -739,7 +739,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; AVX512BW-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX512BW-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX512BW-NEXT: vpsllw $1, %xmm1, %xmm2 +; AVX512BW-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX512BW-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll index 1a5c373..e43108f 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll @@ -590,7 +590,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] ; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5 ; AVX1-NEXT: vpaddb %xmm3, %xmm5, %xmm3 -; AVX1-NEXT: vpsllw $1, %xmm3, %xmm5 +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm5 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248] ; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5 ; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3 @@ -609,7 +609,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3 ; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 ; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpsllw $1, %xmm2, %xmm3 +; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 ; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 ; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2 ; AVX1-NEXT: vpand %xmm2, %xmm8, %xmm2 @@ -633,7 +633,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX2NOBW-NEXT: vpsrlw $1, %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm2, %ymm1 -; AVX2NOBW-NEXT: vpsllw $1, %ymm1, %ymm2 +; AVX2NOBW-NEXT: vpaddw %ymm1, %ymm1, %ymm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpsrlw $2, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 @@ -651,7 +651,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX512BW-NEXT: vpsrlw $1, %ymm2, %ymm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX512BW-NEXT: vpaddb %ymm1, %ymm2, %ymm1 -; AVX512BW-NEXT: vpsllw $1, %ymm1, %ymm2 +; AVX512BW-NEXT: vpaddw %ymm1, %ymm1, %ymm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX512BW-NEXT: vpsrlw $2, %ymm1, %ymm1 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll index 9c56894..bf98bcc 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll @@ -485,7 +485,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpbroadcastb {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] ; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5 ; AVX512F-NEXT: vpaddb %ymm3, %ymm5, %ymm3 -; AVX512F-NEXT: vpsllw $1, %ymm3, %ymm5 +; AVX512F-NEXT: vpaddw %ymm3, %ymm3, %ymm5 ; AVX512F-NEXT: vpbroadcastb {{.*#+}} ymm7 = 
[248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248] ; AVX512F-NEXT: vpand %ymm7, %ymm5, %ymm5 ; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3 @@ -504,7 +504,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3 ; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3 ; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2 -; AVX512F-NEXT: vpsllw $1, %ymm2, %ymm3 +; AVX512F-NEXT: vpaddw %ymm2, %ymm2, %ymm3 ; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3 ; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2 ; AVX512F-NEXT: vpand %ymm2, %ymm8, %ymm2 @@ -528,7 +528,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512BW-NEXT: vpsrlw $1, %zmm2, %zmm2 ; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2 ; AVX512BW-NEXT: vpaddb %zmm1, %zmm2, %zmm1 -; AVX512BW-NEXT: vpsllw $1, %zmm1, %zmm2 +; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm2 ; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2 ; AVX512BW-NEXT: vpsrlw $2, %zmm1, %zmm1 ; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1 diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll index 13b21a7..6e1bf25 100644 --- a/llvm/test/CodeGen/X86/vector-mul.ll +++ b/llvm/test/CodeGen/X86/vector-mul.ll @@ -821,10 +821,10 @@ define <16 x i16> @madd_v16i16_3(<16 x i16> %a0, <16 x i16> %a1) nounwind { ; X86-SSE-NEXT: andl $-16, %esp ; X86-SSE-NEXT: subl $16, %esp ; X86-SSE-NEXT: movdqa %xmm1, %xmm3 -; X86-SSE-NEXT: paddw %xmm1, %xmm3 +; X86-SSE-NEXT: paddw %xmm3, %xmm3 ; X86-SSE-NEXT: paddw %xmm3, %xmm1 ; X86-SSE-NEXT: movdqa %xmm0, %xmm3 -; X86-SSE-NEXT: paddw %xmm0, %xmm3 +; X86-SSE-NEXT: paddw %xmm3, %xmm3 ; X86-SSE-NEXT: paddw %xmm2, %xmm0 ; X86-SSE-NEXT: paddw %xmm3, %xmm0 ; X86-SSE-NEXT: paddw 8(%ebp), %xmm1 @@ -835,9 +835,9 @@ define <16 x i16> @madd_v16i16_3(<16 x i16> %a0, <16 x i16> %a1) nounwind { ; X64-SSE-LABEL: madd_v16i16_3: ; X64-SSE: # %bb.0: ; X64-SSE-NEXT: movdqa %xmm1, %xmm4 -; X64-SSE-NEXT: paddw %xmm1, %xmm4 +; X64-SSE-NEXT: paddw %xmm4, %xmm4 ; X64-SSE-NEXT: movdqa %xmm0, %xmm5 -; X64-SSE-NEXT: paddw %xmm0, %xmm5 +; X64-SSE-NEXT: paddw %xmm5, %xmm5 ; X64-SSE-NEXT: paddw %xmm2, %xmm0 ; X64-SSE-NEXT: paddw %xmm5, %xmm0 ; X64-SSE-NEXT: paddw %xmm3, %xmm1 diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll index 227e000..ab1feba 100644 --- a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll +++ b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll @@ -907,7 +907,7 @@ define i1 @mask_v8i32_2(<8 x i32> %a0) { ; SSE2-LABEL: mask_v8i32_2: ; SSE2: # %bb.0: ; SSE2-NEXT: por %xmm1, %xmm0 -; SSE2-NEXT: pslld $1, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm0 ; SSE2-NEXT: movmskps %xmm0, %eax ; SSE2-NEXT: testl %eax, %eax ; SSE2-NEXT: sete %al diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll index 2b1cf5b..99dac74 100644 --- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll @@ -927,7 +927,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind { ; SSE2-LABEL: constant_shift_v2i64: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddq %xmm0, %xmm1 +; SSE2-NEXT: paddq %xmm1, %xmm1 ; SSE2-NEXT: psllq $7, %xmm0 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE2-NEXT: retq @@ -975,7 +975,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind { ; X86-SSE-LABEL: 
constant_shift_v2i64: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: movdqa %xmm0, %xmm1 -; X86-SSE-NEXT: paddq %xmm0, %xmm1 +; X86-SSE-NEXT: paddq %xmm1, %xmm1 ; X86-SSE-NEXT: psllq $7, %xmm0 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; X86-SSE-NEXT: retl diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll index bec3349..3590c4d 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll @@ -62,15 +62,12 @@ define <4 x i32> @combine_blend_of_permutes_v4i32(<2 x i64> %a0, <2 x i64> %a1) define <4 x float> @freeze_insertps(<4 x float> %a0, <4 x float> %a1) { ; SSE-LABEL: freeze_insertps: ; SSE: # %bb.0: -; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] -; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm0[1],xmm1[1,2,3] ; SSE-NEXT: movaps %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: freeze_insertps: ; AVX: # %bb.0: -; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] -; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],xmm1[1,2,3] +; AVX-NEXT: vmovaps %xmm1, %xmm0 ; AVX-NEXT: retq %s0 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 16) %f0 = freeze <4 x float> %s0 diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll index 5b61de5..ee9d8a5 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll @@ -3550,14 +3550,14 @@ define <8 x i16> @PR141475(i32 %in) { ; SSE-LABEL: PR141475: ; SSE: # %bb.0: ; SSE-NEXT: movd %edi, %xmm0 -; SSE-NEXT: pslld $1, %xmm0 +; SSE-NEXT: paddd %xmm0, %xmm0 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] ; SSE-NEXT: retq ; ; AVX-LABEL: PR141475: ; AVX: # %bb.0: ; AVX-NEXT: vmovd %edi, %xmm0 -; AVX-NEXT: vpslld $1, %xmm0, %xmm0 +; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] ; AVX-NEXT: retq %mul = shl i32 %in, 1 diff --git a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll index 54dc107..3b93734 100644 --- a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll +++ b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll @@ -1438,26 +1438,26 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_10(<8 x i16> %a0) { define <8 x i16> @test_128_i16_x_8_127_mask_shl_1(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddw %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddw %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <8 
x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127> %t1 = shl <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> @@ -1656,26 +1656,26 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_6(<8 x i16> %a0) { define <8 x i16> @test_128_i16_x_8_65024_mask_shl_1(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddw %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddw %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024> %t1 = shl <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> @@ -2373,40 +2373,40 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_18(<4 x i32> %a0) { define <4 x i32> @test_128_i32_x_4_32767_mask_shl_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddd %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; ; X86-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767] -; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65534,65534,65534,65534] +; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddd %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X64-AVX1: # %bb.0: -; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: retq ; ; X64-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X64-AVX2: # %bb.0: -; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767] -; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65534,65534,65534,65534] +; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: retq %t0 = and <4 x i32> %a0, <i32 
32767, i32 32767, i32 32767, i32 32767> %t1 = shl <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1> @@ -2675,40 +2675,40 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_10(<4 x i32> %a0) { define <4 x i32> @test_128_i32_x_4_4294836224_mask_shl_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddd %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; ; X86-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224] -; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294705152,4294705152,4294705152,4294705152] +; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddd %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X64-AVX1: # %bb.0: -; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: retq ; ; X64-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X64-AVX2: # %bb.0: -; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224] -; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294705152,4294705152,4294705152,4294705152] +; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: retq %t0 = and <4 x i32> %a0, <i32 4294836224, i32 4294836224, i32 4294836224, i32 4294836224> %t1 = shl <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1> @@ -3325,26 +3325,26 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_34(<2 x i64> % define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_1(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddq %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddq %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = 
and <2 x i64> %a0, <i64 2147483647, i64 2147483647> %t1 = shl <2 x i64> %t0, <i64 1, i64 1> @@ -3543,26 +3543,26 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_18(<2 x i64> %a0) { define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_shl_1(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddq %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddq %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024> %t1 = shl <2 x i64> %t0, <i64 1, i64 1>