Diffstat (limited to 'llvm/test')
125 files changed, 14412 insertions, 4820 deletions
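The pattern that repeats across the affected tests is the addition of GlobalISel RUN lines next to the existing SelectionDAG ones, with the shared check prefixes split into -SD and -GI variants so the two selectors can diverge where their output differs. As an orientation aid, the RUN-line pair below is taken verbatim from the fp16-v4-instructions.ll hunk later in this diff (the CHECK-CVT prefix names are specific to that file):

; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-SD
; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-GI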
diff --git a/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll b/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll
index 1c216e7..e371748 100644
--- a/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll
@@ -11,6 +11,16 @@ entry:
   ret <4 x i16> %1
 }
 
+define <4 x half> @v4bf16_to_v4f16(float, <4 x bfloat> %a) nounwind {
+; CHECK-LABEL: v4bf16_to_v4f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmov d0, d1
+; CHECK-NEXT:    ret
+entry:
+  %1 = bitcast <4 x bfloat> %a to <4 x half>
+  ret <4 x half> %1
+}
+
 define <2 x i32> @v4bf16_to_v2i32(float, <4 x bfloat> %a) nounwind {
 ; CHECK-LABEL: v4bf16_to_v2i32:
 ; CHECK:       // %bb.0: // %entry
@@ -82,6 +92,16 @@ entry:
   ret <4 x bfloat> %1
 }
 
+define <4 x bfloat> @v4f16_to_v4bf16(float, <4 x half> %a) nounwind {
+; CHECK-LABEL: v4f16_to_v4bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmov d0, d1
+; CHECK-NEXT:    ret
+entry:
+  %1 = bitcast <4 x half> %a to <4 x bfloat>
+  ret <4 x bfloat> %1
+}
+
 define <4 x bfloat> @v2i32_to_v4bf16(float, <2 x i32> %a) nounwind {
 ; CHECK-LABEL: v2i32_to_v4bf16:
 ; CHECK:       // %bb.0: // %entry
@@ -152,6 +172,16 @@ entry:
   ret <8 x i16> %1
 }
 
+define <8 x half> @v8bf16_to_v8f16(float, <8 x bfloat> %a) nounwind {
+; CHECK-LABEL: v8bf16_to_v8f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
+entry:
+  %1 = bitcast <8 x bfloat> %a to <8 x half>
+  ret <8 x half> %1
+}
+
 define <4 x i32> @v8bf16_to_v4i32(float, <8 x bfloat> %a) nounwind {
 ; CHECK-LABEL: v8bf16_to_v4i32:
 ; CHECK:       // %bb.0: // %entry
@@ -202,6 +232,16 @@ entry:
   ret <8 x bfloat> %1
 }
 
+define <8 x bfloat> @v8f16_to_v8bf16(float, <8 x half> %a) nounwind {
+; CHECK-LABEL: v8f16_to_v8bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
+entry:
+  %1 = bitcast <8 x half> %a to <8 x bfloat>
+  ret <8 x bfloat> %1
+}
+
 define <8 x bfloat> @v4i32_to_v8bf16(float, <4 x i32> %a) nounwind {
 ; CHECK-LABEL: v4i32_to_v8bf16:
 ; CHECK:       // %bb.0: // %entry
diff --git a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
index 0960c4c..a56d5b1 100644
--- a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
+++ b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
@@ -78,9 +78,8 @@ B:
 define i32 @g_i8_sign_extend_inreg(i8 %in, i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: g_i8_sign_extend_inreg:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sxtb w8, w0
-; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w8, w1, w2, mi
+; CHECK-NEXT:    tst w0, #0x80
+; CHECK-NEXT:    csel w8, w1, w2, ne
 ; CHECK-NEXT:    add w0, w8, w0, uxtb
 ; CHECK-NEXT:    ret
 entry:
@@ -100,9 +99,8 @@ B:
 define i32 @g_i16_sign_extend_inreg(i16 %in, i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: g_i16_sign_extend_inreg:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sxth w8, w0
-; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w8, w1, w2, mi
+; CHECK-NEXT:    tst w0, #0x8000
+; CHECK-NEXT:    csel w8, w1, w2, ne
 ; CHECK-NEXT:    add w0, w8, w0, uxth
 ; CHECK-NEXT:    ret
 entry:
@@ -167,10 +165,8 @@ B:
 define i64 @g_i32_sign_extend_i64(i32 %in, i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: g_i32_sign_extend_i64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
-; CHECK-NEXT:    sxtw x8, w0
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    csel x8, x1, x2, mi
+; CHECK-NEXT:    tst w0, #0x80000000
+; CHECK-NEXT:    csel x8, x1, x2, ne
 ; CHECK-NEXT:    add x0, x8, w0, uxtw
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll b/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll
index 8bc3497..6233ce7 100644
--- a/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll
@@ -1,20 +1,30 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK-COMMON,CHECK-CVT
-; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK-COMMON,CHECK-FP16
+; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-SD
+; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-SD
+; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-GI
+; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-GI
 
 define <4 x half> @add_h(<4 x half> %a, <4 x half> %b) {
-; CHECK-CVT-LABEL: add_h:
-; CHECK-CVT:       // %bb.0: // %entry
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fadd v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: add_h:
+; CHECK-CVT-SD:       // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fadd v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: add_h:
 ; CHECK-FP16:       // %bb.0: // %entry
 ; CHECK-FP16-NEXT:    fadd v0.4h, v0.4h, v1.4h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: add_h:
+; CHECK-CVT-GI:       // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fadd v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
 entry:
   %0 = fadd <4 x half> %a, %b
@@ -22,28 +32,54 @@ entry:
 }
 
 define <4 x half> @build_h4(<4 x half> %a) {
-; CHECK-COMMON-LABEL: build_h4:
-; CHECK-COMMON:       // %bb.0: // %entry
-; CHECK-COMMON-NEXT:    mov w8, #15565 // =0x3ccd
-; CHECK-COMMON-NEXT:    dup v0.4h, w8
-; CHECK-COMMON-NEXT:    ret
+; CHECK-CVT-SD-LABEL: build_h4:
+; CHECK-CVT-SD:       // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT:    mov w8, #15565 // =0x3ccd
+; CHECK-CVT-SD-NEXT:    dup v0.4h, w8
+; CHECK-CVT-SD-NEXT:    ret
+;
+; CHECK-FP16-SD-LABEL: build_h4:
+; CHECK-FP16-SD:       // %bb.0: // %entry
+; CHECK-FP16-SD-NEXT:    mov w8, #15565 // =0x3ccd
+; CHECK-FP16-SD-NEXT:    dup v0.4h, w8
+; CHECK-FP16-SD-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: build_h4:
+; CHECK-CVT-GI:       // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT:    adrp x8, .LCPI1_0
+; CHECK-CVT-GI-NEXT:    ldr d0, [x8, :lo12:.LCPI1_0]
+; CHECK-CVT-GI-NEXT:    ret
+;
+; CHECK-FP16-GI-LABEL: build_h4:
+; CHECK-FP16-GI:       // %bb.0: // %entry
+; CHECK-FP16-GI-NEXT:    adrp x8, .LCPI1_0
+; CHECK-FP16-GI-NEXT:    ldr d0, [x8, :lo12:.LCPI1_0]
+; CHECK-FP16-GI-NEXT:    ret
 entry:
   ret <4 x half> <half 0xH3CCD, half 0xH3CCD, half 0xH3CCD, half 0xH3CCD>
 }
 
 define <4 x half> @sub_h(<4 x half> %a, <4 x half> %b) {
-; CHECK-CVT-LABEL: sub_h:
-; CHECK-CVT:       // %bb.0: // %entry
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fsub v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: sub_h:
+; CHECK-CVT-SD:       // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fsub v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: sub_h:
 ; CHECK-FP16:       // %bb.0: // %entry
 ; CHECK-FP16-NEXT:    fsub v0.4h, v0.4h, v1.4h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: sub_h:
+; CHECK-CVT-GI:       // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fsub v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
 entry:
   %0 = fsub <4 x half> %a, %b
@@ -51,18 +87,26 @@ entry:
 }
 
 define <4 x half> @mul_h(<4 x half> %a, <4 x half> %b) {
-; CHECK-CVT-LABEL: mul_h:
-; CHECK-CVT:       // %bb.0: // %entry
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fmul v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: mul_h:
+; CHECK-CVT-SD:       // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fmul v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: mul_h:
 ; CHECK-FP16:       // %bb.0: // %entry
 ; CHECK-FP16-NEXT:    fmul v0.4h, v0.4h, v1.4h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: mul_h:
+; CHECK-CVT-GI:       // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fmul v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
 entry:
   %0 = fmul <4 x half> %a, %b
@@ -70,18 +114,26 @@ entry:
 }
 
 define <4 x half> @div_h(<4 x half> %a, <4 x half> %b) {
-; CHECK-CVT-LABEL: div_h:
-; CHECK-CVT:       // %bb.0: // %entry
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fdiv v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: div_h:
+; CHECK-CVT-SD:       // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fdiv v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: div_h:
 ; CHECK-FP16:       // %bb.0: // %entry
 ; CHECK-FP16-NEXT:    fdiv v0.4h, v0.4h, v1.4h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: div_h:
+; CHECK-CVT-GI:       // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fdiv v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
 entry:
   %0 = fdiv <4 x half> %a, %b
@@ -89,92 +141,162 @@ entry:
 }
 
 define <4 x half> @load_h(ptr %a) {
-; CHECK-COMMON-LABEL: load_h:
-; CHECK-COMMON:       // %bb.0: // %entry
-; CHECK-COMMON-NEXT:    ldr d0, [x0]
-; CHECK-COMMON-NEXT:    ret
+; CHECK-LABEL: load_h:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ret
 entry:
   %0 = load <4 x half>, ptr %a, align 4
   ret <4 x half> %0
 }
 
 define void @store_h(ptr %a, <4 x half> %b) {
-; CHECK-COMMON-LABEL: store_h:
-; CHECK-COMMON:       // %bb.0: // %entry
-; CHECK-COMMON-NEXT:    str d0, [x0]
-; CHECK-COMMON-NEXT:    ret
+; CHECK-LABEL: store_h:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
 entry:
   store <4 x half> %b, ptr %a, align 4
   ret void
 }
 
 define <4 x half> @s_to_h(<4 x float> %a) {
-; CHECK-COMMON-LABEL: s_to_h:
-; CHECK-COMMON:       // %bb.0:
-; CHECK-COMMON-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-COMMON-NEXT:    ret
+; CHECK-LABEL: s_to_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
   %1 = fptrunc <4 x float> %a to <4 x half>
   ret <4 x half> %1
 }
 
 define <4 x half> @d_to_h(<4 x double> %a) {
-; CHECK-COMMON-LABEL: d_to_h:
-; CHECK-COMMON:       // %bb.0:
-; CHECK-COMMON-NEXT:    fcvtxn v0.2s, v0.2d
-; CHECK-COMMON-NEXT:    fcvtxn2 v0.4s, v1.2d
-; CHECK-COMMON-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-COMMON-NEXT:    ret
+; CHECK-CVT-SD-LABEL: d_to_h:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtxn v0.2s, v0.2d
+; CHECK-CVT-SD-NEXT:    fcvtxn2 v0.4s, v1.2d
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
+;
+; CHECK-FP16-SD-LABEL: d_to_h:
+; CHECK-FP16-SD:       // %bb.0:
+; CHECK-FP16-SD-NEXT:    fcvtxn v0.2s, v0.2d
+; CHECK-FP16-SD-NEXT:    fcvtxn2 v0.4s, v1.2d
+; CHECK-FP16-SD-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-FP16-SD-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: d_to_h:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    mov d2, v0.d[1]
+; CHECK-CVT-GI-NEXT:    fcvt h0, d0
+; CHECK-CVT-GI-NEXT:    mov d3, v1.d[1]
+; CHECK-CVT-GI-NEXT:    fcvt h1, d1
+; CHECK-CVT-GI-NEXT:    fcvt h2, d2
+; CHECK-CVT-GI-NEXT:    mov v0.h[1], v2.h[0]
+; CHECK-CVT-GI-NEXT:    fcvt h2, d3
+; CHECK-CVT-GI-NEXT:    mov v0.h[2], v1.h[0]
+; CHECK-CVT-GI-NEXT:    mov v0.h[3], v2.h[0]
+; CHECK-CVT-GI-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-CVT-GI-NEXT:    ret
+;
+; CHECK-FP16-GI-LABEL: d_to_h:
+; CHECK-FP16-GI:       // %bb.0:
+; CHECK-FP16-GI-NEXT:    mov d2, v0.d[1]
+; CHECK-FP16-GI-NEXT:    fcvt h0, d0
+; CHECK-FP16-GI-NEXT:    mov d3, v1.d[1]
+; CHECK-FP16-GI-NEXT:    fcvt h1, d1
+; CHECK-FP16-GI-NEXT:    fcvt h2, d2
+; CHECK-FP16-GI-NEXT:    mov v0.h[1], v2.h[0]
+; CHECK-FP16-GI-NEXT:    fcvt h2, d3
+; CHECK-FP16-GI-NEXT:    mov v0.h[2], v1.h[0]
+; CHECK-FP16-GI-NEXT:    mov v0.h[3], v2.h[0]
+; CHECK-FP16-GI-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-FP16-GI-NEXT:    ret
   %1 = fptrunc <4 x double> %a to <4 x half>
   ret <4 x half> %1
 }
 
 define <4 x float> @h_to_s(<4 x half> %a) {
-; CHECK-COMMON-LABEL: h_to_s:
-; CHECK-COMMON:       // %bb.0:
-; CHECK-COMMON-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-COMMON-NEXT:    ret
+; CHECK-LABEL: h_to_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NEXT:    ret
   %1 = fpext <4 x half> %a to <4 x float>
   ret <4 x float> %1
 }
 
 define <4 x double> @h_to_d(<4 x half> %a) {
-; CHECK-COMMON-LABEL: h_to_d:
-; CHECK-COMMON:       // %bb.0:
-; CHECK-COMMON-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-COMMON-NEXT:    fcvtl2 v1.2d, v0.4s
-; CHECK-COMMON-NEXT:    fcvtl v0.2d, v0.2s
-; CHECK-COMMON-NEXT:    ret
+; CHECK-CVT-SD-LABEL: h_to_d:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcvtl2 v1.2d, v0.4s
+; CHECK-CVT-SD-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-CVT-SD-NEXT:    ret
+;
+; CHECK-FP16-SD-LABEL: h_to_d:
+; CHECK-FP16-SD:       // %bb.0:
+; CHECK-FP16-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-FP16-SD-NEXT:    fcvtl2 v1.2d, v0.4s
+; CHECK-FP16-SD-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-FP16-SD-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: h_to_d:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-CVT-GI-NEXT:    mov h1, v0.h[1]
+; CHECK-CVT-GI-NEXT:    mov h2, v0.h[2]
+; CHECK-CVT-GI-NEXT:    mov h3, v0.h[3]
+; CHECK-CVT-GI-NEXT:    fcvt d0, h0
+; CHECK-CVT-GI-NEXT:    fcvt d4, h1
+; CHECK-CVT-GI-NEXT:    fcvt d1, h2
+; CHECK-CVT-GI-NEXT:    fcvt d2, h3
+; CHECK-CVT-GI-NEXT:    mov v0.d[1], v4.d[0]
+; CHECK-CVT-GI-NEXT:    mov v1.d[1], v2.d[0]
+; CHECK-CVT-GI-NEXT:    ret
+;
+; CHECK-FP16-GI-LABEL: h_to_d:
+; CHECK-FP16-GI:       // %bb.0:
+; CHECK-FP16-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-FP16-GI-NEXT:    mov h1, v0.h[1]
+; CHECK-FP16-GI-NEXT:    mov h2, v0.h[2]
+; CHECK-FP16-GI-NEXT:    mov h3, v0.h[3]
+; CHECK-FP16-GI-NEXT:    fcvt d0, h0
+; CHECK-FP16-GI-NEXT:    fcvt d4, h1
+; CHECK-FP16-GI-NEXT:    fcvt d1, h2
+; CHECK-FP16-GI-NEXT:    fcvt d2, h3
+; CHECK-FP16-GI-NEXT:    mov v0.d[1], v4.d[0]
+; CHECK-FP16-GI-NEXT:    mov v1.d[1], v2.d[0]
+; CHECK-FP16-GI-NEXT:    ret
   %1 = fpext <4 x half> %a to <4 x double>
   ret <4 x double> %1
 }
 
 define <4 x half> @bitcast_i_to_h(float, <4 x i16> %a) {
-; CHECK-COMMON-LABEL: bitcast_i_to_h:
-; CHECK-COMMON:       // %bb.0:
-; CHECK-COMMON-NEXT:    fmov d0, d1
-; CHECK-COMMON-NEXT:    ret
+; CHECK-LABEL: bitcast_i_to_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, d1
+; CHECK-NEXT:    ret
   %2 = bitcast <4 x i16> %a to <4 x half>
   ret <4 x half> %2
 }
 
 define <4 x i16> @bitcast_h_to_i(float, <4 x half> %a) {
-; CHECK-COMMON-LABEL: bitcast_h_to_i:
-; CHECK-COMMON:       // %bb.0:
-; CHECK-COMMON-NEXT:    fmov d0, d1
-; CHECK-COMMON-NEXT:    ret
+; CHECK-LABEL: bitcast_h_to_i:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, d1
+; CHECK-NEXT:    ret
   %2 = bitcast <4 x half> %a to <4 x i16>
   ret <4 x i16> %2
 }
 
 define <4 x half> @sitofp_i8(<4 x i8> %a) #0 {
-; CHECK-CVT-LABEL: sitofp_i8:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    shl v0.4h, v0.4h, #8
-; CHECK-CVT-NEXT:    sshr v0.4h, v0.4h, #8
-; CHECK-CVT-NEXT:    sshll v0.4s, v0.4h, #0
-; CHECK-CVT-NEXT:    scvtf v0.4s, v0.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: sitofp_i8:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    shl v0.4h, v0.4h, #8
+; CHECK-CVT-SD-NEXT:    sshr v0.4h, v0.4h, #8
+; CHECK-CVT-SD-NEXT:    sshll v0.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: sitofp_i8:
 ; CHECK-FP16:       // %bb.0:
@@ -182,6 +304,15 @@ define <4 x half> @sitofp_i8(<4 x i8> %a) #0 {
 ; CHECK-FP16-NEXT:    sshr v0.4h, v0.4h, #8
 ; CHECK-FP16-NEXT:    scvtf v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: sitofp_i8:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT:    shl v0.4s, v0.4s, #24
+; CHECK-CVT-GI-NEXT:    sshr v0.4s, v0.4s, #24
+; CHECK-CVT-GI-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = sitofp <4 x i8> %a to <4 x half>
   ret <4 x half> %1
 }
@@ -204,43 +335,59 @@ define <4 x half> @sitofp_i16(<4 x i16> %a) #0 {
 
 define <4 x half> @sitofp_i32(<4 x i32> %a) #0 {
-; CHECK-COMMON-LABEL: sitofp_i32:
-; CHECK-COMMON:       // %bb.0:
-; CHECK-COMMON-NEXT:    scvtf v0.4s, v0.4s
-; CHECK-COMMON-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-COMMON-NEXT:    ret
+; CHECK-LABEL: sitofp_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
   %1 = sitofp <4 x i32> %a to <4 x half>
   ret <4 x half> %1
 }
 
 define <4 x half> @sitofp_i64(<4 x i64> %a) #0 {
-; CHECK-COMMON-LABEL: sitofp_i64:
-; CHECK-COMMON:       // %bb.0:
-; CHECK-COMMON-NEXT:    scvtf v0.2d, v0.2d
-; CHECK-COMMON-NEXT:    scvtf v1.2d, v1.2d
-; CHECK-COMMON-NEXT:    fcvtn v0.2s, v0.2d
-; CHECK-COMMON-NEXT:    fcvtn2 v0.4s, v1.2d
-; CHECK-COMMON-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-COMMON-NEXT:    ret
+; CHECK-LABEL: sitofp_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-NEXT:    scvtf v1.2d, v1.2d
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
   %1 = sitofp <4 x i64> %a to <4 x half>
   ret <4 x half> %1
 }
 
 define <4 x half> @uitofp_i8(<4 x i8> %a) #0 {
-; CHECK-CVT-LABEL: uitofp_i8:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    bic v0.4h, #255, lsl #8
-; CHECK-CVT-NEXT:    ushll v0.4s, v0.4h, #0
-; CHECK-CVT-NEXT:    ucvtf v0.4s, v0.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: uitofp_i8:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-CVT-SD-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
-; CHECK-FP16-LABEL: uitofp_i8:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    bic v0.4h, #255, lsl #8
-; CHECK-FP16-NEXT:    ucvtf v0.4h, v0.4h
-; CHECK-FP16-NEXT:    ret
+; CHECK-FP16-SD-LABEL: uitofp_i8:
+; CHECK-FP16-SD:       // %bb.0:
+; CHECK-FP16-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-FP16-SD-NEXT:    ucvtf v0.4h, v0.4h
+; CHECK-FP16-SD-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: uitofp_i8:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    movi v1.2d, #0x0000ff000000ff
+; CHECK-CVT-GI-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-CVT-GI-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
+;
+; CHECK-FP16-GI-LABEL: uitofp_i8:
+; CHECK-FP16-GI:       // %bb.0:
+; CHECK-FP16-GI-NEXT:    movi d1, #0xff00ff00ff00ff
+; CHECK-FP16-GI-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-FP16-GI-NEXT:    ucvtf v0.4h, v0.4h
+; CHECK-FP16-GI-NEXT:    ret
   %1 = uitofp <4 x i8> %a to <4 x half>
   ret <4 x half> %1
 }
@@ -264,35 +411,35 @@ define <4 x half> @uitofp_i16(<4 x i16> %a) #0 {
 
 define <4 x half> @uitofp_i32(<4 x i32> %a) #0 {
-; CHECK-COMMON-LABEL: uitofp_i32:
-; CHECK-COMMON:       // %bb.0:
-; CHECK-COMMON-NEXT:    ucvtf v0.4s, v0.4s
-; CHECK-COMMON-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-COMMON-NEXT:    ret
+; CHECK-LABEL: uitofp_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
   %1 = uitofp <4 x i32> %a to <4 x half>
   ret <4 x half> %1
 }
 
 define <4 x half> @uitofp_i64(<4 x i64> %a) #0 {
-; CHECK-COMMON-LABEL: uitofp_i64:
-; CHECK-COMMON:       // %bb.0:
-; CHECK-COMMON-NEXT:    ucvtf v0.2d, v0.2d
-; CHECK-COMMON-NEXT:    ucvtf v1.2d, v1.2d
-; CHECK-COMMON-NEXT:    fcvtn v0.2s, v0.2d
-; CHECK-COMMON-NEXT:    fcvtn2 v0.4s, v1.2d
-; CHECK-COMMON-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-COMMON-NEXT:    ret
+; CHECK-LABEL: uitofp_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    ucvtf v1.2d, v1.2d
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
   %1 = uitofp <4 x i64> %a to <4 x half>
   ret <4 x half> %1
 }
 
 define void @test_insert_at_zero(half %a, ptr %b) #0 {
-; CHECK-COMMON-LABEL: test_insert_at_zero:
-; CHECK-COMMON:       // %bb.0:
-; CHECK-COMMON-NEXT:    // kill: def $h0 killed $h0 def $d0
-; CHECK-COMMON-NEXT:    str d0, [x0]
-; CHECK-COMMON-NEXT:    ret
+; CHECK-LABEL: test_insert_at_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
   %1 = insertelement <4 x half> undef, half %a, i64 0
   store <4 x half> %1, ptr %b, align 4
   ret void
 }
@@ -331,17 +478,29 @@ define <4 x i16> @fptosi_i16(<4 x half> %a) #0 {
 }
 
 define <4 x i8> @fptoui_i8(<4 x half> %a) #0 {
-; CHECK-CVT-LABEL: fptoui_i8:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fcvtzs v0.4s, v0.4s
-; CHECK-CVT-NEXT:    xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: fptoui_i8:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
-; CHECK-FP16-LABEL: fptoui_i8:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
-; CHECK-FP16-NEXT:    ret
+; CHECK-FP16-SD-LABEL: fptoui_i8:
+; CHECK-FP16-SD:       // %bb.0:
+; CHECK-FP16-SD-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-SD-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: fptoui_i8:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
+;
+; CHECK-FP16-GI-LABEL: fptoui_i8:
+; CHECK-FP16-GI:       // %bb.0:
+; CHECK-FP16-GI-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-GI-NEXT:    ret
 ; NOTE: fcvtzs selected here because the xtn shaves the sign bit
   %1 = fptoui<4 x half> %a to <4 x i8>
   ret <4 x i8> %1
@@ -364,36 +523,45 @@ define <4 x i16> @fptoui_i16(<4 x half> %a) #0 {
 }
 
 define <4 x i1> @test_fcmp_une(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_une:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fcmeq v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    mvn v0.16b, v0.16b
-; CHECK-CVT-NEXT:    xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: test_fcmp_une:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    mvn v0.16b, v0.16b
+; CHECK-CVT-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_fcmp_une:
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    fcmeq v0.4h, v0.4h, v1.4h
 ; CHECK-FP16-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_une:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = fcmp une <4 x half> %a, %b
   ret <4 x i1> %1
 }
 
 define <4 x i1> @test_fcmp_ueq(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ueq:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fcmgt v2.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-CVT-NEXT:    xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    mvn v0.8b, v0.8b
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ueq:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcmgt v2.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    mvn v0.8b, v0.8b
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_fcmp_ueq:
 ; CHECK-FP16:       // %bb.0:
@@ -402,102 +570,149 @@ define <4 x i1> @test_fcmp_ueq(<4 x half> %a, <4 x half> %b) #0 {
 ; CHECK-FP16-NEXT:    orr v0.8b, v0.8b, v2.8b
 ; CHECK-FP16-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ueq:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcmgt v2.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-GI-NEXT:    mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = fcmp ueq <4 x half> %a, %b
   ret <4 x i1> %1
 }
 
 define <4 x i1> @test_fcmp_ugt(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ugt:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcmge v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT:    xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    mvn v0.8b, v0.8b
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ugt:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcmge v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    mvn v0.8b, v0.8b
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_fcmp_ugt:
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    fcmge v0.4h, v1.4h, v0.4h
 ; CHECK-FP16-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ugt:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcmge v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT:    mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = fcmp ugt <4 x half> %a, %b
   ret <4 x i1> %1
 }
 
 define <4 x i1> @test_fcmp_uge(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_uge:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT:    xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    mvn v0.8b, v0.8b
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: test_fcmp_uge:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    mvn v0.8b, v0.8b
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_fcmp_uge:
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    fcmgt v0.4h, v1.4h, v0.4h
 ; CHECK-FP16-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_uge:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT:    mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = fcmp uge <4 x half> %a, %b
   ret <4 x i1> %1
 }
 
 define <4 x i1> @test_fcmp_ult(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ult:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fcmge v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    mvn v0.8b, v0.8b
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ult:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcmge v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    mvn v0.8b, v0.8b
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_fcmp_ult:
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    fcmge v0.4h, v0.4h, v1.4h
 ; CHECK-FP16-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ult:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcmge v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = fcmp ult <4 x half> %a, %b
   ret <4 x i1> %1
 }
 
 define <4 x i1> @test_fcmp_ule(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ule:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fcmgt v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    mvn v0.8b, v0.8b
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ule:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcmgt v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    mvn v0.8b, v0.8b
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_fcmp_ule:
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    fcmgt v0.4h, v0.4h, v1.4h
 ; CHECK-FP16-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ule:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcmgt v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = fcmp ule <4 x half> %a, %b
   ret <4 x i1> %1
 }
 
 define <4 x i1> @test_fcmp_uno(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_uno:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fcmge v2.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-CVT-NEXT:    xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    mvn v0.8b, v0.8b
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: test_fcmp_uno:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcmge v2.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    mvn v0.8b, v0.8b
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_fcmp_uno:
 ; CHECK-FP16:       // %bb.0:
@@ -506,21 +721,32 @@ define <4 x i1> @test_fcmp_uno(<4 x half> %a, <4 x half> %b) #0 {
 ; CHECK-FP16-NEXT:    orr v0.8b, v0.8b, v2.8b
 ; CHECK-FP16-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_uno:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcmge v2.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-GI-NEXT:    mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = fcmp uno <4 x half> %a, %b
   ret <4 x i1> %1
 }
 
 define <4 x i1> @test_fcmp_one(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_one:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fcmgt v2.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-CVT-NEXT:    xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: test_fcmp_one:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcmgt v2.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_fcmp_one:
 ; CHECK-FP16:       // %bb.0:
@@ -528,60 +754,94 @@ define <4 x i1> @test_fcmp_one(<4 x half> %a, <4 x half> %b) #0 {
 ; CHECK-FP16-NEXT:    fcmgt v0.4h, v1.4h, v0.4h
 ; CHECK-FP16-NEXT:    orr v0.8b, v0.8b, v2.8b
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_one:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcmgt v2.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = fcmp one <4 x half> %a, %b
   ret <4 x i1> %1
 }
 
 define <4 x i1> @test_fcmp_oeq(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_oeq:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fcmeq v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: test_fcmp_oeq:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_fcmp_oeq:
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    fcmeq v0.4h, v0.4h, v1.4h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_oeq:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = fcmp oeq <4 x half> %a, %b
   ret <4 x i1> %1
 }
 
 define <4 x i1> @test_fcmp_ogt(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ogt:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fcmgt v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ogt:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcmgt v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_fcmp_ogt:
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    fcmgt v0.4h, v0.4h, v1.4h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ogt:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcmgt v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = fcmp ogt <4 x half> %a, %b
   ret <4 x i1> %1
 }
 
 define <4 x i1> @test_fcmp_oge(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_oge:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fcmge v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: test_fcmp_oge:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcmge v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_fcmp_oge:
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    fcmge v0.4h, v0.4h, v1.4h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_oge:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcmge v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = fcmp oge <4 x half> %a, %b
@@ -624,15 +884,15 @@ define <4 x i1> @test_fcmp_ole(<4 x half> %a, <4 x half> %b) #0 {
 }
 
 define <4 x i1> @test_fcmp_ord(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ord:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT:    fcmge v2.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-CVT-NEXT:    xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ord:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcmge v2.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_fcmp_ord:
 ; CHECK-FP16:       // %bb.0:
@@ -640,6 +900,16 @@ define <4 x i1> @test_fcmp_ord(<4 x half> %a, <4 x half> %b) #0 {
 ; CHECK-FP16-NEXT:    fcmgt v0.4h, v1.4h, v0.4h
 ; CHECK-FP16-NEXT:    orr v0.8b, v0.8b, v2.8b
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ord:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcmge v2.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = fcmp ord <4 x half> %a, %b
   ret <4 x i1> %1
diff --git a/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll b/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll
index fcb42a7..86763eb 100644
--- a/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll
@@ -1,24 +1,38 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT
-; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
+; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-SD
+; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-SD
+; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-GI
+; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-GI
 
 define <8 x half> @add_h(<8 x half> %a, <8 x half> %b) {
-; CHECK-CVT-LABEL: add_h:
-; CHECK-CVT:       // %bb.0: // %entry
-; CHECK-CVT-NEXT:    fcvtl v2.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v3.4s, v0.4h
-; CHECK-CVT-NEXT:    fcvtl2 v1.4s, v1.8h
-; CHECK-CVT-NEXT:    fcvtl2 v0.4s, v0.8h
-; CHECK-CVT-NEXT:    fadd v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT:    fadd v1.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v2.4s
-; CHECK-CVT-NEXT:    fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: add_h:
+; CHECK-CVT-SD:       // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT:    fcvtl v2.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v3.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-SD-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-SD-NEXT:    fadd v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT:    fadd v1.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v2.4s
+; CHECK-CVT-SD-NEXT:    fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: add_h:
 ; CHECK-FP16:       // %bb.0: // %entry
 ; CHECK-FP16-NEXT:    fadd v0.8h, v0.8h, v1.8h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: add_h:
+; CHECK-CVT-GI:       // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT:    fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT:    fadd v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT:    fadd v1.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v2.4s
+; CHECK-CVT-GI-NEXT:    fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-GI-NEXT:    ret
 entry:
   %0 = fadd <8 x half> %a, %b
   ret <8 x half> %0
@@ -26,22 +40,34 @@ entry:
 
 define <8 x half> @sub_h(<8 x half> %a, <8 x half> %b) {
-; CHECK-CVT-LABEL: sub_h:
-; CHECK-CVT:       // %bb.0: // %entry
-; CHECK-CVT-NEXT:    fcvtl v2.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v3.4s, v0.4h
-; CHECK-CVT-NEXT:    fcvtl2 v1.4s, v1.8h
-; CHECK-CVT-NEXT:    fcvtl2 v0.4s, v0.8h
-; CHECK-CVT-NEXT:    fsub v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT:    fsub v1.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v2.4s
-; CHECK-CVT-NEXT:    fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: sub_h:
+; CHECK-CVT-SD:       // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT:    fcvtl v2.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v3.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-SD-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-SD-NEXT:    fsub v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT:    fsub v1.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v2.4s
+; CHECK-CVT-SD-NEXT:    fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: sub_h:
 ; CHECK-FP16:       // %bb.0: // %entry
 ; CHECK-FP16-NEXT:    fsub v0.8h, v0.8h, v1.8h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: sub_h:
+; CHECK-CVT-GI:       // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT:    fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT:    fsub v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT:    fsub v1.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v2.4s
+; CHECK-CVT-GI-NEXT:    fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-GI-NEXT:    ret
 entry:
   %0 = fsub <8 x half> %a, %b
   ret <8 x half> %0
@@ -49,22 +75,34 @@ entry:
 
 define <8 x half> @mul_h(<8 x half> %a, <8 x half> %b) {
-; CHECK-CVT-LABEL: mul_h:
-; CHECK-CVT:       // %bb.0: // %entry
-; CHECK-CVT-NEXT:    fcvtl v2.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v3.4s, v0.4h
-; CHECK-CVT-NEXT:    fcvtl2 v1.4s, v1.8h
-; CHECK-CVT-NEXT:    fcvtl2 v0.4s, v0.8h
-; CHECK-CVT-NEXT:    fmul v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT:    fmul v1.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v2.4s
-; CHECK-CVT-NEXT:    fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: mul_h:
+; CHECK-CVT-SD:       // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT:    fcvtl v2.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v3.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-SD-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-SD-NEXT:    fmul v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT:    fmul v1.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v2.4s
+; CHECK-CVT-SD-NEXT:    fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: mul_h:
 ; CHECK-FP16:       // %bb.0: // %entry
 ; CHECK-FP16-NEXT:    fmul v0.8h, v0.8h, v1.8h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: mul_h:
+; CHECK-CVT-GI:       // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT:    fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT:    fmul v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT:    fmul v1.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v2.4s
+; CHECK-CVT-GI-NEXT:    fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-GI-NEXT:    ret
 entry:
   %0 = fmul <8 x half> %a, %b
   ret <8 x half> %0
@@ -72,22 +110,34 @@ entry:
 
 define <8 x half> @div_h(<8 x half> %a, <8 x half> %b) {
-; CHECK-CVT-LABEL: div_h:
-; CHECK-CVT:       // %bb.0: // %entry
-; CHECK-CVT-NEXT:    fcvtl v2.4s, v1.4h
-; CHECK-CVT-NEXT:    fcvtl v3.4s, v0.4h
-; CHECK-CVT-NEXT:    fcvtl2 v1.4s, v1.8h
-; CHECK-CVT-NEXT:    fcvtl2 v0.4s, v0.8h
-; CHECK-CVT-NEXT:    fdiv v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT:    fdiv v1.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v2.4s
-; CHECK-CVT-NEXT:    fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: div_h:
+; CHECK-CVT-SD:       // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT:    fcvtl v2.4s, v1.4h
+; CHECK-CVT-SD-NEXT:    fcvtl v3.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-SD-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-SD-NEXT:    fdiv v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT:    fdiv v1.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v2.4s
+; CHECK-CVT-SD-NEXT:    fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: div_h:
 ; CHECK-FP16:       // %bb.0: // %entry
 ; CHECK-FP16-NEXT:    fdiv v0.8h, v0.8h, v1.8h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: div_h:
+; CHECK-CVT-GI:       // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT:    fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT:    fdiv v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT:    fdiv v1.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v2.4s
+; CHECK-CVT-GI-NEXT:    fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-GI-NEXT:    ret
 entry:
   %0 = fdiv <8 x half> %a, %b
   ret <8 x half> %0
@@ -126,39 +176,171 @@ define <8 x half> @s_to_h(<8 x float> %a) {
 }
 
 define <8 x half> @d_to_h(<8 x double> %a) {
-; CHECK-LABEL: d_to_h:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtxn v0.2s, v0.2d
-; CHECK-NEXT:    fcvtxn v2.2s, v2.2d
-; CHECK-NEXT:    fcvtxn2 v0.4s, v1.2d
-; CHECK-NEXT:    fcvtxn2 v2.4s, v3.2d
-; CHECK-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-NEXT:    fcvtn2 v0.8h, v2.4s
-; CHECK-NEXT:    ret
+; CHECK-CVT-SD-LABEL: d_to_h:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtxn v0.2s, v0.2d
+; CHECK-CVT-SD-NEXT:    fcvtxn v2.2s, v2.2d
+; CHECK-CVT-SD-NEXT:    fcvtxn2 v0.4s, v1.2d
+; CHECK-CVT-SD-NEXT:    fcvtxn2 v2.4s, v3.2d
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-SD-NEXT:    ret
+;
+; CHECK-FP16-SD-LABEL: d_to_h:
+; CHECK-FP16-SD:       // %bb.0:
+; CHECK-FP16-SD-NEXT:    fcvtxn v0.2s, v0.2d
+; CHECK-FP16-SD-NEXT:    fcvtxn v2.2s, v2.2d
+; CHECK-FP16-SD-NEXT:    fcvtxn2 v0.4s, v1.2d
+; CHECK-FP16-SD-NEXT:    fcvtxn2 v2.4s, v3.2d
+; CHECK-FP16-SD-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-FP16-SD-NEXT:    fcvtn2 v0.8h, v2.4s
+; CHECK-FP16-SD-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: d_to_h:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    mov d4, v0.d[1]
+; CHECK-CVT-GI-NEXT:    fcvt h0, d0
+; CHECK-CVT-GI-NEXT:    mov d5, v1.d[1]
+; CHECK-CVT-GI-NEXT:    fcvt h1, d1
+; CHECK-CVT-GI-NEXT:    fcvt h4, d4
+; CHECK-CVT-GI-NEXT:    mov v0.h[1], v4.h[0]
+; CHECK-CVT-GI-NEXT:    fcvt h4, d5
+; CHECK-CVT-GI-NEXT:    mov v0.h[2], v1.h[0]
+; CHECK-CVT-GI-NEXT:    mov d1, v2.d[1]
+; CHECK-CVT-GI-NEXT:    fcvt h2, d2
+; CHECK-CVT-GI-NEXT:    mov v0.h[3], v4.h[0]
+; CHECK-CVT-GI-NEXT:    fcvt h1, d1
+; CHECK-CVT-GI-NEXT:    mov v0.h[4], v2.h[0]
+; CHECK-CVT-GI-NEXT:    mov d2, v3.d[1]
+; CHECK-CVT-GI-NEXT:    fcvt h3, d3
+; CHECK-CVT-GI-NEXT:    mov v0.h[5], v1.h[0]
+; CHECK-CVT-GI-NEXT:    fcvt h1, d2
+; CHECK-CVT-GI-NEXT:    mov v0.h[6], v3.h[0]
+; CHECK-CVT-GI-NEXT:    mov v0.h[7], v1.h[0]
+; CHECK-CVT-GI-NEXT:    ret
+;
+; CHECK-FP16-GI-LABEL: d_to_h:
+; CHECK-FP16-GI:       // %bb.0:
+; CHECK-FP16-GI-NEXT:    mov d4, v0.d[1]
+; CHECK-FP16-GI-NEXT:    fcvt h0, d0
+; CHECK-FP16-GI-NEXT:    mov d5, v1.d[1]
+; CHECK-FP16-GI-NEXT:    fcvt h1, d1
+; CHECK-FP16-GI-NEXT:    fcvt h4, d4
+; CHECK-FP16-GI-NEXT:    mov v0.h[1], v4.h[0]
+; CHECK-FP16-GI-NEXT:    fcvt h4, d5
+; CHECK-FP16-GI-NEXT:    mov v0.h[2], v1.h[0]
+; CHECK-FP16-GI-NEXT:    mov d1, v2.d[1]
+; CHECK-FP16-GI-NEXT:    fcvt h2, d2
+; CHECK-FP16-GI-NEXT:    mov v0.h[3], v4.h[0]
+; CHECK-FP16-GI-NEXT:    fcvt h1, d1
+; CHECK-FP16-GI-NEXT:    mov v0.h[4], v2.h[0]
+; CHECK-FP16-GI-NEXT:    mov d2, v3.d[1]
+; CHECK-FP16-GI-NEXT:    fcvt h3, d3
+; CHECK-FP16-GI-NEXT:    mov v0.h[5], v1.h[0]
+; CHECK-FP16-GI-NEXT:    fcvt h1, d2
+; CHECK-FP16-GI-NEXT:    mov v0.h[6], v3.h[0]
+; CHECK-FP16-GI-NEXT:    mov v0.h[7], v1.h[0]
+; CHECK-FP16-GI-NEXT:    ret
   %1 = fptrunc <8 x double> %a to <8 x half>
   ret <8 x half> %1
 }
 
 define <8 x float> @h_to_s(<8 x half> %a) {
-; CHECK-LABEL: h_to_s:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtl2 v1.4s, v0.8h
-; CHECK-NEXT:    fcvtl v0.4s, v0.4h
-; CHECK-NEXT:    ret
+; CHECK-CVT-SD-LABEL: h_to_s:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl2 v1.4s, v0.8h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    ret
+;
+; CHECK-FP16-SD-LABEL: h_to_s:
+; CHECK-FP16-SD:       // %bb.0:
+; CHECK-FP16-SD-NEXT:    fcvtl2 v1.4s, v0.8h
+; CHECK-FP16-SD-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-FP16-SD-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: h_to_s:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT:    fcvtl2 v1.4s, v0.8h
+; CHECK-CVT-GI-NEXT:    mov v0.16b, v2.16b
+; CHECK-CVT-GI-NEXT:    ret
+;
+; CHECK-FP16-GI-LABEL: h_to_s:
+; CHECK-FP16-GI:       // %bb.0:
+; CHECK-FP16-GI-NEXT:    fcvtl v2.4s, v0.4h
+; CHECK-FP16-GI-NEXT:    fcvtl2 v1.4s, v0.8h
+; CHECK-FP16-GI-NEXT:    mov v0.16b, v2.16b
+; CHECK-FP16-GI-NEXT:    ret
   %1 = fpext <8 x half> %a to <8 x float>
   ret <8 x float> %1
 }
 
 define <8 x double> @h_to_d(<8 x half> %a) {
-; CHECK-LABEL: h_to_d:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtl v1.4s, v0.4h
-; CHECK-NEXT:    fcvtl2 v2.4s, v0.8h
-; CHECK-NEXT:    fcvtl v0.2d, v1.2s
-; CHECK-NEXT:    fcvtl2 v3.2d, v2.4s
-; CHECK-NEXT:    fcvtl2 v1.2d, v1.4s
-; CHECK-NEXT:    fcvtl v2.2d, v2.2s
-; CHECK-NEXT:    ret
+; CHECK-CVT-SD-LABEL: h_to_d:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-CVT-SD-NEXT:    fcvtl2 v2.4s, v0.8h
+; CHECK-CVT-SD-NEXT:    fcvtl v0.2d, v1.2s
+; CHECK-CVT-SD-NEXT:    fcvtl2 v3.2d, v2.4s
+; CHECK-CVT-SD-NEXT:    fcvtl2 v1.2d, v1.4s
+; CHECK-CVT-SD-NEXT:    fcvtl v2.2d, v2.2s
+; CHECK-CVT-SD-NEXT:    ret
+;
+; CHECK-FP16-SD-LABEL: h_to_d:
+; CHECK-FP16-SD:       // %bb.0:
+; CHECK-FP16-SD-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-FP16-SD-NEXT:    fcvtl2 v2.4s, v0.8h
+; CHECK-FP16-SD-NEXT:    fcvtl v0.2d, v1.2s
+; CHECK-FP16-SD-NEXT:    fcvtl2 v3.2d, v2.4s
+; CHECK-FP16-SD-NEXT:    fcvtl2 v1.2d, v1.4s
+; CHECK-FP16-SD-NEXT:    fcvtl v2.2d, v2.2s
+; CHECK-FP16-SD-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: h_to_d:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    mov h1, v0.h[1]
+; CHECK-CVT-GI-NEXT:    mov h2, v0.h[2]
+; CHECK-CVT-GI-NEXT:    mov h3, v0.h[3]
+; CHECK-CVT-GI-NEXT:    mov h4, v0.h[4]
+; CHECK-CVT-GI-NEXT:    mov h5, v0.h[5]
+; CHECK-CVT-GI-NEXT:    mov h6, v0.h[6]
+; CHECK-CVT-GI-NEXT:    mov h7, v0.h[7]
+; CHECK-CVT-GI-NEXT:    fcvt d0, h0
+; CHECK-CVT-GI-NEXT:    fcvt d16, h1
+; CHECK-CVT-GI-NEXT:    fcvt d1, h2
+; CHECK-CVT-GI-NEXT:    fcvt d17, h3
+; CHECK-CVT-GI-NEXT:    fcvt d2, h4
+; CHECK-CVT-GI-NEXT:    fcvt d4, h5
+; CHECK-CVT-GI-NEXT:    fcvt d3, h6
+; CHECK-CVT-GI-NEXT:    fcvt d5, h7
+; CHECK-CVT-GI-NEXT:    mov v0.d[1], v16.d[0]
+; CHECK-CVT-GI-NEXT:    mov v1.d[1], v17.d[0]
+; CHECK-CVT-GI-NEXT:    mov v2.d[1], v4.d[0]
+; CHECK-CVT-GI-NEXT:    mov v3.d[1], v5.d[0]
+; CHECK-CVT-GI-NEXT:    ret
+;
+; CHECK-FP16-GI-LABEL: h_to_d:
+; CHECK-FP16-GI:       // %bb.0:
+; CHECK-FP16-GI-NEXT:    mov h1, v0.h[1]
+; CHECK-FP16-GI-NEXT:    mov h2, v0.h[2]
+; CHECK-FP16-GI-NEXT:    mov h3, v0.h[3]
+; CHECK-FP16-GI-NEXT:    mov h4, v0.h[4]
+; CHECK-FP16-GI-NEXT:    mov h5, v0.h[5]
+; CHECK-FP16-GI-NEXT:    mov h6, v0.h[6]
+; CHECK-FP16-GI-NEXT:    mov h7, v0.h[7]
+; CHECK-FP16-GI-NEXT:    fcvt d0, h0
+; CHECK-FP16-GI-NEXT:    fcvt d16, h1
+; CHECK-FP16-GI-NEXT:    fcvt d1, h2
+; CHECK-FP16-GI-NEXT:    fcvt d17, h3
+; CHECK-FP16-GI-NEXT:    fcvt d2, h4
+; CHECK-FP16-GI-NEXT:    fcvt d4, h5
+; CHECK-FP16-GI-NEXT:    fcvt d3, h6
+; CHECK-FP16-GI-NEXT:    fcvt d5, h7
+; CHECK-FP16-GI-NEXT:    mov v0.d[1], v16.d[0]
+; CHECK-FP16-GI-NEXT:    mov v1.d[1], v17.d[0]
+; CHECK-FP16-GI-NEXT:    mov v2.d[1], v4.d[0]
+; CHECK-FP16-GI-NEXT:    mov v3.d[1], v5.d[0]
+; CHECK-FP16-GI-NEXT:    ret
   %1 = fpext <8 x half> %a to <8 x double>
   ret <8 x double> %1
 }
@@ -183,14 +365,14 @@ define <8 x i16> @bitcast_h_to_i(float, <8 x half> %a) {
 }
 
 define <4 x half> @sitofp_v4i8(<4 x i8> %a) #0 {
-; CHECK-CVT-LABEL: sitofp_v4i8:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    shl v0.4h, v0.4h, #8
-; CHECK-CVT-NEXT:    sshr v0.4h, v0.4h, #8
-; CHECK-CVT-NEXT:    sshll v0.4s, v0.4h, #0
-; CHECK-CVT-NEXT:    scvtf v0.4s, v0.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: sitofp_v4i8:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    shl v0.4h, v0.4h, #8
+; CHECK-CVT-SD-NEXT:    sshr v0.4h, v0.4h, #8
+; CHECK-CVT-SD-NEXT:    sshll v0.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: sitofp_v4i8:
 ; CHECK-FP16:       // %bb.0:
@@ -198,76 +380,132 @@ define <4 x half> @sitofp_v4i8(<4 x i8> %a) #0 {
 ; CHECK-FP16-NEXT:    sshr v0.4h, v0.4h, #8
 ; CHECK-FP16-NEXT:    scvtf v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: sitofp_v4i8:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT:    shl v0.4s, v0.4s, #24
+; CHECK-CVT-GI-NEXT:    sshr v0.4s, v0.4s, #24
+; CHECK-CVT-GI-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = sitofp <4 x i8> %a to <4 x half>
   ret <4 x half> %1
 }
 
 define <8 x half> @sitofp_v8i8(<8 x i8> %a) #0 {
-; CHECK-CVT-LABEL: sitofp_v8i8:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    sshll v0.8h, v0.8b, #0
-; CHECK-CVT-NEXT:    sshll v1.4s, v0.4h, #0
-; CHECK-CVT-NEXT:    sshll2 v2.4s, v0.8h, #0
-; CHECK-CVT-NEXT:    scvtf v1.4s, v1.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v1.4s
-; CHECK-CVT-NEXT:    scvtf v1.4s, v2.4s
-; CHECK-CVT-NEXT:    fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: sitofp_v8i8:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    sshll v0.8h, v0.8b, #0
+; CHECK-CVT-SD-NEXT:    sshll v1.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT:    sshll2 v2.4s, v0.8h, #0
+; CHECK-CVT-SD-NEXT:    scvtf v1.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v1.4s
+; CHECK-CVT-SD-NEXT:    scvtf v1.4s, v2.4s
+; CHECK-CVT-SD-NEXT:    fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: sitofp_v8i8:
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    sshll v0.8h, v0.8b, #0
 ; CHECK-FP16-NEXT:    scvtf v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: sitofp_v8i8:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    sshll v0.8h, v0.8b, #0
+; CHECK-CVT-GI-NEXT:    sshll v1.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT:    sshll2 v0.4s, v0.8h, #0
+; CHECK-CVT-GI-NEXT:    scvtf v1.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    scvtf v2.4s, v0.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v1.4s
+; CHECK-CVT-GI-NEXT:    fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = sitofp <8 x i8> %a to <8 x half>
   ret <8 x half> %1
 }
 
 define <16 x half> @sitofp_v16i8(<16 x i8> %a) #0 {
-; CHECK-CVT-LABEL: sitofp_v16i8:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    sshll2 v1.8h, v0.16b, #0
-; CHECK-CVT-NEXT:    sshll v0.8h, v0.8b, #0
-; CHECK-CVT-NEXT:    sshll v2.4s, v1.4h, #0
-; CHECK-CVT-NEXT:    sshll v3.4s, v0.4h, #0
-; CHECK-CVT-NEXT:    sshll2 v4.4s, v1.8h, #0
-; CHECK-CVT-NEXT:    sshll2 v5.4s, v0.8h, #0
-; CHECK-CVT-NEXT:    scvtf v2.4s, v2.4s
-; CHECK-CVT-NEXT:    scvtf v3.4s, v3.4s
-; CHECK-CVT-NEXT:    fcvtn v1.4h, v2.4s
-; CHECK-CVT-NEXT:    scvtf v2.4s, v4.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v3.4s
-; CHECK-CVT-NEXT:    scvtf v3.4s, v5.4s
-; CHECK-CVT-NEXT:    fcvtn2 v1.8h, v2.4s
-; CHECK-CVT-NEXT:    fcvtn2 v0.8h, v3.4s
-; CHECK-CVT-NEXT:    ret
-;
-; CHECK-FP16-LABEL: sitofp_v16i8:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    sshll2 v1.8h, v0.16b, #0
-; CHECK-FP16-NEXT:    sshll v0.8h, v0.8b, #0
-; CHECK-FP16-NEXT:    scvtf v1.8h, v1.8h
-; CHECK-FP16-NEXT:    scvtf v0.8h, v0.8h
-; CHECK-FP16-NEXT:    ret
+; CHECK-CVT-SD-LABEL: sitofp_v16i8:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    sshll2 v1.8h, v0.16b, #0
+; CHECK-CVT-SD-NEXT:    sshll v0.8h, v0.8b, #0
+; CHECK-CVT-SD-NEXT:    sshll v2.4s, v1.4h, #0
+; CHECK-CVT-SD-NEXT:    sshll v3.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT:    sshll2 v4.4s, v1.8h, #0
+; CHECK-CVT-SD-NEXT:    sshll2 v5.4s, v0.8h, #0
+; CHECK-CVT-SD-NEXT:    scvtf v2.4s, v2.4s
+; CHECK-CVT-SD-NEXT:    scvtf v3.4s, v3.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v1.4h, v2.4s
+; CHECK-CVT-SD-NEXT:    scvtf v2.4s, v4.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v3.4s
+; CHECK-CVT-SD-NEXT:    scvtf v3.4s, v5.4s
+; CHECK-CVT-SD-NEXT:    fcvtn2 v1.8h, v2.4s
+; CHECK-CVT-SD-NEXT:    fcvtn2 v0.8h, v3.4s
+; CHECK-CVT-SD-NEXT:    ret
+;
+; CHECK-FP16-SD-LABEL: sitofp_v16i8:
+; CHECK-FP16-SD:       // %bb.0:
+; CHECK-FP16-SD-NEXT:    sshll2 v1.8h, v0.16b, #0
+; CHECK-FP16-SD-NEXT:    sshll v0.8h, v0.8b, #0
+; CHECK-FP16-SD-NEXT:    scvtf v1.8h, v1.8h
+; CHECK-FP16-SD-NEXT:    scvtf v0.8h, v0.8h
+; CHECK-FP16-SD-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: sitofp_v16i8:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    sshll v1.8h, v0.8b, #0
+; CHECK-CVT-GI-NEXT:    sshll2 v0.8h, v0.16b, #0
+; CHECK-CVT-GI-NEXT:    sshll v2.4s, v1.4h, #0
+; CHECK-CVT-GI-NEXT:    sshll v3.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT:    sshll2 v1.4s, v1.8h, #0
+; CHECK-CVT-GI-NEXT:    sshll2 v0.4s, v0.8h, #0
+; CHECK-CVT-GI-NEXT:    scvtf v2.4s, v2.4s
+; CHECK-CVT-GI-NEXT:    scvtf v3.4s, v3.4s
+; CHECK-CVT-GI-NEXT:    scvtf v4.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    scvtf v5.4s, v0.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v2.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v1.4h, v3.4s
+; CHECK-CVT-GI-NEXT:    fcvtn2 v0.8h, v4.4s
+; CHECK-CVT-GI-NEXT:    fcvtn2 v1.8h, v5.4s
+; CHECK-CVT-GI-NEXT:    ret
+;
+; CHECK-FP16-GI-LABEL: sitofp_v16i8:
+; CHECK-FP16-GI:       // %bb.0:
+; CHECK-FP16-GI-NEXT:    sshll v1.8h, v0.8b, #0
+; CHECK-FP16-GI-NEXT:    sshll2 v2.8h, v0.16b, #0
+; CHECK-FP16-GI-NEXT:    scvtf v0.8h, v1.8h
+; CHECK-FP16-GI-NEXT:    scvtf v1.8h, v2.8h
+; CHECK-FP16-GI-NEXT:    ret
   %1 = sitofp <16 x i8> %a to <16 x half>
   ret <16 x half> %1
 }
 
 define <8 x half> @sitofp_i16(<8 x i16> %a) #0 {
-; CHECK-CVT-LABEL: sitofp_i16:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    sshll v1.4s, v0.4h, #0
-; CHECK-CVT-NEXT:    sshll2 v2.4s, v0.8h, #0
-; CHECK-CVT-NEXT:    scvtf v1.4s, v1.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v1.4s
-; CHECK-CVT-NEXT:    scvtf v1.4s, v2.4s
-; CHECK-CVT-NEXT:    fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT:    ret
+; CHECK-CVT-SD-LABEL: sitofp_i16:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    sshll v1.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT:    sshll2 v2.4s, v0.8h, #0
+; CHECK-CVT-SD-NEXT:    scvtf v1.4s, v1.4s
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v1.4s
+; CHECK-CVT-SD-NEXT:    scvtf v1.4s, v2.4s
+; CHECK-CVT-SD-NEXT:    fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: sitofp_i16:
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    scvtf v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: sitofp_i16:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    sshll v1.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT:    sshll2 v0.4s, v0.8h, #0
+; CHECK-CVT-GI-NEXT:    scvtf v1.4s, v1.4s
+; CHECK-CVT-GI-NEXT:    scvtf v2.4s, v0.4s
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v1.4s
+; CHECK-CVT-GI-NEXT:    fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-GI-NEXT:    ret
   %1 = sitofp <8 x i16> %a to <8 x half>
   ret <8 x half> %1
 }
@@ -286,108 +524,213 @@ define <8 x half> @sitofp_i32(<8 x i32> %a) #0 {
 
 define <8 x half> @sitofp_i64(<8 x i64> %a) #0 {
-; CHECK-LABEL: sitofp_i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    scvtf v0.2d, v0.2d
-; CHECK-NEXT:    scvtf v2.2d, v2.2d
-; CHECK-NEXT:    scvtf v1.2d, v1.2d
-; CHECK-NEXT:    scvtf v3.2d, v3.2d
-; CHECK-NEXT:    fcvtn v0.2s, v0.2d
-; CHECK-NEXT:    fcvtn v2.2s, v2.2d
-; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
-; CHECK-NEXT:    fcvtn2 v2.4s, v3.2d
-; CHECK-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-NEXT:    fcvtn2 v0.8h, v2.4s
-; CHECK-NEXT:    ret
+; CHECK-CVT-SD-LABEL: sitofp_i64:
+; CHECK-CVT-SD:       // %bb.0:
+; CHECK-CVT-SD-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-CVT-SD-NEXT:    scvtf v2.2d, v2.2d
+; CHECK-CVT-SD-NEXT:    scvtf v1.2d, v1.2d
+; CHECK-CVT-SD-NEXT:    scvtf v3.2d, v3.2d
+; CHECK-CVT-SD-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-CVT-SD-NEXT:    fcvtn v2.2s, v2.2d
+; CHECK-CVT-SD-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-CVT-SD-NEXT:    fcvtn2 v2.4s, v3.2d
+; CHECK-CVT-SD-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT:    fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-SD-NEXT:    ret
+;
+; CHECK-FP16-SD-LABEL: sitofp_i64:
+; CHECK-FP16-SD:       // %bb.0:
+; CHECK-FP16-SD-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-FP16-SD-NEXT:    scvtf v2.2d, v2.2d
+; CHECK-FP16-SD-NEXT:    scvtf v1.2d, v1.2d
+; CHECK-FP16-SD-NEXT:    scvtf v3.2d, v3.2d
+; CHECK-FP16-SD-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-FP16-SD-NEXT:    fcvtn v2.2s, v2.2d
+; CHECK-FP16-SD-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-FP16-SD-NEXT:    fcvtn2 v2.4s, v3.2d
+; CHECK-FP16-SD-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-FP16-SD-NEXT:    fcvtn2 v0.8h, v2.4s
+; CHECK-FP16-SD-NEXT:    ret
+;
+; CHECK-CVT-GI-LABEL: sitofp_i64:
+; CHECK-CVT-GI:       // %bb.0:
+; CHECK-CVT-GI-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-CVT-GI-NEXT:    scvtf v1.2d, v1.2d
+; CHECK-CVT-GI-NEXT:    scvtf v2.2d, v2.2d
+; CHECK-CVT-GI-NEXT:    scvtf v3.2d, v3.2d
+; CHECK-CVT-GI-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-CVT-GI-NEXT:    fcvtn v2.2s, v2.2d
+; CHECK-CVT-GI-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-CVT-GI-NEXT:    fcvtn2 v2.4s, v3.2d
+; CHECK-CVT-GI-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT:    fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-GI-NEXT:    ret
+;
+; CHECK-FP16-GI-LABEL: sitofp_i64:
+; CHECK-FP16-GI:       // %bb.0:
+; CHECK-FP16-GI-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-FP16-GI-NEXT:    scvtf v1.2d, v1.2d
+; CHECK-FP16-GI-NEXT:    scvtf v2.2d, v2.2d
+; CHECK-FP16-GI-NEXT:    scvtf v3.2d, v3.2d
+; CHECK-FP16-GI-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-FP16-GI-NEXT:    fcvtn v2.2s, v2.2d
+; CHECK-FP16-GI-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-FP16-GI-NEXT:    fcvtn2 v2.4s, v3.2d
+; CHECK-FP16-GI-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-FP16-GI-NEXT:    fcvtn2 v0.8h, v2.4s
+; CHECK-FP16-GI-NEXT:    ret
   %1 = sitofp <8 x i64> %a to <8 x half>
   ret <8 x half> %1
 }
 
 define <4 x half> @uitofp_v4i8(<4 x i8> %a) #0 {
-; CHECK-CVT-LABEL: uitofp_v4i8:
-; CHECK-CVT:       // %bb.0:
-; CHECK-CVT-NEXT:    bic v0.4h, #255, lsl #8
-; CHECK-CVT-NEXT:    ushll v0.4s, v0.4h, #0
-; CHECK-CVT-NEXT:    ucvtf v0.4s, v0.4s
-; CHECK-CVT-NEXT:    fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT:    ret
-;
-; CHECK-FP16-LABEL: uitofp_v4i8:
-; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    bic v0.4h, #255, lsl #8
-; CHECK-FP16-NEXT:    ucvtf v0.4h, v0.4h
-; CHECK-FP16-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_v4i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: bic v0.4h, #255, lsl #8 +; CHECK-CVT-SD-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: uitofp_v4i8: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: bic v0.4h, #255, lsl #8 +; CHECK-FP16-SD-NEXT: ucvtf v0.4h, v0.4h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_v4i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: movi v1.2d, #0x0000ff000000ff +; CHECK-CVT-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-CVT-GI-NEXT: ucvtf v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: uitofp_v4i8: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: movi d1, #0xff00ff00ff00ff +; CHECK-FP16-GI-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-FP16-GI-NEXT: ucvtf v0.4h, v0.4h +; CHECK-FP16-GI-NEXT: ret %1 = uitofp <4 x i8> %a to <4 x half> ret <4 x half> %1 } define <8 x half> @uitofp_v8i8(<8 x i8> %a) #0 { -; CHECK-CVT-LABEL: uitofp_v8i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-CVT-NEXT: ushll v1.4s, v0.4h, #0 -; CHECK-CVT-NEXT: ushll2 v2.4s, v0.8h, #0 -; CHECK-CVT-NEXT: ucvtf v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s -; CHECK-CVT-NEXT: ucvtf v1.4s, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_v8i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-CVT-SD-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ushll2 v2.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: uitofp_v8i8: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: ushll v0.8h, v0.8b, #0 ; CHECK-FP16-NEXT: ucvtf v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_v8i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-CVT-GI-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: ushll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: ucvtf v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: ucvtf v2.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret %1 = uitofp <8 x i8> %a to <8 x half> ret <8 x half> %1 } define <16 x half> @uitofp_v16i8(<16 x i8> %a) #0 { -; CHECK-CVT-LABEL: uitofp_v16i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: ushll2 v1.8h, v0.16b, #0 -; CHECK-CVT-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-CVT-NEXT: ushll v2.4s, v1.4h, #0 -; CHECK-CVT-NEXT: ushll v3.4s, v0.4h, #0 -; CHECK-CVT-NEXT: ushll2 v4.4s, v1.8h, #0 -; CHECK-CVT-NEXT: ushll2 v5.4s, v0.8h, #0 -; CHECK-CVT-NEXT: ucvtf v2.4s, v2.4s -; CHECK-CVT-NEXT: ucvtf v3.4s, v3.4s -; CHECK-CVT-NEXT: fcvtn v1.4h, v2.4s -; CHECK-CVT-NEXT: ucvtf v2.4s, v4.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v3.4s -; CHECK-CVT-NEXT: ucvtf v3.4s, v5.4s -; CHECK-CVT-NEXT: fcvtn2 v1.8h, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v3.4s -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: uitofp_v16i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: ushll2 v1.8h, v0.16b, #0 -; CHECK-FP16-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-FP16-NEXT: ucvtf v1.8h, v1.8h -; CHECK-FP16-NEXT: ucvtf v0.8h, v0.8h -; CHECK-FP16-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_v16i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: ushll2 v1.8h, v0.16b, #0 
+; CHECK-CVT-SD-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-CVT-SD-NEXT: ushll v2.4s, v1.4h, #0 +; CHECK-CVT-SD-NEXT: ushll v3.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ushll2 v4.4s, v1.8h, #0 +; CHECK-CVT-SD-NEXT: ushll2 v5.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v2.4s, v2.4s +; CHECK-CVT-SD-NEXT: ucvtf v3.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcvtn v1.4h, v2.4s +; CHECK-CVT-SD-NEXT: ucvtf v2.4s, v4.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v3.4s +; CHECK-CVT-SD-NEXT: ucvtf v3.4s, v5.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v1.8h, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v3.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: uitofp_v16i8: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: ushll2 v1.8h, v0.16b, #0 +; CHECK-FP16-SD-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-FP16-SD-NEXT: ucvtf v1.8h, v1.8h +; CHECK-FP16-SD-NEXT: ucvtf v0.8h, v0.8h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_v16i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ushll v1.8h, v0.8b, #0 +; CHECK-CVT-GI-NEXT: ushll2 v0.8h, v0.16b, #0 +; CHECK-CVT-GI-NEXT: ushll v2.4s, v1.4h, #0 +; CHECK-CVT-GI-NEXT: ushll v3.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: ushll2 v1.4s, v1.8h, #0 +; CHECK-CVT-GI-NEXT: ushll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: ucvtf v2.4s, v2.4s +; CHECK-CVT-GI-NEXT: ucvtf v3.4s, v3.4s +; CHECK-CVT-GI-NEXT: ucvtf v4.4s, v1.4s +; CHECK-CVT-GI-NEXT: ucvtf v5.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-GI-NEXT: fcvtn v1.4h, v3.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v4.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v1.8h, v5.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: uitofp_v16i8: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: ushll v1.8h, v0.8b, #0 +; CHECK-FP16-GI-NEXT: ushll2 v2.8h, v0.16b, #0 +; CHECK-FP16-GI-NEXT: ucvtf v0.8h, v1.8h +; CHECK-FP16-GI-NEXT: ucvtf v1.8h, v2.8h +; CHECK-FP16-GI-NEXT: ret %1 = uitofp <16 x i8> %a to <16 x half> ret <16 x half> %1 } define <8 x half> @uitofp_i16(<8 x i16> %a) #0 { -; CHECK-CVT-LABEL: uitofp_i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: ushll v1.4s, v0.4h, #0 -; CHECK-CVT-NEXT: ushll2 v2.4s, v0.8h, #0 -; CHECK-CVT-NEXT: ucvtf v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s -; CHECK-CVT-NEXT: ucvtf v1.4s, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_i16: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ushll2 v2.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: uitofp_i16: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: ucvtf v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_i16: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: ushll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: ucvtf v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: ucvtf v2.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret %1 = uitofp <8 x i16> %a to <8 x half> ret <8 x half> %1 } @@ -407,19 +750,61 @@ define <8 x half> @uitofp_i32(<8 x i32> %a) #0 { define <8 x half> @uitofp_i64(<8 x i64> %a) #0 { -; CHECK-LABEL: uitofp_i64: -; CHECK: // %bb.0: -; CHECK-NEXT: ucvtf v0.2d, v0.2d -; CHECK-NEXT: ucvtf v2.2d, v2.2d -; CHECK-NEXT: ucvtf v1.2d, v1.2d -; CHECK-NEXT: ucvtf v3.2d, v3.2d -; CHECK-NEXT: fcvtn v0.2s, v0.2d -; CHECK-NEXT: fcvtn v2.2s, v2.2d -; CHECK-NEXT: fcvtn2 v0.4s, v1.2d -; CHECK-NEXT: fcvtn2 
v2.4s, v3.2d -; CHECK-NEXT: fcvtn v0.4h, v0.4s -; CHECK-NEXT: fcvtn2 v0.8h, v2.4s -; CHECK-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_i64: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: ucvtf v0.2d, v0.2d +; CHECK-CVT-SD-NEXT: ucvtf v2.2d, v2.2d +; CHECK-CVT-SD-NEXT: ucvtf v1.2d, v1.2d +; CHECK-CVT-SD-NEXT: ucvtf v3.2d, v3.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.2s, v0.2d +; CHECK-CVT-SD-NEXT: fcvtn v2.2s, v2.2d +; CHECK-CVT-SD-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-CVT-SD-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: uitofp_i64: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: ucvtf v0.2d, v0.2d +; CHECK-FP16-SD-NEXT: ucvtf v2.2d, v2.2d +; CHECK-FP16-SD-NEXT: ucvtf v1.2d, v1.2d +; CHECK-FP16-SD-NEXT: ucvtf v3.2d, v3.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.2s, v0.2d +; CHECK-FP16-SD-NEXT: fcvtn v2.2s, v2.2d +; CHECK-FP16-SD-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-FP16-SD-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_i64: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ucvtf v0.2d, v0.2d +; CHECK-CVT-GI-NEXT: ucvtf v1.2d, v1.2d +; CHECK-CVT-GI-NEXT: ucvtf v2.2d, v2.2d +; CHECK-CVT-GI-NEXT: ucvtf v3.2d, v3.2d +; CHECK-CVT-GI-NEXT: fcvtn v0.2s, v0.2d +; CHECK-CVT-GI-NEXT: fcvtn v2.2s, v2.2d +; CHECK-CVT-GI-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-CVT-GI-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: uitofp_i64: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: ucvtf v0.2d, v0.2d +; CHECK-FP16-GI-NEXT: ucvtf v1.2d, v1.2d +; CHECK-FP16-GI-NEXT: ucvtf v2.2d, v2.2d +; CHECK-FP16-GI-NEXT: ucvtf v3.2d, v3.2d +; CHECK-FP16-GI-NEXT: fcvtn v0.2s, v0.2d +; CHECK-FP16-GI-NEXT: fcvtn v2.2s, v2.2d +; CHECK-FP16-GI-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-FP16-GI-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-FP16-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-FP16-GI-NEXT: ret %1 = uitofp <8 x i64> %a to <8 x half> ret <8 x half> %1 } @@ -436,94 +821,132 @@ define void @test_insert_at_zero(half %a, ptr %b) #0 { } define <8 x i8> @fptosi_i8(<8 x half> %a) #0 { -; CHECK-CVT-LABEL: fptosi_i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptosi_i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: fptosi_i8: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptosi_i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fptosi<8 x half> %a to <8 x i8> ret <8 x i8> %1 } define <8 x i16> 
@fptosi_i16(<8 x half> %a) #0 { -; CHECK-CVT-LABEL: fptosi_i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptosi_i16: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: fptosi_i16: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptosi_i16: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fptosi<8 x half> %a to <8 x i16> ret <8 x i16> %1 } define <8 x i8> @fptoui_i8(<8 x half> %a) #0 { -; CHECK-CVT-LABEL: fptoui_i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptoui_i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: fptoui_i8: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptoui_i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fptoui<8 x half> %a to <8 x i8> ret <8 x i8> %1 } define <8 x i16> @fptoui_i16(<8 x half> %a) #0 { -; CHECK-CVT-LABEL: fptoui_i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptoui_i16: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: fptoui_i16: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptoui_i16: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fptoui<8 x half> %a to <8 x i16> ret <8 x i16> %1 } define <8 x i1> @test_fcmp_une(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: 
test_fcmp_une: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmeq v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_une: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmeq v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_une: ; CHECK-FP16: // %bb.0: @@ -531,27 +954,41 @@ define <8 x i1> @test_fcmp_une(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_une: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmeq v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp une <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ueq(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ueq: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v4.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s -; CHECK-CVT-NEXT: fcmgt v3.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ueq: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v4.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcmgt v3.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ueq: ; CHECK-FP16: // %bb.0: @@ -561,23 +998,41 @@ define <8 x i1> @test_fcmp_ueq(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ueq: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: 
fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v4.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmgt v3.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-GI-NEXT: mvn v1.16b, v1.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ueq <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ugt(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ugt: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmge v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ugt: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ugt: ; CHECK-FP16: // %bb.0: @@ -585,23 +1040,37 @@ define <8 x i1> @test_fcmp_ugt(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ugt: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ugt <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_uge(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_uge: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_uge: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_uge: ; CHECK-FP16: // %bb.0: @@ -609,23 +1078,37 @@ define <8 x i1> @test_fcmp_uge(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; 
CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_uge: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp uge <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ult(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ult: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ult: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ult: ; CHECK-FP16: // %bb.0: @@ -633,23 +1116,37 @@ define <8 x i1> @test_fcmp_ult(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ult: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ult <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ule(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ule: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ule: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ule: ; CHECK-FP16: // %bb.0: @@ -657,27 +1154,41 @@ define <8 x i1> @test_fcmp_ule(<8 x 
half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ule: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ule <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_uno(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_uno: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v4.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s -; CHECK-CVT-NEXT: fcmge v3.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_uno: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v4.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcmge v3.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_uno: ; CHECK-FP16: // %bb.0: @@ -687,26 +1198,44 @@ define <8 x i1> @test_fcmp_uno(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_uno: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v4.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmge v3.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-GI-NEXT: mvn v1.16b, v1.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp uno <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_one(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_one: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v4.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s -; CHECK-CVT-NEXT: fcmgt v3.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; 
CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_one: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v4.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcmgt v3.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_one: ; CHECK-FP16: // %bb.0: @@ -715,136 +1244,212 @@ define <8 x i1> @test_fcmp_one(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: orr v0.16b, v0.16b, v2.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_one: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v4.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmgt v3.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp one <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_oeq(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_oeq: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmeq v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_oeq: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmeq v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_oeq: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmeq v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_oeq: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmeq v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp oeq <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ogt(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ogt: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; 
CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ogt: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ogt: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmgt v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ogt: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ogt <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_oge(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_oge: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_oge: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_oge: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmge v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_oge: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp oge <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_olt(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_olt: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_olt: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h +; 
CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_olt: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmgt v0.8h, v1.8h, v0.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_olt: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp olt <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ole(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ole: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmge v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ole: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ole: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmge v0.8h, v1.8h, v0.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ole: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ole <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ord(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ord: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v4.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s -; CHECK-CVT-NEXT: fcmge v3.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ord: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v4.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcmge v3.4s, v0.4s, v1.4s +; 
CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ord: ; CHECK-FP16: // %bb.0: @@ -853,8 +1458,27 @@ define <8 x i1> @test_fcmp_ord(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: orr v0.16b, v0.16b, v2.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ord: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v4.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmge v3.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ord <8 x half> %a, %b ret <8 x i1> %1 } attributes #0 = { nounwind } + +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK-CVT: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/icmp.ll b/llvm/test/CodeGen/AArch64/icmp.ll index 18665bc..7195e2b 100644 --- a/llvm/test/CodeGen/AArch64/icmp.ll +++ b/llvm/test/CodeGen/AArch64/icmp.ll @@ -2093,3 +2093,54 @@ define <2 x i1> @icmp_slt_v2i64_Zero_LHS(<2 x i64> %a) { %c = icmp slt <2 x i64> <i64 0, i64 0>, %a ret <2 x i1> %c } + +; Test TST optimization for i8 sign bit testing with cross-type select +; This tests the pattern: icmp slt i8 %val, 0; select i1 %cmp, i32 %a, i32 %b +; The optimization should convert sxtb+cmp to tst for sign bit testing. 
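+;
+; For example, the sign bit of an i8 is bit 7 (mask 0x80), so
+;   icmp slt i8 %val, 0   <=>   (%val & 0x80) != 0
+; which allows a single "tst wN, #0x80" + "csel ..., ne" in place of the
+; "sxtb" + "cmp wN, #0" + "csel ..., mi" sequence (the GlobalISel output
+; below still shows the latter).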
+ +define i32 @i8_signbit_tst_constants(i8 %x, i8 %y) { +; CHECK-SD-LABEL: i8_signbit_tst_constants: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: add w9, w0, w1 +; CHECK-SD-NEXT: mov w8, #42 // =0x2a +; CHECK-SD-NEXT: tst w9, #0x80 +; CHECK-SD-NEXT: mov w9, #20894 // =0x519e +; CHECK-SD-NEXT: csel w0, w9, w8, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: i8_signbit_tst_constants: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: add w8, w0, w1 +; CHECK-GI-NEXT: mov w9, #42 // =0x2a +; CHECK-GI-NEXT: mov w10, #20894 // =0x519e +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: cmp w8, #0 +; CHECK-GI-NEXT: csel w0, w10, w9, mi +; CHECK-GI-NEXT: ret + %add = add i8 %x, %y + %cmp = icmp slt i8 %add, 0 + %sel = select i1 %cmp, i32 20894, i32 42 + ret i32 %sel +} + +; Test i8 sign bit testing with variable select values (problematic case) +define i32 @i8_signbit_variables(i8 %x, i8 %y, i32 %a, i32 %b) { +; CHECK-SD-LABEL: i8_signbit_variables: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: add w8, w0, w1 +; CHECK-SD-NEXT: tst w8, #0x80 +; CHECK-SD-NEXT: csel w0, w2, w3, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: i8_signbit_variables: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: add w8, w0, w1 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: cmp w8, #0 +; CHECK-GI-NEXT: csel w0, w2, w3, mi +; CHECK-GI-NEXT: ret + %add = add i8 %x, %y + %cmp = icmp slt i8 %add, 0 + %sel = select i1 %cmp, i32 %a, i32 %b + ret i32 %sel +} diff --git a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll index fc43c71..b6dee97e 100644 --- a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll +++ b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -aarch64-new-sme-abi -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -aarch64-new-sme-abi -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-SDAG ; A simple EH test case that corresponds to the following C++ source: ; @@ -87,6 +88,90 @@ define void @za_with_raii(i1 %fail) "aarch64_inout_za" personality ptr @__gxx_pe ; CHECK-NEXT: mov x0, x19 ; CHECK-NEXT: msr TPIDR2_EL0, x8 ; CHECK-NEXT: bl _Unwind_Resume +; +; CHECK-SDAG-LABEL: za_with_raii: +; CHECK-SDAG: .Lfunc_begin0: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception0 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! 
// 16-byte Folded Spill +; CHECK-SDAG-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: sub sp, sp, #16 +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -8 +; CHECK-SDAG-NEXT: .cfi_offset w20, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: rdsvl x8, #1 +; CHECK-SDAG-NEXT: mov x9, sp +; CHECK-SDAG-NEXT: msub x9, x8, x8, x9 +; CHECK-SDAG-NEXT: mov sp, x9 +; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16] +; CHECK-SDAG-NEXT: tbnz w0, #0, .LBB0_2 +; CHECK-SDAG-NEXT: // %bb.1: // %return_normally +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: b shared_za_call +; CHECK-SDAG-NEXT: .LBB0_2: // %throw_exception +; CHECK-SDAG-NEXT: sub x20, x29, #16 +; CHECK-SDAG-NEXT: mov w0, #8 // =0x8 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20 +; CHECK-SDAG-NEXT: bl __cxa_allocate_exception +; CHECK-SDAG-NEXT: mov x8, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x9, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x9, .LBB0_4 +; CHECK-SDAG-NEXT: // %bb.3: // %throw_exception +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_4: // %throw_exception +; CHECK-SDAG-NEXT: adrp x9, .L.str +; CHECK-SDAG-NEXT: add x9, x9, :lo12:.L.str +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: str x9, [x8] +; CHECK-SDAG-NEXT: .Ltmp0: // EH_LABEL +; CHECK-SDAG-NEXT: adrp x1, :got:typeinfo_for_char_const_ptr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20 +; CHECK-SDAG-NEXT: mov x0, x8 +; CHECK-SDAG-NEXT: ldr x1, [x1, :got_lo12:typeinfo_for_char_const_ptr] +; CHECK-SDAG-NEXT: mov x2, xzr +; CHECK-SDAG-NEXT: bl __cxa_throw +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB0_6 +; CHECK-SDAG-NEXT: // %bb.5: // %throw_exception +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_6: // %throw_exception +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: .Ltmp1: // EH_LABEL +; CHECK-SDAG-NEXT: // %bb.7: // %throw_fail +; CHECK-SDAG-NEXT: .LBB0_8: // %unwind_dtors +; CHECK-SDAG-NEXT: .Ltmp2: // EH_LABEL +; CHECK-SDAG-NEXT: mov x19, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB0_10 +; CHECK-SDAG-NEXT: // %bb.9: // %unwind_dtors +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_10: // %unwind_dtors +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: bl shared_za_call +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20 +; CHECK-SDAG-NEXT: bl _Unwind_Resume +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB0_12 +; CHECK-SDAG-NEXT: // %bb.11: // %unwind_dtors +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_12: // %unwind_dtors +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr br i1 %fail, label %throw_exception, label %return_normally throw_exception: @@ -124,7 +209,7 @@ throw_fail: ; } ; shared_za_call(); ; } -define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_personality_v0 { +define void @try_catch() "aarch64_inout_za" personality ptr @__gxx_personality_v0 { ; CHECK-LABEL: try_catch: ; CHECK: .Lfunc_begin1: ; CHECK-NEXT: 
.cfi_startproc @@ -142,11 +227,11 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per ; CHECK-NEXT: msub x9, x8, x8, x9 ; CHECK-NEXT: mov sp, x9 ; CHECK-NEXT: stp x9, x8, [x29, #-16] -; CHECK-NEXT: .Ltmp3: +; CHECK-NEXT: .Ltmp3: // EH_LABEL ; CHECK-NEXT: sub x8, x29, #16 ; CHECK-NEXT: msr TPIDR2_EL0, x8 ; CHECK-NEXT: bl may_throw -; CHECK-NEXT: .Ltmp4: +; CHECK-NEXT: .Ltmp4: // EH_LABEL ; CHECK-NEXT: .LBB1_1: // %after_catch ; CHECK-NEXT: smstart za ; CHECK-NEXT: mrs x8, TPIDR2_EL0 @@ -160,7 +245,7 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload ; CHECK-NEXT: b shared_za_call ; CHECK-NEXT: .LBB1_4: // %catch -; CHECK-NEXT: .Ltmp5: +; CHECK-NEXT: .Ltmp5: // EH_LABEL ; CHECK-NEXT: bl __cxa_begin_catch ; CHECK-NEXT: smstart za ; CHECK-NEXT: mrs x8, TPIDR2_EL0 @@ -175,6 +260,78 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per ; CHECK-NEXT: msr TPIDR2_EL0, x8 ; CHECK-NEXT: bl __cxa_end_catch ; CHECK-NEXT: b .LBB1_1 +; +; CHECK-SDAG-LABEL: try_catch: +; CHECK-SDAG: .Lfunc_begin1: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception1 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: sub sp, sp, #16 +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: rdsvl x8, #1 +; CHECK-SDAG-NEXT: mov x9, sp +; CHECK-SDAG-NEXT: msub x9, x8, x8, x9 +; CHECK-SDAG-NEXT: mov sp, x9 +; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16] +; CHECK-SDAG-NEXT: .Ltmp3: // EH_LABEL +; CHECK-SDAG-NEXT: sub x19, x29, #16 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl may_throw +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_2 +; CHECK-SDAG-NEXT: // %bb.1: +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_2: +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: .Ltmp4: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB1_3: // %after_catch +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: b shared_za_call +; CHECK-SDAG-NEXT: .LBB1_4: // %catch +; CHECK-SDAG-NEXT: .Ltmp5: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_6 +; CHECK-SDAG-NEXT: // %bb.5: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_6: // %catch +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_8 +; CHECK-SDAG-NEXT: // %bb.7: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_8: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: bl shared_za_call +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl 
__cxa_end_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_10 +; CHECK-SDAG-NEXT: // %bb.9: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_10: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: b .LBB1_3 invoke void @may_throw() to label %after_catch unwind label %catch @@ -235,16 +392,16 @@ define void @try_catch_shared_za_callee() "aarch64_new_za" personality ptr @__gx ; CHECK-NEXT: zero {za} ; CHECK-NEXT: .LBB2_2: ; CHECK-NEXT: smstart za -; CHECK-NEXT: .Ltmp6: +; CHECK-NEXT: .Ltmp6: // EH_LABEL ; CHECK-NEXT: bl shared_za_call -; CHECK-NEXT: .Ltmp7: +; CHECK-NEXT: .Ltmp7: // EH_LABEL ; CHECK-NEXT: .LBB2_3: // %exit ; CHECK-NEXT: smstop za ; CHECK-NEXT: mov sp, x29 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB2_4: // %catch -; CHECK-NEXT: .Ltmp8: +; CHECK-NEXT: .Ltmp8: // EH_LABEL ; CHECK-NEXT: bl __cxa_begin_catch ; CHECK-NEXT: smstart za ; CHECK-NEXT: mrs x8, TPIDR2_EL0 @@ -260,6 +417,78 @@ define void @try_catch_shared_za_callee() "aarch64_new_za" personality ptr @__gx ; CHECK-NEXT: bl __cxa_end_catch ; CHECK-NEXT: msr TPIDR2_EL0, xzr ; CHECK-NEXT: b .LBB2_3 +; +; CHECK-SDAG-LABEL: try_catch_shared_za_callee: +; CHECK-SDAG: .Lfunc_begin2: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception2 +; CHECK-SDAG-NEXT: // %bb.0: // %prelude +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: sub sp, sp, #16 +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: rdsvl x8, #1 +; CHECK-SDAG-NEXT: mov x9, sp +; CHECK-SDAG-NEXT: msub x9, x8, x8, x9 +; CHECK-SDAG-NEXT: mov sp, x9 +; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16] +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: cbz x8, .LBB2_2 +; CHECK-SDAG-NEXT: // %bb.1: // %save.za +; CHECK-SDAG-NEXT: bl __arm_tpidr2_save +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: .LBB2_2: +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: zero {za} +; CHECK-SDAG-NEXT: .Ltmp6: // EH_LABEL +; CHECK-SDAG-NEXT: bl shared_za_call +; CHECK-SDAG-NEXT: .Ltmp7: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB2_3: // %exit +; CHECK-SDAG-NEXT: smstop za +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB2_4: // %catch +; CHECK-SDAG-NEXT: .Ltmp8: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: sub x19, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB2_6 +; CHECK-SDAG-NEXT: // %bb.5: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB2_6: // %catch +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB2_8 +; CHECK-SDAG-NEXT: // %bb.7: // %catch +; CHECK-SDAG-NEXT: bl 
__arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB2_8: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: bl noexcept_shared_za_call +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_end_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB2_10 +; CHECK-SDAG-NEXT: // %bb.9: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB2_10: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: b .LBB2_3 invoke void @shared_za_call() #4 to label %exit unwind label %catch catch: @@ -275,6 +504,234 @@ exit: ret void } +; A simple ZT0 exception example that corresponds to: +; +; struct ZT0Resource { +; ~ZT0Resource() __arm_inout("zt0") { +; shared_zt0_call(); // simulate cleanup in destructor +; } +; }; +; +; void za_with_raii() __arm_inout("zt0") { +; ZT0Resource r; +; may_throw(); +; } +; +; This code may require reloading ZT0 in the cleanup for ~ZT0Resource(). +; +; FIXME: Codegen with `-aarch64-new-sme-abi` is broken with ZT0 (as it is not implemented). +define void @try_catch_shared_zt0_callee() "aarch64_inout_zt0" personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: try_catch_shared_zt0_callee: +; CHECK: .Lfunc_begin3: +; CHECK-NEXT: .cfi_startproc +; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-NEXT: .cfi_lsda 28, .Lexception3 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .cfi_def_cfa w29, 32 +; CHECK-NEXT: .cfi_offset w19, -8 +; CHECK-NEXT: .cfi_offset w20, -16 +; CHECK-NEXT: .cfi_offset w30, -24 +; CHECK-NEXT: .cfi_offset w29, -32 +; CHECK-NEXT: rdsvl x8, #1 +; CHECK-NEXT: mov x9, sp +; CHECK-NEXT: msub x9, x8, x8, x9 +; CHECK-NEXT: mov sp, x9 +; CHECK-NEXT: stp x9, x8, [x29, #-80] +; CHECK-NEXT: .Ltmp9: // EH_LABEL +; CHECK-NEXT: sub x19, x29, #64 +; CHECK-NEXT: str zt0, [x19] +; CHECK-NEXT: smstop za +; CHECK-NEXT: bl may_throw +; CHECK-NEXT: smstart za +; CHECK-NEXT: ldr zt0, [x19] +; CHECK-NEXT: .Ltmp10: // EH_LABEL +; CHECK-NEXT: // %bb.1: // %return_normally +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB3_2: // %unwind_dtors +; CHECK-NEXT: .Ltmp11: // EH_LABEL +; CHECK-NEXT: sub x20, x29, #64 +; CHECK-NEXT: mov x19, x0 +; CHECK-NEXT: smstart za +; CHECK-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-NEXT: sub x0, x29, #80 +; CHECK-NEXT: cbnz x8, .LBB3_4 +; CHECK-NEXT: // %bb.3: // %unwind_dtors +; CHECK-NEXT: bl __arm_tpidr2_restore +; CHECK-NEXT: .LBB3_4: // %unwind_dtors +; CHECK-NEXT: msr TPIDR2_EL0, xzr +; CHECK-NEXT: bl shared_zt0_call +; CHECK-NEXT: str zt0, [x20] +; CHECK-NEXT: smstop za +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl _Unwind_Resume +; CHECK-NEXT: smstart za +; CHECK-NEXT: ldr zt0, [x20] +; +; CHECK-SDAG-LABEL: try_catch_shared_zt0_callee: +; CHECK-SDAG: .Lfunc_begin3: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception3 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: sub sp, sp, #96 +; CHECK-SDAG-NEXT: str x30, [sp, #64] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill +; CHECK-SDAG-NEXT: .cfi_def_cfa_offset 96 +; 
CHECK-SDAG-NEXT: .cfi_offset w19, -8 +; CHECK-SDAG-NEXT: .cfi_offset w20, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -32 +; CHECK-SDAG-NEXT: .Ltmp9: // EH_LABEL +; CHECK-SDAG-NEXT: mov x19, sp +; CHECK-SDAG-NEXT: str zt0, [x19] +; CHECK-SDAG-NEXT: smstop za +; CHECK-SDAG-NEXT: bl may_throw +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: ldr zt0, [x19] +; CHECK-SDAG-NEXT: .Ltmp10: // EH_LABEL +; CHECK-SDAG-NEXT: // %bb.1: // %return_normally +; CHECK-SDAG-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: add sp, sp, #96 +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB3_2: // %unwind_dtors +; CHECK-SDAG-NEXT: .Ltmp11: // EH_LABEL +; CHECK-SDAG-NEXT: mov x20, sp +; CHECK-SDAG-NEXT: mov x19, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: ldr zt0, [x20] +; CHECK-SDAG-NEXT: bl shared_zt0_call +; CHECK-SDAG-NEXT: str zt0, [x20] +; CHECK-SDAG-NEXT: smstop za +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl _Unwind_Resume +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: ldr zt0, [x20] + invoke void @may_throw() + to label %return_normally unwind label %unwind_dtors + +unwind_dtors: + %5 = landingpad { ptr, i32 } + cleanup + tail call void @shared_zt0_call() + resume { ptr, i32 } %5 + +return_normally: + ret void +} + +; This example corresponds to: +; +; __arm_agnostic("sme_za_state") void try_catch_agnostic_za() +; { +; try { +; may_throw(); +; } catch(...) { +; } +; } +; +; In this example we must execute __arm_sme_restore once we enter the catch block +; (before executing __arm_sme_save again, which would invalidate the prior save). +define void @try_catch_agnostic_za() "aarch64_za_state_agnostic" personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: try_catch_agnostic_za: +; CHECK: .Lfunc_begin4: +; CHECK-NEXT: .cfi_startproc +; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-NEXT: .cfi_lsda 28, .Lexception4 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: .cfi_def_cfa w29, 32 +; CHECK-NEXT: .cfi_offset w19, -16 +; CHECK-NEXT: .cfi_offset w30, -24 +; CHECK-NEXT: .cfi_offset w29, -32 +; CHECK-NEXT: bl __arm_sme_state_size +; CHECK-NEXT: sub sp, sp, x0 +; CHECK-NEXT: mov x19, sp +; CHECK-NEXT: .Ltmp12: // EH_LABEL +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl __arm_sme_save +; CHECK-NEXT: bl may_throw +; CHECK-NEXT: .Ltmp13: // EH_LABEL +; CHECK-NEXT: .LBB4_1: // %exit +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl __arm_sme_restore +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB4_2: // %catch +; CHECK-NEXT: .Ltmp14: // EH_LABEL +; CHECK-NEXT: bl __cxa_begin_catch +; CHECK-NEXT: bl __cxa_end_catch +; CHECK-NEXT: b .LBB4_1 +; +; CHECK-SDAG-LABEL: try_catch_agnostic_za: +; CHECK-SDAG: .Lfunc_begin4: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception4 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! 
// 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: bl __arm_sme_state_size +; CHECK-SDAG-NEXT: sub sp, sp, x0 +; CHECK-SDAG-NEXT: mov x19, sp +; CHECK-SDAG-NEXT: .Ltmp12: // EH_LABEL +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: bl may_throw +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: .Ltmp13: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB4_1: // %exit +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB4_2: // %catch +; CHECK-SDAG-NEXT: .Ltmp14: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: bl __cxa_end_catch +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: b .LBB4_1 + invoke void @may_throw() + to label %exit unwind label %catch +catch: + %eh_info = landingpad { ptr, i32 } + catch ptr null + %exception_ptr = extractvalue { ptr, i32 } %eh_info, 0 + tail call ptr @__cxa_begin_catch(ptr %exception_ptr) + tail call void @__cxa_end_catch() + br label %exit + +exit: + ret void +} + declare ptr @__cxa_allocate_exception(i64) declare void @__cxa_throw(ptr, ptr, ptr) declare ptr @__cxa_begin_catch(ptr) @@ -284,3 +741,4 @@ declare i32 @__gxx_personality_v0(...) 
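; Reading aid for try_catch_shared_zt0_callee above: the CHECK-SDAG lines follow
; the ZT0 spill convention around a call that may clobber ZA/ZT0 state. Roughly,
; in illustrative C++-flavoured pseudo-code (the slot and helper names are
; assumptions, standing in for the str/ldr zt0 and smstart/smstop za
; instructions shown in the checks):
;
;   alignas(16) char slot[64];   // ZT0 is 512 bits wide
;   store_zt0(slot);             // str zt0, [x19]
;   smstop_za();                 // smstop za: ZA/ZT0 off across the call
;   may_throw();
;   smstart_za();                // smstart za
;   load_zt0(slot);              // ldr zt0, [x19]
;
; On the unwind path (%unwind_dtors) the CHECK-SDAG lines perform the same
; reload before the destructor's shared_zt0_call(), as the .Ltmp11 block checks.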
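; Reading aid for try_catch_agnostic_za above: a minimal C++-flavoured sketch of
; the save/restore ordering the CHECK-SDAG lines enforce, assuming the AAPCS64
; SME support routines named in the test. In real code the compiler inserts
; these calls for an __arm_agnostic("sme_za_state") function; spelling them out
; by hand here is illustrative only:
;
;   char *buf = (char *)__builtin_alloca(__arm_sme_state_size());
;   __arm_sme_save(buf);         // before the call that may throw
;   try {
;     may_throw();
;     __arm_sme_restore(buf);    // normal return path
;   } catch (...) {
;     __arm_sme_restore(buf);    // must come first: saving again without this
;     __arm_sme_save(buf);       // would overwrite the still-live saved state
;     // ... __cxa_begin_catch / __cxa_end_catch, bracketed the same way ...
;     __arm_sme_restore(buf);
;   }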
declare void @may_throw() declare void @shared_za_call() "aarch64_inout_za" declare void @noexcept_shared_za_call() "aarch64_inout_za" +declare void @shared_zt0_call() "aarch64_inout_zt0" diff --git a/llvm/test/CodeGen/AArch64/strict-fp-opt.ll b/llvm/test/CodeGen/AArch64/strict-fp-opt.ll index bb7cd22..c433291 100644 --- a/llvm/test/CodeGen/AArch64/strict-fp-opt.ll +++ b/llvm/test/CodeGen/AArch64/strict-fp-opt.ll @@ -1,31 +1,40 @@ -; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s -; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 %s -o - | FileCheck %s - +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI + +; CHECK-GI: warning: Instruction selection used fallback path for unused_div_fpexcept_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for unused_div_round_dynamic +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for add_twice_fpexcept_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for add_twice_round_dynamic +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for set_rounding +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for set_rounding_fpexcept_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for set_rounding_round_dynamic ; Div whose result is unused should be removed unless we have strict exceptions -; CHECK-LABEL: unused_div: -; CHECK-NOT: fdiv -; CHECK: ret define void @unused_div(float %x, float %y) { +; CHECK-LABEL: unused_div: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ret entry: %add = fdiv float %x, %y ret void } -; CHECK-LABEL: unused_div_fpexcept_strict: -; CHECK: fdiv s0, s0, s1 -; CHECK-NEXT: ret define void @unused_div_fpexcept_strict(float %x, float %y) #0 { +; CHECK-LABEL: unused_div_fpexcept_strict: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fdiv s0, s0, s1 +; CHECK-NEXT: ret entry: %add = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret void } -; CHECK-LABEL: unused_div_round_dynamic: -; CHECK-NOT: fdiv -; CHECK: ret define void @unused_div_round_dynamic(float %x, float %y) #0 { +; CHECK-LABEL: unused_div_round_dynamic: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ret entry: %add = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 ret void @@ -33,14 +42,14 @@ entry: ; Machine CSE should eliminate the second add unless we have strict exceptions - -; CHECK-LABEL: add_twice: -; CHECK: fadd [[ADD:s[0-9]+]], s0, s1 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: fmul [[MUL:s[0-9]+]], [[ADD]], [[ADD]] -; CHECK-NEXT: fcsel s0, [[ADD]], [[MUL]], eq -; CHECK-NEXT: ret define float @add_twice(float %x, float %y, i32 %n) { +; CHECK-LABEL: add_twice: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: cmp w0, #0 +; CHECK-NEXT: fmul s1, s0, s0 +; CHECK-NEXT: fcsel s0, s0, s1, eq +; CHECK-NEXT: ret entry: %add = fadd float %x, %y %tobool.not = icmp eq i32 %n, 0 @@ -56,15 +65,17 @@ if.end: ret float %a.0 } -; CHECK-LABEL: add_twice_fpexcept_strict: -; CHECK: fmov [[X:s[0-9]+]], s0 -; CHECK-NEXT: fadd s0, s0, s1 -; CHECK-NEXT: cbz w0, [[LABEL:.LBB[0-9_]+]] -; CHECK: fadd [[ADD:s[0-9]+]], 
[[X]], s1 -; CHECK-NEXT: fmul s0, s0, [[ADD]] -; CHECK: [[LABEL]]: -; CHECK-NEXT: ret define float @add_twice_fpexcept_strict(float %x, float %y, i32 %n) #0 { +; CHECK-LABEL: add_twice_fpexcept_strict: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fmov s2, s0 +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: cbz w0, .LBB4_2 +; CHECK-NEXT: // %bb.1: // %if.then +; CHECK-NEXT: fadd s1, s2, s1 +; CHECK-NEXT: fmul s0, s0, s1 +; CHECK-NEXT: .LBB4_2: // %if.end +; CHECK-NEXT: ret entry: %add = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 %tobool.not = icmp eq i32 %n, 0 @@ -80,14 +91,15 @@ if.end: ret float %a.0 } -; CHECK-LABEL: add_twice_round_dynamic: -; CHECK: fadd s0, s0, s1 -; CHECK-NEXT: cbz w0, [[LABEL:.LBB[0-9_]+]] -; CHECK-NOT: fadd -; CHECK: fmul s0, s0, s0 -; CHECK: [[LABEL]]: -; CHECK-NEXT: ret define float @add_twice_round_dynamic(float %x, float %y, i32 %n) #0 { +; CHECK-LABEL: add_twice_round_dynamic: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: cbz w0, .LBB5_2 +; CHECK-NEXT: // %bb.1: // %if.then +; CHECK-NEXT: fmul s0, s0, s0 +; CHECK-NEXT: .LBB5_2: // %if.end +; CHECK-NEXT: ret entry: %add = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 %tobool.not = icmp eq i32 %n, 0 @@ -108,17 +120,18 @@ if.end: ; dynamic (as they may give different results) or when we have strict exceptions ; (the llvm.set.rounding is irrelevant, but both could trap). -; CHECK-LABEL: set_rounding: -; CHECK-DAG: fadd [[SREG:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG1:x[0-9]+]], FPCR -; CHECK-DAG: orr [[XREG2:x[0-9]+]], [[XREG1]], #0xc00000 -; CHECK: msr FPCR, [[XREG2]] -; CHECK-NEXT: mrs [[XREG3:x[0-9]+]], FPCR -; CHECK-NEXT: and [[XREG4:x[0-9]+]], [[XREG3]], #0xffffffffff3fffff -; CHECK-NEXT: msr FPCR, [[XREG4]] -; CHECK-NEXT: fsub s0, [[SREG]], [[SREG]] -; CHECK-NEXT: ret define float @set_rounding(float %x, float %y) { +; CHECK-LABEL: set_rounding: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: orr x8, x8, #0xc00000 +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: and x8, x8, #0xffffffffff3fffff +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fsub s0, s0, s0 +; CHECK-NEXT: ret entry: %add1 = fadd float %x, %y call void @llvm.set.rounding(i32 0) @@ -128,18 +141,19 @@ entry: ret float %sub } -; CHECK-LABEL: set_rounding_fpexcept_strict: -; CHECK-DAG: fadd [[SREG1:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG1:x[0-9]+]], FPCR -; CHECK-DAG: orr [[XREG2:x[0-9]+]], [[XREG1]], #0xc00000 -; CHECK: msr FPCR, [[XREG2]] -; CHECK-DAG: fadd [[SREG2:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG3:x[0-9]+]], FPCR -; CHECK-DAG: and [[XREG4:x[0-9]+]], [[XREG3]], #0xffffffffff3fffff -; CHECK-NEXT: msr FPCR, [[XREG4]] -; CHECK-NEXT: fsub s0, [[SREG1]], [[SREG2]] -; CHECK-NEXT: ret define float @set_rounding_fpexcept_strict(float %x, float %y) #0 { +; CHECK-LABEL: set_rounding_fpexcept_strict: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fadd s2, s0, s1 +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: orr x8, x8, #0xc00000 +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: and x8, x8, #0xffffffffff3fffff +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fsub s0, s2, s0 +; CHECK-NEXT: ret entry: %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") 
#0 call void @llvm.set.rounding(i32 0) #0 @@ -149,18 +163,19 @@ entry: ret float %sub } -; CHECK-LABEL: set_rounding_round_dynamic: -; CHECK-DAG: fadd [[SREG1:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG1:x[0-9]+]], FPCR -; CHECK-DAG: orr [[XREG2:x[0-9]+]], [[XREG1]], #0xc00000 -; CHECK: msr FPCR, [[XREG2]] -; CHECK-DAG: fadd [[SREG2:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG3:x[0-9]+]], FPCR -; CHECK-DAG: and [[XREG4:x[0-9]+]], [[XREG3]], #0xffffffffff3fffff -; CHECK-NEXT: msr FPCR, [[XREG4]] -; CHECK-NEXT: fsub s0, [[SREG1]], [[SREG2]] -; CHECK-NEXT: ret define float @set_rounding_round_dynamic(float %x, float %y) #0 { +; CHECK-LABEL: set_rounding_round_dynamic: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: fadd s2, s0, s1 +; CHECK-NEXT: orr x8, x8, #0xc00000 +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: and x8, x8, #0xffffffffff3fffff +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fsub s0, s2, s0 +; CHECK-NEXT: ret entry: %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 call void @llvm.set.rounding(i32 0) #0 @@ -178,3 +193,6 @@ declare i32 @llvm.get.rounding() declare void @llvm.set.rounding(i32) attributes #0 = { strictfp } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK-GI: {{.*}} +; CHECK-SD: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll index 62d41fc..19e1aa5 100644 --- a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll +++ b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll @@ -26,9 +26,9 @@ define i32 @reduce_and_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_and_v1i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.b[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.b[0] +; CHECK-NEXT: tst w8, #0x80 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i8> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x) @@ -120,9 +120,9 @@ define i32 @reduce_and_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_and_v1i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.h[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.h[0] +; CHECK-NEXT: tst w8, #0x8000 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i16> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x) @@ -305,9 +305,9 @@ define i32 @reduce_or_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_or_v1i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.b[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.b[0] +; CHECK-NEXT: tst w8, #0x80 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i8> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x) @@ -399,9 +399,9 @@ define i32 @reduce_or_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_or_v1i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.h[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.h[0] +; CHECK-NEXT: tst w8, #0x8000 +; CHECK-NEXT: csel w0, 
w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i16> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x) @@ -584,9 +584,9 @@ define i32 @reduce_xor_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_xor_v1i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.b[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.b[0] +; CHECK-NEXT: tst w8, #0x80 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i8> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x) @@ -679,9 +679,9 @@ define i32 @reduce_xor_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_xor_v1i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.h[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.h[0] +; CHECK-NEXT: tst w8, #0x8000 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i16> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x) diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll index 0490e5a..94ba5cd 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16.ll @@ -10908,12 +10908,13 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) { ; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v1, 0x7060302 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; ; GFX1250-LABEL: v_fadd_v2bf16: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v1 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v1 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %op = fadd <2 x bfloat> %a, %b ret <2 x bfloat> %op } @@ -11446,13 +11447,14 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) { ; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11FAKE16-NEXT: v_perm_b32 v1, v1, v4, 0x7060302 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; ; GFX1250-LABEL: v_fadd_v4bf16: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v2 -; GFX1250-NEXT: v_pk_add_bf16 v1, v1, v3 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v2 +; GFX1250-NEXT: v_pk_add_bf16 v1, v1, v3 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %op = fadd <4 x bfloat> %a, %b ret <4 x bfloat> %op } @@ -49991,6 +49993,622 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> ret <4 x bfloat> %op } +define <8 x bfloat> @v_fma_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) { +; GCN-LABEL: v_fma_v8bf16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: 
v_mul_f32_e32 v3, 1.0, v3 +; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_fma_f32 v7, v7, v15, v23 +; GCN-NEXT: v_fma_f32 v6, v6, v14, v22 +; GCN-NEXT: v_fma_f32 v5, v5, v13, v21 +; GCN-NEXT: v_fma_f32 v4, v4, v12, v20 +; GCN-NEXT: v_fma_f32 v3, v3, v11, v19 +; GCN-NEXT: v_fma_f32 v2, v2, v10, v18 +; GCN-NEXT: v_fma_f32 v1, v1, v9, v17 +; GCN-NEXT: v_fma_f32 v0, v0, v8, v16 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_fma_v8bf16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: v_fma_f32 v7, v7, v15, v23 +; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_fma_f32 v6, v6, v14, v15 +; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_fma_f32 v5, v5, v13, v14 +; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_and_b32_e32 
v12, 0xffff0000, v12 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_fma_f32 v4, v4, v12, v13 +; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v19 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_fma_f32 v3, v3, v11, v12 +; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_fma_f32 v2, v2, v10, v11 +; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v17 +; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v16 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_fma_f32 v1, v1, v9, v11 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v10 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_fma_f32 v0, v0, v8, v9 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_fma_v8bf16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX8-NEXT: v_fma_f32 v12, v14, v13, v12 +; GFX8-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v13, vcc, v13, v12 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff +; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX8-NEXT: v_add_u32_e32 v13, vcc, s4, v13 +; GFX8-NEXT: v_fma_f32 v3, v3, v7, v11 +; GFX8-NEXT: v_or_b32_e32 v14, 0x400000, v12 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX8-NEXT: v_bfe_u32 v7, v3, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v12, v13, v14, vcc +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v3 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v11, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v6 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v2 +; GFX8-NEXT: v_fma_f32 v7, v13, v11, v7 +; GFX8-NEXT: v_bfe_u32 v11, v7, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v11, vcc, v11, v7 +; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX8-NEXT: v_add_u32_e32 v11, vcc, s4, v11 +; GFX8-NEXT: v_fma_f32 v2, v2, v6, v10 +; GFX8-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX8-NEXT: v_bfe_u32 v6, v2, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v7, v11, v13, vcc +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v2 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, s4, v6 +; GFX8-NEXT: 
v_or_b32_e32 v10, 0x400000, v2 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v10, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; GFX8-NEXT: v_fma_f32 v6, v11, v10, v6 +; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6 +; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, s4, v10 +; GFX8-NEXT: v_fma_f32 v1, v1, v5, v9 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_bfe_u32 v5, v1, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v1 +; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5 +; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v1 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v8 +; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v0 +; GFX8-NEXT: v_fma_f32 v5, v10, v9, v5 +; GFX8-NEXT: v_bfe_u32 v9, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v9, vcc, v9, v5 +; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9 +; GFX8-NEXT: v_fma_f32 v0, v0, v4, v8 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4 +; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v0 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX8-NEXT: v_alignbit_b32 v0, v0, v5, 16 +; GFX8-NEXT: v_alignbit_b32 v1, v1, v6, 16 +; GFX8-NEXT: v_alignbit_b32 v2, v2, v7, 16 +; GFX8-NEXT: v_alignbit_b32 v3, v3, v12, 16 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX900-LABEL: v_fma_v8bf16: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX900-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX900-NEXT: v_fma_f32 v12, v14, v13, v12 +; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX900-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: v_fma_f32 v3, v3, v7, v11 +; GFX900-NEXT: v_add3_u32 v13, v13, v12, s4 +; GFX900-NEXT: v_or_b32_e32 v14, 0x400000, v12 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX900-NEXT: v_bfe_u32 v7, v3, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v12, v13, v14, vcc +; GFX900-NEXT: v_add3_u32 v7, v7, v3, s4 +; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v3 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX900-NEXT: v_cndmask_b32_e32 v3, v7, v11, vcc +; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v6 +; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v2 +; GFX900-NEXT: v_fma_f32 v7, v13, v11, v7 +; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX900-NEXT: v_and_b32_e32 
v2, 0xffff0000, v2 +; GFX900-NEXT: v_bfe_u32 v11, v7, 16, 1 +; GFX900-NEXT: v_fma_f32 v2, v2, v6, v10 +; GFX900-NEXT: v_add3_u32 v11, v11, v7, s4 +; GFX900-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX900-NEXT: v_bfe_u32 v6, v2, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v7, v11, v13, vcc +; GFX900-NEXT: v_add3_u32 v6, v6, v2, s4 +; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v2 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX900-NEXT: v_cndmask_b32_e32 v2, v6, v10, vcc +; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v5 +; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; GFX900-NEXT: v_fma_f32 v6, v11, v10, v6 +; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX900-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX900-NEXT: v_fma_f32 v1, v1, v5, v9 +; GFX900-NEXT: v_add3_u32 v10, v10, v6, s4 +; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX900-NEXT: v_bfe_u32 v5, v1, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX900-NEXT: v_add3_u32 v5, v5, v1, s4 +; GFX900-NEXT: v_or_b32_e32 v9, 0x400000, v1 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX900-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc +; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v8 +; GFX900-NEXT: v_lshlrev_b32_e32 v9, 16, v4 +; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v0 +; GFX900-NEXT: v_fma_f32 v5, v10, v9, v5 +; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX900-NEXT: v_bfe_u32 v9, v5, 16, 1 +; GFX900-NEXT: v_fma_f32 v0, v0, v4, v8 +; GFX900-NEXT: v_add3_u32 v9, v9, v5, s4 +; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v5 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX900-NEXT: v_bfe_u32 v4, v0, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc +; GFX900-NEXT: v_add3_u32 v4, v4, v0, s4 +; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v0 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX900-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc +; GFX900-NEXT: s_mov_b32 s4, 0x7060302 +; GFX900-NEXT: v_perm_b32 v0, v0, v5, s4 +; GFX900-NEXT: v_perm_b32 v1, v1, v6, s4 +; GFX900-NEXT: v_perm_b32 v2, v2, v7, s4 +; GFX900-NEXT: v_perm_b32 v3, v3, v12, s4 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fma_v8bf16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v11 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v7 +; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v3 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX950-NEXT: v_fmac_f32_e32 v12, v14, v13 +; GFX950-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v10 +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v6 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v2 +; GFX950-NEXT: v_fmac_f32_e32 v3, v13, v7 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX950-NEXT: v_fmac_f32_e32 v7, v2, v6 +; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v9 +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 +; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v1 +; GFX950-NEXT: v_fmac_f32_e32 v2, v10, v6 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX950-NEXT: 
v_lshlrev_b32_e32 v1, 16, v1 +; GFX950-NEXT: v_fmac_f32_e32 v6, v1, v5 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v0 +; GFX950-NEXT: v_fmac_f32_e32 v1, v9, v5 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v8 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX950-NEXT: v_fmac_f32_e32 v5, v0, v4 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v5, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v6, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v7, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v11, v12 +; GFX950-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_fma_v8bf16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX10-NEXT: v_fmac_f32_e32 v12, v14, v13 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v2 +; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX10-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v10 +; GFX10-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v12 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX10-NEXT: v_add3_u32 v13, v13, v12, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v3, v14, v7 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v10 +; GFX10-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v1 +; GFX10-NEXT: v_cndmask_b32_e32 v10, v13, v15, vcc_lo +; GFX10-NEXT: v_bfe_u32 v13, v3, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v7, v2, v6 +; GFX10-NEXT: v_add3_u32 v12, v16, v11, 0x7fff +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v9 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v5 +; GFX10-NEXT: v_add3_u32 v13, v13, v3, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v3 +; GFX10-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_fmac_f32_e32 v2, v14, v6 +; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX10-NEXT: v_add3_u32 v6, v16, v7, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v3, v13, v15, vcc_lo +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v8 +; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v4 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX10-NEXT: v_bfe_u32 v14, v2, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX10-NEXT: v_fmac_f32_e32 v9, v1, v5 +; GFX10-NEXT: v_fmac_f32_e32 v15, v18, v16 +; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v2 +; GFX10-NEXT: v_fmac_f32_e32 v8, v0, v4 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v6, v13, vcc_lo +; GFX10-NEXT: v_add3_u32 v0, v14, v2, 0x7fff +; GFX10-NEXT: v_bfe_u32 v4, v9, 16, 1 +; GFX10-NEXT: v_bfe_u32 v5, v15, 16, 1 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX10-NEXT: v_bfe_u32 v7, v8, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v9 +; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX10-NEXT: v_add3_u32 v2, v5, v15, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo +; GFX10-NEXT: v_add3_u32 v0, v4, v9, 0x7fff +; GFX10-NEXT: 
v_or_b32_e32 v4, 0x400000, v15 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX10-NEXT: v_add3_u32 v5, v7, v8, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v8 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX10-NEXT: v_cndmask_b32_e32 v5, v0, v13, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX10-NEXT: v_perm_b32 v0, v4, v2, 0x7060302 +; GFX10-NEXT: v_perm_b32 v2, v6, v3, 0x7060302 +; GFX10-NEXT: v_perm_b32 v1, v5, v1, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v7, v12, v17, vcc_lo +; GFX10-NEXT: v_perm_b32 v3, v7, v10, 0x7060302 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11TRUE16-LABEL: v_fma_v8bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v11 +; GFX11TRUE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v7 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v7, v2, v6 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v12, v14, v13 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v11 +; GFX11TRUE16-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v12 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_add3_u32 v13, v13, v12, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v1 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v10 +; GFX11TRUE16-NEXT: v_bfe_u32 v10, v11, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX11TRUE16-NEXT: v_bfe_u32 v13, v7, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v14, v16, v15 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v10, v11, 0x7fff +; GFX11TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v5 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX11TRUE16-NEXT: v_bfe_u32 v15, v14, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v2, v6, vcc_lo +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_add3_u32 v10, v15, v14, 0x7fff +; GFX11TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v9 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX11TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v14 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v4 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v6.h +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v9, v1, v5 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v10, v12, vcc_lo +; 
GFX11TRUE16-NEXT: v_add3_u32 v10, v13, v7, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX11TRUE16-NEXT: v_bfe_u32 v7, v9, 16, 1 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v13, 16, v8 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v4, v10, v11 :: v_dual_and_b32 v5, 0xffff0000, v8 +; GFX11TRUE16-NEXT: v_add3_u32 v7, v7, v9, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc_lo +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v15, v17, v16 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_bfe_u32 v12, v15, 16, 1 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v13, v16, v14 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v5, v0, v1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v15 +; GFX11TRUE16-NEXT: v_add3_u32 v8, v12, v15, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v13, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v13 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13 +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v13, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v9, v11, v5, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v0, v12, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v7.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v9, v10, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.h +; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11FAKE16-LABEL: v_fma_v8bf16: +; GFX11FAKE16: ; %bb.0: +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v12, v14, v13 :: v_dual_and_b32 v3, 0xffff0000, v3 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v2 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX11FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v12 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11FAKE16-NEXT: v_add3_u32 v13, v13, v12, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v10 +; GFX11FAKE16-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX11FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | 
instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v3, v14, v7 +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v10, v13, v15 :: v_dual_and_b32 v7, 0xffff0000, v10 +; GFX11FAKE16-NEXT: v_add3_u32 v12, v16, v11, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v1 +; GFX11FAKE16-NEXT: v_bfe_u32 v13, v3, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v3 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_add3_u32 v13, v13, v3, 0x7fff +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v3, v13, v15 :: v_dual_and_b32 v2, 0xffff0000, v2 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v7, v2, v6 :: v_dual_lshlrev_b32 v6, 16, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v8 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v9 +; GFX11FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX11FAKE16-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v14, v6 :: v_dual_and_b32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX11FAKE16-NEXT: v_add3_u32 v6, v16, v7, 0x7fff +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v9, v1, v5 :: v_dual_and_b32 v8, 0xffff0000, v8 +; GFX11FAKE16-NEXT: v_bfe_u32 v14, v2, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v2 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v13, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v16, 16, v4 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v9 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v8, v0, v4 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v14, v2, 0x7fff +; GFX11FAKE16-NEXT: v_bfe_u32 v4, v9, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v15, v18, v16 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_add3_u32 v0, v4, v9, 0x7fff +; GFX11FAKE16-NEXT: v_bfe_u32 v5, v15, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v15 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_add3_u32 v2, v5, v15, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v5, v7, v8, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v5, v0, v13, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX11FAKE16-NEXT: v_perm_b32 v0, v4, v2, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v2, v6, v3, 0x7060302 +; GFX11FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_perm_b32 v1, v5, v1, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v7, v12, v17, vcc_lo +; GFX11FAKE16-NEXT: v_perm_b32 v3, v7, v10, 0x7060302 +; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; ; GFX1250-LABEL: v_fma_v8bf16: ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 @@ -50000,11 +50618,1239 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> ; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v6, v10 ; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v7, v11 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] -define <8 x bfloat> @v_fma_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) { %op = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) ret <8 x bfloat> %op } +define <16 x bfloat> @v_fma_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c) { +; GCN-LABEL: v_fma_v16bf16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: v_fma_f32 v15, v15, v31, v32 +; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60 +; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: v_fma_f32 v14, v14, v30, v31 +; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GCN-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:56 +; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: v_fma_f32 v13, v13, v29, v30 +; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GCN-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52 +; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: v_fma_f32 v12, v12, v28, v29 +; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GCN-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:48 +; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: v_fma_f32 v11, v11, v27, v28 +; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GCN-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:44 +; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: v_fma_f32 v10, v10, v26, v27 +; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; 
GCN-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:40 +; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: v_fma_f32 v9, v9, v25, v26 +; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GCN-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:36 +; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: v_fma_f32 v8, v8, v24, v25 +; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GCN-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:32 +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: v_fma_f32 v7, v7, v23, v24 +; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GCN-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28 +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_fma_f32 v6, v6, v22, v23 +; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GCN-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:24 +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_fma_f32 v5, v5, v21, v22 +; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GCN-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:20 +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_fma_f32 v4, v4, v20, v21 +; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:16 +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_fma_f32 v3, v3, v19, v20 +; GCN-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:12 +; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:4 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_fma_f32 v2, v2, v18, v19 +; GCN-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:8 +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v16, 
0xffff0000, v16 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v20 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_fma_f32 v1, v1, v17, v18 +; GCN-NEXT: v_fma_f32 v0, v0, v16, v19 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_fma_v16bf16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GFX7-NEXT: 
v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: v_fma_f32 v15, v15, v31, v32 +; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: v_fma_f32 v14, v14, v30, v31 +; GFX7-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:56 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: v_fma_f32 v13, v13, v29, v30 +; GFX7-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: v_fma_f32 v12, v12, v28, v29 +; GFX7-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:48 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: v_fma_f32 v11, v11, v27, v28 +; GFX7-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:44 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: v_fma_f32 v10, v10, v26, v27 +; GFX7-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:40 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: v_fma_f32 v9, v9, v25, v26 +; GFX7-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:36 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: v_fma_f32 v8, v8, v24, v25 +; GFX7-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:32 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: v_fma_f32 v7, v7, v23, v24 +; GFX7-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_fma_f32 v6, v6, v22, v23 +; GFX7-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:24 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: s_waitcnt 
vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: v_fma_f32 v5, v5, v21, v22 +; GFX7-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:20 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: v_fma_f32 v4, v4, v20, v21 +; GFX7-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:16 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: v_fma_f32 v3, v3, v19, v20 +; GFX7-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:12 +; GFX7-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:4 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: v_fma_f32 v2, v2, v18, v19 +; GFX7-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:8 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: v_fma_f32 v1, v1, v17, v18 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v19 +; GFX7-NEXT: v_fma_f32 v0, v0, v16, v17 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_fma_v16bf16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX8-NEXT: v_fma_f32 v24, v26, v25, v24 +; GFX8-NEXT: v_fma_f32 v7, v7, v15, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v15, 16, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v14 +; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v6 +; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX8-NEXT: v_fma_f32 v15, v25, v23, v15 +; GFX8-NEXT: v_fma_f32 v6, v6, v14, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v13 +; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v5 +; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX8-NEXT: v_fma_f32 v14, v23, v22, v14 +; GFX8-NEXT: v_fma_f32 v5, v5, v13, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v12 +; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v4 +; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX8-NEXT: v_fma_f32 v13, v22, v21, v13 +; GFX8-NEXT: v_fma_f32 v4, v4, v12, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v19 +; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v11 +; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v3 +; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX8-NEXT: v_fma_f32 v12, v21, v20, v12 +; GFX8-NEXT: v_fma_f32 v3, v3, v11, v19 +; GFX8-NEXT: v_lshlrev_b32_e32 
v11, 16, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v10 +; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v2 +; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX8-NEXT: v_fma_f32 v11, v20, v19, v11 +; GFX8-NEXT: v_fma_f32 v2, v2, v10, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v17 +; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX8-NEXT: v_fma_f32 v10, v19, v18, v10 +; GFX8-NEXT: v_fma_f32 v1, v1, v9, v17 +; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v16 +; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v8 +; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX8-NEXT: v_fma_f32 v0, v0, v8, v16 +; GFX8-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v24 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff +; GFX8-NEXT: v_add_u32_e32 v8, vcc, s4, v8 +; GFX8-NEXT: v_or_b32_e32 v16, 0x400000, v24 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX8-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc +; GFX8-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v7 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_fma_f32 v9, v18, v17, v9 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v7 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX8-NEXT: v_cndmask_b32_e32 v7, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v15, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v15 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v15 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX8-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v6 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v6 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v14, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v14 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v14 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX8-NEXT: v_cndmask_b32_e32 v14, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v5 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v5 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v13, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v13 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v13 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX8-NEXT: v_cndmask_b32_e32 v13, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v4, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v4 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v4 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX8-NEXT: v_cndmask_b32_e32 v4, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v12, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v12 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v12 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX8-NEXT: v_cndmask_b32_e32 v12, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v3, 16, 
1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v3 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v11 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX8-NEXT: v_cndmask_b32_e32 v11, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v2, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v2 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v2 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX8-NEXT: v_cndmask_b32_e32 v2, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v10, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v10 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v10 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX8-NEXT: v_cndmask_b32_e32 v10, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v1, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v1 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v9, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v9 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v9 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX8-NEXT: v_cndmask_b32_e32 v9, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v0, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v0 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v0 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX8-NEXT: v_alignbit_b32 v0, v0, v9, 16 +; GFX8-NEXT: v_alignbit_b32 v1, v1, v10, 16 +; GFX8-NEXT: v_alignbit_b32 v2, v2, v11, 16 +; GFX8-NEXT: v_alignbit_b32 v3, v3, v12, 16 +; GFX8-NEXT: v_alignbit_b32 v4, v4, v13, 16 +; GFX8-NEXT: v_alignbit_b32 v5, v5, v14, 16 +; GFX8-NEXT: v_alignbit_b32 v6, v6, v15, 16 +; GFX8-NEXT: v_alignbit_b32 v7, v7, v8, 16 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX900-LABEL: v_fma_v16bf16: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX900-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX900-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX900-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX900-NEXT: v_fma_f32 v24, v26, v25, v24 +; GFX900-NEXT: v_fma_f32 v7, v7, v15, v23 +; GFX900-NEXT: v_lshlrev_b32_e32 v15, 16, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v14 +; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v6 +; GFX900-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX900-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX900-NEXT: v_fma_f32 v15, v25, v23, v15 +; GFX900-NEXT: v_fma_f32 v6, v6, v14, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v14, 16, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v13 +; 
GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v5 +; GFX900-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX900-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX900-NEXT: v_fma_f32 v14, v23, v22, v14 +; GFX900-NEXT: v_fma_f32 v5, v5, v13, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v12 +; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v4 +; GFX900-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX900-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX900-NEXT: v_fma_f32 v13, v22, v21, v13 +; GFX900-NEXT: v_fma_f32 v4, v4, v12, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v12, 16, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v11 +; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v3 +; GFX900-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX900-NEXT: v_fma_f32 v12, v21, v20, v12 +; GFX900-NEXT: v_fma_f32 v3, v3, v11, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v10 +; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v2 +; GFX900-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX900-NEXT: v_fma_f32 v11, v20, v19, v11 +; GFX900-NEXT: v_fma_f32 v2, v2, v10, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v17 +; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v9 +; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; GFX900-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX900-NEXT: v_fma_f32 v10, v19, v18, v10 +; GFX900-NEXT: v_fma_f32 v1, v1, v9, v17 +; GFX900-NEXT: v_lshlrev_b32_e32 v9, 16, v16 +; GFX900-NEXT: v_lshlrev_b32_e32 v17, 16, v8 +; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX900-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX900-NEXT: v_fma_f32 v0, v0, v8, v16 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX900-NEXT: v_add3_u32 v8, v8, v24, s4 +; GFX900-NEXT: v_or_b32_e32 v16, 0x400000, v24 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX900-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc +; GFX900-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX900-NEXT: v_fma_f32 v9, v18, v17, v9 +; GFX900-NEXT: v_add3_u32 v16, v16, v7, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v7 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX900-NEXT: v_cndmask_b32_e32 v7, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v15, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v15, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v15 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX900-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v6, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v6, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v6 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX900-NEXT: v_cndmask_b32_e32 v6, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v14, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v14, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v14 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX900-NEXT: v_cndmask_b32_e32 v14, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v5, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v5, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v5 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX900-NEXT: 
v_cndmask_b32_e32 v5, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v13, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v13, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v13 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX900-NEXT: v_cndmask_b32_e32 v13, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v4, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v4, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX900-NEXT: v_cndmask_b32_e32 v4, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v12, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v12, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v12 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX900-NEXT: v_cndmask_b32_e32 v12, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v3, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v3, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v3 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX900-NEXT: v_cndmask_b32_e32 v3, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v11, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX900-NEXT: v_cndmask_b32_e32 v11, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v2, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v2, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v2 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX900-NEXT: v_cndmask_b32_e32 v2, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v10, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v10, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v10 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX900-NEXT: v_cndmask_b32_e32 v10, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v1, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v1, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v1 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX900-NEXT: v_cndmask_b32_e32 v1, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v9, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v9, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v9 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX900-NEXT: v_cndmask_b32_e32 v9, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v0, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v0, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v0 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX900-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc +; GFX900-NEXT: s_mov_b32 s4, 0x7060302 +; GFX900-NEXT: v_perm_b32 v0, v0, v9, s4 +; GFX900-NEXT: v_perm_b32 v1, v1, v10, s4 +; GFX900-NEXT: v_perm_b32 v2, v2, v11, s4 +; GFX900-NEXT: v_perm_b32 v3, v3, v12, s4 +; GFX900-NEXT: v_perm_b32 v4, v4, v13, s4 +; GFX900-NEXT: v_perm_b32 v5, v5, v14, s4 +; GFX900-NEXT: v_perm_b32 v6, v6, v15, s4 +; GFX900-NEXT: v_perm_b32 v7, v7, v8, s4 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fma_v16bf16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v24, 0xffff0000, v23 +; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v15 +; GFX950-NEXT: v_and_b32_e32 v26, 0xffff0000, v7 +; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX950-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX950-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX950-NEXT: v_fmac_f32_e32 v23, v7, v15 +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v22 +; GFX950-NEXT: v_and_b32_e32 v15, 0xffff0000, v14 +; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v6 +; GFX950-NEXT: v_fmac_f32_e32 v7, v25, v15 +; GFX950-NEXT: v_lshlrev_b32_e32 v15, 16, v22 +; GFX950-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; GFX950-NEXT: 
v_lshlrev_b32_e32 v6, 16, v6 +; GFX950-NEXT: v_fmac_f32_e32 v15, v6, v14 +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v21 +; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v13 +; GFX950-NEXT: v_and_b32_e32 v22, 0xffff0000, v5 +; GFX950-NEXT: v_fmac_f32_e32 v6, v22, v14 +; GFX950-NEXT: v_lshlrev_b32_e32 v14, 16, v21 +; GFX950-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX950-NEXT: v_fmac_f32_e32 v14, v5, v13 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v20 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v12 +; GFX950-NEXT: v_and_b32_e32 v21, 0xffff0000, v4 +; GFX950-NEXT: v_fmac_f32_e32 v5, v21, v13 +; GFX950-NEXT: v_lshlrev_b32_e32 v13, 16, v20 +; GFX950-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX950-NEXT: v_fmac_f32_e32 v13, v4, v12 +; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v19 +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v11 +; GFX950-NEXT: v_and_b32_e32 v20, 0xffff0000, v3 +; GFX950-NEXT: v_fmac_f32_e32 v4, v20, v12 +; GFX950-NEXT: v_lshlrev_b32_e32 v12, 16, v19 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX950-NEXT: v_fmac_f32_e32 v12, v3, v11 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v18 +; GFX950-NEXT: v_and_b32_e32 v11, 0xffff0000, v10 +; GFX950-NEXT: v_and_b32_e32 v19, 0xffff0000, v2 +; GFX950-NEXT: v_fmac_f32_e32 v3, v19, v11 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v18 +; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX950-NEXT: v_fmac_f32_e32 v11, v2, v10 +; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v17 +; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v9 +; GFX950-NEXT: v_and_b32_e32 v18, 0xffff0000, v1 +; GFX950-NEXT: v_fmac_f32_e32 v2, v18, v10 +; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v17 +; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX950-NEXT: v_fmac_f32_e32 v10, v1, v9 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v16 +; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v8 +; GFX950-NEXT: v_and_b32_e32 v17, 0xffff0000, v0 +; GFX950-NEXT: v_fmac_f32_e32 v1, v17, v9 +; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v16 +; GFX950-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX950-NEXT: v_fmac_f32_e32 v9, v0, v8 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v9, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v10, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v11, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v12, v4 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v13, v5 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v5, v14, v6 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v15, v7 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v7, v23, v24 +; GFX950-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_fma_v16bf16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX10-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v6 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX10-NEXT: v_fmac_f32_e32 v23, v7, v15 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v22 +; GFX10-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v14 +; GFX10-NEXT: v_or_b32_e32 v27, 0x400000, v24 +; GFX10-NEXT: v_bfe_u32 v28, v23, 16, 1 +; GFX10-NEXT: 
v_and_b32_e32 v14, 0xffff0000, v14 +; GFX10-NEXT: v_add3_u32 v25, v25, v24, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v7, v26, v15 +; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v22 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX10-NEXT: v_add3_u32 v24, v28, v23, 0x7fff +; GFX10-NEXT: v_bfe_u32 v26, v7, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v15, v6, v14 +; GFX10-NEXT: v_cndmask_b32_e32 v22, v25, v27, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v21 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v13 +; GFX10-NEXT: v_lshlrev_b32_e32 v27, 16, v5 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23 +; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX10-NEXT: v_fmac_f32_e32 v6, v27, v14 +; GFX10-NEXT: v_cndmask_b32_e32 v23, v24, v25, vcc_lo +; GFX10-NEXT: v_add3_u32 v24, v26, v7, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v7 +; GFX10-NEXT: v_bfe_u32 v26, v15, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v21 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX10-NEXT: v_add3_u32 v21, v26, v15, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v14, v5, v13 +; GFX10-NEXT: v_cndmask_b32_e32 v7, v24, v25, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v15 +; GFX10-NEXT: v_bfe_u32 v25, v6, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v20 +; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v12 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v4 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX10-NEXT: v_fmac_f32_e32 v5, v26, v13 +; GFX10-NEXT: v_cndmask_b32_e32 v15, v21, v24, vcc_lo +; GFX10-NEXT: v_add3_u32 v21, v25, v6, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v6 +; GFX10-NEXT: v_bfe_u32 v25, v14, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v20 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v2 +; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX10-NEXT: v_add3_u32 v20, v25, v14, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v13, v4, v12 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v21, v24, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v14 +; GFX10-NEXT: v_bfe_u32 v24, v5, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v19 +; GFX10-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v3 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX10-NEXT: v_fmac_f32_e32 v4, v25, v12 +; GFX10-NEXT: v_cndmask_b32_e32 v14, v20, v21, vcc_lo +; GFX10-NEXT: v_add3_u32 v20, v24, v5, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v5 +; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v19 +; GFX10-NEXT: v_lshlrev_b32_e32 v19, 16, v18 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v10 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX10-NEXT: v_bfe_u32 v24, v13, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v12, v3, v11 +; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX10-NEXT: v_fmac_f32_e32 v19, v26, v25 +; GFX10-NEXT: v_cndmask_b32_e32 v5, v20, v21, vcc_lo +; GFX10-NEXT: v_bfe_u32 v20, v4, 16, 1 +; GFX10-NEXT: v_add3_u32 v21, v24, v13, 0x7fff +; GFX10-NEXT: v_bfe_u32 v24, v12, 16, 1 +; GFX10-NEXT: v_bfe_u32 v25, v19, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX10-NEXT: v_add3_u32 v11, v20, v4, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v4 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX10-NEXT: v_or_b32_e32 v26, 0x400000, v19 +; GFX10-NEXT: 
v_fmac_f32_e32 v18, v2, v10 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v17 +; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v9 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v11, v20, vcc_lo +; GFX10-NEXT: v_add3_u32 v11, v24, v12, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v12 +; GFX10-NEXT: v_add3_u32 v24, v25, v19, 0x7fff +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX10-NEXT: v_fmac_f32_e32 v2, v25, v10 +; GFX10-NEXT: v_cndmask_b32_e32 v11, v11, v20, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v8 +; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX10-NEXT: v_bfe_u32 v20, v2, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v17, v1, v9 +; GFX10-NEXT: v_cndmask_b32_e32 v10, v24, v26, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v16 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v0 +; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX10-NEXT: v_add3_u32 v1, v20, v2, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v2 +; GFX10-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX10-NEXT: v_fmac_f32_e32 v16, v0, v8 +; GFX10-NEXT: v_bfe_u32 v0, v17, 16, 1 +; GFX10-NEXT: v_bfe_u32 v27, v18, 16, 1 +; GFX10-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v17 +; GFX10-NEXT: v_add3_u32 v0, v0, v17, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 +; GFX10-NEXT: v_bfe_u32 v2, v16, 16, 1 +; GFX10-NEXT: v_add3_u32 v8, v8, v24, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v24 +; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v16 +; GFX10-NEXT: v_cndmask_b32_e32 v9, v0, v9, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX10-NEXT: v_add3_u32 v2, v2, v16, 0x7fff +; GFX10-NEXT: v_add3_u32 v12, v27, v18, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v18 +; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v13 +; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v20, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16 +; GFX10-NEXT: v_perm_b32 v1, v9, v1, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v25, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 +; GFX10-NEXT: v_perm_b32 v0, v2, v0, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v8, v12, v19, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13 +; GFX10-NEXT: v_perm_b32 v2, v8, v10, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v12, v21, v3, vcc_lo +; GFX10-NEXT: v_perm_b32 v3, v11, v4, 0x7060302 +; GFX10-NEXT: v_perm_b32 v4, v12, v5, 0x7060302 +; GFX10-NEXT: v_perm_b32 v5, v14, v6, 0x7060302 +; GFX10-NEXT: v_perm_b32 v6, v15, v7, 0x7060302 +; GFX10-NEXT: v_perm_b32 v7, v23, v22, 0x7060302 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11TRUE16-LABEL: v_fma_v16bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v23 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v14 +; GFX11TRUE16-NEXT: v_and_b32_e32 v28, 0xffff0000, v6 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v15 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | 
instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v24, v26, v25 :: v_dual_lshlrev_b32 v7, 16, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v22 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX11TRUE16-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v26, v28, v27 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v22, v6, v14 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v21 +; GFX11TRUE16-NEXT: v_and_b32_e32 v28, 0xffff0000, v13 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX11TRUE16-NEXT: v_add3_u32 v25, v25, v24, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v24 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v23, v7, v15 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v25, v29, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v29, 0xffff0000, v5 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfe_u32 v15, v23, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v24, v26, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v23 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23 +; GFX11TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v26 +; GFX11TRUE16-NEXT: v_add3_u32 v15, v15, v23, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v24, v24, v26, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v22, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v14, v29, v28 :: v_dual_cndmask_b32 v15, v15, v25 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26 +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v12 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v4 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v24, v27, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v20 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX11TRUE16-NEXT: v_add3_u32 v23, v23, v22, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v22 +; GFX11TRUE16-NEXT: v_bfe_u32 v28, v14, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v20, v4, v12 +; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v19 +; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v11 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v14 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, v15.h +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v21, v5, v13 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v13, v23, v27, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v5, v28, v14, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX11TRUE16-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v21, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v27, v20, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v24 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v22, vcc_lo +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v21 +; GFX11TRUE16-NEXT: v_add3_u32 v14, v23, v21, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 +; GFX11TRUE16-NEXT: v_add3_u32 v23, v25, v24, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v21, v27, v20, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v6.l, v13.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v14, v14, v22, vcc_lo +; 
GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v20 +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v18 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, v14.h +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v12, v25, v4 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v4, v23, v26, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v10 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v12, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v20, v21, v22 :: v_dual_and_b32 v25, 0xffff0000, v1 +; GFX11TRUE16-NEXT: v_add3_u32 v21, v23, v12, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v24, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v12 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, v20.h +; GFX11TRUE16-NEXT: v_add3_u32 v12, v23, v24, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v23, 0xffff0000, v9 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v19, v3, v11 +; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v3, v21, v22 :: v_dual_and_b32 v22, 0xffff0000, v17 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v18 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX11TRUE16-NEXT: v_bfe_u32 v18, v19, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v22, v25, v23 :: v_dual_fmac_f32 v11, v2, v10 +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v19 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v18, v19, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v8 +; GFX11TRUE16-NEXT: v_bfe_u32 v21, v11, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v17, v1, v9 :: v_dual_cndmask_b32 v10, v2, v10 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v16 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v12, v18, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v12, v21, v11, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v16 +; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v11 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX11TRUE16-NEXT: v_bfe_u32 v11, v17, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v21, v24, v23 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v9, v0, v1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v8, v12, v18, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v11, v11, v17, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v17 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v21, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 +; GFX11TRUE16-NEXT: v_add3_u32 v12, v19, 
v22, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v18, v9, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v21 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v21, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v11, v16, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v22 +; GFX11TRUE16-NEXT: v_add3_u32 v16, v18, v9, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v9 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v8.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v18, v0, v19, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v12, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v11.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v18.h +; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11FAKE16-LABEL: v_fma_v16bf16: +; GFX11FAKE16: ; %bb.0: +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX11FAKE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v24, v26, v25 :: v_dual_and_b32 v23, 0xffff0000, v23 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v23, v7, v15 :: v_dual_lshlrev_b32 v26, 16, v6 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v14 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v24 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11FAKE16-NEXT: v_bfe_u32 v28, v23, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX11FAKE16-NEXT: v_add3_u32 v25, v25, v24, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v22 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_add3_u32 v24, v28, v23, 0x7fff +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v7, v26, v15 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v22, v25, v27 :: v_dual_and_b32 v15, 0xffff0000, v22 +; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v23 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23 +; GFX11FAKE16-NEXT: v_bfe_u32 v26, v7, 16, 1 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v27, 16, v5 +; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v23, v24, v25, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_add3_u32 v24, v26, v7, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v7 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v7, v24, v25 :: v_dual_and_b32 v6, 0xffff0000, v6 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v15, v6, v14 :: v_dual_lshlrev_b32 v14, 16, v13 +; GFX11FAKE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v15 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v21 +; GFX11FAKE16-NEXT: v_bfe_u32 v26, v15, 16, 1 
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v6, v27, v14 +; GFX11FAKE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v21 +; GFX11FAKE16-NEXT: v_add3_u32 v21, v26, v15, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v4 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v6, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v14, v5, v13 :: v_dual_lshlrev_b32 v5, 16, v20 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v13, 16, v12 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v15, v21, v24, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_add3_u32 v21, v25, v6, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v6 +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v14, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v5, v26, v13 :: v_dual_and_b32 v12, 0xffff0000, v12 +; GFX11FAKE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v20 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v2 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: v_add3_u32 v20, v25, v14, 0x7fff +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v6, v21, v24 :: v_dual_lshlrev_b32 v25, 16, v3 +; GFX11FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v14 +; GFX11FAKE16-NEXT: v_bfe_u32 v24, v5, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v13, v4, v12 :: v_dual_lshlrev_b32 v4, 16, v19 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v4, v25, v12 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v14, v20, v21, vcc_lo +; GFX11FAKE16-NEXT: v_add3_u32 v20, v24, v5, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v5 +; GFX11FAKE16-NEXT: v_bfe_u32 v24, v13, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v19 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v10 +; GFX11FAKE16-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v12, v3, v11 :: v_dual_cndmask_b32 v5, v20, v21 +; GFX11FAKE16-NEXT: v_add3_u32 v21, v24, v13, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v13 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v19, 16, v18 +; GFX11FAKE16-NEXT: v_bfe_u32 v20, v4, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v24, v12, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v19, v26, v25 +; GFX11FAKE16-NEXT: v_add3_u32 v11, v20, v4, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v4 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v18, v2, v10 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v17 +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v19, 16, 1 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v10, 16, v9 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v11, v20, vcc_lo +; GFX11FAKE16-NEXT: v_add3_u32 v11, v24, v12, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v12 +; GFX11FAKE16-NEXT: v_add3_u32 v24, v25, v19, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v19 +; GFX11FAKE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX11FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v25, v10 :: v_dual_and_b32 v9, 0xffff0000, v9
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v20, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v8
+; GFX11FAKE16-NEXT: v_bfe_u32 v20, v2, 16, 1
+; GFX11FAKE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v10, v24, v26, vcc_lo
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v24, 16, v16
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v0
+; GFX11FAKE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v17, v1, v9 :: v_dual_and_b32 v0, 0xffff0000, v0
+; GFX11FAKE16-NEXT: v_add3_u32 v1, v20, v2, 0x7fff
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v24, v26, v25
+; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v16, v0, v8
+; GFX11FAKE16-NEXT: v_bfe_u32 v0, v17, 16, 1
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11FAKE16-NEXT: v_bfe_u32 v8, v24, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v24
+; GFX11FAKE16-NEXT: v_bfe_u32 v2, v16, 16, 1
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v17, 0x7fff
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v17
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11FAKE16-NEXT: v_add3_u32 v8, v8, v24, 0x7fff
+; GFX11FAKE16-NEXT: v_bfe_u32 v27, v18, 16, 1
+; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v16, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v16
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v9, v0, v9, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11FAKE16-NEXT: v_add3_u32 v12, v27, v18, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v18
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v1, v9, v1, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v8, v20, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v25, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v8, v12, v19, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v2, v8, v10, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v12, v21, v3, vcc_lo
+; GFX11FAKE16-NEXT: v_perm_b32 v3, v11, v4, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v4, v12, v5, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v5, v14, v6, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v6, v15, v7, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v7, v23, v22, 0x7060302
+; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-LABEL: v_fma_v16bf16:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
@@ -50018,67 +51864,2797 @@ define <8 x bfloat> @v_fma_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat>
; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v14, v22
; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v15, v23
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
-define <16 x bfloat> @v_fma_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c) { %op = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c) ret <16 x bfloat> %op }
+define <32 x bfloat>
@v_fma_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c) { +; GCN-LABEL: v_fma_v32bf16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:256 +; GCN-NEXT: s_waitcnt vmcnt(2) +; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: v_fma_f32 v31, v31, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:252 +; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v30, v30, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:248 +; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v29, v29, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:244 +; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v28, v28, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240 +; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v27, v27, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:236 +; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v26, v26, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:232 +; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; 
GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v25, v25, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:228 +; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v24, v24, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:224 +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v23, v23, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:220 +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v22, v22, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:216 +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v21, v21, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:212 +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v20, v20, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:208 +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v19, v19, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204 +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: 
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v18, v18, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:200
+; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
+; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v17, v17, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:196
+; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
+; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v16, v16, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:192
+; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
+; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v15, v15, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:188
+; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
+; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v14, v14, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:184
+; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
+; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v13, v13, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:180
+; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
+; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v12, v12, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176
+; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
+; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v11, v11, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172
+; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
+; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v10, v10, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:168
+; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
+; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v9, v9, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:164
+; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
+; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v8, v8, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160
+; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
+; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v7, v7, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:156
+; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
+; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v6, v6, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:152
+; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
+; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v5, v5, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:148
+; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
+; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v4, v4, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:144
+; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v3, v3, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140
+; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v2, v2, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136
+; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v1, v1, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:132
+; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v0, v0, v32, v33
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_fma_v32bf16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:256
+; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
+; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
+; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
+; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
+; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
+; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
+; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
+; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
+; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
+; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
+; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
+; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
+; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
+; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
+; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
+; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: s_waitcnt vmcnt(2)
+; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: v_fma_f32 v31, v31, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:252
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v30, v30, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:248
+; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v29, v29, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:244
+; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v28, v28, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240
+; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v27, v27, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:236
+; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v26, v26, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:232
+; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v25, v25, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:228
+; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v24, v24, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:224
+; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v23, v23, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:220
+; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v22, v22, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:216
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v21, v21, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:212
+; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v20, v20, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:208
+; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v19, v19, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v18, v18, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:200
+; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v17, v17, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:196
+; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v16, v16, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:192
+; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v15, v15, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:188
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v14, v14, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:184
+; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v13, v13, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:180
+; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v12, v12, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v11, v11, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v10, v10, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:168
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v9, v9, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:164
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v8, v8, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v7, v7, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:156
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v6, v6, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:152
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v5, v5, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:148
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v4, v4, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:144
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v3, v3, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v2, v2, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v1, v1, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:132
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v0, v0, v32, v33
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_fma_v32bf16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32
+; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v15
+; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: s_waitcnt vmcnt(1)
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v32
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v15, v15, v33, v32
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60
+; GFX8-NEXT: v_fma_f32 v31, v31, v35, v34
+; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v30
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v14
+; GFX8-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v14, v14, v30, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:56
+; GFX8-NEXT: v_fma_f32 v32, v34, v32, v35
+; GFX8-NEXT: v_lshlrev_b32_e32 v30, 16, v29
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v13
+; GFX8-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v13, v13, v29, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52
+; GFX8-NEXT: v_fma_f32 v30, v34, v30, v35
+; GFX8-NEXT: v_lshlrev_b32_e32 v29, 16, v28
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v12
+; GFX8-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v12, v12, v28, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48
+; GFX8-NEXT: v_fma_f32 v29, v34, v29, v35
+; GFX8-NEXT: v_lshlrev_b32_e32 v28, 16, v27
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v11
+; GFX8-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v11, v11, v27, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44
+; GFX8-NEXT: v_fma_f32 v28, v34, v28, v35
+; GFX8-NEXT: v_lshlrev_b32_e32 v27, 16, v26
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v10
+; GFX8-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v10, v10, v26, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40
+; GFX8-NEXT: v_fma_f32 v27, v34, v27, v35
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v25
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v9
+; GFX8-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v9, v9, v25, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36
+; GFX8-NEXT: v_fma_f32 v26, v35, v34, v26
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v24
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v8
+; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v8, v8, v24, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32
+; GFX8-NEXT: v_fma_f32 v25, v35, v34, v25
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v23
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v7
+; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v7, v7, v23, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28
+; GFX8-NEXT: v_fma_f32 v24, v35, v34, v24
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v22
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v6
+; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v6, v6, v22, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:24
+; GFX8-NEXT: v_fma_f32 v23, v35, v34, v23
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v21
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v5
+; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v5, v5, v21, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
+; GFX8-NEXT: v_fma_f32 v22, v35, v34, v22
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v20
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v4, v4, v20, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16
+; GFX8-NEXT: v_fma_f32 v21, v35, v34, v21
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v19
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v3
+; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v3, v3, v19, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12
+; GFX8-NEXT: v_fma_f32 v20, v35, v34, v20
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v18
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v2, v2, v18, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
+; GFX8-NEXT: v_fma_f32 v19, v35, v34, v19
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v17
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v1
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v1, v1, v17, v33
+; GFX8-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:4
+; GFX8-NEXT: v_fma_f32 v18, v35, v34, v18
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v16
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v0
+; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v17
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX8-NEXT: v_fma_f32 v0, v0, v16, v17
+; GFX8-NEXT: v_bfe_u32 v16, v31, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v31
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v31, v31
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v31
+; GFX8-NEXT: v_cndmask_b32_e32 v16, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v17, v15, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v17, vcc, v17, v15
+; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
+; GFX8-NEXT: v_or_b32_e32 v15, 0x400000, v15
+; GFX8-NEXT: v_cndmask_b32_e32 v15, v17, v15, vcc
+; GFX8-NEXT: v_bfe_u32 v17, v32, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v17, vcc, v17, v32
+; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
+; GFX8-NEXT: v_or_b32_e32 v31, 0x400000, v32
+; GFX8-NEXT: v_cndmask_b32_e32 v17, v17, v31, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v14, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v14
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
+; GFX8-NEXT: v_or_b32_e32 v14, 0x400000, v14
+; GFX8-NEXT: v_cndmask_b32_e32 v14, v31, v14, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v30, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v30
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
+; GFX8-NEXT: v_or_b32_e32 v30, 0x400000, v30
+; GFX8-NEXT: v_cndmask_b32_e32 v30, v31, v30, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v13, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v13
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
+; GFX8-NEXT: v_or_b32_e32 v13, 0x400000, v13
+; GFX8-NEXT: v_cndmask_b32_e32 v13, v31, v13, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v29, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v29
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v29, v29
+; GFX8-NEXT: v_or_b32_e32 v29, 0x400000, v29
+; GFX8-NEXT: v_cndmask_b32_e32 v29, v31, v29, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v12, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v12
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
+; GFX8-NEXT: v_or_b32_e32 v12, 0x400000, v12
+; GFX8-NEXT: v_cndmask_b32_e32 v12, v31, v12, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v28, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v28
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v28, v28
+; GFX8-NEXT: v_or_b32_e32 v28, 0x400000, v28
+; GFX8-NEXT: v_cndmask_b32_e32 v28, v31, v28, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v11, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v11
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
+; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v11
+; GFX8-NEXT: v_cndmask_b32_e32 v11, v31, v11, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v27, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v27
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v27, v27
+; GFX8-NEXT: v_or_b32_e32 v27, 0x400000, v27
+; GFX8-NEXT: v_cndmask_b32_e32 v27, v31, v27, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v10, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v10
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_cndmask_b32_e32 v10, v31, v10, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v26, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v26
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v26, v26
+; GFX8-NEXT: v_or_b32_e32 v26, 0x400000, v26
+; GFX8-NEXT: v_cndmask_b32_e32 v26, v31, v26, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v9, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v9
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_cndmask_b32_e32 v9, v31, v9, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v25, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v25
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v25, v25
+; GFX8-NEXT: v_or_b32_e32 v25, 0x400000, v25
+; GFX8-NEXT: v_cndmask_b32_e32 v25, v31, v25, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v8, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v8
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
+; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v8
+; GFX8-NEXT: v_cndmask_b32_e32 v8, v31, v8, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v24, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v24
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
+; GFX8-NEXT: v_or_b32_e32 v24, 0x400000, v24
+; GFX8-NEXT: v_cndmask_b32_e32 v24, v31, v24, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v7, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v7
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v31, v7, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v23, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v23
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v23, v23
+; GFX8-NEXT: v_or_b32_e32 v23, 0x400000, v23
+; GFX8-NEXT: v_cndmask_b32_e32 v23, v31, v23, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v6, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v6
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v31, v6, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v22, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v22
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v22, v22
+; GFX8-NEXT: v_or_b32_e32 v22, 0x400000, v22
+; GFX8-NEXT: v_cndmask_b32_e32 v22, v31, v22, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v5, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v5
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v31, v5, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v21, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v21
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v21, v21
+; GFX8-NEXT: v_or_b32_e32 v21, 0x400000, v21
+; GFX8-NEXT: v_cndmask_b32_e32 v21, v31, v21, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v4, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v4
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v31, v4, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v20, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v20
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v20, v20
+; GFX8-NEXT: v_or_b32_e32 v20, 0x400000, v20
+; GFX8-NEXT: v_cndmask_b32_e32 v20, v31, v20, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v3
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v31, v3, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v19, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v19
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v19, v19
+; GFX8-NEXT: v_or_b32_e32 v19, 0x400000, v19
+; GFX8-NEXT: v_cndmask_b32_e32 v19, v31, v19, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v2, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v2
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v31, v2, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v18, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v18
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v18, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_cndmask_b32_e32 v18, v31, v18, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_fma_f32 v33, v35, v34, v33
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v31, v1, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v33, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v33
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v33, v33
+; GFX8-NEXT: v_or_b32_e32 v32, 0x400000, v33
+; GFX8-NEXT: v_cndmask_b32_e32 v31, v31, v32, vcc
+; GFX8-NEXT: v_bfe_u32 v32, v0, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v32, vcc, v32, v0
+; GFX8-NEXT: v_add_u32_e32 v32, vcc, s4, v32
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX8-NEXT: v_or_b32_e32 v0, 0x400000, v0
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v32, v0, vcc
+; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v15
+; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX8-NEXT: v_alignbit_b32 v0, v0, v31, 16
+; GFX8-NEXT: v_alignbit_b32 v1, v1, v18, 16
+; GFX8-NEXT: v_alignbit_b32 v2, v2, v19, 16
+; GFX8-NEXT: v_alignbit_b32 v3, v3, v20, 16
+; GFX8-NEXT: v_alignbit_b32 v4, v4, v21, 16
+; GFX8-NEXT: v_alignbit_b32 v5, v5, v22, 16
+; GFX8-NEXT: v_alignbit_b32 v6, v6, v23, 16
+; GFX8-NEXT: v_alignbit_b32 v7, v7, v24, 16
+; GFX8-NEXT: v_alignbit_b32 v8, v8, v25, 16
+; GFX8-NEXT: v_alignbit_b32 v9, v9, v26, 16
+; GFX8-NEXT: v_alignbit_b32 v10, v10, v27, 16
+; GFX8-NEXT: v_alignbit_b32 v11, v11, v28, 16
+; GFX8-NEXT: v_alignbit_b32 v12, v12, v29, 16
+; GFX8-NEXT: v_alignbit_b32 v13, v13, v30, 16
+; GFX8-NEXT: v_alignbit_b32 v14, v14, v17, 16
+; GFX8-NEXT: v_alignbit_b32 v15, v15, v16, 16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX900-LABEL: v_fma_v32bf16:
+; GFX900: ; %bb.0:
+; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32
+; GFX900-NEXT: v_lshlrev_b32_e32 v31, 16, v15
+; GFX900-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX900-NEXT: s_movk_i32 s4, 0x7fff
+; GFX900-NEXT: s_waitcnt vmcnt(1)
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v32
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v15, v15, v33, v32
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60
+; GFX900-NEXT: v_fma_f32 v31, v31, v35, v34
+; GFX900-NEXT: v_lshlrev_b32_e32 v32, 16, v30
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v14
+; GFX900-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX900-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v31, v31
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v14, v14, v30, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:56
+; GFX900-NEXT: v_fma_f32 v32, v34, v32, v35
+; GFX900-NEXT: v_lshlrev_b32_e32 v30, 16, v29
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v13
+; GFX900-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX900-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v13, v13, v29, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52
+; GFX900-NEXT: v_fma_f32 v30, v34, v30, v35
+; GFX900-NEXT: v_lshlrev_b32_e32 v29, 16, v28
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v12
+; GFX900-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX900-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v12, v12, v28, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48
+; GFX900-NEXT: v_fma_f32 v29, v34, v29, v35
+; GFX900-NEXT: v_lshlrev_b32_e32 v28, 16, v27
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v11
+; GFX900-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v11, v11, v27, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44
+; GFX900-NEXT: v_fma_f32 v28, v34, v28, v35
+; GFX900-NEXT: v_lshlrev_b32_e32 v27, 16, v26
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v10
+; GFX900-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v10, v10, v26, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40
+; GFX900-NEXT: v_fma_f32 v27, v34, v27, v35
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v25
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v9
+; GFX900-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v26, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v9, v9, v25, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36
+; GFX900-NEXT: v_fma_f32 v26, v35, v34, v26
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v24
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v8
+; GFX900-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v8, v8, v24, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32
+; GFX900-NEXT: v_fma_f32 v25, v35, v34, v25
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v23
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v7
+; GFX900-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v24, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v7, v7, v23, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28
+; GFX900-NEXT: v_fma_f32 v24, v35, v34, v24
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v22
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v6
+; GFX900-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v6, v6, v22, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:24
+; GFX900-NEXT: v_fma_f32 v23, v35, v34, v23
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v21
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v5
+; GFX900-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v5, v5, v21, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
+; GFX900-NEXT: v_fma_f32 v22, v35, v34, v22
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v20
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v4
+; GFX900-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v4, v4, v20, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16
+; GFX900-NEXT: v_fma_f32 v21, v35, v34, v21
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v19
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v3
+; GFX900-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v3, v3, v19, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12
+; GFX900-NEXT: v_fma_f32 v20, v35, v34, v20
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v18
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v2
+; GFX900-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v2, v2, v18, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
+; GFX900-NEXT: v_fma_f32 v19, v35, v34, v19
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v17
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v1
+; GFX900-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v1, v1, v17, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4
+; GFX900-NEXT: v_fma_f32 v18, v35, v34, v18
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v16
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v0
+; GFX900-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v17, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v0, v0, v16, v33
+; GFX900-NEXT: v_bfe_u32 v16, v31, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v31, s4
+; GFX900-NEXT: v_or_b32_e32 v31, 0x400000, v31
+; GFX900-NEXT: v_cndmask_b32_e32 v16, v16, v31, vcc
+; GFX900-NEXT: v_bfe_u32 v31, v15, 16, 1
+; GFX900-NEXT: v_add3_u32 v31, v31, v15, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
+; GFX900-NEXT: v_or_b32_e32 v15, 0x400000, v15
+; GFX900-NEXT: v_cndmask_b32_e32 v15, v31, v15, vcc
+; GFX900-NEXT: v_bfe_u32 v31, v32, 16, 1
+; GFX900-NEXT: v_add3_u32 v31, v31, v32, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
+; GFX900-NEXT: v_or_b32_e32 v32, 0x400000, v32
+; GFX900-NEXT: v_cndmask_b32_e32 v31, v31, v32, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v14, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v14, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
+; GFX900-NEXT: v_or_b32_e32 v14, 0x400000, v14
+; GFX900-NEXT: v_cndmask_b32_e32 v14, v32, v14, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v30, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v30, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
+; GFX900-NEXT: v_or_b32_e32 v30, 0x400000, v30
+; GFX900-NEXT: v_cndmask_b32_e32 v30, v32, v30, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v13, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v13, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
+; GFX900-NEXT: v_or_b32_e32 v13, 0x400000, v13
+; GFX900-NEXT: v_cndmask_b32_e32 v13, v32, v13, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v29, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v29, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v29, v29
+; GFX900-NEXT: v_or_b32_e32 v29, 0x400000, v29
+; GFX900-NEXT: v_cndmask_b32_e32 v29, v32, v29, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v12, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v12, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
+; GFX900-NEXT: v_or_b32_e32 v12, 0x400000, v12
+; GFX900-NEXT: v_cndmask_b32_e32 v12, v32, v12, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v28, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v28, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v28, v28
+; GFX900-NEXT: v_or_b32_e32 v28, 0x400000, v28
+; GFX900-NEXT: v_cndmask_b32_e32 v28, v32, v28, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v11, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v11, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
+; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v11
+; GFX900-NEXT: v_cndmask_b32_e32 v11, v32, v11, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v27, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v27, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v27, v27
+; GFX900-NEXT: v_or_b32_e32 v27, 0x400000, v27
+; GFX900-NEXT: v_cndmask_b32_e32 v27, v32, v27, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v10, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v10, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
+; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX900-NEXT: v_cndmask_b32_e32 v10, v32, v10, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v26, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v26, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v26, v26
+; GFX900-NEXT: v_or_b32_e32 v26, 0x400000, v26
+; GFX900-NEXT: v_cndmask_b32_e32 v26, v32, v26, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v9, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v9, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
+; GFX900-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX900-NEXT: v_cndmask_b32_e32 v9, v32, v9, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v25, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v25, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v25, v25
+; GFX900-NEXT: v_or_b32_e32 v25, 0x400000, v25
+; GFX900-NEXT: v_cndmask_b32_e32 v25, v32, v25, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v8, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v8, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
+; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v8
+; GFX900-NEXT: v_cndmask_b32_e32 v8, v32, v8, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v24, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v24, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
+; GFX900-NEXT: v_or_b32_e32 v24, 0x400000, v24
+; GFX900-NEXT: v_cndmask_b32_e32 v24, v32, v24, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v7, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v7, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX900-NEXT: v_cndmask_b32_e32 v7, v32, v7, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v23, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v23, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v23, v23
+; GFX900-NEXT: v_or_b32_e32 v23, 0x400000, v23
+; GFX900-NEXT: v_cndmask_b32_e32 v23, v32, v23, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v6, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v6, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX900-NEXT: v_cndmask_b32_e32 v6, v32, v6, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v22, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v22, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v22, v22
+; GFX900-NEXT: v_or_b32_e32 v22, 0x400000, v22
+; GFX900-NEXT: v_cndmask_b32_e32 v22, v32, v22, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v5, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v5, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX900-NEXT: v_cndmask_b32_e32 v5, v32, v5, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v21, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v21, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v21, v21
+; GFX900-NEXT: v_or_b32_e32 v21, 0x400000, v21
+; GFX900-NEXT: v_cndmask_b32_e32 v21, v32, v21, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v4, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v4, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX900-NEXT: v_cndmask_b32_e32 v4, v32, v4, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v20, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v20, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v20, v20
+; GFX900-NEXT: v_or_b32_e32 v20, 0x400000, v20
+; GFX900-NEXT: v_cndmask_b32_e32 v20, v32, v20, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v3, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v3, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX900-NEXT: v_or_b32_e32 v3, 0x400000, v3
0x400000, v3 +; GFX900-NEXT: v_cndmask_b32_e32 v3, v32, v3, vcc +; GFX900-NEXT: v_bfe_u32 v32, v19, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v19, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v19, v19 +; GFX900-NEXT: v_or_b32_e32 v19, 0x400000, v19 +; GFX900-NEXT: v_cndmask_b32_e32 v19, v32, v19, vcc +; GFX900-NEXT: v_bfe_u32 v32, v2, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v2, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX900-NEXT: v_or_b32_e32 v2, 0x400000, v2 +; GFX900-NEXT: v_cndmask_b32_e32 v2, v32, v2, vcc +; GFX900-NEXT: v_bfe_u32 v32, v18, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v18, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v18, v18 +; GFX900-NEXT: v_or_b32_e32 v18, 0x400000, v18 +; GFX900-NEXT: v_cndmask_b32_e32 v18, v32, v18, vcc +; GFX900-NEXT: v_bfe_u32 v32, v1, 16, 1 +; GFX900-NEXT: v_fma_f32 v17, v35, v34, v17 +; GFX900-NEXT: v_add3_u32 v32, v32, v1, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX900-NEXT: v_or_b32_e32 v1, 0x400000, v1 +; GFX900-NEXT: v_cndmask_b32_e32 v1, v32, v1, vcc +; GFX900-NEXT: v_bfe_u32 v32, v17, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v17, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v17 +; GFX900-NEXT: v_cndmask_b32_e32 v17, v32, v17, vcc +; GFX900-NEXT: v_bfe_u32 v32, v0, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v0, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX900-NEXT: v_or_b32_e32 v0, 0x400000, v0 +; GFX900-NEXT: v_cndmask_b32_e32 v0, v32, v0, vcc +; GFX900-NEXT: s_mov_b32 s4, 0x7060302 +; GFX900-NEXT: v_perm_b32 v0, v0, v17, s4 +; GFX900-NEXT: v_perm_b32 v1, v1, v18, s4 +; GFX900-NEXT: v_perm_b32 v2, v2, v19, s4 +; GFX900-NEXT: v_perm_b32 v3, v3, v20, s4 +; GFX900-NEXT: v_perm_b32 v4, v4, v21, s4 +; GFX900-NEXT: v_perm_b32 v5, v5, v22, s4 +; GFX900-NEXT: v_perm_b32 v6, v6, v23, s4 +; GFX900-NEXT: v_perm_b32 v7, v7, v24, s4 +; GFX900-NEXT: v_perm_b32 v8, v8, v25, s4 +; GFX900-NEXT: v_perm_b32 v9, v9, v26, s4 +; GFX900-NEXT: v_perm_b32 v10, v10, v27, s4 +; GFX900-NEXT: v_perm_b32 v11, v11, v28, s4 +; GFX900-NEXT: v_perm_b32 v12, v12, v29, s4 +; GFX900-NEXT: v_perm_b32 v13, v13, v30, s4 +; GFX900-NEXT: v_perm_b32 v14, v14, v31, s4 +; GFX900-NEXT: v_perm_b32 v15, v15, v16, s4 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fma_v32bf16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: scratch_load_dword v35, off, s32 offset:64 +; GFX950-NEXT: scratch_load_dword v36, off, s32 +; GFX950-NEXT: scratch_load_dword v38, off, s32 offset:60 +; GFX950-NEXT: scratch_load_dword v39, off, s32 offset:56 +; GFX950-NEXT: scratch_load_dword v48, off, s32 offset:52 +; GFX950-NEXT: scratch_load_dword v49, off, s32 offset:48 +; GFX950-NEXT: scratch_load_dword v50, off, s32 offset:44 +; GFX950-NEXT: scratch_load_dword v51, off, s32 offset:40 +; GFX950-NEXT: scratch_load_dword v52, off, s32 offset:36 +; GFX950-NEXT: scratch_load_dword v53, off, s32 offset:32 +; GFX950-NEXT: scratch_load_dword v54, off, s32 offset:28 +; GFX950-NEXT: scratch_load_dword v31, off, s32 offset:4 +; GFX950-NEXT: scratch_load_dword v32, off, s32 offset:8 +; GFX950-NEXT: scratch_load_dword v33, off, s32 offset:12 +; GFX950-NEXT: scratch_load_dword v34, off, s32 offset:16 +; GFX950-NEXT: scratch_load_dword v37, off, s32 offset:20 +; GFX950-NEXT: scratch_load_dword v55, off, s32 offset:24 +; GFX950-NEXT: v_accvgpr_write_b32 a3, v43 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a5, v45 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a6, v46 ; Reload Reuse +; 
GFX950-NEXT: v_accvgpr_write_b32 a8, v56 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a11, v59 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a13, v61 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a14, v62 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a15, v63 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v43, 0xffff0000, v14 +; GFX950-NEXT: v_lshlrev_b32_e32 v45, 16, v14 +; GFX950-NEXT: v_and_b32_e32 v46, 0xffff0000, v29 +; GFX950-NEXT: v_lshlrev_b32_e32 v56, 16, v29 +; GFX950-NEXT: v_and_b32_e32 v59, 0xffff0000, v12 +; GFX950-NEXT: v_lshlrev_b32_e32 v61, 16, v12 +; GFX950-NEXT: v_and_b32_e32 v62, 0xffff0000, v27 +; GFX950-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; GFX950-NEXT: v_accvgpr_write_b32 a2, v42 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a4, v44 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a7, v47 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a9, v57 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v42, 0xffff0000, v30 +; GFX950-NEXT: v_lshlrev_b32_e32 v44, 16, v30 +; GFX950-NEXT: v_and_b32_e32 v47, 0xffff0000, v13 +; GFX950-NEXT: v_lshlrev_b32_e32 v57, 16, v13 +; GFX950-NEXT: v_accvgpr_write_b32 a0, v40 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a1, v41 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v40, 0xffff0000, v15 +; GFX950-NEXT: v_lshlrev_b32_e32 v41, 16, v15 +; GFX950-NEXT: v_accvgpr_write_b32 a10, v58 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a12, v60 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v58, 0xffff0000, v28 +; GFX950-NEXT: v_lshlrev_b32_e32 v60, 16, v28 +; GFX950-NEXT: s_waitcnt vmcnt(16) +; GFX950-NEXT: v_and_b32_e32 v15, 0xffff0000, v35 +; GFX950-NEXT: s_waitcnt vmcnt(15) +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v36 +; GFX950-NEXT: v_lshlrev_b32_e32 v63, 16, v36 +; GFX950-NEXT: s_waitcnt vmcnt(14) +; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v38 +; GFX950-NEXT: v_lshlrev_b32_e32 v29, 16, v38 +; GFX950-NEXT: s_waitcnt vmcnt(11) +; GFX950-NEXT: v_and_b32_e32 v36, 0xffff0000, v49 +; GFX950-NEXT: v_and_b32_e32 v38, 0xffff0000, v11 +; GFX950-NEXT: v_fmac_f32_e32 v36, v38, v62 +; GFX950-NEXT: v_lshlrev_b32_e32 v38, 16, v49 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v39 +; GFX950-NEXT: v_lshlrev_b32_e32 v30, 16, v39 +; GFX950-NEXT: v_fmac_f32_e32 v38, v11, v27 +; GFX950-NEXT: s_waitcnt vmcnt(10) +; GFX950-NEXT: v_and_b32_e32 v11, 0xffff0000, v50 +; GFX950-NEXT: v_and_b32_e32 v27, 0xffff0000, v26 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v10 +; GFX950-NEXT: v_fmac_f32_e32 v11, v39, v27 +; GFX950-NEXT: v_lshlrev_b32_e32 v27, 16, v50 +; GFX950-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX950-NEXT: v_fmac_f32_e32 v27, v10, v26 +; GFX950-NEXT: s_waitcnt vmcnt(9) +; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v51 +; GFX950-NEXT: v_and_b32_e32 v26, 0xffff0000, v25 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v9 +; GFX950-NEXT: v_fmac_f32_e32 v10, v39, v26 +; GFX950-NEXT: v_lshlrev_b32_e32 v26, 16, v51 +; GFX950-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX950-NEXT: v_fmac_f32_e32 v26, v9, v25 +; GFX950-NEXT: s_waitcnt vmcnt(8) +; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v52 +; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v24 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v8 +; GFX950-NEXT: v_fmac_f32_e32 v9, v39, v25 +; GFX950-NEXT: v_lshlrev_b32_e32 v25, 16, v52 +; GFX950-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; GFX950-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX950-NEXT: 
v_fmac_f32_e32 v25, v8, v24 +; GFX950-NEXT: s_waitcnt vmcnt(7) +; GFX950-NEXT: v_and_b32_e32 v8, 0xffff0000, v53 +; GFX950-NEXT: v_and_b32_e32 v24, 0xffff0000, v23 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v7 +; GFX950-NEXT: v_fmac_f32_e32 v8, v39, v24 +; GFX950-NEXT: v_lshlrev_b32_e32 v24, 16, v53 +; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX950-NEXT: v_fmac_f32_e32 v24, v7, v23 +; GFX950-NEXT: s_waitcnt vmcnt(6) +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v54 +; GFX950-NEXT: v_and_b32_e32 v23, 0xffff0000, v22 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v6 +; GFX950-NEXT: v_fmac_f32_e32 v7, v39, v23 +; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v54 +; GFX950-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX950-NEXT: v_fmac_f32_e32 v23, v6, v22 +; GFX950-NEXT: s_waitcnt vmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v55 +; GFX950-NEXT: v_and_b32_e32 v22, 0xffff0000, v21 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v5 +; GFX950-NEXT: v_fmac_f32_e32 v6, v39, v22 +; GFX950-NEXT: v_lshlrev_b32_e32 v22, 16, v55 +; GFX950-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX950-NEXT: v_fmac_f32_e32 v22, v5, v21 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v37 +; GFX950-NEXT: v_and_b32_e32 v21, 0xffff0000, v20 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v4 +; GFX950-NEXT: v_fmac_f32_e32 v5, v39, v21 +; GFX950-NEXT: v_lshlrev_b32_e32 v21, 16, v37 +; GFX950-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX950-NEXT: v_fmac_f32_e32 v21, v4, v20 +; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v34 +; GFX950-NEXT: v_and_b32_e32 v20, 0xffff0000, v19 +; GFX950-NEXT: v_and_b32_e32 v37, 0xffff0000, v3 +; GFX950-NEXT: v_fmac_f32_e32 v4, v37, v20 +; GFX950-NEXT: v_lshlrev_b32_e32 v20, 16, v34 +; GFX950-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX950-NEXT: v_fmac_f32_e32 v20, v3, v19 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v33 +; GFX950-NEXT: v_and_b32_e32 v19, 0xffff0000, v18 +; GFX950-NEXT: v_and_b32_e32 v34, 0xffff0000, v2 +; GFX950-NEXT: v_fmac_f32_e32 v3, v34, v19 +; GFX950-NEXT: v_lshlrev_b32_e32 v19, 16, v33 +; GFX950-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX950-NEXT: v_fmac_f32_e32 v19, v2, v18 +; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v32 +; GFX950-NEXT: v_and_b32_e32 v18, 0xffff0000, v17 +; GFX950-NEXT: v_and_b32_e32 v33, 0xffff0000, v1 +; GFX950-NEXT: v_fmac_f32_e32 v2, v33, v18 +; GFX950-NEXT: v_lshlrev_b32_e32 v18, 16, v32 +; GFX950-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX950-NEXT: v_fmac_f32_e32 v18, v1, v17 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v31 +; GFX950-NEXT: v_and_b32_e32 v17, 0xffff0000, v16 +; GFX950-NEXT: v_and_b32_e32 v32, 0xffff0000, v0 +; GFX950-NEXT: v_lshlrev_b32_e32 v28, 16, v35 +; GFX950-NEXT: v_fmac_f32_e32 v15, v40, v12 +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v48 +; GFX950-NEXT: v_lshlrev_b32_e32 v35, 16, v48 +; GFX950-NEXT: v_fmac_f32_e32 v1, v32, v17 +; GFX950-NEXT: v_lshlrev_b32_e32 v17, 16, v31 +; GFX950-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX950-NEXT: v_fmac_f32_e32 v28, v41, v63 +; GFX950-NEXT: v_fmac_f32_e32 v14, v43, v42 +; GFX950-NEXT: v_fmac_f32_e32 v29, v45, v44 +; GFX950-NEXT: v_fmac_f32_e32 v13, v47, v46 +; GFX950-NEXT: v_fmac_f32_e32 v30, v57, v56 +; GFX950-NEXT: 
v_fmac_f32_e32 v12, v59, v58 +; GFX950-NEXT: v_fmac_f32_e32 v35, v61, v60 +; GFX950-NEXT: v_fmac_f32_e32 v17, v0, v16 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v17, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v18, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v19, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v20, v4 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v21, v5 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v5, v22, v6 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v23, v7 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v7, v24, v8 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v8, v25, v9 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v9, v26, v10 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v10, v27, v11 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v11, v38, v36 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v12, v35, v12 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v13, v30, v13 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v14, v29, v14 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v15, v28, v15 +; GFX950-NEXT: v_accvgpr_read_b32 v63, a15 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v62, a14 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v61, a13 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v60, a12 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v59, a11 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v58, a10 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v57, a9 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v56, a8 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v47, a7 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v46, a6 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v45, a5 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v44, a4 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v43, a3 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v42, a2 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v41, a1 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v40, a0 ; Reload Reuse +; GFX950-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_fma_v32bf16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_clause 0x8 +; GFX10-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s32 +; GFX10-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:60 +; GFX10-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:56 +; GFX10-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:52 +; GFX10-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48 +; GFX10-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:44 +; GFX10-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:40 +; GFX10-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:36 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v15 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v15 +; GFX10-NEXT: v_and_b32_e32 v52, 0xffff0000, v10 +; GFX10-NEXT: s_waitcnt vmcnt(8) +; GFX10-NEXT: v_lshlrev_b32_e32 v31, 16, v32 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v33 +; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v32 +; GFX10-NEXT: v_and_b32_e32 v32, 0xffff0000, v33 +; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32 +; GFX10-NEXT: v_fmac_f32_e32 v31, v49, v50 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v30 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v14 +; GFX10-NEXT: v_fmac_f32_e32 v15, v51, v32 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v34 +; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v14 +; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v34 +; GFX10-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:28 +; GFX10-NEXT: v_fmac_f32_e32 v32, v50, v49 +; GFX10-NEXT: 
v_lshlrev_b32_e32 v49, 16, v29 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v13 +; GFX10-NEXT: v_fmac_f32_e32 v14, v51, v30 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v30, 16, v35 +; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v13 +; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v35 +; GFX10-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:24 +; GFX10-NEXT: v_fmac_f32_e32 v30, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v28 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v12 +; GFX10-NEXT: v_fmac_f32_e32 v13, v51, v29 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v29, 16, v36 +; GFX10-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v12 +; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v36 +; GFX10-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:20 +; GFX10-NEXT: v_fmac_f32_e32 v29, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v27 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v11 +; GFX10-NEXT: v_fmac_f32_e32 v12, v51, v28 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v28, 16, v37 +; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v11 +; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v37 +; GFX10-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:16 +; GFX10-NEXT: v_fmac_f32_e32 v28, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v26 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v10 +; GFX10-NEXT: v_fmac_f32_e32 v11, v51, v27 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v27, 16, v38 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v26 +; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v38 +; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v25 +; GFX10-NEXT: s_waitcnt vmcnt(6) +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v39 +; GFX10-NEXT: v_fmac_f32_e32 v27, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v9 +; GFX10-NEXT: v_fmac_f32_e32 v10, v52, v51 +; GFX10-NEXT: s_clause 0x1 +; GFX10-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; GFX10-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:8 +; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v31 +; GFX10-NEXT: v_fmac_f32_e32 v26, v49, v38 +; GFX10-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:4 +; GFX10-NEXT: v_and_b32_e32 v49, 0xffff0000, v9 +; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v39 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v24 +; GFX10-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v31, v31 +; GFX10-NEXT: v_fmac_f32_e32 v9, v49, v25 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v8 +; GFX10-NEXT: s_waitcnt vmcnt(8) +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v48 +; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX10-NEXT: v_and_b32_e32 v48, 0xffff0000, v48 +; GFX10-NEXT: v_fmac_f32_e32 v25, v49, v39 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v7 +; GFX10-NEXT: v_fmac_f32_e32 v48, v8, v24 +; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v22 +; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v33 +; GFX10-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX10-NEXT: v_fmac_f32_e32 v8, v49, v39 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v6 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX10-NEXT: 
v_fmac_f32_e32 v33, v7, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v21 +; GFX10-NEXT: s_waitcnt vmcnt(6) +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v34 +; GFX10-NEXT: v_and_b32_e32 v34, 0xffff0000, v34 +; GFX10-NEXT: v_lshlrev_b32_e32 v23, 16, v5 +; GFX10-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX10-NEXT: v_fmac_f32_e32 v7, v39, v24 +; GFX10-NEXT: v_fmac_f32_e32 v34, v6, v22 +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v20 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v4 +; GFX10-NEXT: s_waitcnt vmcnt(5) +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v35 +; GFX10-NEXT: v_and_b32_e32 v35, 0xffff0000, v35 +; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v19 +; GFX10-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX10-NEXT: v_fmac_f32_e32 v6, v23, v49 +; GFX10-NEXT: v_fmac_f32_e32 v35, v5, v21 +; GFX10-NEXT: v_lshlrev_b32_e32 v23, 16, v3 +; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX10-NEXT: s_waitcnt vmcnt(4) +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v36 +; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX10-NEXT: v_and_b32_e32 v36, 0xffff0000, v36 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v18 +; GFX10-NEXT: v_lshlrev_b32_e32 v21, 16, v2 +; GFX10-NEXT: v_fmac_f32_e32 v5, v39, v24 +; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX10-NEXT: v_fmac_f32_e32 v36, v4, v20 +; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v16 +; GFX10-NEXT: s_waitcnt vmcnt(3) +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v37 +; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v17 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX10-NEXT: v_fmac_f32_e32 v39, v23, v22 +; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v37 +; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v0 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX10-NEXT: v_fmac_f32_e32 v23, v3, v19 +; GFX10-NEXT: s_waitcnt vmcnt(2) +; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v50 +; GFX10-NEXT: s_waitcnt vmcnt(1) +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v51 +; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v51 +; GFX10-NEXT: v_and_b32_e32 v50, 0xffff0000, v50 +; GFX10-NEXT: v_cmp_u_f32_e64 s5, v33, v33 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v51, 16, v38 +; GFX10-NEXT: v_and_b32_e32 v38, 0xffff0000, v38 +; GFX10-NEXT: v_fmac_f32_e32 v37, v21, v49 +; GFX10-NEXT: v_fmac_f32_e32 v50, v2, v18 +; GFX10-NEXT: v_fmac_f32_e32 v19, v1, v17 +; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v48 +; GFX10-NEXT: v_fmac_f32_e32 v38, v0, v16 +; GFX10-NEXT: v_bfe_u32 v0, v48, 16, 1 +; GFX10-NEXT: v_bfe_u32 v16, v33, 16, 1 +; GFX10-NEXT: v_bfe_u32 v2, v8, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v33 +; GFX10-NEXT: v_bfe_u32 v18, v7, 16, 1 +; GFX10-NEXT: v_bfe_u32 v21, v34, 16, 1 +; GFX10-NEXT: v_add3_u32 v0, v0, v48, 0x7fff +; GFX10-NEXT: v_bfe_u32 v48, v35, 16, 1 +; GFX10-NEXT: v_add3_u32 v16, v16, v33, 0x7fff +; GFX10-NEXT: v_bfe_u32 v33, v5, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v3, v4, v24 +; GFX10-NEXT: v_fmac_f32_e32 v51, v22, v20 +; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v8 +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v7 +; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v34 +; GFX10-NEXT: v_bfe_u32 v24, v6, 16, 1 +; GFX10-NEXT: v_add3_u32 v2, v2, v8, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s4, v8, v8 +; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v35 +; GFX10-NEXT: v_add3_u32 v18, v18, v7, 0x7fff +; 
GFX10-NEXT: v_cmp_u_f32_e64 s6, v7, v7 +; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v5 +; GFX10-NEXT: v_add3_u32 v21, v21, v34, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s7, v34, v34 +; GFX10-NEXT: v_bfe_u32 v34, v39, 16, 1 +; GFX10-NEXT: v_add3_u32 v48, v48, v35, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s9, v35, v35 +; GFX10-NEXT: v_bfe_u32 v35, v23, 16, 1 +; GFX10-NEXT: v_add3_u32 v33, v33, v5, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s10, v5, v5 +; GFX10-NEXT: v_bfe_u32 v5, v37, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v49, 0x400000, v6 +; GFX10-NEXT: v_add3_u32 v24, v24, v6, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s8, v6, v6 +; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v39 +; GFX10-NEXT: v_add3_u32 v34, v34, v39, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s11, v39, v39 +; GFX10-NEXT: v_or_b32_e32 v39, 0x400000, v23 +; GFX10-NEXT: v_add3_u32 v35, v35, v23, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s12, v23, v23 +; GFX10-NEXT: v_or_b32_e32 v23, 0x400000, v37 +; GFX10-NEXT: v_add3_u32 v5, v5, v37, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s13, v37, v37 +; GFX10-NEXT: v_bfe_u32 v37, v31, 16, 1 +; GFX10-NEXT: v_cndmask_b32_e64 v53, v2, v4, s4 +; GFX10-NEXT: v_bfe_u32 v4, v3, 16, 1 +; GFX10-NEXT: v_cndmask_b32_e64 v16, v16, v17, s5 +; GFX10-NEXT: v_cndmask_b32_e64 v17, v18, v20, s6 +; GFX10-NEXT: v_add3_u32 v37, v37, v31, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v18, v21, v22, s7 +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v3 +; GFX10-NEXT: v_bfe_u32 v22, v19, 16, 1 +; GFX10-NEXT: v_add3_u32 v4, v4, v3, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v31, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v15, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v15 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v15, v15 +; GFX10-NEXT: v_cndmask_b32_e64 v21, v24, v49, s8 +; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v19 +; GFX10-NEXT: v_add3_u32 v37, v37, v15, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v7, v33, v7, s10 +; GFX10-NEXT: v_bfe_u32 v33, v51, 16, 1 +; GFX10-NEXT: v_add3_u32 v22, v22, v19, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v6, v34, v6, s11 +; GFX10-NEXT: v_cndmask_b32_e64 v15, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v32, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v32 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v32, v32 +; GFX10-NEXT: v_or_b32_e32 v34, 0x400000, v51 +; GFX10-NEXT: v_cndmask_b32_e64 v35, v35, v39, s12 +; GFX10-NEXT: v_add3_u32 v37, v37, v32, 0x7fff +; GFX10-NEXT: v_bfe_u32 v39, v38, 16, 1 +; GFX10-NEXT: v_add3_u32 v33, v33, v51, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v23, s13 +; GFX10-NEXT: v_or_b32_e32 v23, 0x400000, v38 +; GFX10-NEXT: v_cndmask_b32_e64 v32, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v14, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v14 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v14, v14 +; GFX10-NEXT: v_add3_u32 v39, v39, v38, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v50 +; GFX10-NEXT: v_add3_u32 v37, v37, v14, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v8, v48, v8, s9 +; GFX10-NEXT: v_perm_b32 v15, v15, v31, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e64 v14, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v30, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v30 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v30, v30 +; GFX10-NEXT: v_perm_b32 v14, v14, v32, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v30, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v30, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v13, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v13 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v13, v13 +; GFX10-NEXT: v_add3_u32 v37, v37, v13, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v13, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 
v37, v29, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v29 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v29, v29 +; GFX10-NEXT: v_perm_b32 v13, v13, v30, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v29, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v29, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v12, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v12 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v12, v12 +; GFX10-NEXT: v_add3_u32 v37, v37, v12, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v12, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v28, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v28 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v28, v28 +; GFX10-NEXT: v_perm_b32 v12, v12, v29, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v28, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v28, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v11, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v11 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v11, v11 +; GFX10-NEXT: v_add3_u32 v37, v37, v11, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v11, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v27, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v27 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v27, v27 +; GFX10-NEXT: v_perm_b32 v11, v11, v28, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v27, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v27, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v10, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v10 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v10, v10 +; GFX10-NEXT: v_add3_u32 v37, v37, v10, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v10, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v26, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v26 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v26, v26 +; GFX10-NEXT: v_perm_b32 v10, v10, v27, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v26, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v26, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v9, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v9 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v9, v9 +; GFX10-NEXT: v_add3_u32 v37, v37, v9, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v9, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v25, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v25 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v25, v25 +; GFX10-NEXT: v_perm_b32 v9, v9, v26, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v25, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v25, v37, v52, s14 +; GFX10-NEXT: v_cndmask_b32_e32 v52, v0, v1, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_bfe_u32 v1, v50, 16, 1 +; GFX10-NEXT: v_bfe_u32 v37, v36, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v0, 0x400000, v36 +; GFX10-NEXT: v_cndmask_b32_e32 v3, v4, v20, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX10-NEXT: v_add3_u32 v1, v1, v50, 0x7fff +; GFX10-NEXT: v_add3_u32 v37, v37, v36, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v4, v22, v24, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51 +; GFX10-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38 +; GFX10-NEXT: v_cndmask_b32_e32 v20, v39, v23, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 +; GFX10-NEXT: v_perm_b32 v1, v4, v3, 0x7060302 +; GFX10-NEXT: v_perm_b32 v3, v35, v6, 0x7060302 +; GFX10-NEXT: v_perm_b32 v6, v18, v17, 0x7060302 +; GFX10-NEXT: v_perm_b32 v2, v2, v5, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v22, v37, v0, vcc_lo +; GFX10-NEXT: v_perm_b32 v0, v20, v19, 0x7060302 +; GFX10-NEXT: v_perm_b32 v5, v8, v21, 0x7060302 +; GFX10-NEXT: v_perm_b32 v8, v52, v25, 0x7060302 +; 
GFX10-NEXT: v_perm_b32 v4, v22, v7, 0x7060302 +; GFX10-NEXT: v_perm_b32 v7, v16, v53, 0x7060302 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11TRUE16-LABEL: v_fma_v32bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: s_clause 0x10 +; GFX11TRUE16-NEXT: scratch_load_b32 v31, off, s32 offset:64 +; GFX11TRUE16-NEXT: scratch_load_b32 v32, off, s32 +; GFX11TRUE16-NEXT: scratch_load_b32 v33, off, s32 offset:60 +; GFX11TRUE16-NEXT: scratch_load_b32 v34, off, s32 offset:56 +; GFX11TRUE16-NEXT: scratch_load_b32 v35, off, s32 offset:52 +; GFX11TRUE16-NEXT: scratch_load_b32 v36, off, s32 offset:48 +; GFX11TRUE16-NEXT: scratch_load_b32 v37, off, s32 offset:44 +; GFX11TRUE16-NEXT: scratch_load_b32 v38, off, s32 offset:40 +; GFX11TRUE16-NEXT: scratch_load_b32 v39, off, s32 offset:36 +; GFX11TRUE16-NEXT: scratch_load_b32 v48, off, s32 offset:32 +; GFX11TRUE16-NEXT: scratch_load_b32 v49, off, s32 offset:28 +; GFX11TRUE16-NEXT: scratch_load_b32 v50, off, s32 offset:24 +; GFX11TRUE16-NEXT: scratch_load_b32 v51, off, s32 offset:20 +; GFX11TRUE16-NEXT: scratch_load_b32 v52, off, s32 offset:16 +; GFX11TRUE16-NEXT: scratch_load_b32 v53, off, s32 offset:12 +; GFX11TRUE16-NEXT: scratch_load_b32 v54, off, s32 offset:8 +; GFX11TRUE16-NEXT: scratch_load_b32 v55, off, s32 offset:4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v99, 0xffff0000, v21 +; GFX11TRUE16-NEXT: v_and_b32_e32 v100, 0xffff0000, v5 +; GFX11TRUE16-NEXT: v_and_b32_e32 v101, 0xffff0000, v20 +; GFX11TRUE16-NEXT: v_and_b32_e32 v102, 0xffff0000, v4 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v115, 0xffff0000, v17 +; GFX11TRUE16-NEXT: v_and_b32_e32 v116, 0xffff0000, v1 +; GFX11TRUE16-NEXT: v_and_b32_e32 v97, 0xffff0000, v22 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX11TRUE16-NEXT: v_and_b32_e32 v117, 0xffff0000, v16 +; GFX11TRUE16-NEXT: v_and_b32_e32 v118, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v103, 0xffff0000, v19 +; GFX11TRUE16-NEXT: v_and_b32_e32 v112, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v85, 0xffff0000, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; GFX11TRUE16-NEXT: v_and_b32_e32 v113, 0xffff0000, v18 +; GFX11TRUE16-NEXT: v_and_b32_e32 v114, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(16) +; GFX11TRUE16-NEXT: v_and_b32_e32 v119, 0xffff0000, v31 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(15) +; GFX11TRUE16-NEXT: v_and_b32_e32 v128, 0xffff0000, v32 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(14) +; GFX11TRUE16-NEXT: v_and_b32_e32 v129, 0xffff0000, v33 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v33, 16, v33 +; GFX11TRUE16-NEXT: v_and_b32_e32 v68, 0xffff0000, v13 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(12) +; GFX11TRUE16-NEXT: v_and_b32_e32 v131, 0xffff0000, v35 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(10) +; GFX11TRUE16-NEXT: v_and_b32_e32 v133, 0xffff0000, v37 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(9) +; GFX11TRUE16-NEXT: v_and_b32_e32 v134, 0xffff0000, v38 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v37, 16, v37 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(7) +; GFX11TRUE16-NEXT: v_and_b32_e32 v144, 0xffff0000, v48 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v48, 16, v48 +; GFX11TRUE16-NEXT: 
s_waitcnt vmcnt(5) +; GFX11TRUE16-NEXT: v_and_b32_e32 v146, 0xffff0000, v50 +; GFX11TRUE16-NEXT: v_and_b32_e32 v145, 0xffff0000, v49 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v49, 16, v49 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(4) +; GFX11TRUE16-NEXT: v_and_b32_e32 v147, 0xffff0000, v51 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v51, 16, v51 +; GFX11TRUE16-NEXT: v_and_b32_e32 v96, 0xffff0000, v7 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11TRUE16-NEXT: v_and_b32_e32 v148, 0xffff0000, v55 +; GFX11TRUE16-NEXT: v_and_b32_e32 v87, 0xffff0000, v23 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX11TRUE16-NEXT: v_and_b32_e32 v83, 0xffff0000, v25 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v146, v100, v99 :: v_dual_lshlrev_b32 v25, 16, v25 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v55, 16, v55 +; GFX11TRUE16-NEXT: v_and_b32_e32 v98, 0xffff0000, v6 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v84, 0xffff0000, v9 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v48, v7, v23 +; GFX11TRUE16-NEXT: v_and_b32_e32 v135, 0xffff0000, v39 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v39, 16, v39 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v49, v6, v22 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v134, v84, v83 :: v_dual_lshlrev_b32 v13, 16, v13 +; GFX11TRUE16-NEXT: v_bfe_u32 v83, v146, 16, 1 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v51, v4, v20 :: v_dual_fmac_f32 v148, v118, v117 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v144, v96, v87 :: v_dual_and_b32 v81, 0xffff0000, v26 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v55, v0, v16 :: v_dual_lshlrev_b32 v26, 16, v26 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v145, v98, v97 +; GFX11TRUE16-NEXT: v_or_b32_e32 v84, 0x400000, v146 +; GFX11TRUE16-NEXT: v_add3_u32 v83, v83, v146, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX11TRUE16-NEXT: v_and_b32_e32 v86, 0xffff0000, v8 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX11TRUE16-NEXT: v_and_b32_e32 v82, 0xffff0000, v10 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v147, v102, v101 :: v_dual_lshlrev_b32 v10, 16, v10 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v38, 16, v38 +; GFX11TRUE16-NEXT: v_and_b32_e32 v69, 0xffff0000, v28 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v37, v10, v26 :: v_dual_lshlrev_b32 v28, 16, v28 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v39, v8, v24 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v133, v82, v81 :: v_dual_and_b32 v70, 0xffff0000, v12 +; GFX11TRUE16-NEXT: v_bfe_u32 v97, v51, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v37, 16, 1 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v135, v86, v85 :: v_dual_lshlrev_b32 v12, 16, v12 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v35, 16, v35 +; GFX11TRUE16-NEXT: v_and_b32_e32 v80, 0xffff0000, v11 +; GFX11TRUE16-NEXT: v_and_b32_e32 v132, 0xffff0000, v36 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v36, 16, v36 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v50, 16, v50 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v133 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX11TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v37 +; GFX11TRUE16-NEXT: v_or_b32_e32 v98, 0x400000, v51 +; GFX11TRUE16-NEXT: v_add3_u32 v23, v23, v37, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX11TRUE16-NEXT: v_and_b32_e32 v71, 0xffff0000, v27 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; GFX11TRUE16-NEXT: v_add3_u32 v97, v97, v51, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX11TRUE16-NEXT: 
v_lshlrev_b32_e32 v11, 16, v11 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v32, 16, v32 +; GFX11TRUE16-NEXT: v_and_b32_e32 v64, 0xffff0000, v15 +; GFX11TRUE16-NEXT: v_and_b32_e32 v130, 0xffff0000, v34 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v35, v12, v28 :: v_dual_lshlrev_b32 v34, 16, v34 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v36, v11, v27 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v50, v5, v21 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v132, v80, v71 :: v_dual_and_b32 v67, 0xffff0000, v29 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v29, 16, v29 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v130, v68, v67 :: v_dual_and_b32 v65, 0xffff0000, v30 +; GFX11TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v36 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v34, v13, v29 :: v_dual_fmac_f32 v31, v15, v32 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v119, v64, v128 :: v_dual_and_b32 v66, 0xffff0000, v14 +; GFX11TRUE16-NEXT: v_and_b32_e32 v64, 0xffff0000, v52 +; GFX11TRUE16-NEXT: v_and_b32_e32 v128, 0xffff0000, v53 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v53, 16, v53 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v129, v66, v65 :: v_dual_lshlrev_b32 v30, 16, v30 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v52, 16, v52 +; GFX11TRUE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v54 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v54, 16, v54 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v64, v112, v103 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v38, v9, v25 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v131, v70, v69 :: v_dual_lshlrev_b32 v14, 16, v14 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v53, v2, v18 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v119, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v2, v31, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v33, v14, v30 :: v_dual_fmac_f32 v52, v3, v19 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v54, v1, v17 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v119 +; GFX11TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v31 +; GFX11TRUE16-NEXT: v_bfe_u32 v4, v129, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v119, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v119, v119 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v31, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e64 s0, v31, v31 +; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v129 +; GFX11TRUE16-NEXT: v_bfe_u32 v6, v33, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v14, v132, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v15, v0, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cndmask_b32_e64 v149, v2, v3, s0 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v4, v129, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v129, v129 +; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v33 +; GFX11TRUE16-NEXT: v_bfe_u32 v8, v130, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v3, v6, v33, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v150, v14, v132, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v14, v2, v5, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33 +; GFX11TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v130 +; GFX11TRUE16-NEXT: v_bfe_u32 v10, v34, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v13, v35, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v4, v8, v130, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v33, v3, v7, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v130, v130 +; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v34 +; GFX11TRUE16-NEXT: v_bfe_u32 v12, v131, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v6, v10, v34, 0x7fff +; 
GFX11TRUE16-NEXT: v_add3_u32 v10, v13, v35, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v13, v4, v9, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34 +; GFX11TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v131 +; GFX11TRUE16-NEXT: v_add3_u32 v8, v12, v131, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v35 +; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v132 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v34, v6, v11, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v131, v131 +; GFX11TRUE16-NEXT: v_bfe_u32 v19, v36, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v21, v133, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v25, v134, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v134 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v12, v8, v16, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35 +; GFX11TRUE16-NEXT: v_add3_u32 v19, v19, v36, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v21, v21, v133, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v27, v38, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v25, v25, v134, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v16, v10, v17, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v132, v132 +; GFX11TRUE16-NEXT: v_or_b32_e32 v28, 0x400000, v38 +; GFX11TRUE16-NEXT: v_bfe_u32 v29, v135, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v27, v27, v38, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v30, 0x400000, v135 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v150, v18, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 +; GFX11TRUE16-NEXT: v_bfe_u32 v65, v39, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v29, v29, v135, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v66, 0x400000, v39 +; GFX11TRUE16-NEXT: v_bfe_u32 v67, v144, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v17, v19, v20, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v133, v133 +; GFX11TRUE16-NEXT: v_add3_u32 v65, v65, v39, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v68, 0x400000, v144 +; GFX11TRUE16-NEXT: v_bfe_u32 v69, v48, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v67, v67, v144, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v10, v21, v22, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37 +; GFX11TRUE16-NEXT: v_or_b32_e32 v70, 0x400000, v48 +; GFX11TRUE16-NEXT: v_bfe_u32 v71, v145, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v69, v69, v48, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v80, 0x400000, v145 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v18, v23, v24, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v134, v134 +; GFX11TRUE16-NEXT: v_bfe_u32 v81, v49, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v71, v71, v145, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v82, 0x400000, v49 +; GFX11TRUE16-NEXT: v_bfe_u32 v85, v50, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v9, v25, v26, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38 +; GFX11TRUE16-NEXT: v_add3_u32 v81, v81, v49, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v86, 0x400000, v50 +; GFX11TRUE16-NEXT: v_bfe_u32 v87, v147, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v85, v85, v50, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v19, v27, v28, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v135, v135 +; GFX11TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v147 +; GFX11TRUE16-NEXT: v_add3_u32 v87, v87, v147, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v99, v64, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v100, 0x400000, v64 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v8, v29, v30, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39 +; GFX11TRUE16-NEXT: v_bfe_u32 v101, v52, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v99, v99, v64, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v102, 0x400000, v52 +; 
GFX11TRUE16-NEXT: v_bfe_u32 v117, v54, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v20, v65, v66, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v144, v144 +; GFX11TRUE16-NEXT: v_add3_u32 v101, v101, v52, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v118, 0x400000, v54 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v55, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v117, v117, v54, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v67, v68, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v55 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v55, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v119, v148, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v148 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v21, v69, v70, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v145, v145 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v8.l, v20.h +; GFX11TRUE16-NEXT: v_add3_u32 v119, v119, v148, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v9.l, v19.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v71, v80, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v10.l, v18.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v11.l, v17.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v12.l, v16.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v13.l, v34.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v22, v81, v82, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v146, v146 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v14.l, v33.h +; GFX11TRUE16-NEXT: v_mov_b16_e64 v15.l, v149.h +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v6.l, v22.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v5, v83, v84, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v23, v85, v86, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v147, v147 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, v23.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v4, v87, v96, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v128, v114, v113 +; GFX11TRUE16-NEXT: v_bfe_u32 v113, v53, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v114, 0x400000, v53 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v24, v97, v98, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64 +; GFX11TRUE16-NEXT: v_bfe_u32 v103, v128, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v112, 0x400000, v128 +; GFX11TRUE16-NEXT: v_add3_u32 v113, v113, v53, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, v24.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v99, v100, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52 +; GFX11TRUE16-NEXT: v_add3_u32 v103, v103, v128, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v25, v101, v102, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v128, v128 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v32, v116, v115 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v25.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v103, v112, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53 +; GFX11TRUE16-NEXT: v_bfe_u32 v115, v32, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v116, 0x400000, v32 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v26, v113, v114, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_add3_u32 v115, v115, v32, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v26.h 
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v27, v117, v118, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v28, v0, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v115, v116, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v148, v148 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v27.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v119, v31, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v28.h +; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11FAKE16-LABEL: v_fma_v32bf16: +; GFX11FAKE16: ; %bb.0: +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: s_clause 0x10 +; GFX11FAKE16-NEXT: scratch_load_b32 v31, off, s32 offset:64 +; GFX11FAKE16-NEXT: scratch_load_b32 v32, off, s32 +; GFX11FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:60 +; GFX11FAKE16-NEXT: scratch_load_b32 v34, off, s32 offset:56 +; GFX11FAKE16-NEXT: scratch_load_b32 v35, off, s32 offset:52 +; GFX11FAKE16-NEXT: scratch_load_b32 v36, off, s32 offset:48 +; GFX11FAKE16-NEXT: scratch_load_b32 v37, off, s32 offset:44 +; GFX11FAKE16-NEXT: scratch_load_b32 v38, off, s32 offset:40 +; GFX11FAKE16-NEXT: scratch_load_b32 v39, off, s32 offset:36 +; GFX11FAKE16-NEXT: scratch_load_b32 v48, off, s32 offset:32 +; GFX11FAKE16-NEXT: scratch_load_b32 v49, off, s32 offset:28 +; GFX11FAKE16-NEXT: scratch_load_b32 v50, off, s32 offset:24 +; GFX11FAKE16-NEXT: scratch_load_b32 v51, off, s32 offset:20 +; GFX11FAKE16-NEXT: scratch_load_b32 v52, off, s32 offset:16 +; GFX11FAKE16-NEXT: scratch_load_b32 v53, off, s32 offset:12 +; GFX11FAKE16-NEXT: scratch_load_b32 v54, off, s32 offset:8 +; GFX11FAKE16-NEXT: scratch_load_b32 v55, off, s32 offset:4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v99, 16, v21 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v100, 16, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v97, 16, v22 +; GFX11FAKE16-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v101, 16, v20 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v102, 16, v4 +; GFX11FAKE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v117, 16, v16 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v118, 16, v0 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v87, 16, v23 +; GFX11FAKE16-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX11FAKE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v98, 16, v6 +; GFX11FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v103, 16, v19 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v112, 16, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v85, 16, v24 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v113, 16, v18 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v114, 16, v2 +; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v115, 16, v17 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v116, 16, v1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX11FAKE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(15) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v128, 16, v32 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(14) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v129, 16, v33 +; GFX11FAKE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; 
GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v68, 16, v13 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(12) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v131, 16, v35 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(10) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v133, 16, v37 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(9) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v134, 16, v38 +; GFX11FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v37 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(7) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v144, 16, v48 +; GFX11FAKE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v48 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(5) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v146, 16, v50 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v145, 16, v49 +; GFX11FAKE16-NEXT: v_and_b32_e32 v49, 0xffff0000, v49 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v84, 16, v9 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(4) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v147, 16, v51 +; GFX11FAKE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v51 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v96, 16, v7 +; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v83, 16, v25 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v146, v100, v99 :: v_dual_and_b32 v25, 0xffff0000, v25 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v135, 16, v39 +; GFX11FAKE16-NEXT: v_and_b32_e32 v39, 0xffff0000, v39 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v48, v7, v23 :: v_dual_fmac_f32 v49, v6, v22 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v134, v84, v83 :: v_dual_and_b32 v13, 0xffff0000, v13 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v51, v4, v20 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v144, v96, v87 :: v_dual_lshlrev_b32 v81, 16, v26 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v145, v98, v97 :: v_dual_and_b32 v26, 0xffff0000, v26 +; GFX11FAKE16-NEXT: v_or_b32_e32 v84, 0x400000, v146 +; GFX11FAKE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v86, 16, v8 +; GFX11FAKE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v82, 16, v10 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v147, v102, v101 :: v_dual_and_b32 v10, 0xffff0000, v10 +; GFX11FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX11FAKE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v38 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v69, 16, v28 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v37, v10, v26 :: v_dual_and_b32 v28, 0xffff0000, v28 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v39, v8, v24 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v133, v82, v81 :: v_dual_lshlrev_b32 v70, 16, v12 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v135, v86, v85 :: v_dual_and_b32 v12, 0xffff0000, v12 +; GFX11FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v35 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v80, 16, v11 +; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v132, 16, v36 +; GFX11FAKE16-NEXT: v_and_b32_e32 v36, 0xffff0000, v36 +; GFX11FAKE16-NEXT: v_and_b32_e32 v50, 0xffff0000, v50 +; GFX11FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v133 +; GFX11FAKE16-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v37 +; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v71, 16, v27 +; GFX11FAKE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX11FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v130, 16, v34 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v35, v12, v28 :: v_dual_and_b32 v34, 0xffff0000, v34 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v36, v11, v27 +; 
GFX11FAKE16-NEXT: v_fmac_f32_e32 v50, v5, v21
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v132, v80, v71 :: v_dual_lshlrev_b32 v67, 16, v29
+; GFX11FAKE16-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX11FAKE16-NEXT: v_or_b32_e32 v98, 0x400000, v51
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v119, 16, v31
+; GFX11FAKE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v64, 16, v15
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v34, v13, v29 :: v_dual_and_b32 v15, 0xffff0000, v15
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v130, v68, v67 :: v_dual_lshlrev_b32 v65, 16, v30
+; GFX11FAKE16-NEXT: v_bfe_u32 v23, v37, 16, 1
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v31, v15, v32 :: v_dual_lshlrev_b32 v66, 16, v14
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v119, v64, v128
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v64, 16, v52
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(2)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v128, 16, v53
+; GFX11FAKE16-NEXT: v_and_b32_e32 v53, 0xffff0000, v53
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v54
+; GFX11FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v54
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v54, 16, v55
+; GFX11FAKE16-NEXT: v_and_b32_e32 v55, 0xffff0000, v55
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v129, v66, v65 :: v_dual_and_b32 v30, 0xffff0000, v30
+; GFX11FAKE16-NEXT: v_and_b32_e32 v52, 0xffff0000, v52
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v64, v112, v103
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v38, v9, v25
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v131, v70, v69 :: v_dual_and_b32 v14, 0xffff0000, v14
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v53, v2, v18
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v55, v0, v16
+; GFX11FAKE16-NEXT: v_bfe_u32 v0, v119, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v2, v31, 16, 1
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v33, v14, v30 :: v_dual_fmac_f32 v52, v3, v19
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v32, v1, v17
+; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v119
+; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v31
+; GFX11FAKE16-NEXT: v_bfe_u32 v4, v129, 16, 1
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v119, 0x7fff
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v119, v119
+; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v31, 0x7fff
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e64 s0, v31, v31
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v128, v114, v113
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v54, v118, v117
+; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v129
+; GFX11FAKE16-NEXT: v_bfe_u32 v6, v33, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v10, v34, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v14, v35, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v19, v36, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v27, v38, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v65, v39, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v69, v48, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v81, v49, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v85, v50, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v97, v51, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v101, v52, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v113, v53, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v117, v32, 16, 1
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v148, v0, v1, vcc_lo
+; GFX11FAKE16-NEXT: v_cndmask_b32_e64 v149, v2, v3, s0
+; GFX11FAKE16-NEXT: v_add3_u32 v2, v4, v129, 0x7fff
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v129, v129
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v15, v116, v115
+; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v33
+; GFX11FAKE16-NEXT: v_bfe_u32 v8, v130, 16, 1
+; GFX11FAKE16-NEXT: v_add3_u32 v3, v6, v33, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v6, v10, v34, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v10, v14, v35, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v14, v19, v36, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v19, v23, v37, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v23, v27, v38, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v27, v65, v39, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v65, v69, v48, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v69, v81, v49, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v81, v85, v50, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v85, v97, v51, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v97, v101, v52, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v101, v113, v53, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v113, v117, v32, 0x7fff
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v117, v2, v5, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v130
+; GFX11FAKE16-NEXT: v_bfe_u32 v12, v131, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v17, v132, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v21, v133, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v25, v134, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v29, v135, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v67, v144, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v71, v145, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v83, v146, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v87, v147, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v99, v64, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v103, v128, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v115, v15, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v119, v54, 16, 1
+; GFX11FAKE16-NEXT: v_add3_u32 v4, v8, v130, 0x7fff
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v33, v3, v7, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v130, v130
+; GFX11FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v34
+; GFX11FAKE16-NEXT: v_add3_u32 v8, v12, v131, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v12, v17, v132, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v17, v21, v133, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v21, v25, v134, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v25, v29, v135, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v29, v67, v144, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v67, v71, v145, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v71, v83, v146, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v83, v87, v147, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v87, v99, v64, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v99, v103, v128, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v103, v115, v15, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v115, v119, v54, 0x7fff
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v119, v4, v9, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v131
+; GFX11FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v35
+; GFX11FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v132
+; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v36
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v34, v6, v11, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v131, v131
+; GFX11FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v134
+; GFX11FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v38
+; GFX11FAKE16-NEXT: v_or_b32_e32 v30, 0x400000, v135
+; GFX11FAKE16-NEXT: v_or_b32_e32 v66, 0x400000, v39
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v13, v8, v13, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11FAKE16-NEXT: v_or_b32_e32 v68, 0x400000, v144
+; GFX11FAKE16-NEXT: v_or_b32_e32 v70, 0x400000, v48
+; GFX11FAKE16-NEXT: v_or_b32_e32 v80, 0x400000, v145
+; GFX11FAKE16-NEXT: v_or_b32_e32 v82, 0x400000, v49
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v16, v10, v16, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v132, v132
+; GFX11FAKE16-NEXT: v_or_b32_e32 v86, 0x400000, v50
+; GFX11FAKE16-NEXT: v_or_b32_e32 v96, 0x400000, v147
+; GFX11FAKE16-NEXT: v_or_b32_e32 v100, 0x400000, v64
+; GFX11FAKE16-NEXT: v_or_b32_e32 v102, 0x400000, v52
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v18, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11FAKE16-NEXT: v_or_b32_e32 v112, 0x400000, v128
+; GFX11FAKE16-NEXT: v_or_b32_e32 v116, 0x400000, v15
+; GFX11FAKE16-NEXT: v_or_b32_e32 v118, 0x400000, v32
+; GFX11FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v54
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v20, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v133, v133
+; GFX11FAKE16-NEXT: v_bfe_u32 v0, v55, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v55
+; GFX11FAKE16-NEXT: v_or_b32_e32 v114, 0x400000, v53
+; GFX11FAKE16-NEXT: v_perm_b32 v11, v12, v11, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v10, v17, v22, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v55, 0x7fff
+; GFX11FAKE16-NEXT: v_perm_b32 v12, v16, v13, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v13, v34, v119, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v14, v19, v24, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v134, v134
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v10, v14, v10, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v9, v21, v26, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11FAKE16-NEXT: v_perm_b32 v14, v33, v117, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v17, v23, v28, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v135, v135
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v9, v17, v9, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v8, v25, v30, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v18, v27, v66, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v144, v144
+; GFX11FAKE16-NEXT: v_perm_b32 v8, v18, v8, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v7, v29, v68, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v19, v65, v70, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v145, v145
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v7, v19, v7, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v6, v67, v80, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v20, v69, v82, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v146, v146
+; GFX11FAKE16-NEXT: v_perm_b32 v6, v20, v6, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v5, v71, v84, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v21, v81, v86, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v147, v147
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v5, v21, v5, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v83, v96, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v87, v100, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v22, v97, v102, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v128, v128
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v3, v22, v3, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v99, v112, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v15, v103, v116, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v23, v113, v118, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v24, v115, v31, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53
+; GFX11FAKE16-NEXT: v_perm_b32 v1, v23, v15, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v15, v149, v148, 0x7060302
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v24, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v25, v101, v114, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51
+; GFX11FAKE16-NEXT: v_perm_b32 v2, v25, v2, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v26, v85, v98, vcc_lo
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11FAKE16-NEXT: v_perm_b32 v4, v26, v4, 0x7060302
+; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-LABEL: v_fma_v32bf16:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_clause 0x10
-; GFX1250-NEXT: scratch_load_b32 v31, off, s32 offset:64
-; GFX1250-NEXT: scratch_load_b32 v32, off, s32 offset:4
-; GFX1250-NEXT: scratch_load_b32 v33, off, s32 offset:8
-; GFX1250-NEXT: scratch_load_b32 v34, off, s32 offset:12
-; GFX1250-NEXT: scratch_load_b32 v35, off, s32 offset:16
-; GFX1250-NEXT: scratch_load_b32 v36, off, s32 offset:20
-; GFX1250-NEXT: scratch_load_b32 v37, off, s32 offset:24
-; GFX1250-NEXT: scratch_load_b32 v38, off, s32 offset:28
-; GFX1250-NEXT: scratch_load_b32 v39, off, s32 offset:32
-; GFX1250-NEXT: scratch_load_b32 v48, off, s32 offset:36
-; GFX1250-NEXT: scratch_load_b32 v49, off, s32 offset:40
-; GFX1250-NEXT: scratch_load_b32 v50, off, s32 offset:44
-; GFX1250-NEXT: scratch_load_b32 v51, off, s32 offset:48
-; GFX1250-NEXT: scratch_load_b32 v52, off, s32 offset:52
-; GFX1250-NEXT: scratch_load_b32 v53, off, s32 offset:56
-; GFX1250-NEXT: scratch_load_b32 v54, off, s32 offset:60
-; GFX1250-NEXT: scratch_load_b32 v55, off, s32
-; GFX1250-NEXT: s_wait_loadcnt 0xf
-; GFX1250-NEXT: v_pk_fma_bf16 v0, v0, v16, v32
-; GFX1250-NEXT: s_wait_loadcnt 0xe
-; GFX1250-NEXT: v_pk_fma_bf16 v1, v1, v17, v33
-; GFX1250-NEXT: s_wait_loadcnt 0xd
-; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v18, v34
-; GFX1250-NEXT: s_wait_loadcnt 0xc
-; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v19, v35
-; GFX1250-NEXT: s_wait_loadcnt 0xb
-; GFX1250-NEXT: v_pk_fma_bf16 v4, v4, v20, v36
-; GFX1250-NEXT: s_wait_loadcnt 0xa
-; GFX1250-NEXT: v_pk_fma_bf16 v5, v5, v21, v37
-; GFX1250-NEXT: s_wait_loadcnt 0x9
-; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v22, v38
-; GFX1250-NEXT: s_wait_loadcnt 0x8
-; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v23, v39
-; GFX1250-NEXT: s_wait_loadcnt 0x7
-; GFX1250-NEXT: v_pk_fma_bf16 v8, v8, v24, v48
-; GFX1250-NEXT: s_wait_loadcnt 0x6
-; GFX1250-NEXT: v_pk_fma_bf16 v9, v9, v25, v49
-; GFX1250-NEXT: s_wait_loadcnt 0x5
-; GFX1250-NEXT: v_pk_fma_bf16 v10, v10, v26, v50
-; GFX1250-NEXT: s_wait_loadcnt 0x4
-; GFX1250-NEXT: v_pk_fma_bf16 v11, v11, v27, v51
-; GFX1250-NEXT: s_wait_loadcnt 0x3
-; GFX1250-NEXT: v_pk_fma_bf16 v12, v12, v28, v52
-; GFX1250-NEXT: s_wait_loadcnt 0x2
-; GFX1250-NEXT: v_pk_fma_bf16 v13, v13, v29, v53
-; GFX1250-NEXT: s_wait_loadcnt 0x1
-; GFX1250-NEXT: v_pk_fma_bf16 v14, v14, v30, v54
-; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_fma_bf16 v15, v15, v55, v31
-; GFX1250-NEXT: s_set_pc_i64 s[30:31]
-define <32 x bfloat> @v_fma_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c) {
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x10
+; GFX1250-NEXT: scratch_load_b32 v31, off, s32 offset:64
+; GFX1250-NEXT: scratch_load_b32 v32, off, s32 offset:4
+; GFX1250-NEXT: scratch_load_b32 v33, off, s32 offset:8
+; GFX1250-NEXT: scratch_load_b32 v34, off, s32 offset:12
+; GFX1250-NEXT: scratch_load_b32 v35, off, s32 offset:16
+; GFX1250-NEXT: scratch_load_b32 v36, off, s32 offset:20
+; GFX1250-NEXT: scratch_load_b32 v37, off, s32 offset:24
+; GFX1250-NEXT: scratch_load_b32 v38, off, s32 offset:28
+; GFX1250-NEXT: scratch_load_b32 v39, off, s32 offset:32
+; GFX1250-NEXT: scratch_load_b32 v48, off, s32 offset:36
+; GFX1250-NEXT: scratch_load_b32 v49, off, s32 offset:40
+; GFX1250-NEXT: scratch_load_b32 v50, off, s32 offset:44
+; GFX1250-NEXT: scratch_load_b32 v51, off, s32 offset:48
+; GFX1250-NEXT: scratch_load_b32 v52, off, s32 offset:52
+; GFX1250-NEXT: scratch_load_b32 v53, off, s32 offset:56
+; GFX1250-NEXT: scratch_load_b32 v54, off, s32 offset:60
+; GFX1250-NEXT: scratch_load_b32 v55, off, s32
+; GFX1250-NEXT: s_wait_loadcnt 0xf
+; GFX1250-NEXT: v_pk_fma_bf16 v0, v0, v16, v32
+; GFX1250-NEXT: s_wait_loadcnt 0xe
+; GFX1250-NEXT: v_pk_fma_bf16 v1, v1, v17, v33
+; GFX1250-NEXT: s_wait_loadcnt 0xd
+; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v18, v34
+; GFX1250-NEXT: s_wait_loadcnt 0xc
+; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v19, v35
+; GFX1250-NEXT: s_wait_loadcnt 0xb
+; GFX1250-NEXT: v_pk_fma_bf16 v4, v4, v20, v36
+; GFX1250-NEXT: s_wait_loadcnt 0xa
+; GFX1250-NEXT: v_pk_fma_bf16 v5, v5, v21, v37
+; GFX1250-NEXT: s_wait_loadcnt 0x9
+; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v22, v38
+; GFX1250-NEXT: s_wait_loadcnt 0x8
+; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v23, v39
+; GFX1250-NEXT: s_wait_loadcnt 0x7
+; GFX1250-NEXT: v_pk_fma_bf16 v8, v8, v24, v48
+; GFX1250-NEXT: s_wait_loadcnt 0x6
+; GFX1250-NEXT: v_pk_fma_bf16 v9, v9, v25, v49
+; GFX1250-NEXT: s_wait_loadcnt 0x5
+; GFX1250-NEXT: v_pk_fma_bf16 v10, v10, v26, v50
+; GFX1250-NEXT: s_wait_loadcnt 0x4
+; GFX1250-NEXT: v_pk_fma_bf16 v11, v11, v27, v51
+; GFX1250-NEXT: s_wait_loadcnt 0x3
+; GFX1250-NEXT: v_pk_fma_bf16 v12, v12, v28, v52
+; GFX1250-NEXT: s_wait_loadcnt 0x2
+; GFX1250-NEXT: v_pk_fma_bf16 v13, v13, v29, v53
+; GFX1250-NEXT: s_wait_loadcnt 0x1
+; GFX1250-NEXT: v_pk_fma_bf16 v14, v14, v30, v54
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_pk_fma_bf16 v15, v15, v55, v31
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
 %op = call <32 x bfloat> @llvm.fma.v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c)
 ret <32 x bfloat> %op
}
diff --git a/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll b/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll
index ed48999..bd28f72 100644
--- a/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll
@@ -1,734 +1,759 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-SAFE %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-NNAN %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s
-; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI-SAFE %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NNAN %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI %s
-; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI-SAFE %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefixes=SI-NNAN %s
+; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI %s
-; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-TRUE16 %s
-; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-FAKE16 %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-TRUE16 %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-FAKE16 %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-FAKE16 %s

define half @test_fmax_legacy_ugt_f16(half %a, half %b) #0 {
-; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_max_f16_e32 v0, v0, v1
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmax_legacy_ugt_f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmax_legacy_ugt_f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v1
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmax_legacy_ugt_f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v1, v0
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmax_legacy_ugt_f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v1
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.l, v1.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-TRUE16-LABEL: test_fmax_legacy_ugt_f16:
-; GFX11-NNAN-TRUE16: ; %bb.0:
-; GFX11-NNAN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l
-; GFX11-NNAN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-FAKE16-LABEL: test_fmax_legacy_ugt_f16:
-; GFX11-NNAN-FAKE16: ; %bb.0:
-; GFX11-NNAN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1
-; GFX11-NNAN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmax_legacy_ugt_f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_max_legacy_f32_e32 v0, v1, v0
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.l, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
 %cmp = fcmp ugt half %a, %b
 %val = select i1 %cmp, half %a, half %b
 ret half %val
}
+define half @test_fmax_legacy_ugt_f16_fast(half %a, half %b) #0 {
+; GFX9-LABEL: test_fmax_legacy_ugt_f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_max_f16_e32 v0, v0, v1
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_max_f32_e32 v0, v0, v1
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_f16_fast:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_f16_fast:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ugt half %a, %b
+ %val = select nnan nsz i1 %cmp, half %a, half %b
+ ret half %val
+}
+
define <2 x half> @test_fmax_legacy_ugt_v2f16(<2 x half> %a, <2 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v2f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v2, v0, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v2f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v1
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmax_legacy_ugt_v2f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2
-; VI-SAFE-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmax_legacy_ugt_v2f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_max_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v1
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v2
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmax_legacy_ugt_v2f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v2, v0
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v3, v1
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmax_legacy_ugt_v2f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v2
-; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v3
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v2f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v1.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v1.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v2f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v2
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v2f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v1
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmax_legacy_ugt_v2f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v2, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v2f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2
+; VI-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v2f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_max_legacy_f32_e32 v0, v2, v0
+; SI-NEXT: v_max_legacy_f32_e32 v1, v3, v1
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v2f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v1.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v2f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
 %cmp = fcmp ugt <2 x half> %a, %b
 %val = select <2 x i1> %cmp, <2 x half> %a, <2 x half> %b
 ret <2 x half> %val
}
+define <2 x half> @test_fmax_legacy_ugt_v2f16_fast(<2 x half> %a, <2 x half> %b) #0 {
+; GFX9-LABEL: test_fmax_legacy_ugt_v2f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v2f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_max_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_e32 v0, v0, v1
+; VI-NEXT: v_or_b32_e32 v0, v0, v2
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v2f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_max_f32_e32 v0, v0, v2
+; SI-NEXT: v_max_f32_e32 v1, v1, v3
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmax_legacy_ugt_v2f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_f16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ugt <2 x half> %a, %b
+ %val = select nnan nsz <2 x i1> %cmp, <2 x half> %a, <2 x half> %b
+ ret <2 x half> %val
+}
+
define <3 x half> @test_fmax_legacy_ugt_v3f16(<3 x half> %a, <3 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v3f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v3f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v3
-; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmax_legacy_ugt_v3f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
-; VI-SAFE-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmax_legacy_ugt_v3f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v2
-; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v3
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v4
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmax_legacy_ugt_v3f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v3, v0
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v4, v1
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v5, v2
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmax_legacy_ugt_v3f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v3
-; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v4
-; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v5
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v3f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v2.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v2.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v1.l, v3.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v3f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v3f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v3
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmax_legacy_ugt_v3f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v3f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
+; VI-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
+; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
+; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v3f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_max_legacy_f32_e32 v0, v3, v0
+; SI-NEXT: v_max_legacy_f32_e32 v1, v4, v1
+; SI-NEXT: v_max_legacy_f32_e32 v2, v5, v2
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v3f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v2.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v3f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
 %cmp = fcmp ugt <3 x half> %a, %b
 %val = select <3 x i1> %cmp, <3 x half> %a, <3 x half> %b
 ret <3 x half> %val
}
+define <3 x half> @test_fmax_legacy_ugt_v3f16_fast(<3 x half> %a, <3 x half> %b) #0 {
+; GFX9-LABEL: test_fmax_legacy_ugt_v3f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v3, v3, v3
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX9-NEXT: v_pk_max_f16 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v3f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_e32 v0, v0, v2
+; VI-NEXT: v_max_f16_e32 v1, v1, v3
+; VI-NEXT: v_or_b32_e32 v0, v0, v4
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v3f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_max_f32_e32 v0, v0, v3
+; SI-NEXT: v_max_f32_e32 v1, v1, v4
+; SI-NEXT: v_max_f32_e32 v2, v2, v5
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmax_legacy_ugt_v3f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_f16 v3, v3, v3
+; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
+; GFX11-NEXT: v_pk_max_f16 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ugt <3 x half> %a, %b
+ %val = select nnan nsz <3 x i1> %cmp, <3 x half> %a, <3 x half> %b
+ ret <3 x half> %val
+}
+
define <4 x half> @test_fmax_legacy_ugt_v4f16(<4 x half> %a, <4 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v4f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4
-; GFX9-SAFE-NEXT: v_perm_b32 v1, v6, v1, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v4f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v3
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmax_legacy_ugt_v4f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v4
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v6
-; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmax_legacy_ugt_v4f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_max_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v3
-; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v2
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v5
-; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v4
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmax_legacy_ugt_v4f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v4, v0
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v5, v1
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v6, v2
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v3, v7, v3
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmax_legacy_ugt_v4f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v4
-; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v5
-; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v6
-; SI-NNAN-NEXT: v_max_f32_e32 v3, v3, v7
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v4f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.h, v3.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.h, v2.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v0.l, v2.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v1.l, v3.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v4f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v7, v6
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v4f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v3
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmax_legacy_ugt_v4f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4
+; GFX9-NEXT: v_perm_b32 v1, v6, v1, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v4f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6
+; VI-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
+; VI-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
+; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
+; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v6
+; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v4f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_max_legacy_f32_e32 v0, v4, v0
+; SI-NEXT: v_max_legacy_f32_e32 v1, v5, v1
+; SI-NEXT: v_max_legacy_f32_e32 v2, v6, v2
+; SI-NEXT: v_max_legacy_f32_e32 v3, v7, v3
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v4f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.h, v3.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.h, v2.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v4f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v7, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
 %cmp = fcmp ugt <4 x half> %a, %b
 %val = select <4 x i1> %cmp, <4 x half> %a, <4 x half> %b
 ret <4 x half> %val
}
+define <4 x half> @test_fmax_legacy_ugt_v4f16_fast(<4 x half> %a, <4 x half> %b) #0 {
+; GFX9-LABEL: test_fmax_legacy_ugt_v4f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v0, v0, v2
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v4f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_max_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_e32 v1, v1, v3
+; VI-NEXT: v_max_f16_e32 v0, v0, v2
+; VI-NEXT: v_or_b32_e32 v0, v0, v5
+; VI-NEXT: v_or_b32_e32 v1, v1, v4
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v4f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_max_f32_e32 v0, v0, v4
+; SI-NEXT: v_max_f32_e32 v1, v1, v5
+; SI-NEXT: v_max_f32_e32 v2, v2, v6
+; SI-NEXT: v_max_f32_e32 v3, v3, v7
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmax_legacy_ugt_v4f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_f16 v0, v0, v2
+; GFX11-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ugt <4 x half> %a, %b
+ %val = select nnan nsz <4 x i1> %cmp, <4 x half> %a, <4 x half> %b
+ ret <4 x half> %val
+}
+
define <8 x half> @test_fmax_legacy_ugt_v8f16(<8 x half> %a, <8 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v8f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v8, v0, s4
-; GFX9-SAFE-NEXT: v_perm_b32 v1, v10, v1, s4
-; GFX9-SAFE-NEXT: v_perm_b32 v2, v12, v2, s4
-; GFX9-SAFE-NEXT: v_perm_b32 v3, v14, v3, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v8f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v4
-; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v5
-; GFX9-NNAN-NEXT: v_pk_max_f16 v2, v2, v6
-; GFX9-NNAN-NEXT: v_pk_max_f16 v3, v3, v7
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmax_legacy_ugt_v8f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v8
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v10
-; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v12
-; VI-SAFE-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v14
-; VI-SAFE-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmax_legacy_ugt_v8f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_max_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_e32 v3, v3, v7
-; VI-NNAN-NEXT: v_max_f16_e32 v2, v2, v6
-; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v5
-; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v4
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v11
-; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v10
-; VI-NNAN-NEXT: v_or_b32_e32 v2, v2, v9
-; VI-NNAN-NEXT: v_or_b32_e32 v3, v3, v8
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmax_legacy_ugt_v8f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v14, v14
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v10, v10
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v9, v9
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v15, v15
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v14, v14
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v13, v13
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v12, v12
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v11, v11
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v10, v10
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v9, v9
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v8, v8
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v8, v0
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v9, v1
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v10, v2
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v3, v11, v3
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v4, v12, v4
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v5, v13, v5
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v6, v14, v6
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v7, v15, v7
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmax_legacy_ugt_v8f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v14, v14
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v10, v10
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v9, v9
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v15, v15
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v14, v14
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v13, v13
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v12, v12
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v11, v11
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v10, v10
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v9, v9
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v8, v8
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v8
-; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v9
-; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v10
-; SI-NNAN-NEXT: v_max_f32_e32 v3, v3, v11
-; SI-NNAN-NEXT: v_max_f32_e32 v4, v4, v12
-; SI-NNAN-NEXT: v_max_f32_e32 v5, v5, v13
-; SI-NNAN-NEXT: v_max_f32_e32 v6, v6, v14
-; SI-NNAN-NEXT: v_max_f32_e32 v7, v7, v15
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v8f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v4.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v1.h, v5.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v2.h, v6.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v3.h, v7.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s3, v0.l, v4.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s4, v1.l, v5.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s5, v2.l, v6.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s6, v3.l, v7.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v8f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v11, v10
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v13, v12
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v15, v14
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v9, v8
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v2, v6
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v4
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v5
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v7
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v8f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v4
-; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v5
-; GFX11-NNAN-NEXT: v_pk_max_f16 v2, v2, v6
-; GFX11-NNAN-NEXT: v_pk_max_f16 v3, v3, v7
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmax_legacy_ugt_v8f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v7
+; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v3
+; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v2
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v5
+; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v8, v0, s4
+; GFX9-NEXT: v_perm_b32 v1, v10, v1, s4
+; GFX9-NEXT: v_perm_b32 v2, v12, v2, s4
+; GFX9-NEXT: v_perm_b32 v3, v14, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v8f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v7
+; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v3
+; VI-NEXT: v_lshrrev_b32_e32 v12, 16, v6
+; VI-NEXT: v_lshrrev_b32_e32 v13, 16, v2
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14
+; VI-NEXT: v_lshrrev_b32_e32 v10, 16, v5
+; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; VI-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12
+; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; VI-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; VI-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10
+; VI-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8
+; VI-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7
+; VI-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6
+; VI-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5
+; VI-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v8
+; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v10
+; VI-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v12
+; VI-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:
v_lshlrev_b32_e32 v4, 16, v14 +; VI-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v8f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_max_legacy_f32_e32 v0, v8, v0 +; SI-NEXT: v_max_legacy_f32_e32 v1, v9, v1 +; SI-NEXT: v_max_legacy_f32_e32 v2, v10, v2 +; SI-NEXT: v_max_legacy_f32_e32 v3, v11, v3 +; SI-NEXT: v_max_legacy_f32_e32 v4, v12, v4 +; SI-NEXT: v_max_legacy_f32_e32 v5, v13, v5 +; SI-NEXT: v_max_legacy_f32_e32 v6, v14, v6 +; SI-NEXT: v_max_legacy_f32_e32 v7, v15, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v8f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v4.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v1.h, v5.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v2.h, v6.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v3.h, v7.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s3, v0.l, v4.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s4, v1.l, v5.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s5, v2.l, v6.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s6, v3.l, v7.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v8f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v11, v10 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX11-FAKE16-NEXT: 
v_lshrrev_b32_e32 v9, 16, v0 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v13, v12 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v15, v14 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v9, v8 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v2, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v5 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v7 +; GFX11-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt <8 x half> %a, %b %val = select <8 x i1> %cmp, <8 x half> %a, <8 x half> %b ret <8 x half> %val } +define <8 x half> @test_fmax_legacy_ugt_v8f16_fast(<8 x half> %a, <8 x half> %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_v8f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v0, v0, v4 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v5 +; GFX9-NEXT: v_pk_max_f16 v2, v2, v6 +; GFX9-NEXT: v_pk_max_f16 v3, v3, v7 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v8f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_e32 v3, v3, v7 +; VI-NEXT: v_max_f16_e32 v2, v2, v6 +; VI-NEXT: v_max_f16_e32 v1, v1, v5 +; VI-NEXT: v_max_f16_e32 v0, v0, v4 +; VI-NEXT: v_or_b32_e32 v0, v0, v11 +; VI-NEXT: v_or_b32_e32 v1, v1, v10 +; VI-NEXT: v_or_b32_e32 v2, v2, v9 +; VI-NEXT: v_or_b32_e32 v3, v3, v8 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v8f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: 
v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v8 +; SI-NEXT: v_max_f32_e32 v1, v1, v9 +; SI-NEXT: v_max_f32_e32 v2, v2, v10 +; SI-NEXT: v_max_f32_e32 v3, v3, v11 +; SI-NEXT: v_max_f32_e32 v4, v4, v12 +; SI-NEXT: v_max_f32_e32 v5, v5, v13 +; SI-NEXT: v_max_f32_e32 v6, v6, v14 +; SI-NEXT: v_max_f32_e32 v7, v7, v15 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmax_legacy_ugt_v8f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v0, v0, v4 +; GFX11-NEXT: v_pk_max_f16 v1, v1, v5 +; GFX11-NEXT: v_pk_max_f16 v2, v2, v6 +; GFX11-NEXT: v_pk_max_f16 v3, v3, v7 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt <8 x half> %a, %b + %val = select nnan nsz <8 x i1> %cmp, <8 x half> %a, <8 x half> %b + ret <8 x half> %val +} + attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll index eee2bd1..f3a84e6 100644 --- a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll +++ b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll @@ -1,8 +1,6 @@ -; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN,FUNC %s -; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN,FUNC %s ; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope --check-prefixes=EG,FUNC %s @@ -12,12 +10,10 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1 ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] -; VI-SAFE: v_cmp_nlt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] - -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; VI: v_cmp_nlt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_uge_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { @@ -34,18 +30,38 @@ define amdgpu_kernel void @test_fmax_legacy_uge_f32(ptr addrspace(1) %out, ptr a ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_uge_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = 
getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 + + %cmp = fcmp uge float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_nnan_src: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] ; GCN-DAG: v_add_f32_e32 [[ADD_A:v[0-9]+]], 1.0, [[A]] ; GCN-DAG: v_add_f32_e32 [[ADD_B:v[0-9]+]], 2.0, [[B]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] -; VI-SAFE: v_cmp_nlt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[ADD_B]], [[ADD_A]] +; VI: v_cmp_nlt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[ADD_B]], [[ADD_A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { @@ -64,16 +80,40 @@ define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src(ptr addrspace(1) %o ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_nnan_src_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] +; GCN-DAG: v_add_f32_e32 [[ADD_A:v[0-9]+]], 1.0, [[A]] +; GCN-DAG: v_add_f32_e32 [[ADD_B:v[0-9]+]], 2.0, [[B]] + +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] + +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 + %a.nnan = fadd nnan float %a, 1.0 + %b.nnan = fadd nnan float %b, 2.0 + + %cmp = fcmp uge float %a.nnan, %b.nnan + %val = select nnan nsz i1 %cmp, float %a.nnan, float %b.nnan + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_oge_f32: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] -; VI-SAFE: v_cmp_ge_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] +; VI: v_cmp_ge_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_oge_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 @@ -89,17 +129,35 @@ define amdgpu_kernel void @test_fmax_legacy_oge_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32: +; FUNC-LABEL: {{^}}test_fmax_legacy_oge_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_oge_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid 
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; VI-SAFE: v_cmp_nle_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %cmp = fcmp oge float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} +; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] + +; VI: v_cmp_nle_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_ugt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 @@ -115,16 +173,35 @@ define amdgpu_kernel void @test_fmax_legacy_ugt_f32(ptr addrspace(1) %out, ptr a ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_ugt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 + + %cmp = fcmp ugt float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] -; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] +; VI: v_cmp_gt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_ogt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 @@ -140,17 +217,35 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32: +; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_ogt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %cmp = fcmp ogt float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr 
addrspace(1) %out, align 4 + ret void +} +; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + +; VI: v_cmp_gt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 @@ -166,23 +261,39 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <1 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile <1 x float>, ptr addrspace(1) %gep.0 + %b = load volatile <1 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ogt <1 x float> %a, %b + %val = select nnan nsz <1 x i1> %cmp, <1 x float> %a, <1 x float> %b + store <1 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v3f32: -; SI-SAFE: v_max_legacy_f32_e32 -; SI-SAFE: v_max_legacy_f32_e32 -; SI-SAFE: v_max_legacy_f32_e32 - -; VI-SAFE: v_cmp_gt_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_gt_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_gt_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE-NOT: v_cmp -; VI-SAFE-NOT: v_cndmask - -; GCN-NONAN: v_max_f32_e32 -; GCN-NONAN: v_max_f32_e32 -; GCN-NONAN: v_max_f32_e32 +; SI: v_max_legacy_f32_e32 +; SI: v_max_legacy_f32_e32 +; SI: v_max_legacy_f32_e32 + +; VI: v_cmp_gt_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_gt_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_gt_f32_e32 +; VI: v_cndmask_b32_e32 +; VI-NOT: v_cmp +; VI-NOT: v_cndmask ; GCN-NOT: v_max define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { @@ -199,6 +310,27 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v3f32_fast: + +; GCN: v_max_f32_e32 +; GCN: v_max_f32_e32 +; GCN: v_max_f32_e32 + +; GCN-NOT: v_max +define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <3 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load <3 x float>, ptr addrspace(1) %gep.0 + %b = load <3 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ogt <3 x float> %a, %b + %val = select nnan nsz <3 x i1> %cmp, <3 x float> %a, <3 x float> %b + store <3 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32_multi_use: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] diff --git a/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll b/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll index 2ac5891..37f077d5 100644 --- a/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll +++ 
b/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll @@ -1,16 +1,12 @@ -; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope --check-prefixes=GCN %s +; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN %s -; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope --check-prefixes=GCN,VI-NNAN %s +; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN %s ; GCN-LABEL: {{^}}min_fneg_select_regression_0: ; GCN-NOT: v_mul -; SI: v_max_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0 - -; VI-SAFE: v_cmp_nle_f32_e32 vcc, 1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc +; VI: v_cmp_nle_f32_e32 vcc, 1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc define amdgpu_ps float @min_fneg_select_regression_0(float %a, float %b) #0 { %fneg.a = fsub float -0.0, %a %cmp.a = fcmp ult float %a, 1.0 @@ -18,15 +14,23 @@ define amdgpu_ps float @min_fneg_select_regression_0(float %a, float %b) #0 { ret float %min.a } +; GCN-LABEL: {{^}}min_fneg_select_regression_0_fast: +; GCN-NOT: v_mul + +define amdgpu_ps float @min_fneg_select_regression_0_fast(float %a, float %b) #0 { + %fneg.a = fsub float -0.0, %a + %cmp.a = fcmp ult float %a, 1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}min_fneg_select_regression_posk_0: ; GCN-NOT: v_mul ; SI: v_max_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0 -; VI-SAFE: v_cmp_nle_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NNAN: v_max_f32_e64 v{{[0-9]+}}, -v0, 1.0 +; VI: v_cmp_nle_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @min_fneg_select_regression_posk_0(float %a, float %b) #0 { %fneg.a = fsub float -0.0, %a %cmp.a = fcmp ult float %a, -1.0 @@ -34,15 +38,24 @@ define amdgpu_ps float @min_fneg_select_regression_posk_0(float %a, float %b) #0 ret float %min.a } -; GCN-LABEL: {{^}}max_fneg_select_regression_0: +; GCN-LABEL: {{^}}min_fneg_select_regression_posk_0_fast: ; GCN-NOT: v_mul -; SI-SAFE: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0 +; VI: v_max_f32_e64 v{{[0-9]+}}, -v0, 1.0 +define amdgpu_ps float @min_fneg_select_regression_posk_0_fast(float %a, float %b) #0 { + %fneg.a = fsub float -0.0, %a + %cmp.a = fcmp ult float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + +; GCN-LABEL: {{^}}max_fneg_select_regression_0: +; GCN-NOT: v_mul -; VI-SAFE: v_cmp_nge_f32_e32 vcc, 1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc +; SI: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0 -; GCN-NONAN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, -1.0 +; VI: v_cmp_nge_f32_e32 vcc, 1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc define amdgpu_ps float @max_fneg_select_regression_0(float %a) #0 { %fneg.a = fsub float -0.0, %a %cmp.a = fcmp ugt float %a, 1.0 @@ -50,15 +63,24 @@ define amdgpu_ps float @max_fneg_select_regression_0(float %a) #0 { ret float %min.a } -; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0: +; GCN-LABEL: {{^}}max_fneg_select_regression_0_fast: ; GCN-NOT: v_mul -; SI-SAFE: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0 +; GCN: v_min_f32_e64 [[MIN:v[0-9]+]], 
-v0, -1.0 +define amdgpu_ps float @max_fneg_select_regression_0_fast(float %a) #0 { + %fneg.a = fsub float -0.0, %a + %cmp.a = fcmp ugt float %a, 1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0 + ret float %min.a +} + +; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0: +; GCN-NOT: v_mul -; VI-SAFE: v_cmp_nge_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc +; SI: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0 -; GCN-NONAN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, 1.0 +; VI: v_cmp_nge_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @max_fneg_select_regression_posk_0(float %a) #0 { %fneg.a = fsub float -0.0, %a %cmp.a = fcmp ugt float %a, -1.0 @@ -66,13 +88,22 @@ define amdgpu_ps float @max_fneg_select_regression_posk_0(float %a) #0 { ret float %min.a } +; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0_fast: +; GCN-NOT: v_mul + +; GCN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, 1.0 +define amdgpu_ps float @max_fneg_select_regression_posk_0_fast(float %a) #0 { + %fneg.a = fsub float -0.0, %a + %cmp.a = fcmp ugt float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg1: ; SI: v_min_legacy_f32_e64 v0, 1.0, -v0 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NNAN: v_min_f32_e64 v0, -v0, 1.0 +; VI: v_cmp_nge_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ugt float %a, -1.0 @@ -80,13 +111,21 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg1_fast: + +; VI: v_min_f32_e64 v0, -v0, 1.0 +define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ugt float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg1: ; SI: v_max_legacy_f32_e64 v0, 1.0, -v0 -; VI-SAFE: v_cmp_nle_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NNAN: v_max_f32_e64 v0, -v0, 1.0 +; VI: v_cmp_nle_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ult float %a, -1.0 @@ -94,13 +133,21 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg1_fast: + +; VI: v_max_f32_e64 v0, -v0, 1.0 +define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ult float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg1: ; SI: v_min_legacy_f32_e64 v0, -v0, 1.0 -; VI-SAFE: v_cmp_lt_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NNAN: v_min_f32_e64 v0, -v0, 1.0 +; VI: v_cmp_lt_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ogt float %a, -1.0 @@ -108,13 +155,21 @@ define amdgpu_ps float 
@select_fneg_a_or_q_cmp_ogt_a_neg1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg1_fast: + +; VI: v_min_f32_e64 v0, -v0, 1.0 +define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ogt float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg1: ; SI: v_max_legacy_f32_e64 v0, -v0, 1.0 -; VI-SAFE: v_cmp_gt_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NANN: v_max_f32_e64 v0, -v0, 1.0 +; VI: v_cmp_gt_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp olt float %a, -1.0 @@ -122,17 +177,24 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg1_fast: + +; VI: v_max_f32_e64 v0, -v0, 1.0 +define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp olt float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg8: ; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 ; SI-NEXT: v_min_legacy_f32_e64 v0, [[K]], -v0 -; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 -; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[K0]], v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc - -; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000 -; VI-NNAN-NEXT: v_min_f32_e64 v0, -v0, [[K]] +; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 +; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 +; VI: v_cmp_nge_f32_e32 vcc, [[K0]], v0 +; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ugt float %a, -8.0 @@ -140,17 +202,25 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg8_fast: + +; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 +; VI-NEXT: v_min_f32_e64 v0, -v0, [[K]] +define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ugt float %a, -8.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg8: ; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 ; SI-NEXT: v_max_legacy_f32_e64 v0, [[K]], -v0 -; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 -; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 -; VI-SAFE: v_cmp_nle_f32_e32 vcc, [[K0]], v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc - -; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000 -; VI-NNAN-NEXT: v_max_f32_e64 v0, -v0, [[K]] +; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 +; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 +; VI: v_cmp_nle_f32_e32 vcc, [[K0]], v0 +; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ult float %a, -8.0 @@ -158,17 +228,25 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: 
{{^}}select_fneg_a_or_q_cmp_ult_a_neg8_fast: + +; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 +; VI-NEXT: v_max_f32_e64 v0, -v0, [[K]] +define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ult float %a, -8.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg8: ; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 ; SI-NEXT: v_min_legacy_f32_e64 v0, -v0, [[K]] -; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 -; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 -; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[K0]], v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc - -; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000 -; VI-NNAN-NEXT: v_min_f32_e64 v0, -v0, [[K]] +; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 +; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 +; VI: v_cmp_lt_f32_e32 vcc, [[K0]], v0 +; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ogt float %a, -8.0 @@ -176,18 +254,26 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg8_fast: + +; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 +; VI-NEXT: v_min_f32_e64 v0, -v0, [[K]] +define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ogt float %a, -8.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg8: ; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 ; SI-NEXT: v_max_legacy_f32_e64 v0, -v0, [[K]] -; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 -; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 -; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[K0]], v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc - -; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000 -; VI-NNAN-NEXT: v_max_f32_e64 v0, -v0, [[K]] +; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 +; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 +; VI: v_cmp_gt_f32_e32 vcc, [[K0]], v0 +; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp olt float %a, -8.0 @@ -195,13 +281,22 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg8_fast: + +; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 +; VI-NEXT: v_max_f32_e64 v0, -v0, [[K]] +define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp olt float %a, -8.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_neg1_cmp_olt_a_1: ; SI: v_max_legacy_f32_e64 v0, -v0, -1.0 -; VI-SAFE: v_cmp_gt_f32_e32 vcc, 1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc - -; VI-NNAN: v_max_f32_e64 v0, -v0, -1.0 +; VI: v_cmp_gt_f32_e32 vcc, 1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp olt float %a, 1.0 @@ -209,15 +304,22 @@ define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: 
{{^}}select_fneg_a_or_neg1_cmp_olt_a_1_fast: + +; VI: v_max_f32_e64 v0, -v0, -1.0 +define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp olt float %a, 1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}ult_a_select_fneg_a_b: ; SI: v_cmp_nge_f32_e32 vcc, v0, v1 ; SI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc -; VI-SAFE: v_cmp_nge_f32_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc - -; VI-NNAN: v_cmp_lt_f32_e32 vcc, v0, v1 -; VI-NNAN-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc +; VI: v_cmp_nge_f32_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc define amdgpu_ps float @ult_a_select_fneg_a_b(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ult float %a, %b @@ -225,15 +327,23 @@ define amdgpu_ps float @ult_a_select_fneg_a_b(float %a, float %b) #0 { ret float %min.a } +; GCN-LABEL: {{^}}ult_a_select_fneg_a_b_fast: + +; VI: v_cmp_lt_f32_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc +define amdgpu_ps float @ult_a_select_fneg_a_b_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp nnan nsz ult float %a, %b + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float %b + ret float %min.a +} + ; GCN-LABEL: {{^}}ugt_a_select_fneg_a_b: ; SI: v_cmp_nle_f32_e32 vcc, v0, v1 ; SI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc -; VI-SAFE: v_cmp_nle_f32_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc - -; VI-NNAN: v_cmp_gt_f32_e32 vcc, v0, v1 -; VI-NNAN-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc +; VI: v_cmp_nle_f32_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc define amdgpu_ps float @ugt_a_select_fneg_a_b(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ugt float %a, %b @@ -241,5 +351,16 @@ define amdgpu_ps float @ugt_a_select_fneg_a_b(float %a, float %b) #0 { ret float %min.a } +; GCN-LABEL: {{^}}ugt_a_select_fneg_a_b_fast: + +; VI: v_cmp_gt_f32_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc +define amdgpu_ps float @ugt_a_select_fneg_a_b_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp nnan nsz ugt float %a, %b + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float %b + ret float %min.a +} + attributes #0 = { nounwind } attributes #1 = { nounwind readnone } diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll index 34cb0b1..40c2ec0 100644 --- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll @@ -1,735 +1,760 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-NNAN %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s -; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NNAN %s +; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI %s -; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI-SAFE %s -; RUN: llc 
-enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefixes=SI-NNAN %s +; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI %s -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-TRUE16 %s -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-FAKE16 %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-TRUE16 %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-FAKE16 %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-TRUE16 %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-FAKE16 %s define half @test_fmin_legacy_ule_f16(half %a, half %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_min_f16_e32 v0, v0, v1 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v1 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v1, v0 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v1 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.l, v1.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo -; 
GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-TRUE16-LABEL: test_fmin_legacy_ule_f16: -; GFX11-NNAN-TRUE16: ; %bb.0: -; GFX11-NNAN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l -; GFX11-NNAN-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-FAKE16-LABEL: test_fmin_legacy_ule_f16: -; GFX11-NNAN-FAKE16: ; %bb.0: -; GFX11-NNAN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1 -; GFX11-NNAN-FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_min_legacy_f32_e32 v0, v1, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.l, v1.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule half %a, %b %val = select i1 %cmp, half %a, half %b ret half %val } +define half @test_fmin_legacy_ule_f16_fast(half %a, half %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_min_f16_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_e32 v0, v0, v1 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_f16_fast: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_f16_fast: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule half %a, %b + %val = select nnan nsz i1 %cmp, half %a, half %b + ret half %val +} + define <2 x half> @test_fmin_legacy_ule_v2f16(<2 x half> %a, <2 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v2f16: -; GFX9-SAFE: ; 
%bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v2, v0, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v2f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v1 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v2f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2 -; VI-SAFE-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v2f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v1 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v2 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v2f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v2, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v3, v1 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v2f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v2 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v3 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v2f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v1.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v1.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v2f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) 
lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v2f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v1 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_v2f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v2, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v2f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2 +; VI-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v2f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_min_legacy_f32_e32 v0, v2, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v3, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v2f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v1.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v1.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v2f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule 
<2 x half> %a, %b %val = select <2 x i1> %cmp, <2 x half> %a, <2 x half> %b ret <2 x half> %val } +define <2 x half> @test_fmin_legacy_ule_v2f16_fast(<2 x half> %a, <2 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v2f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_min_f16 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v2f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v0, v0, v1 +; VI-NEXT: v_or_b32_e32 v0, v0, v2 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v2f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v2 +; SI-NEXT: v_min_f32_e32 v1, v1, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v2f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_min_f16 v0, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <2 x half> %a, %b + %val = select nnan nsz <2 x i1> %cmp, <2 x half> %a, <2 x half> %b + ret <2 x half> %val +} + define <3 x half> @test_fmin_legacy_ule_v3f16(<3 x half> %a, <3 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v3f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v3f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v3f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 -; VI-SAFE-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v3f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: 
v_min_f16_e32 v0, v0, v2 -; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v3 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v4 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v3f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v3, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v4, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v5, v2 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v3f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v3 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v4 -; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v5 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v3f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v2.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v2.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v1.l, v3.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v3f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v3f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; 
GFX9-LABEL: test_fmin_legacy_ule_v3f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v3f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; VI-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v3f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_min_legacy_f32_e32 v0, v3, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v4, v1 +; SI-NEXT: v_min_legacy_f32_e32 v2, v5, v2 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v3f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v2.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v2.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v1.l, v3.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v3f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule <3 x half> %a, %b %val = select <3 x i1> %cmp, <3 x half> %a, <3 x half> %b ret <3 x half> %val } +define <3 x half> @test_fmin_legacy_ule_v3f16_fast(<3 x half> %a, 
<3 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v3f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v3, v3, v3 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v1 +; GFX9-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX9-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v3f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v0, v0, v2 +; VI-NEXT: v_min_f16_e32 v1, v1, v3 +; VI-NEXT: v_or_b32_e32 v0, v0, v4 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v3f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v3 +; SI-NEXT: v_min_f32_e32 v1, v1, v4 +; SI-NEXT: v_min_f32_e32 v2, v2, v5 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v3f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v3, v3, v3 +; GFX11-NEXT: v_pk_max_f16 v1, v1, v1 +; GFX11-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <3 x half> %a, %b + %val = select nnan nsz <3 x i1> %cmp, <3 x half> %a, <3 x half> %b + ret <3 x half> %val +} + define <4 x half> @test_fmin_legacy_ule_v4f16(<4 x half> %a, <4 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v4f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v1, v6, v1, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v4f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; 
VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v6 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v3 -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v2 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v5 -; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v4 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v4f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v4, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v5, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v6, v2 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v3, v7, v3 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v4 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v5 -; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v6 -; SI-NNAN-NEXT: v_min_f32_e32 v3, v3, v7 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v4f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1.h, v3.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.h, v2.h -; GFX11-SAFE-TRUE16-NEXT: 
v_cmp_ngt_f16_e64 s1, v0.l, v2.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v1.l, v3.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v4f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v7, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_v4f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v6, v1, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v4f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 +; VI-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; VI-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v4 +; VI-NEXT: 
v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v6 +; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v4f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_min_legacy_f32_e32 v0, v4, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v5, v1 +; SI-NEXT: v_min_legacy_f32_e32 v2, v6, v2 +; SI-NEXT: v_min_legacy_f32_e32 v3, v7, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v4f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1.h, v3.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.h, v2.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v0.l, v2.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v1.l, v3.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v4f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v7, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 +; GFX11-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule <4 x half> %a, %b %val = select <4 x i1> %cmp, <4 x half> %a, <4 x half> %b ret <4 x half> %val } +define <4 x half> @test_fmin_legacy_ule_v4f16_fast(<4 x half> %a, <4 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v4f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX9-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; 
VI-LABEL: test_fmin_legacy_ule_v4f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v1, v1, v3 +; VI-NEXT: v_min_f16_e32 v0, v0, v2 +; VI-NEXT: v_or_b32_e32 v0, v0, v5 +; VI-NEXT: v_or_b32_e32 v1, v1, v4 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v4f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v4 +; SI-NEXT: v_min_f32_e32 v1, v1, v5 +; SI-NEXT: v_min_f32_e32 v2, v2, v6 +; SI-NEXT: v_min_f32_e32 v3, v3, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v4f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX11-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <4 x half> %a, %b + %val = select nnan nsz <4 x i1> %cmp, <4 x half> %a, <4 x half> %b + ret <4 x half> %val +} + define <8 x half> @test_fmin_legacy_ule_v8f16(<8 x half> %a, <8 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v8f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v8, v0, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v1, v10, v1, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v2, v12, v2, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v3, v14, v3, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; GFX9-NNAN: ; %bb.0: -; 
GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v4 -; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v5 -; GFX9-NNAN-NEXT: v_pk_min_f16 v2, v2, v6 -; GFX9-NNAN-NEXT: v_pk_min_f16 v3, v3, v7 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v8f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v10 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v12 -; VI-SAFE-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v14 -; VI-SAFE-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_e32 v3, v3, v7 -; VI-NNAN-NEXT: v_min_f16_e32 v2, v2, v6 -; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v5 -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v4 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v11 -; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v10 -; VI-NNAN-NEXT: v_or_b32_e32 v2, v2, v9 -; VI-NNAN-NEXT: v_or_b32_e32 v3, v3, v8 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v8f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 
v13, v13 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v8, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v9, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v10, v2 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v3, v11, v3 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v4, v12, v4 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v5, v13, v5 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v6, v14, v6 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v7, v15, v7 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v8 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v9 -; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v10 -; SI-NNAN-NEXT: v_min_f32_e32 v3, v3, v11 -; SI-NNAN-NEXT: v_min_f32_e32 v4, v4, v12 -; SI-NNAN-NEXT: v_min_f32_e32 v5, v5, v13 -; SI-NNAN-NEXT: v_min_f32_e32 v6, v6, v14 -; SI-NNAN-NEXT: v_min_f32_e32 v7, v7, v15 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v8f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: 
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v4.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v1.h, v5.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v2.h, v6.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v3.h, v7.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s3, v0.l, v4.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s4, v1.l, v5.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s5, v2.l, v6.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s6, v3.l, v7.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v8f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v11, v10 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v13, v12 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v15, v14 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v9, v8 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v2, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v4 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v5 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v7 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v4 -; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v5 -; GFX11-NNAN-NEXT: v_pk_min_f16 v2, v2, v6 -; GFX11-NNAN-NEXT: v_pk_min_f16 v3, v3, v7 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_v8f16: +; GFX9: ; %bb.0: 
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 +; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 +; GFX9-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 +; GFX9-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 +; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v8, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v10, v1, s4 +; GFX9-NEXT: v_perm_b32 v2, v12, v2, s4 +; GFX9-NEXT: v_perm_b32 v3, v14, v3, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v8f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; VI-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 +; VI-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; VI-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 +; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; VI-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; VI-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 +; VI-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 +; VI-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 +; VI-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 +; VI-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 +; VI-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v8 +; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v10 +; VI-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v12 +; VI-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v14 +; VI-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v8f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; 
SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_min_legacy_f32_e32 v0, v8, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v9, v1 +; SI-NEXT: v_min_legacy_f32_e32 v2, v10, v2 +; SI-NEXT: v_min_legacy_f32_e32 v3, v11, v3 +; SI-NEXT: v_min_legacy_f32_e32 v4, v12, v4 +; SI-NEXT: v_min_legacy_f32_e32 v5, v13, v5 +; SI-NEXT: v_min_legacy_f32_e32 v6, v14, v6 +; SI-NEXT: v_min_legacy_f32_e32 v7, v15, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v8f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v4.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v1.h, v5.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v2.h, v6.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v3.h, v7.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s3, v0.l, v4.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s4, v1.l, v5.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s5, v2.l, v6.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s6, v3.l, v7.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v8f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v11, v10 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v13, v12 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v15, v14 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v9, v8 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v2, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, 
vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v5 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v7 +; GFX11-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule <8 x half> %a, %b %val = select <8 x i1> %cmp, <8 x half> %a, <8 x half> %b ret <8 x half> %val } +define <8 x half> @test_fmin_legacy_ule_v8f16_fast(<8 x half> %a, <8 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v8f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_min_f16 v0, v0, v4 +; GFX9-NEXT: v_pk_min_f16 v1, v1, v5 +; GFX9-NEXT: v_pk_min_f16 v2, v2, v6 +; GFX9-NEXT: v_pk_min_f16 v3, v3, v7 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v8f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v3, v3, v7 +; VI-NEXT: v_min_f16_e32 v2, v2, v6 +; VI-NEXT: v_min_f16_e32 v1, v1, v5 +; VI-NEXT: v_min_f16_e32 v0, v0, v4 +; VI-NEXT: v_or_b32_e32 v0, v0, v11 +; VI-NEXT: v_or_b32_e32 v1, v1, v10 +; VI-NEXT: v_or_b32_e32 v2, v2, v9 +; VI-NEXT: v_or_b32_e32 v3, v3, v8 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v8f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v8 +; SI-NEXT: v_min_f32_e32 v1, v1, v9 +; 
SI-NEXT: v_min_f32_e32 v2, v2, v10 +; SI-NEXT: v_min_f32_e32 v3, v3, v11 +; SI-NEXT: v_min_f32_e32 v4, v4, v12 +; SI-NEXT: v_min_f32_e32 v5, v5, v13 +; SI-NEXT: v_min_f32_e32 v6, v6, v14 +; SI-NEXT: v_min_f32_e32 v7, v7, v15 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v8f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_min_f16 v0, v0, v4 +; GFX11-NEXT: v_pk_min_f16 v1, v1, v5 +; GFX11-NEXT: v_pk_min_f16 v2, v2, v6 +; GFX11-NEXT: v_pk_min_f16 v3, v3, v7 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <8 x half> %a, %b + %val = select nnan nsz <8 x i1> %cmp, <8 x half> %a, <8 x half> %b + ret <8 x half> %val +} + attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll index ec4dd85..defcffa 100644 --- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll +++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll @@ -1,8 +1,6 @@ -; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-NONAN,GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN,FUNC %s -; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NONAN,GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN,FUNC %s ; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope --check-prefixes=EG,FUNC %s @@ -14,13 +12,9 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1 ; FUNC-LABEL: {{^}}s_test_fmin_legacy_subreg_inputs_f32: ; EG: MIN * -; SI-SAFE: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +; SI: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} -; SI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} - -; VI-SAFE: v_cmp_nlt_f32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}} - -; VI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +; VI: v_cmp_nlt_f32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}} define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32(ptr addrspace(1) %out, <4 x float> %reg0) #0 { %r0 = extractelement <4 x float> %reg0, i32 0 %r1 = extractelement <4 x float> %reg0, i32 1 @@ -30,22 +24,32 @@ define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32(ptr addrspace(1) ret void } -; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32: -; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} +; FUNC-LABEL: {{^}}s_test_fmin_legacy_subreg_inputs_f32_fast: -; SI-SAFE: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] +; SI: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} -; GCN-NONAN: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] +; VI: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32_fast(ptr addrspace(1) %out, <4 x float> %reg0) #0 { + %r0 = extractelement <4 x float> %reg0, i32 0 + %r1 = extractelement <4 x float> %reg0, i32 1 + %r2 = fcmp nnan nsz uge float %r0, %r1 + %r3 = select nnan nsz i1 %r2, float %r1, float %r0 + store float %r3, ptr addrspace(1) %out + ret void +} -; VI-SAFE: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] +; FUNC-LABEL: 
{{^}}s_test_fmin_legacy_ule_f32: +; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} + +; SI: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, s[[#LOAD + 3]], [[VA]] +; VI: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] -; VI-SAFE: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] -; VI-SAFE: v_cmp_ngt_f32_e32 vcc, s[[#LOAD + 2]], [[VB]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[VB]], [[VA]] +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, s[[#LOAD + 3]], [[VA]] -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, s[[#LOAD + 2]], [[VB]] +; VI: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] +; VI: v_cmp_ngt_f32_e32 vcc, s[[#LOAD + 2]], [[VB]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[VB]], [[VA]] define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, float %a, float %b) #0 { %cmp = fcmp ule float %a, %b %val = select i1 %cmp, float %a, float %b @@ -53,6 +57,19 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, flo ret void } +; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32_fast: +; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} + +; GCN: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] + +; GCN: v_min_f32_e32 {{v[0-9]+}}, s[[#LOAD + 2]], [[VB]] +define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out, float %a, float %b) #0 { + %cmp = fcmp ule float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; Nsz also needed ; FIXME: Should separate tests ; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src: @@ -61,12 +78,10 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, flo ; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0 ; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0 -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] - -; VI-SAFE: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] -; VI-SAFE: v_cndmask_b32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]], vcc +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] +; VI: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] +; VI: v_cndmask_b32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]], vcc define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1) %out, float %a, float %b) #0 { %a.nnan = fadd nnan float %a, 1.0 %b.nnan = fadd nnan float %b, 2.0 @@ -76,16 +91,32 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1) ret void } +; Nsz also needed +; FIXME: Should separate tests +; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src_fast: +; GCN: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} + +; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0 +; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0 + +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] +define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src_fast(ptr addrspace(1) %out, float %a, float %b) #0 { + %a.nnan = fadd nnan float %a, 1.0 + %b.nnan = fadd nnan float %b, 2.0 + %cmp = fcmp ule float %a.nnan, %b.nnan + %val = select nnan nsz i1 %cmp, float %a.nnan, float %b.nnan + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ule_f32: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, 
[[B]], [[A]] +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] -; VI-SAFE: v_cmp_ngt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] - -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; VI: v_cmp_ngt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ule_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -100,16 +131,33 @@ define amdgpu_kernel void @test_fmin_legacy_ule_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_ule_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; VI-SAFE: v_cmp_le_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %cmp = fcmp ule float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + +; VI: v_cmp_le_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ole_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -124,16 +172,33 @@ define amdgpu_kernel void @test_fmin_legacy_ole_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ole_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 -; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 + + %cmp = fcmp ole float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; VI: v_cmp_lt_f32_e32 vcc, [[A]], [[B]] +; VI: 
v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_olt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -148,16 +213,33 @@ define amdgpu_kernel void @test_fmin_legacy_olt_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_olt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + %cmp = fcmp olt float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] + +; VI: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ult_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -172,16 +254,33 @@ define amdgpu_kernel void @test_fmin_legacy_ult_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ult_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + %cmp = fcmp ult float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] + +; VI: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid @@ -196,19 
+295,35 @@ define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ult_v1f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <1 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile <1 x float>, ptr addrspace(1) %gep.0 + %b = load volatile <1 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ult <1 x float> %a, %b + %val = select nnan nsz <1 x i1> %cmp, <1 x float> %a, <1 x float> %b + store <1 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v2f32: ; GCN: {{buffer|flat}}_load_dwordx2 ; GCN: {{buffer|flat}}_load_dwordx2 -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE: v_min_legacy_f32_e32 - -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 +; SI: v_min_legacy_f32_e32 +; SI: v_min_legacy_f32_e32 -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN: v_min_f32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 define amdgpu_kernel void @test_fmin_legacy_ult_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr <2 x float>, ptr addrspace(1) %in, i32 %tid @@ -223,25 +338,40 @@ define amdgpu_kernel void @test_fmin_legacy_ult_v2f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v2f32_fast: +; GCN: {{buffer|flat}}_load_dwordx2 +; GCN: {{buffer|flat}}_load_dwordx2 + +; GCN: v_min_f32_e32 +; GCN: v_min_f32_e32 +define amdgpu_kernel void @test_fmin_legacy_ult_v2f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <2 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <2 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile <2 x float>, ptr addrspace(1) %gep.0 + %b = load volatile <2 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ult <2 x float> %a, %b + %val = select nnan nsz <2 x i1> %cmp, <2 x float> %a, <2 x float> %b + store <2 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v3f32: -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE-NOT: v_min_ - -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 +; SI: v_min_legacy_f32_e32 +; SI: v_min_legacy_f32_e32 +; SI: v_min_legacy_f32_e32 +; SI-NOT: v_min_ + +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 ; VI-NOT: v_cmp ; VI-NOT: v_cndmask - -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN-NOT: v_min_ define amdgpu_kernel void @test_fmin_legacy_ult_v3f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid @@ -256,6 +386,28 @@ define amdgpu_kernel void 
@test_fmin_legacy_ult_v3f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v3f32_fast: +; VI-NOT: v_cmp +; VI-NOT: v_cndmask + +; GCN: v_min_f32_e32 +; GCN: v_min_f32_e32 +; GCN: v_min_f32_e32 +; GCN-NOT: v_min_ +define amdgpu_kernel void @test_fmin_legacy_ult_v3f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <3 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load <3 x float>, ptr addrspace(1) %gep.0 + %b = load <3 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ult <3 x float> %a, %b + %val = select nnan nsz <3 x i1> %cmp, <3 x float> %a, <3 x float> %b + store <3 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32_multi_use: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll index 12e9888..aaea4f7 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll @@ -5015,7 +5015,7 @@ define amdgpu_kernel void @v_fneg_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i64 %tid.ext %a = load volatile double, ptr addrspace(1) %a.gep - %fneg.a = fsub double -0.000000e+00, %a + %fneg.a = fsub nsz double -0.000000e+00, %a %fpround = fptrunc double %fneg.a to float %fneg = fneg float %fpround store float %fneg, ptr addrspace(1) %out.gep diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll index c4ca79d..3de6df2 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll @@ -4441,25 +4441,40 @@ define float @v_fneg_fabs_select_infloop_regression(float %arg, i1 %arg1) { ret float %i3 } -define float @v_fmul_0_fsub_0_infloop_regression(float %arg) { -; GCN-SAFE-LABEL: v_fmul_0_fsub_0_infloop_regression: +define float @v_fmul_0_fsub_0_safe_infloop_regression(float %arg) { +; GCN-SAFE-LABEL: v_fmul_0_fsub_0_safe_infloop_regression: ; GCN-SAFE: ; %bb.0: ; %bb ; GCN-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GCN-SAFE-NEXT: v_mul_f32_e32 v0, 0, v0 ; GCN-SAFE-NEXT: v_sub_f32_e32 v0, 0, v0 ; GCN-SAFE-NEXT: s_setpc_b64 s[30:31] ; -; GCN-NSZ-LABEL: v_fmul_0_fsub_0_infloop_regression: -; GCN-NSZ: ; %bb.0: ; %bb -; GCN-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NSZ-NEXT: v_mul_f32_e32 v0, 0x80000000, v0 -; GCN-NSZ-NEXT: s_setpc_b64 s[30:31] +; SI-NSZ-LABEL: v_fmul_0_fsub_0_safe_infloop_regression: +; SI-NSZ: ; %bb.0: ; %bb +; SI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NSZ-NEXT: s_brev_b32 s4, 1 +; SI-NSZ-NEXT: v_fma_f32 v0, v0, s4, 0 +; SI-NSZ-NEXT: s_setpc_b64 s[30:31] +; FIXME: utils/update_llc_test_checks.py will generate redundant VI +; labels; remove them, or they will cause test failures.
bb: %i = fmul float %arg, 0.0 %i1 = fsub float 0.0, %i ret float %i1 } +define float @v_fmul_0_fsub_0_nsz_infloop_regression(float %arg) { +; GCN-LABEL: v_fmul_0_fsub_0_nsz_infloop_regression: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v0, 0x80000000, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] +bb: + %i = fmul float %arg, 0.0 + %i1 = fsub nsz float 0.0, %i + ret float %i1 +} + declare i32 @llvm.amdgcn.workitem.id.x() #1 declare float @llvm.fma.f32(float, float, float) #1 declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-dead-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-dead-intrinsics.ll new file mode 100644 index 0000000..d6198f5 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-dead-intrinsics.ll @@ -0,0 +1,9 @@ +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-lower-buffer-fat-pointers < %s | FileCheck %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=amdgpu-lower-buffer-fat-pointers < %s | FileCheck %s + +; CHECK: @arbitrary +declare amdgpu_kernel void @arbitrary(ptr addrspace(1)) + +; COM: This used to cause verifier errors when "lowered". +declare <4 x i8> @llvm.masked.load.v4i8.p7(ptr addrspace(7) captures(none), i32 immarg, <4 x i1>, <4 x i8>) +; CHECK-NOT: llvm.masked.load diff --git a/llvm/test/CodeGen/AMDGPU/lro-phi-samebb-nonlookthrough-store.ll b/llvm/test/CodeGen/AMDGPU/lro-phi-samebb-nonlookthrough-store.ll new file mode 100644 index 0000000..b508f73 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/lro-phi-samebb-nonlookthrough-store.ll @@ -0,0 +1,46 @@ +; RUN: opt -S -passes=amdgpu-late-codegenprepare \ +; RUN: -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a %s | FileCheck %s + +; Goal: With a loop-header PHI of an illegal vector type and a same-BB +; non-lookthrough user (vector add) in the header, LRO should still coerce +; the PHI to i32 because a profitable sink (store) exists in another block. + +define amdgpu_kernel void @phi_samebb_nonlookthrough_store( + ptr addrspace(1) %out, <4 x i8> %v, i1 %exit) { +; CHECK-LABEL: @phi_samebb_nonlookthrough_store( +entry: + br label %loop + +loop: ; preds = %entry, %loop + ; Loop-carried PHI of an illegal vector type. + %acc = phi <4 x i8> [ zeroinitializer, %entry ], [ %acc.next, %loop ] + + ; Same-BB non-lookthrough use in the header. + %acc.next = add <4 x i8> %acc, %v + + ; Make it a real loop: either iterate or exit to the sink block. + br i1 %exit, label %store, label %loop + +store: ; preds = %loop + ; The across-BB sink: storing the PHI coerced to i32. + %acc.bc = bitcast <4 x i8> %acc to i32 + store i32 %acc.bc, ptr addrspace(1) %out, align 4 + ret void +} + +; After AMDGPULateCodeGenPrepare we expect: +; - the PHI is coerced to i32 +; - a header bitcast materializes for the add +; This shows the same-BB non-lookthrough user (the add) was not pruned +; when the def is a PHI.
+ +; CHECK: loop: +; CHECK: %[[ACC_TC:[^ ]+]] = phi i32 +; CHECK: %[[ACC_TC_BC:[^ ]+]] = bitcast i32 %[[ACC_TC]] to <4 x i8> +; CHECK: %[[ACC_NEXT:[^ ]+]] = add <4 x i8> %[[ACC_TC_BC]], %v +; CHECK: br i1 %exit, label %store, label %loop +; CHECK: store: +; CHECK: %[[ACC_TC_BC2:[^ ]+]] = bitcast i32 %[[ACC_TC]] to <4 x i8> +; CHECK: %[[ST_I32:[^ ]+]] = bitcast <4 x i8> %[[ACC_TC_BC2]] to i32 +; CHECK: store i32 %[[ST_I32]], + diff --git a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll index 92d3277..bb22144 100644 --- a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll +++ b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll @@ -4148,28 +4148,28 @@ define <2 x half> @mul_select_negk_negfabs_v2f16(<2 x i32> %c, <2 x half> %x, <2 ; -------------------------------------------------------------------------------- define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, <2 x half> %y) { -; CI-SAFE-LABEL: select_fneg_posk_src_add_v2f16: -; CI-SAFE: ; %bb.0: -; CI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_add_f32_e32 v3, 4.0, v3 -; CI-SAFE-NEXT: v_add_f32_e32 v2, 4.0, v2 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; CI-SAFE-NEXT: v_or_b32_e32 v2, v2, v3 -; CI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v2 -; CI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-SAFE-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc -; CI-SAFE-NEXT: s_setpc_b64 s[30:31] +; CI-LABEL: select_fneg_posk_src_add_v2f16: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_add_f32_e32 v3, 4.0, v3 +; CI-NEXT: v_add_f32_e32 v2, 4.0, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; CI-NEXT: v_or_b32_e32 v2, v2, v3 +; CI-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v2 +; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc +; CI-NEXT: s_setpc_b64 s[30:31] ; ; VI-SAFE-LABEL: select_fneg_posk_src_add_v2f16: ; VI-SAFE: ; %bb.0: @@ -4229,21 +4229,6 @@ define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, < ; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 ; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] ; -; CI-NSZ-LABEL: select_fneg_posk_src_add_v2f16: -; CI-NSZ: ; %bb.0: -; CI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-NSZ-NEXT: v_sub_f32_e32 v2, -4.0, v2 -; CI-NSZ-NEXT: v_sub_f32_e32 v3, -4.0, v3 -; CI-NSZ-NEXT: 
v_cndmask_b32_e32 v0, 2.0, v2, vcc -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc -; CI-NSZ-NEXT: s_setpc_b64 s[30:31] -; ; VI-NSZ-LABEL: select_fneg_posk_src_add_v2f16: ; VI-NSZ: ; %bb.0: ; VI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -4302,6 +4287,105 @@ define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, < ret <2 x half> %select } +define <2 x half> @select_fneg_posk_src_add_v2f16_nsz(<2 x i32> %c, <2 x half> %x, <2 x half> %y) { +; CI-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_sub_f32_e32 v2, -4.0, v2 +; CI-NEXT: v_sub_f32_e32 v3, -4.0, v3 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc +; CI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; VI-NEXT: v_mov_b32_e32 v1, 0xc400 +; VI-NEXT: v_sub_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; VI-NEXT: v_sub_f16_e32 v2, -4.0, v2 +; VI-NEXT: v_mov_b32_e32 v3, 0x4000 +; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[4:5] +; VI-NEXT: v_cndmask_b32_sdwa v1, v3, v1, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GFX9-NEXT: v_pk_add_f16 v1, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX9-NEXT: v_mov_b32_e32 v2, 0x4000 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[4:5] +; GFX9-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v1, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-TRUE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-SAFE-TRUE16: ; %bb.0: +; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-TRUE16-NEXT: v_pk_add_f16 v0, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-FAKE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-SAFE-FAKE16: ; %bb.0: +; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-FAKE16-NEXT: v_pk_add_f16 v2, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; 
GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-TRUE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-NSZ-TRUE16: ; %bb.0: +; GFX11-NSZ-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-TRUE16-NEXT: v_pk_add_f16 v0, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-NSZ-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-FAKE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-NSZ-FAKE16: ; %bb.0: +; GFX11-NSZ-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-FAKE16-NEXT: v_pk_add_f16 v2, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-NSZ-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NSZ-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-NSZ-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = icmp eq <2 x i32> %c, zeroinitializer + %add = fadd nsz <2 x half> %x, <half 4.0, half 4.0> + %fneg = fneg <2 x half> %add + %select = select <2 x i1> %cmp, <2 x half> %fneg, <2 x half> <half 2.0, half 2.0> + ret <2 x half> %select +} + define <2 x half> @select_fneg_posk_src_sub_v2f16(<2 x i32> %c, <2 x half> %x) { ; CI-SAFE-LABEL: select_fneg_posk_src_sub_v2f16: ; CI-SAFE: ; %bb.0: @@ -4704,34 +4788,34 @@ define <2 x half> @select_fneg_posk_src_fma_v2f16(<2 x i32> %c, <2 x half> %x, < } define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, <2 x half> %z) { -; CI-SAFE-LABEL: select_fneg_posk_src_fmad_v2f16: -; CI-SAFE: ; %bb.0: -; CI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; CI-SAFE-NEXT: v_mul_f32_e32 v3, 4.0, v3 -; CI-SAFE-NEXT: v_add_f32_e32 v3, v3, v5 -; CI-SAFE-NEXT: v_mul_f32_e32 v2, 4.0, v2 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_add_f32_e32 v2, v2, v4 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-SAFE-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; CI-SAFE-NEXT: v_or_b32_e32 v2, v2, v3 -; CI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v2 -; CI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; 
CI-SAFE-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc -; CI-SAFE-NEXT: s_setpc_b64 s[30:31] +; CI-LABEL: select_fneg_posk_src_fmad_v2f16: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; CI-NEXT: v_mul_f32_e32 v3, 4.0, v3 +; CI-NEXT: v_add_f32_e32 v3, v3, v5 +; CI-NEXT: v_mul_f32_e32 v2, 4.0, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_add_f32_e32 v2, v2, v4 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; CI-NEXT: v_or_b32_e32 v2, v2, v3 +; CI-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v2 +; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc +; CI-NEXT: s_setpc_b64 s[30:31] ; ; VI-SAFE-LABEL: select_fneg_posk_src_fmad_v2f16: ; VI-SAFE: ; %bb.0: @@ -4793,27 +4877,6 @@ define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, ; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 ; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] ; -; CI-NSZ-LABEL: select_fneg_posk_src_fmad_v2f16: -; CI-NSZ: ; %bb.0: -; CI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v4, v4 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v5, v5 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v4, v4 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v5, v5 -; CI-NSZ-NEXT: v_mul_f32_e32 v2, -4.0, v2 -; CI-NSZ-NEXT: v_mul_f32_e32 v3, -4.0, v3 -; CI-NSZ-NEXT: v_sub_f32_e32 v2, v2, v4 -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-NSZ-NEXT: v_sub_f32_e32 v3, v3, v5 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc -; CI-NSZ-NEXT: s_setpc_b64 s[30:31] -; ; VI-NSZ-LABEL: select_fneg_posk_src_fmad_v2f16: ; VI-NSZ: ; %bb.0: ; VI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -4873,6 +4936,112 @@ define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, ret <2 x half> %select } +define <2 x half> @select_fneg_posk_src_fmad_v2f16_nsz(<2 x i32> %c, <2 x half> %x, <2 x half> %z) { +; CI-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; CI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; CI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; CI-NEXT: v_mul_f32_e32 v2, -4.0, v2 +; CI-NEXT: v_mul_f32_e32 v3, -4.0, v3 +; CI-NEXT: v_sub_f32_e32 v2, v2, v4 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_sub_f32_e32 v3, v3, v5 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc +; CI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; 
VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_fma_f16 v1, v4, -4.0, -v1 +; VI-NEXT: v_fma_f16 v2, v2, -4.0, -v3 +; VI-NEXT: v_mov_b32_e32 v3, 0x4000 +; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[4:5] +; VI-NEXT: v_cndmask_b32_sdwa v1, v3, v1, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GFX9-NEXT: v_pk_fma_f16 v1, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX9-NEXT: v_mov_b32_e32 v2, 0x4000 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[4:5] +; GFX9-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v1, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-TRUE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-SAFE-TRUE16: ; %bb.0: +; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-TRUE16-NEXT: v_pk_fma_f16 v0, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-FAKE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-SAFE-FAKE16: ; %bb.0: +; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-FAKE16-NEXT: v_pk_fma_f16 v2, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-TRUE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-NSZ-TRUE16: ; %bb.0: +; GFX11-NSZ-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-TRUE16-NEXT: v_pk_fma_f16 v0, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-NSZ-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-FAKE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-NSZ-FAKE16: ; %bb.0: +; GFX11-NSZ-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-FAKE16-NEXT: 
v_pk_fma_f16 v2, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-NSZ-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NSZ-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-NSZ-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = icmp eq <2 x i32> %c, zeroinitializer + %fmad = call nsz <2 x half> @llvm.fmuladd.v2f16(<2 x half> %x, <2 x half> <half 4.0, half 4.0>, <2 x half> %z) + %fneg = fneg <2 x half> %fmad + %select = select <2 x i1> %cmp, <2 x half> %fneg, <2 x half> <half 2.0, half 2.0> + ret <2 x half> %select +} + declare <2 x half> @llvm.fabs.v2f16(<2 x half>) #0 declare <2 x half> @llvm.fma.v2f16(<2 x half>, <2 x half>, <2 x half>) #0 declare <2 x half> @llvm.fmuladd.v2f16(<2 x half>, <2 x half>, <2 x half>) #0 diff --git a/llvm/test/CodeGen/AMDGPU/v_mac.ll b/llvm/test/CodeGen/AMDGPU/v_mac.ll index c128715..f5dc824 100644 --- a/llvm/test/CodeGen/AMDGPU/v_mac.ll +++ b/llvm/test/CodeGen/AMDGPU/v_mac.ll @@ -116,7 +116,7 @@ entry: ; GCN-LABEL: {{^}}nsz_mad_sub0_src0: ; GCN-NOT: v_mac_f32 ; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}} -define amdgpu_kernel void @nsz_mad_sub0_src0(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 { +define amdgpu_kernel void @nsz_mad_sub0_src0(ptr addrspace(1) %out, ptr addrspace(1) %in) { entry: %b_ptr = getelementptr float, ptr addrspace(1) %in, i32 1 %c_ptr = getelementptr float, ptr addrspace(1) %in, i32 2 @@ -125,7 +125,7 @@ entry: %b = load float, ptr addrspace(1) %b_ptr %c = load float, ptr addrspace(1) %c_ptr - %neg_a = fsub float 0.0, %a + %neg_a = fsub nsz float 0.0, %a %tmp0 = fmul float %neg_a, %b %tmp1 = fadd float %tmp0, %c @@ -176,7 +176,7 @@ entry: ; GCN-LABEL: {{^}}nsz_mad_sub0_src1: ; GCN-NOT: v_mac_f32 ; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}} -define amdgpu_kernel void @nsz_mad_sub0_src1(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 { +define amdgpu_kernel void @nsz_mad_sub0_src1(ptr addrspace(1) %out, ptr addrspace(1) %in) { entry: %b_ptr = getelementptr float, ptr addrspace(1) %in, i32 1 %c_ptr = getelementptr float, ptr addrspace(1) %in, i32 2 @@ -185,7 +185,7 @@ entry: %b = load float, ptr addrspace(1) %b_ptr %c = load float, ptr addrspace(1) %c_ptr - %neg_b = fsub float 0.0, %b + %neg_b = fsub nsz float 0.0, %b %tmp0 = fmul float %a, %neg_b %tmp1 = fadd float %tmp0, %c @@ -310,6 +310,5 @@ define float @v_mac_f32_dynamic_ftz(float %a, float %b, float %c) "denormal-fp-m declare i32 @llvm.amdgcn.workitem.id.x() #2 attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" } -attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" } attributes #2 = { nounwind readnone } attributes #3 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll index bcc60b0..8da6f23 100644 --- a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll +++ b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll @@ -236,7 +236,7 @@ entry: %b.val = load half, ptr addrspace(1) %b %c.val = load half, ptr addrspace(1) %c - %a.neg = fsub half 0.0, %a.val + %a.neg = fsub nsz half 0.0, %a.val %t.val = fmul half %a.neg, %b.val %r.val = fadd half %t.val, %c.val @@ -263,7 +263,7 @@ 
entry: %b.val = load half, ptr addrspace(1) %b %c.val = load half, ptr addrspace(1) %c - %b.neg = fsub half 0.0, %b.val + %b.neg = fsub nsz half 0.0, %b.val %t.val = fmul half %a.val, %b.neg %r.val = fadd half %t.val, %c.val @@ -290,7 +290,7 @@ entry: %b.val = load half, ptr addrspace(1) %b %c.val = load half, ptr addrspace(1) %c - %c.neg = fsub half 0.0, %c.val + %c.neg = fsub nsz half 0.0, %c.val %t.val = fmul half %a.val, %b.val %r.val = fadd half %t.val, %c.neg @@ -601,7 +601,7 @@ entry: %b.val = load <2 x half>, ptr addrspace(1) %b %c.val = load <2 x half>, ptr addrspace(1) %c - %a.neg = fsub <2 x half> <half 0.0, half 0.0>, %a.val + %a.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %a.val %t.val = fmul <2 x half> %a.neg, %b.val %r.val = fadd <2 x half> %t.val, %c.val @@ -634,7 +634,7 @@ entry: %b.val = load <2 x half>, ptr addrspace(1) %b %c.val = load <2 x half>, ptr addrspace(1) %c - %b.neg = fsub <2 x half> <half 0.0, half 0.0>, %b.val + %b.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %b.val %t.val = fmul <2 x half> %a.val, %b.neg %r.val = fadd <2 x half> %t.val, %c.val @@ -667,7 +667,7 @@ entry: %b.val = load <2 x half>, ptr addrspace(1) %b %c.val = load <2 x half>, ptr addrspace(1) %c - %c.neg = fsub <2 x half> <half 0.0, half 0.0>, %c.val + %c.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %c.val %t.val = fmul <2 x half> %a.val, %b.val %r.val = fadd <2 x half> %t.val, %c.neg @@ -678,5 +678,5 @@ entry: declare void @llvm.amdgcn.s.barrier() #2 attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" } -attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" "denormal-fp-math"="preserve-sign,preserve-sign" } +attributes #1 = { nounwind "denormal-fp-math"="preserve-sign,preserve-sign" } attributes #2 = { nounwind convergent } diff --git a/llvm/test/CodeGen/ARM/build-attributes.ll b/llvm/test/CodeGen/ARM/build-attributes.ll index 68844ae..306a4a3 100644 --- a/llvm/test/CodeGen/ARM/build-attributes.ll +++ b/llvm/test/CodeGen/ARM/build-attributes.ll @@ -3,23 +3,16 @@ ; RUN: llc < %s -mtriple=thumbv5-linux-gnueabi -mcpu=xscale -mattr=+strict-align | FileCheck %s --check-prefix=XSCALE ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6 -; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6-FAST ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6M -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST ; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6M -; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align | FileCheck %s --check-prefix=ARM1156T2F-S -; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all 
-enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=ARM1156T2F-S-FAST ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi | FileCheck %s --check-prefix=V7M -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7M-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=V7 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V8-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi | FileCheck %s --check-prefix=Vt8 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING @@ -31,35 +24,24 @@ ; RUN: llc < %s -mtriple=thumbv8m.main-linux-gnueabi | FileCheck %s --check-prefix=V8MMAINLINE ; RUN: llc < %s -mtriple=thumbv8m.main-linux-gnueabi -mattr=+dsp | FileCheck %s --check-prefix=V8MMAINLINE_DSP ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-neon,-d32 | FileCheck %s --check-prefix=CORTEX-A5-NONEON ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-A5-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A8-SOFT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-SOFT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A8-HARD -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | 
FileCheck %s --check-prefix=CORTEX-A8-HARD-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A8-SOFT ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-SOFT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A9-HARD -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-HARD-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-A12-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 | FileCheck %s --check-prefix=CORTEX-A15 -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A15-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 | FileCheck %s --check-prefix=CORTEX-A17-DEFAULT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-A17-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-no-trapping-fp-math | FileCheck %s --check-prefix=NO-TRAPPING-MATH ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -denormal-fp-math=ieee | FileCheck %s 
--check-prefix=DENORMAL-IEEE @@ -74,37 +56,26 @@ ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 | FileCheck %s --check-prefix=CORTEX-M0 -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus | FileCheck %s --check-prefix=CORTEX-M0PLUS -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0PLUS-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 | FileCheck %s --check-prefix=CORTEX-M1 -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M1-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align | FileCheck %s --check-prefix=SC000 -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC000-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 | FileCheck %s --check-prefix=CORTEX-M3 -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M3-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 | FileCheck %s --check-prefix=SC300 -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC300-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-M4-SOFT -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-SOFT-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard | FileCheck %s 
--check-prefix=CORTEX-M4-HARD -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-HARD-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SOFT -; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-NOFPU-FAST ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-fp64 | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SINGLE -; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-fp64 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-FAST ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 | FileCheck %s --check-prefix=CORTEX-M7-DOUBLE ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m23 | FileCheck %s --check-prefix=CORTEX-M23 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 | FileCheck %s --check-prefix=CORTEX-M33 -; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M33-FAST ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m35p | FileCheck %s --check-prefix=CORTEX-M35P @@ -113,49 +84,34 @@ ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4 | FileCheck %s --check-prefix=CORTEX-R4 ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4f | FileCheck %s --check-prefix=CORTEX-R4F ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 | FileCheck %s --check-prefix=CORTEX-R5 -; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R5-FAST ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 | FileCheck %s --check-prefix=CORTEX-R7 -; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R7-FAST ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 | FileCheck %s --check-prefix=CORTEX-R8 -; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math 
-fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R8-FAST ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 | FileCheck %s --check-prefix=CORTEX-A32 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A32-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 | FileCheck %s --check-prefix=CORTEX-A35 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A35-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 | FileCheck %s --check-prefix=CORTEX-A53 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A53-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 | FileCheck %s --check-prefix=CORTEX-A57 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A57-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 | FileCheck %s --check-prefix=CORTEX-A72 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A72-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a73 | FileCheck %s --check-prefix=CORTEX-A73 ; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi | FileCheck %s --check-prefix=GENERIC-ARMV8_1-A ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 | FileCheck %s --check-prefix=EXYNOS-M3 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 | FileCheck %s --check-prefix=EXYNOS-M4 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi 
-mcpu=exynos-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m5 | FileCheck %s --check-prefix=EXYNOS-M5 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=GENERIC-ARMV8_1-A-FAST ; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 | FileCheck %s --check-prefix=CORTEX-A7-CHECK -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-CHECK-FAST ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2sp,-vfp3,-vfp4,-neon,-fp16 | FileCheck %s --check-prefix=CORTEX-A7-NOFPU -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2sp,-vfp3,-vfp4,-neon,-fp16 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4 ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-FPUV4-FAST ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,,-d32,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4 ; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=pic | FileCheck %s --check-prefix=RELOC-PIC ; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=static | FileCheck %s --check-prefix=RELOC-OTHER @@ -278,15 +234,6 @@ ; V6-NOT: .eabi_attribute 28 ; V6: .eabi_attribute 38, 1 -; V6-FAST-NOT: .eabi_attribute 19 -;; Despite the V6 CPU having no FPU by default, we chose to flush to -;; positive zero here. There's no hardware support doing this, but the -;; fast maths software library might. -; V6-FAST-NOT: .eabi_attribute 20 -; V6-FAST-NOT: .eabi_attribute 21 -; V6-FAST-NOT: .eabi_attribute 22 -; V6-FAST: .eabi_attribute 23, 1 - ;; We emit 6, 12 for both v6-M and v6S-M, technically this is incorrect for ;; V6-M, however we don't model the OS extension so this is fine. ; V6M: .eabi_attribute 6, 12 @@ -312,14 +259,6 @@ ; V6M-NOT: .eabi_attribute 28 ; V6M: .eabi_attribute 38, 1 -; V6M-FAST-NOT: .eabi_attribute 19 -;; Despite the V6M CPU having no FPU by default, we chose to flush to -;; positive zero here. There's no hardware support doing this, but the -;; fast maths software library might. 
-; V6M-FAST-NOT: .eabi_attribute 20
-; V6M-FAST-NOT: .eabi_attribute 21
-; V6M-FAST-NOT: .eabi_attribute 22
-; V6M-FAST: .eabi_attribute 23, 1

; ARM1156T2F-S: .cpu arm1156t2f-s
; ARM1156T2F-S: .eabi_attribute 6, 8
@@ -342,14 +281,6 @@
; ARM1156T2F-S-NOT: .eabi_attribute 28
; ARM1156T2F-S: .eabi_attribute 38, 1

-; ARM1156T2F-S-FAST-NOT: .eabi_attribute 19
-;; V6 cores default to flush to positive zero (value 0). Note that value 2 is also equally
-;; valid for this core, it's an implementation defined question as to which of 0 and 2 you
-;; select. LLVM historically picks 0.
-; ARM1156T2F-S-FAST-NOT: .eabi_attribute 20
-; ARM1156T2F-S-FAST-NOT: .eabi_attribute 21
-; ARM1156T2F-S-FAST-NOT: .eabi_attribute 22
-; ARM1156T2F-S-FAST: .eabi_attribute 23, 1

; V7M: .eabi_attribute 6, 10
; V7M: .eabi_attribute 7, 77
@@ -374,15 +305,6 @@
; V7M-NOT: .eabi_attribute 28
; V7M: .eabi_attribute 38, 1

-; V7M-FAST-NOT: .eabi_attribute 19
-;; Despite the V7M CPU having no FPU by default, we chose to flush
-;; preserving sign. This matches what the hardware would do in the
-;; architecture revision were to exist on the current target.
-; V7M-FAST: .eabi_attribute 20, 2
-; V7M-FAST-NOT: .eabi_attribute 21
-; V7M-FAST-NOT: .eabi_attribute 22
-; V7M-FAST: .eabi_attribute 23, 1
-
; V7: .syntax unified
; V7: .eabi_attribute 6, 10
; V7-NOT: .eabi_attribute 27
@@ -401,13 +323,6 @@
; V7-NOT: .eabi_attribute 28
; V7: .eabi_attribute 38, 1

-; V7-FAST-NOT: .eabi_attribute 19
-;; The default CPU does have an FPU and it must be VFPv3 or better, so it flushes
-;; denormals to zero preserving the sign.
-; V7-FAST: .eabi_attribute 20, 2
-; V7-FAST-NOT: .eabi_attribute 21
-; V7-FAST-NOT: .eabi_attribute 22
-; V7-FAST: .eabi_attribute 23, 1

; V7VE: .syntax unified
; V7VE: .eabi_attribute 6, 10 @ Tag_CPU_arch
@@ -435,12 +350,6 @@
; V8-NOT: .eabi_attribute 22
; V8: .eabi_attribute 23, 3

-; V8-FAST-NOT: .eabi_attribute 19
-;; The default does have an FPU, and for V8-A, it flushes preserving sign.
-; V8-FAST: .eabi_attribute 20, 2
-; V8-FAST-NOT: .eabi_attribute 21
-; V8-FAST-NOT: .eabi_attribute 22
-; V8-FAST: .eabi_attribute 23, 1

; Vt8: .syntax unified
; Vt8: .eabi_attribute 6, 14
@@ -552,15 +461,11 @@
;; We default to IEEE 754 compliance
; CORTEX-A7-CHECK: .eabi_attribute 20, 1
;; The A7 has VFPv3 support by default, so flush preserving sign.
-; CORTEX-A7-CHECK-FAST: .eabi_attribute 20, 2
; CORTEX-A7-NOFPU: .eabi_attribute 20, 1
;; Despite there being no FPU, we chose to flush to zero preserving
;; sign. This matches what the hardware would do for this architecture
;; revision.
-; CORTEX-A7-NOFPU-FAST: .eabi_attribute 20, 2
; CORTEX-A7-FPUV4: .eabi_attribute 20, 1
-;; The VFPv4 FPU flushes preserving sign.
-; CORTEX-A7-FPUV4-FAST: .eabi_attribute 20, 2

; Tag_ABI_FP_exceptions
; CORTEX-A7-CHECK: .eabi_attribute 21, 1
@@ -610,13 +515,6 @@
; CORTEX-A5-DEFAULT: .eabi_attribute 24, 1
; CORTEX-A5-DEFAULT: .eabi_attribute 25, 1

-; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 19
-;; The A5 defaults to a VFPv4 FPU, so it flushed preserving the sign when -ffast-math
-;; is given.
-; CORTEX-A5-DEFAULT-FAST: .eabi_attribute 20, 2
-; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 21
-; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 22
-; CORTEX-A5-DEFAULT-FAST: .eabi_attribute 23, 1

; CORTEX-A5-NONEON: .cpu cortex-a5
; CORTEX-A5-NONEON: .eabi_attribute 6, 10
@@ -634,13 +532,6 @@
; CORTEX-A5-NONEON: .eabi_attribute 24, 1
; CORTEX-A5-NONEON: .eabi_attribute 25, 1

-; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 19
-;; The A5 defaults to a VFPv4 FPU, so it flushed preserving sign when -ffast-math
-;; is given.
-; CORTEX-A5-NONEON-FAST: .eabi_attribute 20, 2
-; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 21
-; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 22
-; CORTEX-A5-NONEON-FAST: .eabi_attribute 23, 1

; CORTEX-A5-NOFPU: .cpu cortex-a5
; CORTEX-A5-NOFPU: .eabi_attribute 6, 10
@@ -659,14 +550,9 @@
; CORTEX-A5-NOFPU: .eabi_attribute 24, 1
; CORTEX-A5-NOFPU: .eabi_attribute 25, 1

-; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 19
;; Despite there being no FPU, we chose to flush to zero preserving
;; sign. This matches what the hardware would do for this architecture
;; revision.
-; CORTEX-A5-NOFPU-FAST: .eabi_attribute 20, 2
-; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 21
-; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 22
-; CORTEX-A5-NOFPU-FAST: .eabi_attribute 23, 1

; CORTEX-A8-SOFT: .cpu cortex-a8
; CORTEX-A8-SOFT: .eabi_attribute 6, 10
@@ -712,15 +598,6 @@
; CORTEX-A9-SOFT-NOT: .eabi_attribute 28
; CORTEX-A9-SOFT: .eabi_attribute 38, 1

-; CORTEX-A8-SOFT-FAST-NOT: .eabi_attribute 19
-; CORTEX-A9-SOFT-FAST-NOT: .eabi_attribute 19
-;; The A9 defaults to a VFPv3 FPU, so it flushes preserving the sign when
-;; -ffast-math is specified.
-; CORTEX-A8-SOFT-FAST: .eabi_attribute 20, 2
-; CORTEX-A9-SOFT-FAST: .eabi_attribute 20, 2
-; CORTEX-A5-SOFT-FAST-NOT: .eabi_attribute 21
-; CORTEX-A5-SOFT-FAST-NOT: .eabi_attribute 22
-; CORTEX-A5-SOFT-FAST: .eabi_attribute 23, 1

; CORTEX-A8-HARD: .cpu cortex-a8
; CORTEX-A8-HARD: .eabi_attribute 6, 10
@@ -766,21 +643,6 @@
; CORTEX-A9-HARD: .eabi_attribute 28, 1
; CORTEX-A9-HARD: .eabi_attribute 38, 1

-; CORTEX-A8-HARD-FAST-NOT: .eabi_attribute 19
-;; The A8 defaults to a VFPv3 FPU, so it flushes preserving the sign when
-;; -ffast-math is specified.
-; CORTEX-A8-HARD-FAST: .eabi_attribute 20, 2
-; CORTEX-A8-HARD-FAST-NOT: .eabi_attribute 21
-; CORTEX-A8-HARD-FAST-NOT: .eabi_attribute 22
-; CORTEX-A8-HARD-FAST: .eabi_attribute 23, 1
-
-; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 19
-;; The A9 defaults to a VFPv3 FPU, so it flushes preserving the sign when
-;; -ffast-math is specified.
-; CORTEX-A9-HARD-FAST: .eabi_attribute 20, 2
-; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 21
-; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 22
-; CORTEX-A9-HARD-FAST: .eabi_attribute 23, 1

; CORTEX-A12-DEFAULT: .cpu cortex-a12
; CORTEX-A12-DEFAULT: .eabi_attribute 6, 10
@@ -800,13 +662,6 @@
; CORTEX-A12-DEFAULT: .eabi_attribute 24, 1
; CORTEX-A12-DEFAULT: .eabi_attribute 25, 1

-; CORTEX-A12-DEFAULT-FAST-NOT: .eabi_attribute 19
-;; The A12 defaults to a VFPv3 FPU, so it flushes preserving the sign when
-;; -ffast-math is specified.
-; CORTEX-A12-DEFAULT-FAST: .eabi_attribute 20, 2
-; CORTEX-A12-HARD-FAST-NOT: .eabi_attribute 21
-; CORTEX-A12-HARD-FAST-NOT: .eabi_attribute 22
-; CORTEX-A12-HARD-FAST: .eabi_attribute 23, 1

; CORTEX-A12-NOFPU: .cpu cortex-a12
; CORTEX-A12-NOFPU: .eabi_attribute 6, 10
@@ -826,14 +681,6 @@
; CORTEX-A12-NOFPU: .eabi_attribute 24, 1
; CORTEX-A12-NOFPU: .eabi_attribute 25, 1

-; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 19
-;; Despite there being no FPU, we chose to flush to zero preserving
-;; sign. This matches what the hardware would do for this architecture
-;; revision.
-; CORTEX-A12-NOFPU-FAST: .eabi_attribute 20, 2
-; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 21
-; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 22
-; CORTEX-A12-NOFPU-FAST: .eabi_attribute 23, 1

; CORTEX-A15: .cpu cortex-a15
; CORTEX-A15: .eabi_attribute 6, 10
@@ -857,13 +704,6 @@
; CORTEX-A15-NOT: .eabi_attribute 28
; CORTEX-A15: .eabi_attribute 38, 1

-; CORTEX-A15-FAST-NOT: .eabi_attribute 19
-;; The A15 defaults to a VFPv3 FPU, so it flushes preserving the sign when
-;; -ffast-math is specified.
-; CORTEX-A15-FAST: .eabi_attribute 20, 2
-; CORTEX-A15-FAST-NOT: .eabi_attribute 21
-; CORTEX-A15-FAST-NOT: .eabi_attribute 22
-; CORTEX-A15-FAST: .eabi_attribute 23, 1

; CORTEX-A17-DEFAULT: .cpu cortex-a17
; CORTEX-A17-DEFAULT: .eabi_attribute 6, 10
@@ -883,13 +723,6 @@
; CORTEX-A17-DEFAULT: .eabi_attribute 24, 1
; CORTEX-A17-DEFAULT: .eabi_attribute 25, 1

-; CORTEX-A17-FAST-NOT: .eabi_attribute 19
-;; The A17 defaults to a VFPv3 FPU, so it flushes preserving the sign when
-;; -ffast-math is specified.
-; CORTEX-A17-FAST: .eabi_attribute 20, 2
-; CORTEX-A17-FAST-NOT: .eabi_attribute 21
-; CORTEX-A17-FAST-NOT: .eabi_attribute 22
-; CORTEX-A17-FAST: .eabi_attribute 23, 1

; CORTEX-A17-NOFPU: .cpu cortex-a17
; CORTEX-A17-NOFPU: .eabi_attribute 6, 10
@@ -910,13 +743,6 @@
; CORTEX-A17-NOFPU: .eabi_attribute 25, 1
; CORTEX-A17-NOFPU-NOT: .eabi_attribute 19

-;; Despite there being no FPU, we chose to flush to zero preserving
-;; sign. This matches what the hardware would do for this architecture
-;; revision.
-; CORTEX-A17-NOFPU-FAST: .eabi_attribute 20, 2
-; CORTEX-A17-NOFPU-FAST-NOT: .eabi_attribute 21
-; CORTEX-A17-NOFPU-FAST-NOT: .eabi_attribute 22
-; CORTEX-A17-NOFPU-FAST: .eabi_attribute 23, 1

; Test flags -enable-no-trapping-fp-math and -denormal-fp-math:
; NO-TRAPPING-MATH: .eabi_attribute 21, 0
@@ -946,16 +772,6 @@
; CORTEX-M0-NOT: .eabi_attribute 28
; CORTEX-M0: .eabi_attribute 38, 1

-; CORTEX-M0-FAST-NOT: .eabi_attribute 19
-;; Despite the M0 CPU having no FPU in this scenario, we chose to
-;; flush to positive zero here. There's no hardware support doing
-;; this, but the fast maths software library might and such behaviour
-;; would match hardware support on this architecture revision if it
-;; existed.
-; CORTEX-M0-FAST-NOT: .eabi_attribute 20
-; CORTEX-M0-FAST-NOT: .eabi_attribute 21
-; CORTEX-M0-FAST-NOT: .eabi_attribute 22
-; CORTEX-M0-FAST: .eabi_attribute 23, 1

; CORTEX-M0PLUS: .cpu cortex-m0plus
; CORTEX-M0PLUS: .eabi_attribute 6, 12
@@ -978,16 +794,6 @@
; CORTEX-M0PLUS-NOT: .eabi_attribute 28
; CORTEX-M0PLUS: .eabi_attribute 38, 1

-; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 19
-;; Despite the M0+ CPU having no FPU in this scenario, we chose to
-;; flush to positive zero here. There's no hardware support doing
-;; this, but the fast maths software library might and such behaviour
-;; would match hardware support on this architecture revision if it
-;; existed.
-; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 20
-; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 21
-; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 22
-; CORTEX-M0PLUS-FAST: .eabi_attribute 23, 1

; CORTEX-M1: .cpu cortex-m1
; CORTEX-M1: .eabi_attribute 6, 12
@@ -1010,16 +816,6 @@
; CORTEX-M1-NOT: .eabi_attribute 28
; CORTEX-M1: .eabi_attribute 38, 1

-; CORTEX-M1-FAST-NOT: .eabi_attribute 19
-;; Despite the M1 CPU having no FPU in this scenario, we chose to
-;; flush to positive zero here. There's no hardware support doing
-;; this, but the fast maths software library might and such behaviour
-;; would match hardware support on this architecture revision if it
-;; existed.
-; CORTEX-M1-FAST-NOT: .eabi_attribute 20
-; CORTEX-M1-FAST-NOT: .eabi_attribute 21
-; CORTEX-M1-FAST-NOT: .eabi_attribute 22
-; CORTEX-M1-FAST: .eabi_attribute 23, 1

; SC000: .cpu sc000
; SC000: .eabi_attribute 6, 12
@@ -1041,16 +837,6 @@
; SC000-NOT: .eabi_attribute 28
; SC000: .eabi_attribute 38, 1

-; SC000-FAST-NOT: .eabi_attribute 19
-;; Despite the SC000 CPU having no FPU in this scenario, we chose to
-;; flush to positive zero here. There's no hardware support doing
-;; this, but the fast maths software library might and such behaviour
-;; would match hardware support on this architecture revision if it
-;; existed.
-; SC000-FAST-NOT: .eabi_attribute 20
-; SC000-FAST-NOT: .eabi_attribute 21
-; SC000-FAST-NOT: .eabi_attribute 22
-; SC000-FAST: .eabi_attribute 23, 1

; CORTEX-M3: .cpu cortex-m3
; CORTEX-M3: .eabi_attribute 6, 10
@@ -1073,14 +859,6 @@
; CORTEX-M3-NOT: .eabi_attribute 28
; CORTEX-M3: .eabi_attribute 38, 1

-; CORTEX-M3-FAST-NOT: .eabi_attribute 19
-;; Despite there being no FPU, we chose to flush to zero preserving
-;; sign. This matches what the hardware would do for this architecture
-;; revision.
-; CORTEX-M3-FAST: .eabi_attribute 20, 2
-; CORTEX-M3-FAST-NOT: .eabi_attribute 21
-; CORTEX-M3-FAST-NOT: .eabi_attribute 22
-; CORTEX-M3-FAST: .eabi_attribute 23, 1

; SC300: .cpu sc300
; SC300: .eabi_attribute 6, 10
@@ -1103,14 +881,6 @@
; SC300-NOT: .eabi_attribute 28
; SC300: .eabi_attribute 38, 1

-; SC300-FAST-NOT: .eabi_attribute 19
-;; Despite there being no FPU, we chose to flush to zero preserving
-;; sign. This matches what the hardware would do for this architecture
-;; revision.
-; SC300-FAST: .eabi_attribute 20, 2
-; SC300-FAST-NOT: .eabi_attribute 21
-; SC300-FAST-NOT: .eabi_attribute 22
-; SC300-FAST: .eabi_attribute 23, 1

; CORTEX-M4-SOFT: .cpu cortex-m4
; CORTEX-M4-SOFT: .eabi_attribute 6, 13
@@ -1134,13 +904,6 @@
; CORTEX-M4-SOFT-NOT: .eabi_attribute 28
; CORTEX-M4-SOFT: .eabi_attribute 38, 1

-; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 19
-;; The M4 defaults to a VFPv4 FPU, so it flushes preserving the sign when
-;; -ffast-math is specified.
-; CORTEX-M4-SOFT-FAST: .eabi_attribute 20, 2
-; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 21
-; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 22
-; CORTEX-M4-SOFT-FAST: .eabi_attribute 23, 1

; CORTEX-M4-HARD: .cpu cortex-m4
; CORTEX-M4-HARD: .eabi_attribute 6, 13
@@ -1164,13 +927,6 @@
; CORTEX-M4-HARD: .eabi_attribute 28, 1
; CORTEX-M4-HARD: .eabi_attribute 38, 1

-; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 19
-;; The M4 defaults to a VFPv4 FPU, so it flushes preserving the sign when
-;; -ffast-math is specified.
-; CORTEX-M4-HARD-FAST: .eabi_attribute 20, 2
-; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 21
-; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 22
-; CORTEX-M4-HARD-FAST: .eabi_attribute 23, 1

; CORTEX-M7: .cpu cortex-m7
; CORTEX-M7: .eabi_attribute 6, 13
@@ -1197,16 +953,6 @@
; CORTEX-M7: .eabi_attribute 38, 1
; CORTEX-M7: .eabi_attribute 14, 0

-; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 19
-;; The M7 has the ARMv8 FP unit, which always flushes preserving sign.
-; CORTEX-M7-FAST: .eabi_attribute 20, 2
-;; Despite there being no FPU, we chose to flush to zero preserving
-;; sign. This matches what the hardware would do for this architecture
-;; revision.
-; CORTEX-M7-NOFPU-FAST: .eabi_attribute 20, 2
-; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 21
-; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 22
-; CORTEX-M7-NOFPU-FAST: .eabi_attribute 23, 1

; CORTEX-R4: .cpu cortex-r4
; CORTEX-R4: .eabi_attribute 6, 10
@@ -1273,12 +1019,6 @@
; CORTEX-R5-NOT: .eabi_attribute 28
; CORTEX-R5: .eabi_attribute 38, 1

-; CORTEX-R5-FAST-NOT: .eabi_attribute 19
-;; The R5 has the VFPv3 FP unit, which always flushes preserving sign.
-; CORTEX-R5-FAST: .eabi_attribute 20, 2
-; CORTEX-R5-FAST-NOT: .eabi_attribute 21
-; CORTEX-R5-FAST-NOT: .eabi_attribute 22
-; CORTEX-R5-FAST: .eabi_attribute 23, 1

; CORTEX-R7: .cpu cortex-r7
; CORTEX-R7: .eabi_attribute 6, 10
@@ -1301,12 +1041,6 @@
; CORTEX-R7-NOT: .eabi_attribute 28
; CORTEX-R7: .eabi_attribute 38, 1

-; CORTEX-R7-FAST-NOT: .eabi_attribute 19
-;; The R7 has the VFPv3 FP unit, which always flushes preserving sign.
-; CORTEX-R7-FAST: .eabi_attribute 20, 2
-; CORTEX-R7-FAST-NOT: .eabi_attribute 21
-; CORTEX-R7-FAST-NOT: .eabi_attribute 22
-; CORTEX-R7-FAST: .eabi_attribute 23, 1

; CORTEX-R8: .cpu cortex-r8
; CORTEX-R8: .eabi_attribute 6, 10
@@ -1329,12 +1063,6 @@
; CORTEX-R8-NOT: .eabi_attribute 28
; CORTEX-R8: .eabi_attribute 38, 1

-; CORTEX-R8-FAST-NOT: .eabi_attribute 19
-;; The R8 has the VFPv3 FP unit, which always flushes preserving sign.
-; CORTEX-R8-FAST: .eabi_attribute 20, 2
-; CORTEX-R8-FAST-NOT: .eabi_attribute 21
-; CORTEX-R8-FAST-NOT: .eabi_attribute 22
-; CORTEX-R8-FAST: .eabi_attribute 23, 1

; CORTEX-A32: .cpu cortex-a32
; CORTEX-A32: .eabi_attribute 6, 14
@@ -1359,12 +1087,6 @@
; CORTEX-A32-NOT: .eabi_attribute 28
; CORTEX-A32: .eabi_attribute 38, 1

-; CORTEX-A32-FAST-NOT: .eabi_attribute 19
-;; The A32 has the ARMv8 FP unit, which always flushes preserving sign.
-; CORTEX-A32-FAST: .eabi_attribute 20, 2
-; CORTEX-A32-FAST-NOT: .eabi_attribute 21
-; CORTEX-A32-FAST-NOT: .eabi_attribute 22
-; CORTEX-A32-FAST: .eabi_attribute 23, 1

; CORTEX-M23: .cpu cortex-m23
; CORTEX-M23: .eabi_attribute 6, 16
@@ -1430,11 +1152,6 @@
; CORTEX-M35P: .eabi_attribute 38, 1
; CORTEX-M35P: .eabi_attribute 14, 0

-; CORTEX-M33-FAST-NOT: .eabi_attribute 19
-; CORTEX-M33-FAST: .eabi_attribute 20, 2
-; CORTEX-M33-FAST-NOT: .eabi_attribute 21
-; CORTEX-M33-FAST-NOT: .eabi_attribute 22
-; CORTEX-M33-FAST: .eabi_attribute 23, 1

; CORTEX-A35: .cpu cortex-a35
; CORTEX-A35: .eabi_attribute 6, 14
@@ -1459,12 +1176,6 @@
; CORTEX-A35-NOT: .eabi_attribute 28
; CORTEX-A35: .eabi_attribute 38, 1

-; CORTEX-A35-FAST-NOT: .eabi_attribute 19
-;; The A35 has the ARMv8 FP unit, which always flushes preserving sign.
-; CORTEX-A35-FAST: .eabi_attribute 20, 2
-; CORTEX-A35-FAST-NOT: .eabi_attribute 21
-; CORTEX-A35-FAST-NOT: .eabi_attribute 22
-; CORTEX-A35-FAST: .eabi_attribute 23, 1

; CORTEX-A53: .cpu cortex-a53
; CORTEX-A53: .eabi_attribute 6, 14
@@ -1489,12 +1200,6 @@
; CORTEX-A53-NOT: .eabi_attribute 28
; CORTEX-A53: .eabi_attribute 38, 1

-; CORTEX-A53-FAST-NOT: .eabi_attribute 19
-;; The A53 has the ARMv8 FP unit, which always flushes preserving sign.
-; CORTEX-A53-FAST: .eabi_attribute 20, 2
-; CORTEX-A53-FAST-NOT: .eabi_attribute 21
-; CORTEX-A53-FAST-NOT: .eabi_attribute 22
-; CORTEX-A53-FAST: .eabi_attribute 23, 1

; CORTEX-A57: .cpu cortex-a57
; CORTEX-A57: .eabi_attribute 6, 14
@@ -1519,12 +1224,6 @@
; CORTEX-A57-NOT: .eabi_attribute 28
; CORTEX-A57: .eabi_attribute 38, 1

-; CORTEX-A57-FAST-NOT: .eabi_attribute 19
-;; The A57 has the ARMv8 FP unit, which always flushes preserving sign.
-; CORTEX-A57-FAST: .eabi_attribute 20, 2
-; CORTEX-A57-FAST-NOT: .eabi_attribute 21
-; CORTEX-A57-FAST-NOT: .eabi_attribute 22
-; CORTEX-A57-FAST: .eabi_attribute 23, 1

; CORTEX-A72: .cpu cortex-a72
; CORTEX-A72: .eabi_attribute 6, 14
@@ -1549,12 +1248,6 @@
; CORTEX-A72-NOT: .eabi_attribute 28
; CORTEX-A72: .eabi_attribute 38, 1

-; CORTEX-A72-FAST-NOT: .eabi_attribute 19
-;; The A72 has the ARMv8 FP unit, which always flushes preserving sign.
-; CORTEX-A72-FAST: .eabi_attribute 20, 2
-; CORTEX-A72-FAST-NOT: .eabi_attribute 21
-; CORTEX-A72-FAST-NOT: .eabi_attribute 22
-; CORTEX-A72-FAST: .eabi_attribute 23, 1

; CORTEX-A73: .cpu cortex-a73
; CORTEX-A73: .eabi_attribute 6, 14
@@ -1580,12 +1273,6 @@
; CORTEX-A73: .eabi_attribute 38, 1
; CORTEX-A73: .eabi_attribute 14, 0

-; EXYNOS-FAST-NOT: .eabi_attribute 19
-;; The Exynos processors have the ARMv8 FP unit, which always flushes preserving sign.
-; EXYNOS-FAST: .eabi_attribute 20, 2
-; EXYNOS-FAST-NOT: .eabi_attribute 21
-; EXYNOS-FAST-NOT: .eabi_attribute 22
-; EXYNOS-FAST: .eabi_attribute 23, 1

; EXYNOS-M3: .cpu exynos-m3
; EXYNOS-M3: .eabi_attribute 6, 14
@@ -1684,12 +1371,6 @@
; GENERIC-ARMV8_1-A-NOT: .eabi_attribute 28
; GENERIC-ARMV8_1-A: .eabi_attribute 38, 1

-; GENERIC-ARMV8_1-A-FAST-NOT: .eabi_attribute 19
-;; GENERIC-ARMV8_1-A has the ARMv8 FP unit, which always flushes preserving sign.
-; GENERIC-ARMV8_1-A-FAST: .eabi_attribute 20, 2
-; GENERIC-ARMV8_1-A-FAST-NOT: .eabi_attribute 21
-; GENERIC-ARMV8_1-A-FAST-NOT: .eabi_attribute 22
-; GENERIC-ARMV8_1-A-FAST: .eabi_attribute 23, 1

; RELOC-PIC: .eabi_attribute 15, 1
; RELOC-PIC: .eabi_attribute 16, 1
diff --git a/llvm/test/CodeGen/LoongArch/lasx/abs.ll b/llvm/test/CodeGen/LoongArch/lasx/abs.ll
new file mode 100644
index 0000000..e3b0d04d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/abs.ll
@@ -0,0 +1,128 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @vabs_b(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_b:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvneg.b $xr1, $xr0
+; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <32 x i8>, ptr %src
+ %b = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 true)
+ store <32 x i8> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_b_1(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_b_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvneg.b $xr1, $xr0
+; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <32 x i8>, ptr %src
+ %b = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 false)
+ store <32 x i8> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_h(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvneg.h $xr1, $xr0
+; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <16 x i16>, ptr %src
+ %b = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 true)
+ store <16 x i16> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_h_1(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_h_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvneg.h $xr1, $xr0
+; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <16 x i16>, ptr %src
+ %b = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 false)
+ store <16 x i16> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_w(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_w:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvneg.w $xr1, $xr0
+; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <8 x i32>, ptr %src
+ %b = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 true)
+ store <8 x i32> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_w_1(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_w_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvneg.w $xr1, $xr0
+; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <8 x i32>, ptr %src
+ %b = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 false)
+ store <8 x i32> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_d(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvneg.d $xr1, $xr0
+; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <4 x i64>, ptr %src
+ %b = tail call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 true)
+ store <4 x i64> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_d_1(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_d_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvneg.d $xr1, $xr0
+; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <4 x i64>, ptr %src
+ %b = tail call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false)
+ store <4 x i64> %b, ptr %dst
+ ret void
+}
+
+declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1)
+declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
+declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
+declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll
index e66a152..9868775 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll
@@ -7,11 +7,7 @@ define void @vadda_b(ptr %res, ptr %a, ptr %b) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xvld $xr0, $a1, 0
; CHECK-NEXT: xvld $xr1, $a2, 0
-; CHECK-NEXT: xvneg.b $xr2, $xr0
-; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr2
-; CHECK-NEXT: xvneg.b $xr2, $xr1
-; CHECK-NEXT: xvmax.b $xr1, $xr1, $xr2
-; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvadda.b $xr0, $xr0, $xr1
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -33,11 +29,7 @@ define void @vadda_h(ptr %res, ptr %a, ptr %b) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xvld $xr0, $a1, 0
; CHECK-NEXT: xvld $xr1, $a2, 0
-; CHECK-NEXT: xvneg.h $xr2, $xr0
-; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr2
-; CHECK-NEXT: xvneg.h $xr2, $xr1
-; CHECK-NEXT: xvmax.h $xr1, $xr1, $xr2
-; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvadda.h $xr0, $xr0, $xr1
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -59,11 +51,7 @@ define void @vadda_w(ptr %res, ptr %a, ptr %b) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xvld $xr0, $a1, 0
; CHECK-NEXT: xvld $xr1, $a2, 0
-; CHECK-NEXT: xvneg.w $xr2, $xr0
-; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr2
-; CHECK-NEXT: xvneg.w $xr2, $xr1
-; CHECK-NEXT: xvmax.w $xr1, $xr1, $xr2
-; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvadda.w $xr0, $xr0, $xr1
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -85,11 +73,7 @@ define void @vadda_d(ptr %res, ptr %a, ptr %b) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xvld $xr0, $a1, 0
; CHECK-NEXT: xvld $xr1, $a2, 0
-; CHECK-NEXT: xvneg.d $xr2, $xr0
-; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr2
-; CHECK-NEXT: xvneg.d $xr2, $xr1
-; CHECK-NEXT: xvmax.d $xr1, $xr1, $xr2
-; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvadda.d $xr0, $xr0, $xr1
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll
new file mode 100644
index 0000000..e1784f8
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll
@@ -0,0 +1,197 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+;; xvinsve0.w
+define void @xvinsve0_v8i32_l_0(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8i32_l_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 0
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <8 x i32> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v8i32_l_4(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8i32_l_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 4
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 5, i32 6, i32 7>
+ store <8 x i32> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v8f32_l(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8f32_l:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 0
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x float>, ptr %a
+ %vb = load <8 x float>, ptr %b
+ %vc = shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> <i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <8 x float> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v8i32_h_1(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8i32_h_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 1
+; CHECK-NEXT: xvst $xr1, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 0, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ store <8 x i32> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v8i32_h_6(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8i32_h_6:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 6
+; CHECK-NEXT: xvst $xr1, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 0, i32 15>
+ store <8 x i32> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v8f32_h(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8f32_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 0
+; CHECK-NEXT: xvst $xr1, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x float>, ptr %a
+ %vb = load <8 x float>, ptr %b
+ %vc = shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ store <8 x float> %vc, ptr %d
+ ret void
+}
+
+;; xvinsve0.d
+define void @xvinsve0_v4i64_l_1(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4i64_l_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
+ store <4 x i64> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v4i64_l_2(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4i64_l_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 2
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
+ store <4 x i64> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v4f64_l(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4f64_l:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 0
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x double>, ptr %a
+ %vb = load <4 x double>, ptr %b
+ %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
+ store <4 x double> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v4i64_h_0(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4i64_h_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 0
+; CHECK-NEXT: xvst $xr1, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ store <4 x i64> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v4i64_h_2(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4i64_h_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 2
+; CHECK-NEXT: xvst $xr1, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 4, i32 5, i32 0, i32 7>
+ store <4 x i64> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v4f64_h(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4f64_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 0
+; CHECK-NEXT: xvst $xr1, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x double>, ptr %a
+ %vb = load <4 x double>, ptr %b
+ %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ store <4 x double> %vc, ptr %d
+ ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll b/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll
new file mode 100644
index 0000000..39ac647
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
+
+define <8 x float> @fadd_elt0_v8f32(float %a) nounwind {
+; CHECK-LABEL: fadd_elt0_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -1168
+; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1
+; CHECK-NEXT: ret
+entry:
+ %b = insertelement <8 x float> poison, float %a, i32 0
+ %c = fadd <8 x float> %b, <float 1.0, float poison, float poison, float poison, float poison, float poison, float poison, float poison>
+ ret <8 x float> %c
+}
+
+define <4 x double> @fadd_elt0_v4f64(double %a) nounwind {
+; CHECK-LABEL: fadd_elt0_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -912
+; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1
+; CHECK-NEXT: ret
+entry:
+ %b = insertelement <4 x double> poison, double %a, i32 0
+ %c = fadd <4 x double> %b, <double 1.0, double poison, double poison, double poison>
+ ret <4 x double> %c
+}
+
+define <8 x float> @fsub_splat_v8f32(float %a, float %b) nounwind {
+; CHECK-LABEL: fsub_splat_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsub.s $fa0, $fa0, $fa1
+; CHECK-NEXT: xvreplve0.w $xr0, $xr0
+; CHECK-NEXT: ret
+entry:
+ %insa = insertelement <8 x float> poison, float %a, i32 0
+ %insb = insertelement <8 x float> poison, float %b, i32 0
+ %va = shufflevector <8 x float> %insa, <8 x float> poison, <8 x i32> zeroinitializer
+ %vb = shufflevector <8 x float> %insb, <8 x float> poison, <8 x i32> zeroinitializer
+ %c = fsub <8 x float> %va, %vb
+ ret <8 x float> %c
+}
+
+define <4 x double> @fsub_splat_v4f64(double %a) nounwind {
+; CHECK-LABEL: fsub_splat_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -784
+; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1
+; CHECK-NEXT: xvreplve0.d $xr0, $xr0
+; CHECK-NEXT: ret
+entry:
+ %insa = insertelement <4 x double> poison, double %a, i32 0
+ %insb = insertelement <4 x double> poison, double 1.0, i32 0
+ %va = shufflevector <4 x double> %insa, <4 x double> poison, <4 x i32> zeroinitializer
+ %vb = shufflevector <4 x double> %insb, <4 x double> poison, <4 x i32> zeroinitializer
+ %c = fsub <4 x double> %va, %vb
+ ret <4 x double> %c
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/abs.ll b/llvm/test/CodeGen/LoongArch/lsx/abs.ll
new file mode 100644
index 0000000..85fe1fe
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/abs.ll
@@ -0,0 +1,128 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @vabs_b(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_b:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vneg.b $vr1, $vr0
+; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <16 x i8>, ptr %src
+ %b = tail call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 true)
+ store <16 x i8> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_b_1(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_b_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vneg.b $vr1, $vr0
+; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <16 x i8>, ptr %src
+ %b = tail call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 false)
+ store <16 x i8> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_h(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vneg.h $vr1, $vr0
+; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <8 x i16>, ptr %src
+ %b = tail call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 true)
+ store <8 x i16> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_h_1(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_h_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vneg.h $vr1, $vr0
+; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <8 x i16>, ptr %src
+ %b = tail call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 false)
+ store <8 x i16> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_w(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_w:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vneg.w $vr1, $vr0
+; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <4 x i32>, ptr %src
+ %b = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 true)
+ store <4 x i32> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_w_1(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_w_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vneg.w $vr1, $vr0
+; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <4 x i32>, ptr %src
+ %b = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 false)
+ store <4 x i32> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_d(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vneg.d $vr1, $vr0
+; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <2 x i64>, ptr %src
+ %b = tail call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 true)
+ store <2 x i64> %b, ptr %dst
+ ret void
+}
+
+define void @vabs_d_1(ptr %dst, ptr %src) {
+; CHECK-LABEL: vabs_d_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vneg.d $vr1, $vr0
+; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %a = load <2 x i64>, ptr %src
+ %b = tail call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 false)
+ store <2 x i64> %b, ptr %dst
+ ret void
+}
+
+declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)
+declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)
+declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)
+declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll
index 2bd0b59..34f22e1f 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll
@@ -7,11 +7,7 @@ define void @vadda_b(ptr %res, ptr %a, ptr %b) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vld $vr0, $a1, 0
; CHECK-NEXT: vld $vr1, $a2, 0
-; CHECK-NEXT: vneg.b $vr2, $vr0
-; CHECK-NEXT: vmax.b $vr0, $vr0, $vr2
-; CHECK-NEXT: vneg.b $vr2, $vr1
-; CHECK-NEXT: vmax.b $vr1, $vr1, $vr2
-; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vadda.b $vr0, $vr0, $vr1
; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -33,11 +29,7 @@ define void @vadda_h(ptr %res, ptr %a, ptr %b) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vld $vr0, $a1, 0
; CHECK-NEXT: vld $vr1, $a2, 0
-; CHECK-NEXT: vneg.h $vr2, $vr0
-; CHECK-NEXT: vmax.h $vr0, $vr0, $vr2
-; CHECK-NEXT: vneg.h $vr2, $vr1
-; CHECK-NEXT: vmax.h $vr1, $vr1, $vr2
-; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vadda.h $vr0, $vr0, $vr1
; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -59,11 +51,7 @@ define void @vadda_w(ptr %res, ptr %a, ptr %b) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vld $vr0, $a1, 0
; CHECK-NEXT: vld $vr1, $a2, 0
-; CHECK-NEXT: vneg.w $vr2, $vr0
-; CHECK-NEXT: vmax.w $vr0, $vr0, $vr2
-; CHECK-NEXT: vneg.w $vr2, $vr1
-; CHECK-NEXT: vmax.w $vr1, $vr1, $vr2
-; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vadda.w $vr0, $vr0, $vr1
; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -85,11 +73,7 @@ define void @vadda_d(ptr %res, ptr %a, ptr %b) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vld $vr0, $a1, 0
; CHECK-NEXT: vld $vr1, $a2, 0
-; CHECK-NEXT: vneg.d $vr2, $vr0
-; CHECK-NEXT: vmax.d $vr0, $vr0, $vr2
-; CHECK-NEXT: vneg.d $vr2, $vr1
-; CHECK-NEXT: vmax.d $vr1, $vr1, $vr2
-; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vadda.d $vr0, $vr0, $vr1
; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll b/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll
new file mode 100644
index 0000000..b651f11
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
+
+define <4 x float> @fadd_elt0_v4f32(float %a) nounwind {
+; CHECK-LABEL: fadd_elt0_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -1168
+; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1
+; CHECK-NEXT: ret
+entry:
+ %b = insertelement <4 x float> poison, float %a, i32 0
+ %c = fadd <4 x float> %b, <float 1.0, float poison, float poison, float poison>
+ ret <4 x float> %c
+}
+
+define <2 x double> @fadd_elt0_v2f64(double %a) nounwind {
+; CHECK-LABEL: fadd_elt0_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -912
+; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1
+; CHECK-NEXT: ret
+entry:
+ %b = insertelement <2 x double> poison, double %a, i32 0
+ %c = fadd <2 x double> %b, <double 1.0, double poison>
+ ret <2 x double> %c
+}
+
+define <4 x float> @fsub_splat_v4f32(float %b) nounwind {
+; CHECK-LABEL: fsub_splat_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -1168
+; CHECK-NEXT: fsub.s $fa0, $fa1, $fa0
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT: ret
+entry:
+ %insa = insertelement <4 x float> poison, float 1.0, i32 0
+ %insb = insertelement <4 x float> poison, float %b, i32 0
+ %va = shufflevector <4 x float> %insa, <4 x float> poison, <4 x i32> zeroinitializer
+ %vb = shufflevector <4 x float> %insb, <4 x float> poison, <4 x i32> zeroinitializer
+ %c = fsub <4 x float> %va, %vb
+ ret <4 x float> %c
+}
+
+define <2 x double> @fsub_splat_v2f64(double %a, double %b) nounwind {
+; CHECK-LABEL: fsub_splat_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsub.d $fa0, $fa0, $fa1
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: ret
+entry:
+ %insa = insertelement <2 x double> poison, double %a, i32 0
+ %insb = insertelement <2 x double> poison, double %b, i32 0
+ %va = shufflevector <2 x double> %insa, <2 x double> poison, <2 x i32> zeroinitializer
+ %vb = shufflevector <2 x double> %insb, <2 x double> poison, <2 x i32> zeroinitializer
+ %c = fsub <2 x double> %va, %vb
+ ret <2 x double> %c
+}
diff --git a/llvm/test/CodeGen/LoongArch/merge-offset-option.ll b/llvm/test/CodeGen/LoongArch/merge-offset-option.ll
new file mode 100644
index 0000000..e5351a6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/merge-offset-option.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 -mattr=+d --relocation-model=static -O1 \
+; RUN: < %s | FileCheck %s --check-prefix=MERGE
+; RUN: llc --mtriple=loongarch64 -mattr=+d --relocation-model=static -O1 \
+; RUN: --loongarch-enable-merge-offset=false < %s | FileCheck %s --check-prefix=NO_MERGE
+
+@g = dso_local global i32 zeroinitializer, align 4
+
+define void @foo() nounwind {
+; MERGE-LABEL: foo:
+; MERGE: # %bb.0:
+; MERGE-NEXT: pcalau12i $a0, %pc_hi20(g)
+; MERGE-NEXT: ld.w $zero, $a0, %pc_lo12(g)
+; MERGE-NEXT: ret
+;
+; NO_MERGE-LABEL: foo:
+; NO_MERGE: # %bb.0:
+; NO_MERGE-NEXT: pcalau12i $a0, %pc_hi20(g)
+; NO_MERGE-NEXT: addi.d $a0, $a0, %pc_lo12(g)
+; NO_MERGE-NEXT: ld.w $zero, $a0, 0
+; NO_MERGE-NEXT: ret
+ %v = load volatile i32, ptr @g
+ ret void
+}
diff --git a/llvm/test/CodeGen/NVPTX/bug22322.ll b/llvm/test/CodeGen/NVPTX/bug22322.ll
index 055c512..71e180b 100644
--- a/llvm/test/CodeGen/NVPTX/bug22322.ll
+++ b/llvm/test/CodeGen/NVPTX/bug22322.ll
@@ -20,12 +20,12 @@ _ZL11compute_vecRK6float3jb.exit:
 call void @llvm.lifetime.start.p0(i64 4, ptr %ret_vec.sroa.8.i)
 %6 = and i32 %4, 15
 %7 = icmp eq i32 %6, 0
- %8 = select i1 %7, float 0.000000e+00, float -1.000000e+00
+ %8 = select nnan nsz i1 %7, float 0.000000e+00, float -1.000000e+00
 store float %8, ptr %ret_vec.sroa.8.i, align 4
; CHECK: max.f32 %r{{[0-9]+}}, %r{{[0-9]+}}, 0f00000000
 %9 = fcmp olt float %8, 0.000000e+00
 %ret_vec.sroa.8.i.val = load float, ptr %ret_vec.sroa.8.i, align 4
- %10 = select i1 %9, float 0.000000e+00, float %ret_vec.sroa.8.i.val
+ %10 = select nnan nsz i1 %9, float 0.000000e+00, float %ret_vec.sroa.8.i.val
 call void @llvm.lifetime.end.p0(i64 4, ptr %ret_vec.sroa.8.i)
 %11 = getelementptr inbounds %class.float3, ptr %dst, i64 %5, i32 0
 store float 0.000000e+00, ptr %11, align 4
@@ -51,7 +51,7 @@ declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2

; Function Attrs: nounwind
declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2

-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "no-signed-zeros-fp-math"="true" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/PowerPC/scalar-min-max.ll b/llvm/test/CodeGen/PowerPC/scalar-min-max.ll
index 216d498..5f637e3 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-min-max.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-min-max.ll
@@ -1,36 +1,23 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names --enable-unsafe-fp-math \
-; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN: --enable-no-nans-fp-math \
-; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
-; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names --enable-unsafe-fp-math \
-; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN: --enable-no-nans-fp-math \
-; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -verify-machineinstrs \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=NO-FAST-P9
+; RUN: --check-prefix=P9
; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -verify-machineinstrs \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=NO-FAST-P8
+; RUN: --check-prefix=P8

define dso_local float @testfmax(float %a, float %b) local_unnamed_addr {
-; CHECK-LABEL: testfmax:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmaxdp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testfmax:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testfmax:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: bgtlr cr0
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f2
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testfmax:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmaxcdp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testfmax:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: bgtlr cr0
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f1, f2
+; P8-NEXT: blr
entry:
 %cmp = fcmp ogt float %a, %b
 %cond = select i1 %cmp, float %a, float %b
@@ -38,23 +25,18 @@ entry:
}

define dso_local double @testdmax(double %a, double %b) local_unnamed_addr {
-; CHECK-LABEL: testdmax:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmaxdp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testdmax:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testdmax:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: bgtlr cr0
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f2
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testdmax:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmaxcdp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testdmax:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: bgtlr cr0
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f1, f2
+; P8-NEXT: blr
entry:
 %cmp = fcmp ogt double %a, %b
 %cond = select i1 %cmp, double %a, double %b
@@ -62,23 +44,18 @@ entry:
}

define dso_local float @testfmin(float %a, float %b) local_unnamed_addr {
-; CHECK-LABEL: testfmin:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmindp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testfmin:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testfmin:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: bltlr cr0
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f2
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testfmin:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmincdp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testfmin:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: bltlr cr0
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f1, f2
+; P8-NEXT: blr
entry:
 %cmp = fcmp olt float %a, %b
 %cond = select i1 %cmp, float %a, float %b
@@ -86,23 +63,18 @@ entry:
}

define dso_local double @testdmin(double %a, double %b) local_unnamed_addr {
-; CHECK-LABEL: testdmin:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmindp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testdmin:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testdmin:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: bltlr cr0
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f2
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testdmin:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmincdp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testdmin:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: bltlr cr0
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f1, f2
+; P8-NEXT: blr
entry:
 %cmp = fcmp olt double %a, %b
 %cond = select i1 %cmp, double %a, double %b
@@ -110,86 +82,62 @@ entry:
}

define dso_local float @testfmax_fast(float %a, float %b) local_unnamed_addr {
-; CHECK-LABEL: testfmax_fast:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmaxdp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testfmax_fast:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testfmax_fast:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testfmax_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmaxdp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testfmax_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xsmaxdp f1, f1, f2
+; P8-NEXT: blr
entry:
 %cmp = fcmp nnan ninf ogt float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nnan nsz i1 %cmp, float %a, float %b
 ret float %cond
}

define dso_local double @testdmax_fast(double %a, double %b) local_unnamed_addr {
-; CHECK-LABEL: testdmax_fast:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmaxdp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testdmax_fast:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testdmax_fast:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f2, f1
-; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testdmax_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmaxdp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testdmax_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xsmaxdp f1, f1, f2
+; P8-NEXT: blr
entry:
 %cmp = fcmp nnan ninf ogt double %a, %b
- %cond = select i1 %cmp, double %a, double %b
+ %cond = select nnan nsz i1 %cmp, double %a, double %b
 ret double %cond
}

define dso_local float @testfmin_fast(float %a, float %b) local_unnamed_addr {
-; CHECK-LABEL: testfmin_fast:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmindp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testfmin_fast:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testfmin_fast:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testfmin_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmindp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testfmin_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xsmindp f1, f1, f2
+; P8-NEXT: blr
entry:
 %cmp = fcmp nnan ninf olt float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nnan nsz i1 %cmp, float %a, float %b
 ret float %cond
}

define dso_local double @testdmin_fast(double %a, double %b) local_unnamed_addr {
-; CHECK-LABEL: testdmin_fast:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmindp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testdmin_fast:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testdmin_fast:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testdmin_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmindp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testdmin_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xsmindp f1, f1, f2
+; P8-NEXT: blr
entry:
 %cmp = fcmp nnan ninf olt double %a, %b
- %cond = select i1 %cmp, double %a, double %b
+ %cond = select nnan nsz i1 %cmp, double %a, double %b
 ret double %cond
}
diff --git a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
index aaabd76e..fd0b494 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
@@ -20,18 +20,18 @@ define float @select_oeq_float(float %a, float %b, float %c, float %d) {
; FAST-P8-LABEL: select_oeq_float:
; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: xssubsp f1, f1, f2
-; FAST-P8-NEXT: fsel f1, f1, f3, f4
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
+; FAST-P8-NEXT: xssubsp f0, f1, f2
+; FAST-P8-NEXT: xsnegdp f1, f0
+; FAST-P8-NEXT: fsel f0, f0, f3, f4
+; FAST-P8-NEXT: fsel f1, f1, f0, f4
; FAST-P8-NEXT: blr
;
; FAST-P9-LABEL: select_oeq_float:
; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: xssubsp f1, f1, f2
-; FAST-P9-NEXT: fsel f1, f1, f3, f4
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
+; FAST-P9-NEXT: xssubsp f0, f1, f2
+; FAST-P9-NEXT: xsnegdp f1, f0
+; FAST-P9-NEXT: fsel f0, f0, f3, f4
+; FAST-P9-NEXT: fsel f1, f1, f0, f4
; FAST-P9-NEXT: blr
;
; NO-FAST-P8-LABEL: select_oeq_float:
@@ -59,6 +59,48 @@ entry:
 ret float %cond
}

+define float @select_oeq_float_nsz(float %a, float %b, float %c, float %d) {
+; FAST-P8-LABEL: select_oeq_float_nsz:
+; FAST-P8: # %bb.0: # %entry
+; FAST-P8-NEXT: xssubsp f0, f2, f1
+; FAST-P8-NEXT: xssubsp f1, f1, f2
+; FAST-P8-NEXT: fsel f1, f1, f3, f4
+; FAST-P8-NEXT: fsel f1, f0, f1, f4
+; FAST-P8-NEXT: blr
+;
+; FAST-P9-LABEL: select_oeq_float_nsz:
+; FAST-P9: # %bb.0: # %entry
+; FAST-P9-NEXT: xssubsp f0, f2, f1
+; FAST-P9-NEXT: xssubsp f1, f1, f2
+; FAST-P9-NEXT: fsel f1, f1, f3, f4
+; FAST-P9-NEXT: fsel f1, f0, f1, f4
+; FAST-P9-NEXT: blr
+;
+; NO-FAST-P8-LABEL: select_oeq_float_nsz:
+; NO-FAST-P8: # %bb.0: # %entry
+; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
+; NO-FAST-P8-NEXT: beq cr0, .LBB1_2
+; NO-FAST-P8-NEXT: # %bb.1: # %entry
+; NO-FAST-P8-NEXT: fmr f3, f4
+; NO-FAST-P8-NEXT: .LBB1_2: # %entry
+; NO-FAST-P8-NEXT: fmr f1, f3
+; NO-FAST-P8-NEXT: blr
+;
+; NO-FAST-P9-LABEL: select_oeq_float_nsz:
+; NO-FAST-P9: # %bb.0: # %entry
+; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
+; NO-FAST-P9-NEXT: beq cr0, .LBB1_2
+; NO-FAST-P9-NEXT: # %bb.1: # %entry
+; NO-FAST-P9-NEXT: fmr f3, f4
+; NO-FAST-P9-NEXT: .LBB1_2: # %entry
+; NO-FAST-P9-NEXT: fmr f1, f3
+; NO-FAST-P9-NEXT: blr
+entry:
+ %cmp = fcmp nsz oeq float %a, %b
+ %cond = select i1 %cmp, float %c, float %d
+ ret float %cond
+}
+
define double @select_oeq_double(double %a, double %b, double %c, double %d) {
; FAST-P8-LABEL: select_oeq_double:
; FAST-P8: # %bb.0: # %entry
@@ -79,20 +121,20 @@ define double @select_oeq_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8-LABEL: select_oeq_double:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: beq cr0, .LBB1_2
+; NO-FAST-P8-NEXT: beq cr0, .LBB2_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB1_2: # %entry
+; NO-FAST-P8-NEXT: .LBB2_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
; NO-FAST-P9-LABEL: select_oeq_double:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: beq cr0, .LBB1_2
+; NO-FAST-P9-NEXT: beq cr0, .LBB2_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB1_2: # %entry
+; NO-FAST-P9-NEXT: .LBB2_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -182,13 +224,57 @@ define float @select_one_float(float %a, float %b, float %c, float %d) {
; FAST-P8-LABEL: select_one_float:
; FAST-P8: # %bb.0: # %entry
+; FAST-P8-NEXT: xssubsp f0, f1, f2
+; FAST-P8-NEXT: xsnegdp f1, f0
+; FAST-P8-NEXT: fsel f0, f0, f4, f3
+; FAST-P8-NEXT: fsel f1, f1, f0, f3
+; FAST-P8-NEXT: blr
+;
+; FAST-P9-LABEL: select_one_float:
+; FAST-P9: # %bb.0: # %entry
+; FAST-P9-NEXT: xssubsp f0, f1, f2
+; FAST-P9-NEXT: xsnegdp f1, f0
+; FAST-P9-NEXT: fsel f0, f0, f4, f3
+; FAST-P9-NEXT: fsel f1, f1, f0, f3
+; FAST-P9-NEXT: blr
+;
+; NO-FAST-P8-LABEL: select_one_float:
+; NO-FAST-P8: # %bb.0: # %entry
+; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
+; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; NO-FAST-P8-NEXT: # %bb.1: # %entry
+; NO-FAST-P8-NEXT: fmr f3, f4
+; NO-FAST-P8-NEXT: .LBB5_2: # %entry
+; NO-FAST-P8-NEXT: fmr f1, f3
+; NO-FAST-P8-NEXT: blr
+;
+; NO-FAST-P9-LABEL: select_one_float:
+; NO-FAST-P9: # %bb.0: # %entry
+; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
+; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; NO-FAST-P9-NEXT: # %bb.1: # %entry
+; NO-FAST-P9-NEXT: fmr f3, f4
+; NO-FAST-P9-NEXT: .LBB5_2: # %entry
+; NO-FAST-P9-NEXT: fmr f1, f3
+; NO-FAST-P9-NEXT: blr
+entry:
+ %cmp = fcmp one float %a, %b
+ %cond = select i1 %cmp, float %c, float %d
+ ret float %cond
+}
+
+define float @select_one_float_nsz(float %a, float %b, float %c, float %d) {
+; FAST-P8-LABEL: select_one_float_nsz:
+; FAST-P8: # %bb.0: # %entry
; FAST-P8-NEXT: xssubsp f0, f2, f1
; FAST-P8-NEXT: xssubsp f1, f1, f2
; FAST-P8-NEXT: fsel f1, f1, f4, f3
; FAST-P8-NEXT: fsel f1, f0, f1, f3
; FAST-P8-NEXT: blr
;
-; FAST-P9-LABEL: select_one_float:
+; FAST-P9-LABEL: select_one_float_nsz:
; FAST-P9: # %bb.0: # %entry
; FAST-P9-NEXT: xssubsp f0, f2, f1
; FAST-P9-NEXT: xssubsp f1, f1, f2
@@ -196,29 +282,29 @@ define float @select_one_float(float %a, float %b, float %c, float %d) {
; FAST-P9-NEXT: fsel f1, f0, f1, f3
; FAST-P9-NEXT: blr
;
-; NO-FAST-P8-LABEL: select_one_float:
+; NO-FAST-P8-LABEL: select_one_float_nsz:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB4_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB6_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB4_2: # %entry
+; NO-FAST-P8-NEXT: .LBB6_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
-; NO-FAST-P9-LABEL: select_one_float:
+; NO-FAST-P9-LABEL: select_one_float_nsz:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB4_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB6_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB4_2: # %entry
+; NO-FAST-P9-NEXT: .LBB6_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
- %cmp = fcmp one float %a, %b
+ %cmp = fcmp nsz one float %a, %b
 %cond = select i1 %cmp, float %c, float %d
 ret float %cond
}
@@ -244,10 +330,10 @@ define double @select_one_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB7_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB5_2: # %entry
+; NO-FAST-P8-NEXT: .LBB7_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -255,10 +341,10 @@ define double @select_one_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB7_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB5_2: # %entry
+; NO-FAST-P9-NEXT: .LBB7_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -362,10 +448,10 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB8_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB10_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB8_2: # %entry
+; NO-FAST-P8-NEXT: .LBB10_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -373,10 +459,10 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB8_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB10_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB8_2: # %entry
+; NO-FAST-P9-NEXT: .LBB10_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -402,10 +488,10 @@ define double @select_oge_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB9_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB11_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB9_2: # %entry
+; NO-FAST-P8-NEXT: .LBB11_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -413,10 +499,10 @@ define double @select_oge_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB9_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB11_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB9_2: # %entry
+; NO-FAST-P9-NEXT: .LBB11_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -503,20 +589,20 @@ define float @select_olt_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P8-LABEL: select_olt_float:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: blt cr0, .LBB12_2
+; NO-FAST-P8-NEXT: blt cr0, .LBB14_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB12_2: # %entry
+; NO-FAST-P8-NEXT: .LBB14_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
; NO-FAST-P9-LABEL: select_olt_float:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: blt cr0, .LBB12_2
+; NO-FAST-P9-NEXT: blt cr0, .LBB14_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB12_2: # %entry
+; NO-FAST-P9-NEXT: .LBB14_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -541,20 +627,20 @@ define double @select_olt_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8-LABEL: select_olt_double:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: blt cr0, .LBB13_2
+; NO-FAST-P8-NEXT: blt cr0, .LBB15_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB13_2: # %entry
+; NO-FAST-P8-NEXT: .LBB15_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
; NO-FAST-P9-LABEL: select_olt_double:
; NO-FAST-P9: # %bb.0: # %entry
;
NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P9-NEXT: blt cr0, .LBB13_2 +; NO-FAST-P9-NEXT: blt cr0, .LBB15_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB13_2: # %entry +; NO-FAST-P9-NEXT: .LBB15_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -641,20 +727,20 @@ define float @select_ogt_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8-LABEL: select_ogt_float: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P8-NEXT: bgt cr0, .LBB16_2 +; NO-FAST-P8-NEXT: bgt cr0, .LBB18_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB16_2: # %entry +; NO-FAST-P8-NEXT: .LBB18_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_ogt_float: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P9-NEXT: bgt cr0, .LBB16_2 +; NO-FAST-P9-NEXT: bgt cr0, .LBB18_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB16_2: # %entry +; NO-FAST-P9-NEXT: .LBB18_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -679,20 +765,20 @@ define double @select_ogt_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8-LABEL: select_ogt_double: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: bgt cr0, .LBB17_2 +; NO-FAST-P8-NEXT: bgt cr0, .LBB19_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB17_2: # %entry +; NO-FAST-P8-NEXT: .LBB19_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_ogt_double: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P9-NEXT: bgt cr0, .LBB17_2 +; NO-FAST-P9-NEXT: bgt cr0, .LBB19_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB17_2: # %entry +; NO-FAST-P9-NEXT: .LBB19_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -780,10 +866,10 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB20_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB22_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB20_2: # %entry +; NO-FAST-P8-NEXT: .LBB22_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -791,10 +877,10 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB20_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB22_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB20_2: # %entry +; NO-FAST-P9-NEXT: .LBB22_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -820,10 +906,10 @@ define double @select_ole_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB21_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB23_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB21_2: # %entry +; NO-FAST-P8-NEXT: .LBB23_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; 
NO-FAST-P8-NEXT: blr ; @@ -831,10 +917,10 @@ define double @select_ole_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB21_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB23_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB21_2: # %entry +; NO-FAST-P9-NEXT: .LBB23_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -926,13 +1012,13 @@ define double @onecmp1(double %a, double %y, double %z) { ; NO-FAST-P8-NEXT: vspltisw v2, 1 ; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f0 -; NO-FAST-P8-NEXT: bc 12, lt, .LBB24_3 +; NO-FAST-P8-NEXT: bc 12, lt, .LBB26_3 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f1 -; NO-FAST-P8-NEXT: bc 12, un, .LBB24_3 +; NO-FAST-P8-NEXT: bc 12, un, .LBB26_3 ; NO-FAST-P8-NEXT: # %bb.2: # %entry ; NO-FAST-P8-NEXT: fmr f3, f2 -; NO-FAST-P8-NEXT: .LBB24_3: # %entry +; NO-FAST-P8-NEXT: .LBB26_3: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -941,13 +1027,13 @@ define double @onecmp1(double %a, double %y, double %z) { ; NO-FAST-P9-NEXT: vspltisw v2, 1 ; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f0 -; NO-FAST-P9-NEXT: bc 12, lt, .LBB24_3 +; NO-FAST-P9-NEXT: bc 12, lt, .LBB26_3 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f1 -; NO-FAST-P9-NEXT: bc 12, un, .LBB24_3 +; NO-FAST-P9-NEXT: bc 12, un, .LBB26_3 ; NO-FAST-P9-NEXT: # %bb.2: # %entry ; NO-FAST-P9-NEXT: fmr f3, f2 -; NO-FAST-P9-NEXT: .LBB24_3: # %entry +; NO-FAST-P9-NEXT: .LBB26_3: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -978,10 +1064,10 @@ define double @onecmp2(double %a, double %y, double %z) { ; NO-FAST-P8-NEXT: vspltisw v2, 1 ; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P8-NEXT: bgt cr0, .LBB25_2 +; NO-FAST-P8-NEXT: bgt cr0, .LBB27_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f2, f3 -; NO-FAST-P8-NEXT: .LBB25_2: # %entry +; NO-FAST-P8-NEXT: .LBB27_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f2 ; NO-FAST-P8-NEXT: blr ; @@ -990,10 +1076,10 @@ define double @onecmp2(double %a, double %y, double %z) { ; NO-FAST-P9-NEXT: vspltisw v2, 1 ; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P9-NEXT: bgt cr0, .LBB25_2 +; NO-FAST-P9-NEXT: bgt cr0, .LBB27_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f2, f3 -; NO-FAST-P9-NEXT: .LBB25_2: # %entry +; NO-FAST-P9-NEXT: .LBB27_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f2 ; NO-FAST-P9-NEXT: blr entry: @@ -1028,10 +1114,10 @@ define double @onecmp3(double %a, double %y, double %z) { ; NO-FAST-P8-NEXT: vspltisw v2, 1 ; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P8-NEXT: beq cr0, .LBB26_2 +; NO-FAST-P8-NEXT: beq cr0, .LBB28_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f2, f3 -; NO-FAST-P8-NEXT: .LBB26_2: # %entry +; NO-FAST-P8-NEXT: .LBB28_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f2 ; NO-FAST-P8-NEXT: blr ; @@ -1040,10 +1126,10 @@ define double @onecmp3(double %a, double %y, double %z) { ; NO-FAST-P9-NEXT: vspltisw v2, 1 ; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P9-NEXT: beq cr0, .LBB26_2 +; NO-FAST-P9-NEXT: beq cr0, .LBB28_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f2, f3 -; 
NO-FAST-P9-NEXT: .LBB26_2: # %entry +; NO-FAST-P9-NEXT: .LBB28_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f2 ; NO-FAST-P9-NEXT: blr entry: diff --git a/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll b/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll index 55b0d1f..2a46a59 100644 --- a/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll +++ b/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll @@ -155,3 +155,109 @@ define i1 @test9(i64 %x) { %b = icmp eq i64 %a, u0x08000000 ret i1 %b } + +; Make sure the and constant doesn't get converted to an opaque constant by +; ConstantHoisting. If it's an opaque constant, we'll have addi -16 and addi 15. +define i64 @test10(i64 %0) #0 { +; RV32-LABEL: test10: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: andi a0, a0, -16 +; RV32-NEXT: snez a0, a0 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: ret +; +; RV64-LABEL: test10: +; RV64: # %bb.0: # %entry +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: sraiw a0, a0, 4 +; RV64-NEXT: snez a0, a0 +; RV64-NEXT: ret +entry: + %1 = add nuw nsw i64 %0, u0xffffffff + %2 = and i64 %1, u0xfffffff0 + %3 = icmp ne i64 %2, 0 + %4 = zext i1 %3 to i64 + ret i64 %4 +} + +; Make sure the and constant doesn't get converted to an opaque constant by +; ConstantHoisting. If it's an opaque constant, we'll have addi -16 and addi 15. +define i64 @test11(i64 %0) #0 { +; RV32-LABEL: test11: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: srai a0, a0, 4 +; RV32-NEXT: addi a0, a0, 1621 +; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: ret +; +; RV64-LABEL: test11: +; RV64: # %bb.0: # %entry +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: sraiw a0, a0, 4 +; RV64-NEXT: addi a0, a0, 1621 +; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: ret +entry: + %1 = add nuw nsw i64 %0, u0xffffffff + %2 = and i64 %1, u0xfffffff0 + %3 = icmp eq i64 %2, u0xffff9ab0 + %4 = zext i1 %3 to i64 + ret i64 %4 +} + +; Make sure the and constant doesn't get converted to an opaque constant by +; ConstantHoisting. If it's an opaque constant we'll end up with constant +; materialization sequences on RV64. +define i64 @test12(i64 %0) #0 { +; RV32-LABEL: test12: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi a0, a0, -3 +; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: ret +; +; RV64-LABEL: test12: +; RV64: # %bb.0: # %entry +; RV64-NEXT: addiw a0, a0, -16 +; RV64-NEXT: addi a0, a0, 13 +; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: ret +entry: + %1 = add nuw nsw i64 %0, u0xfffffff0 + %2 = and i64 %1, u0xffffffff + %3 = icmp eq i64 %2, u0xfffffff3 + %4 = zext i1 %3 to i64 + ret i64 %4 +} + +; Make sure the and constant doesn't get converted to an opaque constant by +; ConstantHoisting. 
+define i64 @test13(i64 %0) #0 { +; RV32-LABEL: test13: +; RV32: # %bb.0: # %entry +; RV32-NEXT: lui a1, 524288 +; RV32-NEXT: addi a1, a1, 15 +; RV32-NEXT: add a0, a0, a1 +; RV32-NEXT: srli a0, a0, 31 +; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: ret +; +; RV64-LABEL: test13: +; RV64: # %bb.0: # %entry +; RV64-NEXT: lui a1, 524288 +; RV64-NEXT: addi a1, a1, -15 +; RV64-NEXT: sub a0, a0, a1 +; RV64-NEXT: sraiw a0, a0, 31 +; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: ret +entry: + %1 = add nuw nsw i64 %0, u0x8000000f + %2 = and i64 %1, u0x80000000 + %3 = icmp eq i64 %2, 0 + %4 = zext i1 %3 to i64 + ret i64 %4 +} diff --git a/llvm/test/CodeGen/RISCV/select-bare.ll b/llvm/test/CodeGen/RISCV/select-bare.ll index 796121a..44028a7 100644 --- a/llvm/test/CodeGen/RISCV/select-bare.ll +++ b/llvm/test/CodeGen/RISCV/select-bare.ll @@ -26,8 +26,8 @@ define i32 @bare_select(i1 %a, i32 %b, i32 %c) nounwind { ; RV32IXQCI-LABEL: bare_select: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %1 = select i1 %a, i32 %b, i32 %c ret i32 %1 @@ -53,8 +53,8 @@ define float @bare_select_float(i1 %a, float %b, float %c) nounwind { ; RV32IXQCI-LABEL: bare_select_float: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %1 = select i1 %a, float %b, float %c ret float %1 diff --git a/llvm/test/CodeGen/RISCV/select-cc.ll b/llvm/test/CodeGen/RISCV/select-cc.ll index 14055df..b57f625 100644 --- a/llvm/test/CodeGen/RISCV/select-cc.ll +++ b/llvm/test/CodeGen/RISCV/select-cc.ll @@ -87,40 +87,40 @@ define signext i32 @foo(i32 signext %a, ptr %b) nounwind { ; ; RV32IXQCI-LABEL: foo: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: lw a5, 0(a1) ; RV32IXQCI-NEXT: lw a2, 0(a1) ; RV32IXQCI-NEXT: lw a4, 0(a1) ; RV32IXQCI-NEXT: lw t5, 0(a1) ; RV32IXQCI-NEXT: lw t4, 0(a1) +; RV32IXQCI-NEXT: lw t3, 0(a1) ; RV32IXQCI-NEXT: lw t2, 0(a1) -; RV32IXQCI-NEXT: lw t1, 0(a1) ; RV32IXQCI-NEXT: lw t0, 0(a1) ; RV32IXQCI-NEXT: lw a7, 0(a1) ; RV32IXQCI-NEXT: lw a6, 0(a1) -; RV32IXQCI-NEXT: lw t3, 0(a1) ; RV32IXQCI-NEXT: lw a3, 0(a1) -; RV32IXQCI-NEXT: bltz t3, .LBB0_2 +; RV32IXQCI-NEXT: lw t1, 0(a1) +; RV32IXQCI-NEXT: lw a5, 0(a1) +; RV32IXQCI-NEXT: bltz t1, .LBB0_2 ; RV32IXQCI-NEXT: # %bb.1: -; RV32IXQCI-NEXT: li t6, 0 -; RV32IXQCI-NEXT: qc.mveq a5, a0, a5, a0 -; RV32IXQCI-NEXT: qc.mvne a2, a5, a2, a5 -; RV32IXQCI-NEXT: qc.mvltu a4, a4, a2, a2 -; RV32IXQCI-NEXT: qc.mvgeu t5, a4, t5, a4 -; RV32IXQCI-NEXT: qc.mvltu t4, t5, t4, t5 -; RV32IXQCI-NEXT: qc.mvgeu t2, t2, t4, t4 -; RV32IXQCI-NEXT: qc.mvlt t1, t1, t2, t2 -; RV32IXQCI-NEXT: qc.mvge t0, t1, t0, t1 -; RV32IXQCI-NEXT: qc.mvlt a7, t0, a7, t0 -; RV32IXQCI-NEXT: qc.mvge a6, a6, a7, a7 -; RV32IXQCI-NEXT: mv a3, t3 -; RV32IXQCI-NEXT: qc.mvge a3, t6, t3, a6 +; RV32IXQCI-NEXT: li a5, 0 +; RV32IXQCI-NEXT: qc.mveq a2, a0, a2, a0 +; RV32IXQCI-NEXT: qc.mvne a4, a2, a4, a2 +; RV32IXQCI-NEXT: qc.mvltu t5, t5, a4, a4 +; RV32IXQCI-NEXT: qc.mvgeu t4, t5, t4, t5 +; RV32IXQCI-NEXT: qc.mvltu t3, t4, t3, t4 +; RV32IXQCI-NEXT: qc.mvgeu t2, t2, t3, t3 +; RV32IXQCI-NEXT: qc.mvlt t0, t0, t2, t2 +; RV32IXQCI-NEXT: qc.mvge a7, t0, a7, t0 +; RV32IXQCI-NEXT: qc.mvlt a6, a7, a6, a7 +; RV32IXQCI-NEXT: qc.mvge a3, a3, a6, a6 +; RV32IXQCI-NEXT: qc.mvlt a3, a5, t1, t1 +; RV32IXQCI-NEXT: mv a5, 
a3 ; RV32IXQCI-NEXT: .LBB0_2: ; RV32IXQCI-NEXT: lw a2, 0(a1) ; RV32IXQCI-NEXT: lw a0, 0(a1) ; RV32IXQCI-NEXT: li a1, 1024 -; RV32IXQCI-NEXT: qc.mvlt a2, a1, a2, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a1, a2, a5 ; RV32IXQCI-NEXT: li a1, 2046 -; RV32IXQCI-NEXT: qc.mvltu a0, a1, t3, a2 +; RV32IXQCI-NEXT: qc.mvltu a0, a1, t1, a2 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: foo: @@ -417,8 +417,8 @@ define i32 @select_sge_int16min(i32 signext %x, i32 signext %y, i32 signext %z) ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: lui a3, 1048560 ; RV32IXQCI-NEXT: addi a3, a3, -1 -; RV32IXQCI-NEXT: qc.mvlt a2, a3, a0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mvge a1, a3, a0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: select_sge_int16min: @@ -471,10 +471,10 @@ define i64 @select_sge_int32min(i64 %x, i64 %y, i64 %z) { ; RV32IXQCI-NEXT: srli a0, a1, 31 ; RV32IXQCI-NEXT: xori a0, a0, 1 ; RV32IXQCI-NEXT: qc.mveqi a0, a1, -1, a6 -; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32IXQCI-NEXT: qc.mvnei a5, a0, 0, a3 -; RV32IXQCI-NEXT: mv a0, a4 -; RV32IXQCI-NEXT: mv a1, a5 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32IXQCI-NEXT: qc.mveqi a3, a0, 0, a5 +; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: mv a1, a3 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: select_sge_int32min: diff --git a/llvm/test/CodeGen/RISCV/select-cond.ll b/llvm/test/CodeGen/RISCV/select-cond.ll index b88fe9a..3ca0f46 100644 --- a/llvm/test/CodeGen/RISCV/select-cond.ll +++ b/llvm/test/CodeGen/RISCV/select-cond.ll @@ -35,8 +35,8 @@ define signext i32 @select_i32_trunc(i32 signext %cond, i32 signext %x, i32 sign ; RV32-XQCICM-LABEL: select_i32_trunc: ; RV32-XQCICM: # %bb.0: ; RV32-XQCICM-NEXT: andi a0, a0, 1 -; RV32-XQCICM-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32-XQCICM-NEXT: mv a0, a2 +; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32-XQCICM-NEXT: mv a0, a1 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_trunc: @@ -48,8 +48,8 @@ define signext i32 @select_i32_trunc(i32 signext %cond, i32 signext %x, i32 sign ; RV32IXQCI-LABEL: select_i32_trunc: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_trunc: @@ -93,8 +93,8 @@ define signext i32 @select_i32_param(i1 signext %cond, i32 signext %x, i32 signe ; RV32-XQCICM-LABEL: select_i32_param: ; RV32-XQCICM: # %bb.0: ; RV32-XQCICM-NEXT: andi a0, a0, 1 -; RV32-XQCICM-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32-XQCICM-NEXT: mv a0, a2 +; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32-XQCICM-NEXT: mv a0, a1 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_param: @@ -106,8 +106,8 @@ define signext i32 @select_i32_param(i1 signext %cond, i32 signext %x, i32 signe ; RV32IXQCI-LABEL: select_i32_param: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_param: @@ -148,8 +148,8 @@ define signext i32 @select_i32_eq(i32 signext %a, i32 signext %b, i32 signext %x ; ; RV32-XQCICM-LABEL: select_i32_eq: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mveq a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvne a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_eq: @@ -163,8 +163,8 @@ define signext i32 @select_i32_eq(i32 signext %a, 
i32 signext %b, i32 signext %x ; ; RV32IXQCI-LABEL: select_i32_eq: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mveq a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvne a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_eq: @@ -205,8 +205,8 @@ define signext i32 @select_i32_ne(i32 signext %a, i32 signext %b, i32 signext %x ; ; RV32-XQCICM-LABEL: select_i32_ne: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvne a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mveq a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_ne: @@ -220,8 +220,8 @@ define signext i32 @select_i32_ne(i32 signext %a, i32 signext %b, i32 signext %x ; ; RV32IXQCI-LABEL: select_i32_ne: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvne a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveq a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_ne: @@ -262,8 +262,8 @@ define signext i32 @select_i32_ugt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_ugt: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvltu a3, a1, a0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvgeu a2, a1, a0, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_ugt: @@ -277,8 +277,8 @@ define signext i32 @select_i32_ugt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_ugt: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvltu a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_ugt: @@ -319,8 +319,8 @@ define signext i32 @select_i32_uge(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_uge: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvgeu a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvltu a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_uge: @@ -334,8 +334,8 @@ define signext i32 @select_i32_uge(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_uge: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvgeu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_uge: @@ -376,8 +376,8 @@ define signext i32 @select_i32_ult(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_ult: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_ult: @@ -391,8 +391,8 @@ define signext i32 @select_i32_ult(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_ult: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_ult: @@ -433,8 +433,8 @@ define signext i32 @select_i32_ule(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_ule: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvgeu a3, a1, a0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvltu a2, a1, a0, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; 
RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_ule: @@ -448,8 +448,8 @@ define signext i32 @select_i32_ule(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_ule: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvgeu a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltu a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_ule: @@ -490,8 +490,8 @@ define signext i32 @select_i32_sgt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_sgt: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvlt a3, a1, a0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvge a2, a1, a0, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_sgt: @@ -505,8 +505,8 @@ define signext i32 @select_i32_sgt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_sgt: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvlt a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvge a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_sgt: @@ -547,8 +547,8 @@ define signext i32 @select_i32_sge(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_sge: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvge a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvlt a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_sge: @@ -562,8 +562,8 @@ define signext i32 @select_i32_sge(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_sge: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvge a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_sge: @@ -604,8 +604,8 @@ define signext i32 @select_i32_slt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_slt: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvlt a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvge a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_slt: @@ -619,8 +619,8 @@ define signext i32 @select_i32_slt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_slt: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvlt a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvge a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_slt: @@ -661,8 +661,8 @@ define signext i32 @select_i32_sle(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_sle: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvge a3, a1, a0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvlt a2, a1, a0, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_sle: @@ -676,8 +676,8 @@ define signext i32 @select_i32_sle(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_sle: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvge a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_sle: @@ -723,11 +723,11 @@ define i64 @select_i64_trunc(i64 %cond, i64 %x, i64 %y) nounwind { ; ; RV32-XQCICM-LABEL: select_i64_trunc: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: mv a1, a5 +; 
RV32-XQCICM-NEXT: mv a1, a3 ; RV32-XQCICM-NEXT: andi a0, a0, 1 -; RV32-XQCICM-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32-XQCICM-NEXT: qc.mvnei a1, a0, 0, a3 -; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a5 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_trunc: @@ -740,11 +740,11 @@ define i64 @select_i64_trunc(i64 %cond, i64 %x, i64 %y) nounwind { ; ; RV32IXQCI-LABEL: select_i64_trunc: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: mv a1, a5 +; RV32IXQCI-NEXT: mv a1, a3 ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a3 -; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a5 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_trunc: @@ -792,10 +792,10 @@ define i64 @select_i64_param(i1 %cond, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-LABEL: select_i64_param: ; RV32-XQCICM: # %bb.0: ; RV32-XQCICM-NEXT: andi a0, a0, 1 -; RV32-XQCICM-NEXT: qc.mvnei a3, a0, 0, a1 -; RV32-XQCICM-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 -; RV32-XQCICM-NEXT: mv a1, a4 +; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a3 +; RV32-XQCICM-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32-XQCICM-NEXT: mv a0, a1 +; RV32-XQCICM-NEXT: mv a1, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_param: @@ -810,10 +810,10 @@ define i64 @select_i64_param(i1 %cond, i64 %x, i64 %y) nounwind { ; RV32IXQCI-LABEL: select_i64_param: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 0, a1 -; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32IXQCI-NEXT: mv a0, a3 -; RV32IXQCI-NEXT: mv a1, a4 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32IXQCI-NEXT: mv a0, a1 +; RV32IXQCI-NEXT: mv a1, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_param: @@ -866,10 +866,10 @@ define i64 @select_i64_eq(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: xor a1, a1, a3 ; RV32-XQCICM-NEXT: xor a0, a0, a2 ; RV32-XQCICM-NEXT: or a0, a0, a1 -; RV32-XQCICM-NEXT: qc.mveqi a6, a0, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a0, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a0, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a0, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_eq: @@ -887,10 +887,10 @@ define i64 @select_i64_eq(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: xor a1, a1, a3 ; RV32IXQCI-NEXT: xor a0, a0, a2 ; RV32IXQCI-NEXT: or a0, a0, a1 -; RV32IXQCI-NEXT: qc.mveqi a6, a0, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a0, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a0, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_eq: @@ -943,10 +943,10 @@ define i64 @select_i64_ne(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: xor a1, a1, a3 ; RV32-XQCICM-NEXT: xor a0, a0, a2 ; RV32-XQCICM-NEXT: or a0, a0, a1 -; RV32-XQCICM-NEXT: qc.mvnei a6, a0, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a0, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a0, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a0, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; 
RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_ne: @@ -964,10 +964,10 @@ define i64 @select_i64_ne(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: xor a1, a1, a3 ; RV32IXQCI-NEXT: xor a0, a0, a2 ; RV32IXQCI-NEXT: or a0, a0, a1 -; RV32IXQCI-NEXT: qc.mvnei a6, a0, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a0, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a0, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a0, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_ne: @@ -1025,10 +1025,10 @@ define i64 @select_i64_ugt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a2, a0 ; RV32-XQCICM-NEXT: sltu a2, a3, a1 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_ugt: @@ -1050,10 +1050,10 @@ define i64 @select_i64_ugt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a2, a0 ; RV32IXQCI-NEXT: sltu a2, a3, a1 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_ugt: @@ -1111,10 +1111,10 @@ define i64 @select_i64_uge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a0, a2 ; RV32-XQCICM-NEXT: sltu a2, a1, a3 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_uge: @@ -1136,10 +1136,10 @@ define i64 @select_i64_uge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a0, a2 ; RV32IXQCI-NEXT: sltu a2, a1, a3 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_uge: @@ -1197,10 +1197,10 @@ define i64 @select_i64_ult(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a0, a2 ; RV32-XQCICM-NEXT: sltu a2, a1, a3 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_ult: @@ -1222,10 +1222,10 @@ define i64 @select_i64_ult(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a0, a2 ; RV32IXQCI-NEXT: sltu a2, a1, a3 ; 
RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_ult: @@ -1283,10 +1283,10 @@ define i64 @select_i64_ule(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a2, a0 ; RV32-XQCICM-NEXT: sltu a2, a3, a1 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_ule: @@ -1308,10 +1308,10 @@ define i64 @select_i64_ule(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a2, a0 ; RV32IXQCI-NEXT: sltu a2, a3, a1 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_ule: @@ -1369,10 +1369,10 @@ define i64 @select_i64_sgt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a2, a0 ; RV32-XQCICM-NEXT: slt a2, a3, a1 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_sgt: @@ -1394,10 +1394,10 @@ define i64 @select_i64_sgt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a2, a0 ; RV32IXQCI-NEXT: slt a2, a3, a1 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_sgt: @@ -1455,10 +1455,10 @@ define i64 @select_i64_sge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a0, a2 ; RV32-XQCICM-NEXT: slt a2, a1, a3 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_sge: @@ -1480,10 +1480,10 @@ define i64 @select_i64_sge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a0, a2 ; RV32IXQCI-NEXT: slt a2, a1, a3 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, 
a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_sge: @@ -1541,10 +1541,10 @@ define i64 @select_i64_slt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a0, a2 ; RV32-XQCICM-NEXT: slt a2, a1, a3 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_slt: @@ -1566,10 +1566,10 @@ define i64 @select_i64_slt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a0, a2 ; RV32IXQCI-NEXT: slt a2, a1, a3 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_slt: @@ -1627,10 +1627,10 @@ define i64 @select_i64_sle(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a2, a0 ; RV32-XQCICM-NEXT: slt a2, a3, a1 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_sle: @@ -1652,10 +1652,10 @@ define i64 @select_i64_sle(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a2, a0 ; RV32IXQCI-NEXT: slt a2, a3, a1 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_sle: diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll index 19fade6..8273c65 100644 --- a/llvm/test/CodeGen/RISCV/select.ll +++ b/llvm/test/CodeGen/RISCV/select.ll @@ -1153,8 +1153,8 @@ define i32 @select_sub_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_sub_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: sub a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = sub i32 %a, %b @@ -1301,9 +1301,9 @@ define i32 @select_sub_4(i1 zeroext %cond, i32 %x) { ; ; RV32IXQCI-LABEL: select_sub_4: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a1, a1, -128 -; RV32IXQCI-NEXT: li a2, 128 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2 +; RV32IXQCI-NEXT: addi a2, a1, -128 +; RV32IXQCI-NEXT: li a1, 128 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 ; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %add = sub i32 %x, 128 @@ -1348,8 +1348,8 @@ define i32 @select_and_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_and_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: and a1, a1, a2 -; 
RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = and i32 %a, %b @@ -1493,8 +1493,8 @@ define i32 @select_udiv_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_udiv_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: divu a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = udiv i32 %a, %b @@ -1682,8 +1682,8 @@ define i32 @select_shl_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_shl_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: sll a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = shl i32 %a, %b @@ -1798,8 +1798,8 @@ define i32 @select_ashr_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_ashr_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: sra a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = ashr i32 %a, %b @@ -1914,8 +1914,8 @@ define i32 @select_lshr_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_lshr_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: srl a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = lshr i32 %a, %b @@ -2371,9 +2371,9 @@ define i32 @select_cst5(i1 zeroext %cond) { ; RV32IXQCI-LABEL: select_cst5: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: lui a1, 1 -; RV32IXQCI-NEXT: addi a1, a1, -2047 -; RV32IXQCI-NEXT: li a2, 2047 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2 +; RV32IXQCI-NEXT: addi a2, a1, -2047 +; RV32IXQCI-NEXT: li a1, 2047 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 ; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %ret = select i1 %cond, i32 2047, i32 2049 @@ -2870,8 +2870,8 @@ define void @select_redundant_czero_eqz1(ptr %0, ptr %1) { ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: lui a2, %hi(select_redundant_czero_eqz_data) ; RV32IXQCI-NEXT: addi a2, a2, %lo(select_redundant_czero_eqz_data) -; RV32IXQCI-NEXT: qc.mveqi a0, a0, 0, a2 -; RV32IXQCI-NEXT: sw a0, 0(a1) +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a0 +; RV32IXQCI-NEXT: sw a2, 0(a1) ; RV32IXQCI-NEXT: ret entry: %3 = icmp eq ptr %0, null diff --git a/llvm/test/CodeGen/RISCV/xqcicm.ll b/llvm/test/CodeGen/RISCV/xqcicm.ll index 1741be7..fb48301 100644 --- a/llvm/test/CodeGen/RISCV/xqcicm.ll +++ b/llvm/test/CodeGen/RISCV/xqcicm.ll @@ -23,15 +23,15 @@ define i32 @select_example(i32 %cond, i32 %x, i32 %y) { ; RV32IXQCICM-LABEL: select_example: ; RV32IXQCICM: # %bb.0: # %entry ; RV32IXQCICM-NEXT: andi a0, a0, 1 -; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCICM-NEXT: mv a0, a2 +; RV32IXQCICM-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCICM-NEXT: mv a0, a1 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_example: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %cond_trunc = trunc i32 %cond to i1 @@ -52,14 +52,14 @@ define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; 
RV32IXQCICM-LABEL: select_cc_example_eq: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 %a, 11 @@ -80,14 +80,14 @@ define i32 @select_cc_example_eq1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 11, %a @@ -108,14 +108,14 @@ define i32 @select_cc_example_ne(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 %a, 11 @@ -136,14 +136,14 @@ define i32 @select_cc_example_ne1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 11, %a @@ -164,14 +164,14 @@ define i32 @select_cc_example_slt(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_slt: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_slt: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp slt i32 %a, 11 @@ -192,14 +192,14 @@ define i32 @select_cc_example_slt1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_slt1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_slt1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv 
a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp slt i32 11, %a @@ -220,14 +220,14 @@ define i32 @select_cc_example_sle(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sle: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sle: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sle i32 %a, 11 @@ -248,14 +248,14 @@ define i32 @select_cc_example_sle1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sle1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sle1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sle i32 11, %a @@ -276,14 +276,14 @@ define i32 @select_cc_example_sgt(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sgt: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sgt: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sgt i32 %a, 11 @@ -304,14 +304,14 @@ define i32 @select_cc_example_sgt1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sgt1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sgt1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sgt i32 11, %a @@ -332,14 +332,14 @@ define i32 @select_cc_example_sge(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sge: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sge: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sge i32 %a, 11 @@ -360,14 +360,14 @@ define i32 @select_cc_example_sge1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sge1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: 
select_cc_example_sge1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvlti a3, a0, 12, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgei a2, a0, 12, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp sge i32 11, %a
@@ -388,14 +388,14 @@ define i32 @select_cc_example_ule(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_ule:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 12, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 12, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_ule:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvltui a3, a0, 12, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 12, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp ule i32 %a, 11
@@ -416,14 +416,14 @@ define i32 @select_cc_example_ule1(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_ule1:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_ule1:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltui a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp ule i32 11, %a
@@ -444,14 +444,14 @@ define i32 @select_cc_example_ugt(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_ugt:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 12, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 12, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_ugt:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 12, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltui a2, a0, 12, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp ugt i32 %a, 11
@@ -472,14 +472,14 @@ define i32 @select_cc_example_ugt1(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_ugt1:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_ugt1:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvltui a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp ugt i32 11, %a
@@ -500,14 +500,14 @@ define i32 @select_cc_example_ult(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_ult:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_ult:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvltui a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp ult i32 %a, 11
@@ -528,14 +528,14 @@ define i32 @select_cc_example_ult1(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_ult1:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 12, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 12, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_ult1:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 12, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltui a2, a0, 12, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp ult i32 11, %a
@@ -556,14 +556,14 @@ define i32 @select_cc_example_uge(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_uge:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_uge:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltui a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp uge i32 %a, 11
@@ -584,14 +584,14 @@ define i32 @select_cc_example_uge1(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_uge1:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 12, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 12, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_uge1:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvltui a3, a0, 12, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 12, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp uge i32 11, %a
@@ -611,14 +611,14 @@ define i32 @select_cc_example_eq_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_eq_reg:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mveq a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvne a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_eq_reg:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mveq a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvne a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp eq i32 %a, %b
@@ -638,14 +638,14 @@ define i32 @select_cc_example_ne_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_ne_reg:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvne a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mveq a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_ne_reg:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvne a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mveq a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp ne i32 %a, %b
@@ -665,14 +665,14 @@ define i32 @select_cc_example_slt_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_slt_reg:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvlt a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvge a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_slt_reg:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvlt a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvge a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp slt i32 %a, %b
@@ -692,14 +692,14 @@ define i32 @select_cc_example_sge_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_sge_reg:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvge a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvlt a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_sge_reg:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvge a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvlt a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp sge i32 %a, %b
@@ -719,14 +719,14 @@ define i32 @select_cc_example_sgt_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_sgt_reg:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvlt a3, a1, a0, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvge a2, a1, a0, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_sgt_reg:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvlt a3, a1, a0, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvge a2, a1, a0, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp sgt i32 %a, %b
@@ -746,14 +746,14 @@ define i32 @select_cc_example_sle_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_sle_reg:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvge a3, a1, a0, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvlt a2, a1, a0, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_sle_reg:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvge a3, a1, a0, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvlt a2, a1, a0, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp sle i32 %a, %b
@@ -773,14 +773,14 @@ define i32 @select_cc_example_ugt_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_ugt_reg:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvltu a3, a1, a0, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeu a2, a1, a0, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_ugt_reg:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvltu a3, a1, a0, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeu a2, a1, a0, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp ugt i32 %a, %b
@@ -800,14 +800,14 @@ define i32 @select_cc_example_ult_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_ult_reg:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvltu a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeu a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_ult_reg:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp ult i32 %a, %b
@@ -827,14 +827,14 @@ define i32 @select_cc_example_uge_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_uge_reg:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgeu a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvltu a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_uge_reg:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgeu a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltu a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp uge i32 %a, %b
@@ -854,14 +854,14 @@ define i32 @select_cc_example_ule_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_ule_reg:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgeu a3, a1, a0, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvltu a2, a1, a0, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_ule_reg:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgeu a3, a1, a0, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltu a2, a1, a0, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp ule i32 %a, %b
@@ -883,18 +883,263 @@ define i32 @select_cc_example_ule_neg(i32 %a, i32 %b, i32 %x, i32 %y) {
 ; RV32IXQCICM-LABEL: select_cc_example_ule_neg:
 ; RV32IXQCICM: # %bb.0: # %entry
 ; RV32IXQCICM-NEXT: li a1, -10
-; RV32IXQCICM-NEXT: qc.mvltu a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeu a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_ule_neg:
 ; RV32IXQCI: # %bb.0: # %entry
 ; RV32IXQCI-NEXT: li a1, -10
-; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp ule i32 %a, -11
   %sel = select i1 %cmp, i32 %x, i32 %y
   ret i32 %sel
 }
+
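+; Note (added comment, not part of the upstream test): in the *_mv tests below
+; the value selected when the condition holds is already in a0, so the inverted
+; conditional move can write a0 directly and the trailing mv of the earlier
+; tests is expected to disappear.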
+define i32 @select_cc_example_eq_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_eq_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: beq a2, a1, .LBB32_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB32_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_eq_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvne a0, a2, a1, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_eq_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvne a0, a2, a1, a3
+; RV32IXQCI-NEXT: ret
+entry:
+  %cmp = icmp eq i32 %x, %b
+  %sel = select i1 %cmp, i32 %a, i32 %y
+  ret i32 %sel
+}
+
+define i32 @select_cc_example_lt_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_lt_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: blt a2, a1, .LBB33_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB33_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_lt_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvge a0, a2, a1, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_lt_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvge a0, a2, a1, a3
+; RV32IXQCI-NEXT: ret
+entry:
+  %cmp = icmp slt i32 %x, %b
+  %sel = select i1 %cmp, i32 %a, i32 %y
+  ret i32 %sel
+}
+
+define i32 @select_cc_example_ge_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_ge_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: bge a2, a1, .LBB34_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB34_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_ge_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvlt a0, a2, a1, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_ge_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvlt a0, a2, a1, a3
+; RV32IXQCI-NEXT: ret
+entry:
+  %cmp = icmp sge i32 %x, %b
+  %sel = select i1 %cmp, i32 %a, i32 %y
+  ret i32 %sel
+}
+
+define i32 @select_cc_example_ult_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_ult_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: bltu a2, a1, .LBB35_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB35_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_ult_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvgeu a0, a2, a1, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_ult_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvgeu a0, a2, a1, a3
+; RV32IXQCI-NEXT: ret
+entry:
+  %cmp = icmp ult i32 %x, %b
+  %sel = select i1 %cmp, i32 %a, i32 %y
+  ret i32 %sel
+}
+
+define i32 @select_cc_example_uge_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_uge_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: bgeu a2, a1, .LBB36_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB36_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_uge_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvltu a0, a2, a1, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_uge_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvltu a0, a2, a1, a3
+; RV32IXQCI-NEXT: ret
+entry:
+  %cmp = icmp uge i32 %x, %b
+  %sel = select i1 %cmp, i32 %a, i32 %y
+  ret i32 %sel
+}
+
+define i32 @select_cc_example_eq_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_eq_imm_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: li a1, 11
+; RV32I-NEXT: beq a2, a1, .LBB37_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB37_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_eq_imm_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvnei a0, a2, 11, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_eq_imm_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvnei a0, a2, 11, a3
+; RV32IXQCI-NEXT: ret
+entry:
+  %cmp = icmp eq i32 %x, 11
+  %sel = select i1 %cmp, i32 %a, i32 %y
+  ret i32 %sel
+}
+
+define i32 @select_cc_example_lt_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_lt_imm_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: li a1, 11
+; RV32I-NEXT: blt a2, a1, .LBB38_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB38_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_lt_imm_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvgei a0, a2, 11, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_lt_imm_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvgei a0, a2, 11, a3
+; RV32IXQCI-NEXT: ret
+entry:
+  %cmp = icmp slt i32 %x, 11
+  %sel = select i1 %cmp, i32 %a, i32 %y
+  ret i32 %sel
+}
+
+define i32 @select_cc_example_ge_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_ge_imm_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: li a1, 10
+; RV32I-NEXT: blt a1, a2, .LBB39_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB39_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_ge_imm_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvlti a0, a2, 11, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_ge_imm_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvlti a0, a2, 11, a3
+; RV32IXQCI-NEXT: ret
+entry:
+  %cmp = icmp sge i32 %x, 11
+  %sel = select i1 %cmp, i32 %a, i32 %y
+  ret i32 %sel
+}
+
+define i32 @select_cc_example_ult_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_ult_imm_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: li a1, 11
+; RV32I-NEXT: bltu a2, a1, .LBB40_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB40_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_ult_imm_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvgeui a0, a2, 11, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_ult_imm_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvgeui a0, a2, 11, a3
+; RV32IXQCI-NEXT: ret
+entry:
+  %cmp = icmp ult i32 %x, 11
+  %sel = select i1 %cmp, i32 %a, i32 %y
+  ret i32 %sel
+}
+
+define i32 @select_cc_example_uge_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_uge_imm_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: li a1, 10
+; RV32I-NEXT: bltu a1, a2, .LBB41_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB41_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_uge_imm_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvltui a0, a2, 11, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_uge_imm_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvltui a0, a2, 11, a3
+; RV32IXQCI-NEXT: ret
+entry:
+  %cmp = icmp uge i32 %x, 11
+  %sel = select i1 %cmp, i32 %a, i32 %y
+  ret i32 %sel
+}
diff --git a/llvm/test/CodeGen/RISCV/xqcics.ll b/llvm/test/CodeGen/RISCV/xqcics.ll
index 38de8fb..5b7ca9e7 100644
--- a/llvm/test/CodeGen/RISCV/xqcics.ll
+++ b/llvm/test/CodeGen/RISCV/xqcics.ll
@@ -134,14 +134,14 @@ define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_eq:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_eq:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp eq i32 %a, 11
@@ -167,14 +167,14 @@ define i32 @select_cc_example_eq_c(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_eq_c:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_eq_c:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp eq i32 11, %a
@@ -200,14 +200,14 @@ define i32 @select_cc_example_ne(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_ne:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_ne:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp ne i32 %a, 11
@@ -233,14 +233,14 @@ define i32 @select_cc_example_ne_c(i32 %a, i32 %b, i32 %x, i32 %y) {
 ;
 ; RV32IXQCICM-LABEL: select_cc_example_ne_c:
 ; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
 ; RV32IXQCICM-NEXT: ret
 ;
 ; RV32IXQCI-LABEL: select_cc_example_ne_c:
 ; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
 ; RV32IXQCI-NEXT: ret
 entry:
   %cmp = icmp ne i32 11, %a
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll
new file mode 100644
index 0000000..093d172
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll
@@ -0,0 +1,24 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_relaxed_printf_string_address_space %s -o - | FileCheck %s
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+
+; CHECK: OpExtension "SPV_EXT_relaxed_printf_string_address_space"
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] printf
+
+; CHECK-ERROR: LLVM ERROR: SPV_EXT_relaxed_printf_string_address_space is required because printf uses a format string not in constant address space.
+
+@.str = private unnamed_addr addrspace(1) constant [4 x i8] c"%d\0A\00", align 1
+
+declare spir_func i32 @printf(ptr addrspace(4), ...)
+
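+; With the extension enabled, the call below is expected to lower to the
+; OpenCL.std printf ExtInst; without it, llc should fail with the CHECK-ERROR
+; message above.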
+define spir_kernel void @test_kernel() {
+entry:
+  ; Format string in addrspace(1) → cast to addrspace(4)
+  %format = addrspacecast ptr addrspace(1) @.str to ptr addrspace(4)
+  %val = alloca i32, align 4
+  store i32 123, ptr %val, align 4
+  %loaded = load i32, ptr %val, align 4
+
+  ; Call printf with non-constant format string
+  %call = call spir_func i32 (ptr addrspace(4), ...) @printf(ptr addrspace(4) %format, i32 %loaded)
+  ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll
new file mode 100644
index 0000000..b54d59b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll
@@ -0,0 +1,48 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_relaxed_printf_string_address_space %s -o - | FileCheck %s
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+
+; CHECK: OpExtension "SPV_EXT_relaxed_printf_string_address_space"
+; CHECK: %[[#ExtInstSetId:]] = OpExtInstImport "OpenCL.std"
+; CHECK-DAG: %[[#TypeInt32Id:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#TypeInt8Id:]] = OpTypeInt 8 0
+; CHECK-DAG: %[[#TypeInt64Id:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#TypeArrayId:]] = OpTypeArray %[[#TypeInt8Id]] %[[#]]
+; CHECK-DAG: %[[#ConstantStorClassGlobalPtrTy:]] = OpTypePointer UniformConstant %[[#TypeArrayId]]
+; CHECK-DAG: %[[#WGStorClassGlobalPtrTy:]] = OpTypePointer Workgroup %[[#TypeArrayId]]
+; CHECK-DAG: %[[#CrossWFStorClassGlobalPtrTy:]] = OpTypePointer CrossWorkgroup %[[#TypeArrayId]]
+; CHECK-DAG: %[[#FunctionStorClassPtrTy:]] = OpTypePointer Function %[[#TypeInt8Id]]
+; CHECK-DAG: %[[#WGStorClassPtrTy:]] = OpTypePointer Workgroup %[[#TypeInt8Id]]
+; CHECK-DAG: %[[#CrossWFStorClassPtrTy:]] = OpTypePointer CrossWorkgroup %[[#TypeInt8Id]]
+; CHECK: %[[#ConstantCompositeId:]] = OpConstantComposite %[[#TypeArrayId]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]]
+; CHECK: %[[#]] = OpVariable %[[#ConstantStorClassGlobalPtrTy]] UniformConstant %[[#ConstantCompositeId]]
+; CHECK: %[[#]] = OpVariable %[[#CrossWFStorClassGlobalPtrTy]] CrossWorkgroup %[[#ConstantCompositeId]]
+; CHECK: %[[#]] = OpVariable %[[#WGStorClassGlobalPtrTy]] Workgroup %[[#ConstantCompositeId]]
+; CHECK: %[[#GEP1:]] = OpInBoundsPtrAccessChain %[[#FunctionStorClassPtrTy]] %[[#]] %[[#]] %[[#]]
+; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP1]]
+; CHECK: %[[#GEP2:]] = OpInBoundsPtrAccessChain %[[#CrossWFStorClassPtrTy]] %[[#]] %[[#]] %[[#]]
+; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP2]]
+; CHECK: %[[#GEP3:]] = OpInBoundsPtrAccessChain %[[#WGStorClassPtrTy]] %[[#]] %[[#]] %[[#]]
+; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP3]]
+
+; CHECK-ERROR: LLVM ERROR: SPV_EXT_relaxed_printf_string_address_space is required because printf uses a format string not in constant address space.
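+; The format string is copied into a private alloca and also duplicated in the
+; global (addrspace(1)) and local (addrspace(3)) address spaces, so each printf
+; call below takes a pointer outside the constant address space.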
+
+@0 = internal unnamed_addr addrspace(2) constant [6 x i8] c"Test\0A\00", align 1
+@1 = internal unnamed_addr addrspace(1) constant [6 x i8] c"Test\0A\00", align 1
+@2 = internal unnamed_addr addrspace(3) constant [6 x i8] c"Test\0A\00", align 1
+
+define spir_kernel void @test() {
+  %tmp1 = alloca [6 x i8], align 1
+  call void @llvm.memcpy.p0.p2.i64(ptr align 1 %tmp1, ptr addrspace(2) align 1 @0, i64 6, i1 false)
+  %1 = getelementptr inbounds [6 x i8], ptr %tmp1, i32 0, i32 0
+  %2 = call spir_func i32 @_Z18__spirv_ocl_printfPc(ptr %1)
+  %3 = getelementptr inbounds [6 x i8], ptr addrspace(1) @1, i32 0, i32 0
+  %4 = call spir_func i32 @_Z18__spirv_ocl_printfPU3AS1c(ptr addrspace(1) %3)
+  %5 = getelementptr inbounds [6 x i8], ptr addrspace(3) @2, i32 0, i32 0
+  %6 = call spir_func i32 @_Z18__spirv_ocl_printfPU3AS3c(ptr addrspace(3) %5)
+  ret void
+}
+
+declare spir_func i32 @_Z18__spirv_ocl_printfPc(ptr)
+declare spir_func i32 @_Z18__spirv_ocl_printfPU3AS1c(ptr addrspace(1))
+declare spir_func i32 @_Z18__spirv_ocl_printfPU3AS3c(ptr addrspace(3))
+declare void @llvm.memcpy.p0.p2.i64(ptr captures(none), ptr addrspace(2) captures(none) readonly, i64, i1)
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll
new file mode 100644
index 0000000..3624f14
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll
@@ -0,0 +1,19 @@
+; RUN: not llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_bindless_images %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+
+; CHECK-ERROR: LLVM ERROR: Parameter value must be a 32-bit scalar in case of Physical32 addressing model or a 64-bit scalar in case of Physical64 addressing model
+
+target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
+target triple = "spir64-unknown-unknown"
+
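+; spirv64 implies the Physical64 addressing model, so the i32 handle passed to
+; ConvertHandleToImageINTEL below should be rejected with the error checked
+; above.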
+define spir_func void @foo(i32 %in) {
+  %img = call spir_func target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) @_Z33__spirv_ConvertHandleToImageINTELi(i32 %in)
+  %samp = call spir_func target("spirv.Sampler") @_Z35__spirv_ConvertHandleToSamplerINTELl(i64 42)
+  %sampImage = call spir_func target("spirv.SampledImage", i64, 1, 0, 0, 0, 0, 0, 0) @_Z40__spirv_ConvertHandleToSampledImageINTELl(i64 43)
+  ret void
+}
+
+declare spir_func target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) @_Z33__spirv_ConvertHandleToImageINTELi(i32)
+
+declare spir_func target("spirv.Sampler") @_Z35__spirv_ConvertHandleToSamplerINTELl(i64)
+
+declare spir_func target("spirv.SampledImage", i64, 1, 0, 0, 0, 0, 0, 0) @_Z40__spirv_ConvertHandleToSampledImageINTELl(i64)
diff --git a/llvm/test/CodeGen/SPIRV/image_store.ll b/llvm/test/CodeGen/SPIRV/image_store.ll
new file mode 100644
index 0000000..a70651c
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/image_store.ll
@@ -0,0 +1,22 @@
+; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; Image types may be represented in two ways while translating to SPIR-V:
+; - OpenCL form, for example, '%opencl.image2d_ro_t',
+; - SPIR-V form, for example, '%spirv.Image._void_1_0_0_0_0_0_0',
+; but it is still one type which should be translated to one SPIR-V type.
+;
+; The test checks that the code below is successfully translated and only one
+; SPIR-V type for images is generated (no duplicate OpTypeImage instructions).
+
+; CHECK: %[[#]] = OpTypeImage %[[#]] 2D
+; CHECK-NOT: %[[#]] = OpTypeImage %[[#]] 2D
+
+declare spir_func <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_ff(ptr addrspace(1), ptr addrspace(2), <2 x float>, float)
+
+define spir_kernel void @read_image(ptr addrspace(1) %srcimg, ptr addrspace(2) %sampler){
+entry:
+  %spirvimg.addr = alloca target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0), align 8
+  %val = call <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_ff(ptr addrspace(1) %srcimg, ptr addrspace(2) %sampler, <2 x float> zeroinitializer, float 0.0)
+  ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll
new file mode 100644
index 0000000..b788f34b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll
@@ -0,0 +1,28 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-LABEL: Begin function original_testcase
+define fastcc void @original_testcase() {
+top:
+  ; CHECK: OpCompositeInsert
+  %0 = insertvalue [1 x ptr] zeroinitializer, ptr poison, 0
+  ret void
+}
+
+; CHECK-LABEL: Begin function additional_testcases
+define fastcc void @additional_testcases() {
+top:
+  ; Test with different pointer types
+  ; CHECK: OpCompositeInsert
+  %1 = insertvalue [1 x ptr] zeroinitializer, ptr undef, 0
+  ; CHECK-NEXT: OpCompositeInsert
+  %2 = insertvalue {ptr, i32} zeroinitializer, ptr poison, 0
+  ; CHECK-NEXT: OpCompositeInsert
+  %3 = insertvalue {ptr, ptr} undef, ptr null, 0
+
+  ; Test with undef aggregate
+  ; CHECK-NEXT: OpCompositeInsert
+  %4 = insertvalue [1 x ptr] undef, ptr undef, 0
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll
new file mode 100644
index 0000000..49bb8ea
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll
@@ -0,0 +1,56 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpFOrdEqual
+; CHECK-DAG: OpFOrdGreaterThan
+; CHECK-DAG: OpFOrdGreaterThanEqual
+; CHECK-DAG: OpFOrdLessThan
+; CHECK-DAG: OpFOrdLessThanEqual
+; CHECK-DAG: OpFOrdNotEqual
+; CHECK-DAG: OpOrdered
+; CHECK-DAG: OpFUnordEqual
+; CHECK-DAG: OpFUnordGreaterThan
+; CHECK-DAG: OpFUnordGreaterThanEqual
+; CHECK-DAG: OpFUnordLessThan
+; CHECK-DAG: OpFUnordLessThanEqual
+; CHECK-DAG: OpFUnordNotEqual
+; CHECK-DAG: OpUnordered
+
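+; Each constrained fcmp predicate below should map to the matching ordered or
+; unordered compare opcode listed above; the or-chain keeps every result live
+; so none of the calls can be dropped.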
+define dso_local spir_kernel void @test(float %a){
+entry:
+  %cmp = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"oeq", metadata !"fpexcept.strict")
+  %cmp1 = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"ogt", metadata !"fpexcept.strict")
+  %cmp2 = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"oge", metadata !"fpexcept.strict")
+  %cmp3 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"olt", metadata !"fpexcept.strict")
+  %cmp4 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ole", metadata !"fpexcept.strict")
+  %cmp5 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"one", metadata !"fpexcept.strict")
+  %cmp6 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ord", metadata !"fpexcept.strict")
+  %cmp7 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ueq", metadata !"fpexcept.strict")
+  %cmp8 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ugt", metadata !"fpexcept.strict")
+  %cmp9 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"uge", metadata !"fpexcept.strict")
+  %cmp10 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ult", metadata !"fpexcept.strict")
+  %cmp11 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ule", metadata !"fpexcept.strict")
+  %cmp12 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"une", metadata !"fpexcept.strict")
+  %cmp13 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"uno", metadata !"fpexcept.strict")
+
+  %or1 = or i1 %cmp, %cmp1
+  %or2 = or i1 %or1, %cmp2
+  %or3 = or i1 %or2, %cmp3
+  %or4 = or i1 %or3, %cmp4
+  %or5 = or i1 %or4, %cmp5
+  %or6 = or i1 %or5, %cmp6
+  %or7 = or i1 %or6, %cmp7
+  %or8 = or i1 %or7, %cmp8
+  %or9 = or i1 %or8, %cmp9
+  %or10 = or i1 %or9, %cmp10
+  %or11 = or i1 %or10, %cmp11
+  %or12 = or i1 %or11, %cmp12
+  %or13 = or i1 %or12, %cmp13
+  br i1 %or13, label %true_block, label %false_block
+true_block:
+  ret void
+false_block:
+  ret void
+}
+declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll
new file mode 100644
index 0000000..fd8cb9d
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll
@@ -0,0 +1,14 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+
+; CHECK: OpNop
+; CHECK-NEXT: OpReturn
+
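+; llvm.debugtrap now gets its own lowering to OpNop rather than being silently
+; skipped; note the matching removal of the call from ignore-llvm-intrinsic.ll
+; further down.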
+declare void @llvm.debugtrap()
+
+define spir_kernel void @foo(ptr addrspace(1) %a){
+entry:
+  %a.addr = alloca ptr addrspace(1), align 4
+  store ptr addrspace(1) %a, ptr %a.addr, align 4
+  call void @llvm.debugtrap()
+  ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll
new file mode 100644
index 0000000..f6434e9
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll
@@ -0,0 +1,114 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: %[[#extinst_id:]] = OpExtInstImport "OpenCL.std"
+; CHECK-DAG: %[[#float_32_type:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#int_32_type:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#fn_ptr_type_i32:]] = OpTypePointer Function %[[#int_32_type]]
+; CHECK-DAG: %[[#const_negzero:]] = OpConstant %[[#float_32_type]] -0
+; CHECK-DAG: %[[#vec2_float_type:]] = OpTypeVector %[[#float_32_type]] 2
+; CHECK-DAG: %[[#vec2_int_type:]] = OpTypeVector %[[#int_32_type]] 2
+; CHECK-DAG: %[[#fn_ptr_type_vec2_i32:]] = OpTypePointer Function %[[#vec2_int_type]]
+; CHECK-DAG: %[[#vec2_null:]] = OpConstantNull %[[#vec2_float_type]]
+; CHECK-DAG: %[[#scalar_null:]] = OpConstantNull %[[#float_32_type]]
+; CHECK-DAG: %[[#const_composite1:]] = OpConstantComposite %[[#vec2_float_type]] %[[#scalar_null]] %[[#const_negzero]]
+; CHECK-DAG: %[[#vec4_float_type:]] = OpTypeVector %[[#float_32_type]] 4
+; CHECK-DAG: %[[#vec4_int_type:]] = OpTypeVector %[[#int_32_type]] 4
+; CHECK-DAG: %[[#fn_ptr_type_vec4_i32:]] = OpTypePointer Function %[[#vec4_int_type]]
+; CHECK-DAG: %[[#const_composite2:]] = OpConstantComposite %[[#vec4_float_type]] %[[#const_16:]] %[[#const_neg32:]] %[[#const_0:]] %[[#const_9999:]]
+; CHECK-DAG: %[[#float_64_type:]] = OpTypeFloat 64
+; CHECK-DAG: %[[#vec2_double_type:]] = OpTypeVector %[[#float_64_type]] 2
+
+; CHECK: %[[#]] = OpFunctionParameter %[[#float_32_type]]
+; CHECK: %[[#var1:]] = OpVariable %[[#fn_ptr_type_i32]] Function
+; CHECK: %[[#extinst1:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#const_negzero]] %[[#var1]]
+; CHECK: %[[#exp_part_var:]] = OpLoad %[[#int_32_type]] %[[#var1]]
+; CHECK: OpReturnValue %[[#exp_part_var]]
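+; The OpenCL.std frexp ExtInst returns the mantissa and writes the exponent
+; through a pointer, so each { float, i32 } intrinsic result is expected to
+; become a Function-storage variable passed to frexp plus an OpLoad of field 1.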
+define i32 @frexp_negzero(float %x) {
+  %ret = call { float, i32 } @llvm.frexp.f32.i32(float -0.0)
+  %f_part = extractvalue { float, i32 } %ret, 0
+  %exp_part = extractvalue { float, i32 } %ret, 1
+  ret i32 %exp_part
+}
+
+; CHECK: %[[#x_var4:]] = OpFunctionParameter %[[#float_32_type]]
+; CHECK: %[[#var10:]] = OpVariable %[[#fn_ptr_type_i32]] Function
+; CHECK: %[[#extinst10:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#x_var4]] %[[#var10]]
+; CHECK: %[[#exp_part_var2:]] = OpLoad %[[#int_32_type]] %[[#var10]]
+; CHECK: OpReturnValue %[[#exp_part_var2]]
+define i32 @frexp_frexp_get_int(float %x) {
+  %frexp0 = call { float, i32 } @llvm.frexp.f32.i32(float %x)
+  %f_part = extractvalue { float, i32 } %frexp0, 0
+  %exp_part = extractvalue { float, i32 } %frexp0, 1
+  ret i32 %exp_part
+}
+
+; CHECK: %[[#var3:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function
+; CHECK: %[[#extinst3:]] = OpExtInst %[[#vec2_float_type]] %[[#extinst_id]] frexp %[[#vec2_null]] %[[#var3]]
+; CHECK: %[[#f_part_var2:]] = OpLoad %[[#vec2_int_type]] %[[#var3]]
+; CHECK: OpReturnValue %[[#extinst3]]
+define <2 x float> @frexp_zero_vector() {
+  %ret = call { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float> zeroinitializer)
+  %f_part = extractvalue { <2 x float>, <2 x i32> } %ret, 0
+  %exp_part = extractvalue { <2 x float>, <2 x i32> } %ret, 1
+  ret <2 x float> %f_part
+}
+
+; CHECK: %[[#var4:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function
+; CHECK: %[[#extinst4:]] = OpExtInst %[[#vec2_float_type]] %[[#extinst_id]] frexp %[[#const_composite1]] %[[#var4]]
+; CHECK: %[[#f_part_var3:]] = OpLoad %[[#vec2_int_type]] %[[#var4]]
+; CHECK: OpReturnValue %[[#extinst4]]
+define <2 x float> @frexp_zero_negzero_vector() {
+  %ret = call { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float> <float 0.0, float -0.0>)
+  %f_part = extractvalue { <2 x float>, <2 x i32> } %ret, 0
+  %exp_part = extractvalue { <2 x float>, <2 x i32> } %ret, 1
+  ret <2 x float> %f_part
+}
+
+; CHECK: %[[#var5:]] = OpVariable %[[#fn_ptr_type_vec4_i32]] Function
+; CHECK: %[[#extinst5:]] = OpExtInst %[[#vec4_float_type]] %[[#extinst_id]] frexp %[[#const_composite2]] %[[#var5]]
+; CHECK: %[[#f_part_var4:]] = OpLoad %[[#vec4_int_type]] %[[#var5]]
+; CHECK: OpReturnValue %[[#extinst5]]
+define <4 x float> @frexp_nonsplat_vector() {
+  %ret = call { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float> <float 16.0, float -32.0, float 0.0, float 9999.0>)
+  %f_part = extractvalue { <4 x float>, <4 x i32> } %ret, 0
+  %exp_part = extractvalue { <4 x float>, <4 x i32> } %ret, 1
+  ret <4 x float> %f_part
+}
+
+; CHECK: %[[#x_var2:]] = OpFunctionParameter %[[#float_32_type]]
+; CHECK: %[[#var6:]] = OpVariable %[[#fn_ptr_type_i32]] Function
+; CHECK: %[[#var7:]] = OpVariable %[[#fn_ptr_type_i32]] Function
+; CHECK: %[[#extinst6:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#x_var2]] %[[#var6]]
+; CHECK: %[[#load1:]] = OpLoad %[[#int_32_type]] %[[#var6]]
+; CHECK: %[[#extinst7:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#extinst6]] %[[#var7]]
+; CHECK: %[[#f_part_var5:]] = OpLoad %[[#int_32_type]] %[[#var7]]
+; CHECK: OpReturnValue %[[#extinst7]]
+define float @frexp_frexp(float %x) {
+  %frexp0 = call { float, i32 } @llvm.frexp.f32.i32(float %x)
+  %frexp0_f_part = extractvalue { float, i32 } %frexp0, 0
+  %frexp0_exp_part = extractvalue { float, i32 } %frexp0, 1
+  %frexp1 = call { float, i32 } @llvm.frexp.f32.i32(float %frexp0_f_part)
+  %frexp1_f_part = extractvalue { float, i32 } %frexp1, 0
+  %frexp1_exp_part = extractvalue { float, i32 } %frexp1, 1
+  ret float %frexp1_f_part
+}
+
+; CHECK: %[[#x_var3:]] = OpFunctionParameter %[[#vec2_double_type]]
+; CHECK: %[[#var9:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function
+; CHECK: %[[#extinst9:]] = OpExtInst %[[#vec2_double_type]] %[[#extinst_id]] frexp %[[#x_var3]] %[[#var9]]
+; CHECK: %[[#f_part_var6:]] = OpLoad %[[#vec2_int_type]] %[[#var9]]
+; CHECK: OpReturnValue %[[#extinst9]]
+define <2 x double> @frexp_frexp_vector(<2 x double> %x) {
+  %frexp0 = call { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double> %x)
+  %f_part = extractvalue { <2 x double>, <2 x i32> } %frexp0, 0
+  %exp_part = extractvalue { <2 x double>, <2 x i32> } %frexp0, 1
+  ret <2 x double> %f_part
+}
+
+declare { float, i32 } @llvm.frexp.f32.i32(float)
+declare { double, i32 } @llvm.frexp.f64.i32(double)
+declare { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float>)
+declare { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float>)
+declare { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double>)
+declare { float, i8 } @llvm.frexp.f32.i8(float)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll
index a15a807..b3ef6d6 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll
@@ -11,7 +11,6 @@ define spir_kernel void @foo(ptr %p) {
 entry:
   call void @llvm.trap()
-  call void @llvm.debugtrap()
   call void @llvm.ubsantrap(i8 100)
 
   %r1 = call ptr @llvm.invariant.start.p0(i64 1024, ptr %p)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll
new file mode 100644
index 0000000..51b7664
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll
@@ -0,0 +1,86 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-NOT: llvm.memmove
+
+; CHECK-DAG: %[[#Int8:]] = OpTypeInt 8 0
+; CHECK-DAG: %[[#Int32:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#Int64:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#Ptr_CrossWG_8:]] = OpTypePointer CrossWorkgroup %[[#Int8]]
+; CHECK-DAG: %[[#Ptr_Generic_32:]] = OpTypePointer Generic %[[#Int32]]
+; CHECK-DAG: %[[#Const_64:]] = OpConstant %[[#Int32]] 64
+; CHECK-DAG: %[[#Const_36:]] = OpConstant %[[#Int32]] 36
+; CHECK-DAG: %[[#Const_30:]] = OpConstant %[[#Int32]] 30
+; CHECK-DAG: %[[#Const_32_64:]] = OpConstant %[[#Int64]] 32
+
+; CHECK: %[[#Param1:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]]
+; CHECK: %[[#Param2:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]]
+; CHECK: %[[#Size1:]] = OpUConvert %[[#Int64]] %[[#Const_64]]
+; CHECK: OpCopyMemorySized %[[#Param2]] %[[#Param1]] %[[#Size1]] Aligned 64
+
+; CHECK: %[[#Src:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]]
+; CHECK: %[[#CastDst2:]] = OpGenericCastToPtr %[[#Ptr_CrossWG_8]] %[[#GenPtr:]]
+; CHECK: %[[#Size2:]] = OpUConvert %[[#Int64]] %[[#Const_36]]
+; CHECK: OpCopyMemorySized %[[#CastDst2]] %[[#Src]] %[[#Size2]] Aligned 64
+
+; CHECK: %[[#Param1:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]]
+; CHECK: %[[#Param2:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]]
+; CHECK: %[[#Size3:]] = OpUConvert %[[#Int64]] %[[#Const_30]]
+; CHECK: OpCopyMemorySized %[[#Param2]] %[[#Param1]] %[[#Size3]] Aligned 1
+
+; CHECK: %[[#Phi:]] = OpPhi %[[#Ptr_Generic_32]] %[[#Op1:]] %[[#Lbl1:]] %[[#Op2:]] %[[#Lbl2:]]
+; CHECK: %[[#Cast:]] = OpPtrCastToGeneric %[[#]] %[[#]]
+; CHECK: OpCopyMemorySized %[[#Cast]] %[[#Phi]] %[[#Const_32_64]] Aligned 8
+
+%struct.SomeStruct = type { <16 x float>, i32, [60 x i8] }
+%class.kfunc = type <{ i32, i32, i32, [4 x i8] }>
+
+@InvocIndex = external local_unnamed_addr addrspace(1) constant i64, align 8
+@"func_object1" = internal addrspace(3) global %class.kfunc zeroinitializer, align 8
+
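+; memmove has no direct SPIR-V equivalent: each call below is expected to lower
+; to OpCopyMemorySized, with the i32 length first widened to i64 by OpUConvert,
+; as the CHECK lines above verify.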
+define spir_kernel void @test_full_move(%struct.SomeStruct addrspace(1)* captures(none) readonly %in, %struct.SomeStruct addrspace(1)* captures(none) %out) {
+  %1 = bitcast %struct.SomeStruct addrspace(1)* %in to i8 addrspace(1)*
+  %2 = bitcast %struct.SomeStruct addrspace(1)* %out to i8 addrspace(1)*
+  call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* align 64 %2, i8 addrspace(1)* align 64 %1, i32 64, i1 false)
+  ret void
+}
+
+define spir_kernel void @test_partial_move(%struct.SomeStruct addrspace(1)* captures(none) readonly %in, %struct.SomeStruct addrspace(4)* captures(none) %out) {
+  %1 = bitcast %struct.SomeStruct addrspace(1)* %in to i8 addrspace(1)*
+  %2 = bitcast %struct.SomeStruct addrspace(4)* %out to i8 addrspace(4)*
+  %3 = addrspacecast i8 addrspace(4)* %2 to i8 addrspace(1)*
+  call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* align 64 %3, i8 addrspace(1)* align 64 %1, i32 36, i1 false)
+  ret void
+}
+
+define spir_kernel void @test_array(i8 addrspace(1)* %in, i8 addrspace(1)* %out) {
+  call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i32 30, i1 false)
+  ret void
+}
+
+define weak_odr dso_local spir_kernel void @test_phi() local_unnamed_addr {
+entry:
+  %0 = alloca i32, align 8
+  %1 = addrspacecast i32* %0 to i32 addrspace(4)*
+  %2 = load i64, i64 addrspace(1)* @InvocIndex, align 8
+  %cmp = icmp eq i64 %2, 0
+  br i1 %cmp, label %leader, label %entry.merge_crit_edge
+
+entry.merge_crit_edge:                            ; preds = %entry
+  %3 = bitcast i32 addrspace(4)* %1 to i8 addrspace(4)*
+  br label %merge
+
+leader:                                           ; preds = %entry
+  %4 = bitcast i32 addrspace(4)* %1 to i8 addrspace(4)*
+  br label %merge
+
+merge:                                            ; preds = %entry.merge_crit_edge, %leader
+  %phi = phi i8 addrspace(4)* [ %3, %entry.merge_crit_edge ], [ %4, %leader ]
+  %5 = addrspacecast i8 addrspace(3)* bitcast (%class.kfunc addrspace(3)* @"func_object1" to i8 addrspace(3)*) to i8 addrspace(4)*
+  call void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* align 8 dereferenceable(32) %5, i8 addrspace(4)* align 8 dereferenceable(32) %phi, i64 32, i1 false)
+  ret void
+}
+
+declare void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* captures(none) writeonly, i8 addrspace(4)* captures(none) readonly, i64, i1 immarg)
+
+declare void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* captures(none), i8 addrspace(1)* captures(none) readonly, i32, i1)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll
new file mode 100644
index 0000000..52f939f
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll
@@ -0,0 +1,30 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -filetype=obj -o - | spirv-val %}
+; XFAIL: *
+; @llvm.sadd.with.overflow and @llvm.ssub.with.overflow have not been implemented.
+
+define spir_func void @test_sadd_overflow(ptr %out_result, ptr %out_overflow, i32 %a, i32 %b) {
+entry:
+  %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue { i32, i1 } %res, 0
+  %ofl = extractvalue { i32, i1 } %res, 1
+  store i32 %val, ptr %out_result
+  %zext_ofl = zext i1 %ofl to i8
+  store i8 %zext_ofl, ptr %out_overflow
+  ret void
+}
+
+declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
+
+define spir_func void @test_ssub_overflow(ptr %out_result, ptr %out_overflow, i32 %a, i32 %b) {
+entry:
+  %res = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue { i32, i1 } %res, 0
+  %ofl = extractvalue { i32, i1 } %res, 1
+  store i32 %val, ptr %out_result
+  %zext_ofl = zext i1 %ofl to i8
+  store i8 %zext_ofl, ptr %out_overflow
+  ret void
+}
+
+declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
index e405ef0..5e66b8b6 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
@@ -7,10 +7,11 @@
 ;;
 ;; Positive tests:
 ;;
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-NEGATIVE
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV
 ;;
 ;; Negative tests:
 ;;
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV-NEGATIVE
 
 ;; Check that the backend is able to skip nsw/nuw attributes if the extension is
 ;; disabled implicitly or explicitly and if the max SPIR-V version is lower than 1.4
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll
new file mode 100644
index 0000000..c8953c7
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll
@@ -0,0 +1,11 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV: [[#PtrT:]] = OpTypePointer Workgroup %[[#]]
+; CHECK-SPIRV: %[[#]] = OpVariable %[[#PtrT]] Workgroup
+
+@test_atomic_fn.L = internal addrspace(3) global [64 x i32] zeroinitializer, align 4
+
+define spir_kernel void @test_atomic_fn() {
+  ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll
new file mode 100644
index 0000000..607997d
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll
@@ -0,0 +1,140 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpCapability Kernel
+; CHECK: OpCapability Addresses
+; CHECK: OpCapability Pipes
+; CHECK: OpCapability Int8
+; CHECK: OpCapability GenericPointer
+
+; CHECK-DAG: %[[#PipeWriteTy:]] = OpTypePipe WriteOnly
+; CHECK-DAG: %[[#PipeReadTy:]] = OpTypePipe ReadOnly
+; CHECK-DAG: %[[#ReserveIdTy:]] = OpTypeReserveId
+; CHECK-DAG: %[[#BoolTy:]] = OpTypeBool
+; CHECK-DAG: %[[#Int32Ty:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#Uint1:]] = OpConstant %[[#Int32Ty]] 1
+; CHECK-DAG: %[[#Uint2:]] = OpConstant %[[#Int32Ty]] 2
+; CHECK-DAG: %[[#Uint3:]] = OpConstant %[[#Int32Ty]] 3
+; CHECK-DAG: %[[#Uint4:]] = OpConstant %[[#Int32Ty]] 4
+; CHECK-DAG: %[[#NullUint:]] = OpConstantNull %[[#Int32Ty]]
+
+; CHECK: OpFunction
+; CHECK: %[[#FuncParam1:]] = OpFunctionParameter %[[#PipeWriteTy]]
+; CHECK: %[[#FuncParam2:]] = OpFunctionParameter %[[#PipeReadTy]]
+
+; CHECK: %[[#BasicWriteReserve:]] = OpReserveWritePipePackets %[[#ReserveIdTy]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpWritePipe %[[#Int32Ty]] %[[#FuncParam1]] %[[#]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpCommitWritePipe %[[#FuncParam1]] %[[#BasicWriteReserve]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#BasicReadReserve:]] = OpReserveReadPipePackets %[[#ReserveIdTy]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpReadPipe %[[#Int32Ty]] %[[#FuncParam2]] %[[#]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpCommitReadPipe %[[#FuncParam2]] %[[#BasicReadReserve]] %[[#Uint4]] %[[#Uint4]]
+
+; --- Reserved pipe operations ---
+; CHECK: %[[#ReservedWriteReserve:]] = OpReserveWritePipePackets %[[#ReserveIdTy]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#ReservedWrite:]] = OpReservedWritePipe %[[#Int32Ty]] %[[#FuncParam1]] %[[#ReservedWriteReserve]] %[[#NullUint]] %[[#]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#IsValidWrite:]] = OpIsValidReserveId %[[#BoolTy]] %[[#ReservedWriteReserve]]
+; CHECK: OpCommitWritePipe %[[#FuncParam1]] %[[#ReservedWriteReserve]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#ReservedReadReserve:]] = OpReserveReadPipePackets %[[#ReserveIdTy]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#ReservedRead:]] = OpReservedReadPipe %[[#Int32Ty]] %[[#FuncParam2]] %[[#ReservedReadReserve]] %[[#NullUint]] %[[#]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#IsValidRead:]] = OpIsValidReserveId %[[#BoolTy]] %[[#ReservedReadReserve]]
+; CHECK: OpCommitReadPipe %[[#FuncParam2]] %[[#ReservedReadReserve]] %[[#Uint4]] %[[#Uint4]]
+
+; --- Pipe packet queries ---
+; CHECK: %[[#MaxPacketsWO:]] = OpGetMaxPipePackets %[[#Int32Ty]] %[[#FuncParam1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpStore %[[#]] %[[#MaxPacketsWO]] Aligned 4
+; CHECK: %[[#NumPacketsWO:]] = OpGetNumPipePackets %[[#Int32Ty]] %[[#FuncParam1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpStore %[[#]] %[[#NumPacketsWO]] Aligned 4
+; CHECK: %[[#MaxPacketsRO:]] = OpGetMaxPipePackets %[[#Int32Ty]] %[[#FuncParam2]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpStore %[[#]] %[[#MaxPacketsRO]] Aligned 4
+; CHECK: %[[#NumPacketsRO:]] = OpGetNumPipePackets %[[#Int32Ty]] %[[#FuncParam2]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpStore %[[#]] %[[#NumPacketsRO]] Aligned 4
+
+; --- Workgroup operations ---
+; CHECK: %[[#WorkgroupWriteReserve:]] = OpGroupReserveWritePipePackets %[[#ReserveIdTy]] %[[#Uint2]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint1]] %[[#Uint1]]
+; CHECK: OpGroupCommitWritePipe %[[#Uint2]] %[[#FuncParam1]] %[[#WorkgroupWriteReserve]] %[[#Uint1]] %[[#Uint1]]
+; CHECK: %[[#WorkgroupReadReserve:]] = OpGroupReserveReadPipePackets %[[#ReserveIdTy]] %[[#Uint2]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint1]] %[[#Uint1]]
+; CHECK: OpGroupCommitReadPipe %[[#Uint2]] %[[#FuncParam2]] %[[#WorkgroupReadReserve]] %[[#Uint1]] %[[#Uint1]]
+
+; --- Subgroup operations ---
+; CHECK: %[[#SubgroupWriteReserve:]] = OpGroupReserveWritePipePackets %[[#ReserveIdTy]] %[[#Uint3]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpGroupCommitWritePipe %[[#Uint3]] %[[#FuncParam1]] %[[#SubgroupWriteReserve]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#SubgroupReadReserve:]] = OpGroupReserveReadPipePackets %[[#ReserveIdTy]] %[[#Uint3]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpGroupCommitReadPipe %[[#Uint3]] %[[#FuncParam2]] %[[#SubgroupReadReserve]] %[[#Uint4]] %[[#Uint4]]
+
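+; The OpenCL pipe builtins below are expected to map onto the SPIR-V pipe
+; instructions checked above; the group variants take an explicit scope id,
+; where 2 is Workgroup and 3 is Subgroup.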
@__get_pipe_num_packets_wo(target("spirv.Pipe", 1) %out_pipe, i32 4, i32 4) + store i32 %11, ptr addrspace(1) %num_packets_wo, align 4 + %12 = call spir_func i32 @__get_pipe_max_packets_ro(target("spirv.Pipe", 0) %in_pipe, i32 4, i32 4) + store i32 %12, ptr addrspace(1) %max_packets_ro, align 4 + %13 = call spir_func i32 @__get_pipe_num_packets_ro(target("spirv.Pipe", 0) %in_pipe, i32 4, i32 4) + store i32 %13, ptr addrspace(1) %num_packets_ro, align 4 + + ; Workgroup operations + %14 = call spir_func target("spirv.ReserveId") @__work_group_reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 1, i32 1) + call spir_func void @__work_group_commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %14, i32 1, i32 1) + %15 = call spir_func target("spirv.ReserveId") @__work_group_reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 1, i32 1) + call spir_func void @__work_group_commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %15, i32 1, i32 1) + + ; Subgroup operations + %16 = call spir_func target("spirv.ReserveId") @__sub_group_reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 4, i32 4) + call spir_func void @__sub_group_commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %16, i32 4, i32 4) + %17 = call spir_func target("spirv.ReserveId") @__sub_group_reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 4, i32 4) + call spir_func void @__sub_group_commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %17, i32 4, i32 4) + + ret void +} + +declare spir_func target("spirv.ReserveId") @__reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32) +declare spir_func target("spirv.ReserveId") @__reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32) +declare spir_func i32 @__write_pipe_2(target("spirv.Pipe", 1), ptr addrspace(4), i32, i32) +declare spir_func i32 @__read_pipe_2(target("spirv.Pipe", 0), ptr addrspace(4), i32, i32) +declare spir_func i32 @__write_pipe_4(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, ptr addrspace(4), i32, i32) +declare spir_func i32 @__read_pipe_4(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, ptr addrspace(4), i32, i32) +declare spir_func void @__commit_write_pipe(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, i32) +declare spir_func void @__commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32) +declare spir_func i1 @_Z19is_valid_reserve_id13ocl_reserveid(target("spirv.ReserveId")) +declare spir_func i32 @__get_pipe_max_packets_wo(target("spirv.Pipe", 1), i32, i32) +declare spir_func i32 @__get_pipe_num_packets_wo(target("spirv.Pipe", 1), i32, i32) +declare spir_func i32 @__get_pipe_max_packets_ro(target("spirv.Pipe", 0), i32, i32) +declare spir_func i32 @__get_pipe_num_packets_ro(target("spirv.Pipe", 0), i32, i32) +declare spir_func target("spirv.ReserveId") @__work_group_reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32) +declare spir_func void @__work_group_commit_write_pipe(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, i32) +declare spir_func target("spirv.ReserveId") @__work_group_reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32) +declare spir_func void @__work_group_commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32) +declare spir_func target("spirv.ReserveId") @__sub_group_reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32) +declare spir_func void @__sub_group_commit_write_pipe(target("spirv.Pipe", 
1), target("spirv.ReserveId"), i32, i32) +declare spir_func target("spirv.ReserveId") @__sub_group_reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32) +declare spir_func void @__sub_group_commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32) diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll new file mode 100644 index 0000000..4c64a12 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll @@ -0,0 +1,16 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpDecorate %[[#Id:]] BuiltIn GlobalInvocationId +; CHECK: %[[#Id]] = OpVariable %[[#]] CrossWorkgroup + +@__spirv_BuiltInGlobalInvocationId = external dso_local local_unnamed_addr addrspace(1) constant <3 x i64>, align 32 + +define spir_kernel void @f() { +entry: + %0 = load i64, ptr addrspace(1) @__spirv_BuiltInGlobalInvocationId, align 32 + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll b/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll new file mode 100644 index 0000000..74ce26b --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll @@ -0,0 +1,30 @@ +; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; Check saturation conversion is translated when there is forward declaration +; of SPIRV entry. 
+ +; CHECK: OpDecorate %[[#SAT:]] SaturatedConversion +; CHECK: %[[#SAT]] = OpConvertFToU %[[#]] %[[#]] + +declare spir_func zeroext i8 @_Z30__spirv_ConvertFToU_Ruchar_satf(float) + +define spir_func void @forward(float %val, i8 %initval, ptr addrspace(1) %dst) { +entry: + br label %for.cond + +for.cond: ; preds = %for.body, %entry + %new_val.0 = phi i8 [ %initval, %entry ], [ %call1, %for.body ] + %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ] + %cmp = icmp ult i32 %i.0, 1 + br i1 %cmp, label %for.body, label %for.end + +for.body: ; preds = %for.cond + %call1 = call spir_func zeroext i8 @_Z30__spirv_ConvertFToU_Ruchar_satf(float noundef %val) + %inc = add i32 %i.0, 1 + br label %for.cond + +for.end: ; preds = %for.cond + store i8 %new_val.0, ptr addrspace(1) %dst, align 1 + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/transcoding/float16.ll b/llvm/test/CodeGen/SPIRV/transcoding/float16.ll new file mode 100644 index 0000000..0018dba --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/float16.ll @@ -0,0 +1,25 @@ +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-SPIRV: %[[#HALF:]] = OpTypeFloat 16 +; CHECK-SPIRV: %[[#HALFPTR:]] = OpTypePointer Function %[[#HALF]] +; CHECK-SPIRV: %[[#HALFV2:]] = OpTypeVector %[[#HALF]] 2 +; CHECK-SPIRV: %[[#HALFV2PTR:]] = OpTypePointer Function %[[#HALFV2]] +; CHECK-SPIRV: %[[#CONST:]] = OpConstant %[[#HALF]] 14788 +; CHECK-SPIRV: %[[#ADDR:]] = OpVariable %[[#HALFPTR]] Function +; CHECK-SPIRV: %[[#ADDR2:]] = OpVariable %[[#HALFV2PTR]] Function +; CHECK-SPIRV: %[[#]] = OpExtInst %[[#HALF]] %[[#]] fract %[[#CONST]] %[[#ADDR]] +; CHECK-SPIRV: %[[#]] = OpExtInst %[[#HALFV2]] %[[#]] fract %[[#]] %[[#ADDR2]] + +define spir_kernel void @test() { +entry: + %addr = alloca half + %addr2 = alloca <2 x half> + %res = call spir_func noundef half @_Z17__spirv_ocl_fractDF16_PU3AS0DF16_(half noundef 0xH39C4, ptr noundef %addr) + %res2 = call spir_func noundef <2 x half> @_Z17__spirv_ocl_fractDv2_DF16_PU3AS0S_(<2 x half> noundef <half 0xH39C4, half 0xH0000>, ptr noundef %addr2) + ret void +} + +declare spir_func noundef half @_Z17__spirv_ocl_fractDF16_PU3AS0DF16_(half noundef, ptr noundef) local_unnamed_addr + +declare spir_func noundef <2 x half> @_Z17__spirv_ocl_fractDv2_DF16_PU3AS0S_(<2 x half> noundef, ptr noundef) local_unnamed_addr diff --git a/llvm/test/CodeGen/VE/Scalar/max.ll b/llvm/test/CodeGen/VE/Scalar/max.ll index 51da557..7950842 100644 --- a/llvm/test/CodeGen/VE/Scalar/max.ll +++ b/llvm/test/CodeGen/VE/Scalar/max.ll @@ -1,7 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s -; RUN: llc < %s -mtriple=ve-unknown-unknown -enable-no-signed-zeros-fp-math \ -; RUN: -enable-no-nans-fp-math | FileCheck %s -check-prefix=OPT define double @maxf64(double, double) { ; CHECK-LABEL: maxf64: @@ -10,16 +8,21 @@ define double @maxf64(double, double) { ; CHECK-NEXT: cmov.d.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxf64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ogt double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @maxf64_fast(double, double) { +; CHECK-LABEL: maxf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ogt double %0, 
%1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @max2f64(double, double) { ; CHECK-LABEL: max2f64: ; CHECK: # %bb.0: @@ -27,16 +30,21 @@ define double @max2f64(double, double) { ; CHECK-NEXT: cmov.d.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2f64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp oge double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @max2f64_fast(double, double) { +; CHECK-LABEL: max2f64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp oge double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + ; VE has no max for unordered comparison define double @maxuf64(double, double) { ; CHECK-LABEL: maxuf64: @@ -45,16 +53,21 @@ define double @maxuf64(double, double) { ; CHECK-NEXT: cmov.d.gtnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxuf64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ugt double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @maxuf64_fast(double, double) { +; CHECK-LABEL: maxuf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ugt double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + ; VE has no max for unordered comparison define double @max2uf64(double, double) { ; CHECK-LABEL: max2uf64: @@ -63,16 +76,21 @@ define double @max2uf64(double, double) { ; CHECK-NEXT: cmov.d.genan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2uf64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp uge double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @max2uf64_fast(double, double) { +; CHECK-LABEL: max2uf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp uge double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define float @maxf32(float, float) { ; CHECK-LABEL: maxf32: ; CHECK: # %bb.0: @@ -80,16 +98,21 @@ define float @maxf32(float, float) { ; CHECK-NEXT: cmov.s.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxf32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ogt float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @maxf32_fast(float, float) { +; CHECK-LABEL: maxf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ogt float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @max2f32(float, float) { ; CHECK-LABEL: max2f32: ; CHECK: # %bb.0: @@ -97,16 +120,21 @@ define float @max2f32(float, float) { ; CHECK-NEXT: cmov.s.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2f32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp oge float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @max2f32_fast(float, float) { +; CHECK-LABEL: max2f32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp oge float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 
+} + define float @maxuf32(float, float) { ; CHECK-LABEL: maxuf32: ; CHECK: # %bb.0: @@ -114,16 +142,21 @@ define float @maxuf32(float, float) { ; CHECK-NEXT: cmov.s.gtnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxuf32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ugt float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @maxuf32_fast(float, float) { +; CHECK-LABEL: maxuf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ugt float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @max2uf32(float, float) { ; CHECK-LABEL: max2uf32: ; CHECK: # %bb.0: @@ -131,26 +164,26 @@ define float @max2uf32(float, float) { ; CHECK-NEXT: cmov.s.genan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2uf32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp uge float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @max2uf32_fast(float, float) { +; CHECK-LABEL: max2uf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp uge float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define i64 @maxi64(i64, i64) { ; CHECK-LABEL: maxi64: ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxi64: -; OPT: # %bb.0: -; OPT-NEXT: maxs.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sgt i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -161,11 +194,6 @@ define i64 @max2i64(i64, i64) { ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2i64: -; OPT: # %bb.0: -; OPT-NEXT: maxs.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sge i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -178,13 +206,6 @@ define i64 @maxu64(i64, i64) { ; CHECK-NEXT: cmov.l.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxu64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.gt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ugt i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -197,13 +218,6 @@ define i64 @max2u64(i64, i64) { ; CHECK-NEXT: cmov.l.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2u64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.ge %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp uge i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -214,11 +228,6 @@ define i32 @maxi32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxi32: -; OPT: # %bb.0: -; OPT-NEXT: maxs.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sgt i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -229,11 +238,6 @@ define i32 @max2i32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2i32: -; OPT: # %bb.0: -; OPT-NEXT: maxs.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sge i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -246,13 +250,6 @@ define i32 @maxu32(i32, i32) { ; CHECK-NEXT: cmov.w.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; 
OPT-LABEL: maxu32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.gt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ugt i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -265,13 +262,6 @@ define i32 @max2u32(i32, i32) { ; CHECK-NEXT: cmov.w.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2u32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.ge %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp uge i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -283,12 +273,6 @@ define zeroext i1 @maxi1(i1 zeroext, i1 zeroext) { ; CHECK-NEXT: or %s0, %s0, %s1 ; CHECK-NEXT: and %s0, 1, %s0 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxi1: -; OPT: # %bb.0: -; OPT-NEXT: or %s0, %s0, %s1 -; OPT-NEXT: and %s0, 1, %s0 -; OPT-NEXT: b.l.t (, %s10) %3 = xor i1 %1, true %4 = and i1 %3, %0 %5 = select i1 %4, i1 %0, i1 %1 diff --git a/llvm/test/CodeGen/VE/Scalar/min.ll b/llvm/test/CodeGen/VE/Scalar/min.ll index e8f4939..36a2e06 100644 --- a/llvm/test/CodeGen/VE/Scalar/min.ll +++ b/llvm/test/CodeGen/VE/Scalar/min.ll @@ -1,7 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s -; RUN: llc < %s -mtriple=ve-unknown-unknown -enable-no-signed-zeros-fp-math \ -; RUN: -enable-no-nans-fp-math | FileCheck %s -check-prefix=OPT define double @minf64(double, double) { ; CHECK-LABEL: minf64: @@ -10,16 +8,21 @@ define double @minf64(double, double) { ; CHECK-NEXT: cmov.d.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minf64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp olt double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @minf64_fast(double, double) { +; CHECK-LABEL: minf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp olt double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @min2f64(double, double) { ; CHECK-LABEL: min2f64: ; CHECK: # %bb.0: @@ -27,16 +30,21 @@ define double @min2f64(double, double) { ; CHECK-NEXT: cmov.d.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2f64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ole double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @min2f64_fast(double, double) { +; CHECK-LABEL: min2f64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ole double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @minuf64(double, double) { ; CHECK-LABEL: minuf64: ; CHECK: # %bb.0: @@ -44,16 +52,21 @@ define double @minuf64(double, double) { ; CHECK-NEXT: cmov.d.ltnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minuf64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ult double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @minuf64_fast(double, double) { +; CHECK-LABEL: minuf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ult double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double 
@min2uf64(double, double) { ; CHECK-LABEL: min2uf64: ; CHECK: # %bb.0: @@ -61,16 +74,21 @@ define double @min2uf64(double, double) { ; CHECK-NEXT: cmov.d.lenan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2uf64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ule double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @min2uf64_fast(double, double) { +; CHECK-LABEL: min2uf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ule double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define float @minf32(float, float) { ; CHECK-LABEL: minf32: ; CHECK: # %bb.0: @@ -78,16 +96,21 @@ define float @minf32(float, float) { ; CHECK-NEXT: cmov.s.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minf32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp olt float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @minf32_fast(float, float) { +; CHECK-LABEL: minf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp olt float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @min2f32(float, float) { ; CHECK-LABEL: min2f32: ; CHECK: # %bb.0: @@ -95,16 +118,21 @@ define float @min2f32(float, float) { ; CHECK-NEXT: cmov.s.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2f32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ole float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @min2f32_fast(float, float) { +; CHECK-LABEL: min2f32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ole float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @minuf32(float, float) { ; CHECK-LABEL: minuf32: ; CHECK: # %bb.0: @@ -112,16 +140,21 @@ define float @minuf32(float, float) { ; CHECK-NEXT: cmov.s.ltnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minuf32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ult float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @minuf32_fast(float, float) { +; CHECK-LABEL: minuf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ult float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @min2uf32(float, float) { ; CHECK-LABEL: min2uf32: ; CHECK: # %bb.0: @@ -129,26 +162,26 @@ define float @min2uf32(float, float) { ; CHECK-NEXT: cmov.s.lenan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2uf32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ule float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @min2uf32_fast(float, float) { +; CHECK-LABEL: min2uf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ule float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define i64 @mini64(i64, i64) { ; CHECK-LABEL: mini64: ; CHECK: # %bb.0: ; CHECK-NEXT: mins.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; 
OPT-LABEL: mini64: -; OPT: # %bb.0: -; OPT-NEXT: mins.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp slt i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -159,11 +192,6 @@ define i64 @min2i64(i64, i64) { ; CHECK: # %bb.0: ; CHECK-NEXT: mins.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2i64: -; OPT: # %bb.0: -; OPT-NEXT: mins.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sle i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -176,13 +204,6 @@ define i64 @minu64(i64, i64) { ; CHECK-NEXT: cmov.l.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minu64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.lt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ult i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -195,13 +216,6 @@ define i64 @min2u64(i64, i64) { ; CHECK-NEXT: cmov.l.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2u64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.le %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ule i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -212,11 +226,6 @@ define i32 @mini32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: mins.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: mini32: -; OPT: # %bb.0: -; OPT-NEXT: mins.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp slt i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -227,11 +236,6 @@ define i32 @min2i32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: mins.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2i32: -; OPT: # %bb.0: -; OPT-NEXT: mins.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sle i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -244,13 +248,6 @@ define i32 @minu32(i32, i32) { ; CHECK-NEXT: cmov.w.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minu32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.lt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ult i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -263,13 +260,6 @@ define i32 @min2u32(i32, i32) { ; CHECK-NEXT: cmov.w.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2u32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.le %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ule i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -283,14 +273,6 @@ define zeroext i1 @mini1(i1 zeroext, i1 zeroext) { ; CHECK-NEXT: cmov.w.ne %s0, %s1, %s2 ; CHECK-NEXT: adds.w.zx %s0, %s0, (0)1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: mini1: -; OPT: # %bb.0: -; OPT-NEXT: and %s2, 1, %s0 -; OPT-NEXT: and %s0, %s1, %s0 -; OPT-NEXT: cmov.w.ne %s0, %s1, %s2 -; OPT-NEXT: adds.w.zx %s0, %s0, (0)1 -; OPT-NEXT: b.l.t (, %s10) %3 = xor i1 %0, true %4 = and i1 %3, %1 %5 = select i1 %4, i1 %0, i1 %1 diff --git a/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll b/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll index 6679b5f5..41fa346 100644 --- a/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll +++ b/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll @@ -8,7 +8,7 @@ define void @neg_8bit_1(i1 %cmp) { ; NDD-NEXT: andb $1, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x80,0xe7,0x01] ; NDD-NEXT: movzbl 0, %ecx # encoding: 
[0x0f,0xb6,0x0c,0x25,0x00,0x00,0x00,0x00] ; NDD-NEXT: negb %al, %al # encoding: [0x62,0xf4,0x7c,0x18,0xf6,0xd8] -; NDD-NEXT: leab 2(%rcx,%rax), %al # encoding: [0x66,0x8d,0x44,0x01,0x02] +; NDD-NEXT: leal 2(%rcx,%rax), %eax # encoding: [0x8d,0x44,0x01,0x02] ; NDD-NEXT: movb %al, 0 # encoding: [0x88,0x04,0x25,0x00,0x00,0x00,0x00] ; NDD-NEXT: retq # encoding: [0xc3] entry: @@ -25,7 +25,8 @@ define void @neg_8bit_2(i8 %int8) { ; NDD-NEXT: # kill: def $edi killed $edi def $rdi ; NDD-NEXT: addb %dil, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x00,0xff] ; NDD-NEXT: negb %al, %al # encoding: [0x62,0xf4,0x7c,0x18,0xf6,0xd8] -; NDD-NEXT: leab 1(%rdi,%rax), %al # encoding: [0x66,0x8d,0x44,0x07,0x01] +; NDD-NEXT: leal 1(%rdi,%rax), %eax # encoding: [0x8d,0x44,0x07,0x01] +; NDD-NEXT: # kill: def $al killed $al killed $eax ; NDD-NEXT: mulb %dil # encoding: [0x40,0xf6,0xe7] ; NDD-NEXT: testb %al, %al # encoding: [0x84,0xc0] ; NDD-NEXT: retq # encoding: [0xc3] @@ -55,7 +56,7 @@ define i32 @neg_16bit(i16 %0) { ; NDD-NEXT: cmovsl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x48,0xc1] ; NDD-NEXT: andw $-256, %ax # EVEX TO LEGACY Compression encoding: [0x66,0x25,0x00,0xff] ; NDD-NEXT: negw %ax, %ax # encoding: [0x62,0xf4,0x7d,0x18,0xf7,0xd8] -; NDD-NEXT: leaw 1(%rdi,%rax), %ax # encoding: [0x66,0x8d,0x44,0x07,0x01] +; NDD-NEXT: leal 1(%rdi,%rax), %eax # encoding: [0x8d,0x44,0x07,0x01] ; NDD-NEXT: movzwl %ax, %eax # encoding: [0x0f,0xb7,0xc0] ; NDD-NEXT: movq %rax, 0 # encoding: [0x48,0x89,0x04,0x25,0x00,0x00,0x00,0x00] ; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll index 8aa898f..da0cef0 100644 --- a/llvm/test/CodeGen/X86/avx512-mask-op.ll +++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll @@ -2119,8 +2119,7 @@ define void @ktest_1(<8 x double> %in, ptr %base) { ; KNL-LABEL: ktest_1: ; KNL: ## %bb.0: ; KNL-NEXT: vcmpgtpd (%rdi), %zmm0, %k1 -; KNL-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z} -; KNL-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1} +; KNL-NEXT: vcmpltpd 8(%rdi), %zmm0, %k0 {%k1} ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb %al, %al ; KNL-NEXT: je LBB44_2 @@ -2152,8 +2151,7 @@ define void @ktest_1(<8 x double> %in, ptr %base) { ; AVX512BW-LABEL: ktest_1: ; AVX512BW: ## %bb.0: ; AVX512BW-NEXT: vcmpgtpd (%rdi), %zmm0, %k1 -; AVX512BW-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z} -; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1} +; AVX512BW-NEXT: vcmpltpd 8(%rdi), %zmm0, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax ; AVX512BW-NEXT: testb %al, %al ; AVX512BW-NEXT: je LBB44_2 diff --git a/llvm/test/CodeGen/X86/combine-add.ll b/llvm/test/CodeGen/X86/combine-add.ll index ff9f995..51a8bf5 100644 --- a/llvm/test/CodeGen/X86/combine-add.ll +++ b/llvm/test/CodeGen/X86/combine-add.ll @@ -235,10 +235,10 @@ define void @PR52039(ptr %pa, ptr %pb) { ; SSE-NEXT: psubd %xmm1, %xmm3 ; SSE-NEXT: psubd %xmm0, %xmm2 ; SSE-NEXT: movdqa %xmm2, %xmm0 -; SSE-NEXT: paddd %xmm2, %xmm0 +; SSE-NEXT: paddd %xmm0, %xmm0 ; SSE-NEXT: paddd %xmm2, %xmm0 ; SSE-NEXT: movdqa %xmm3, %xmm1 -; SSE-NEXT: paddd %xmm3, %xmm1 +; SSE-NEXT: paddd %xmm1, %xmm1 ; SSE-NEXT: paddd %xmm3, %xmm1 ; SSE-NEXT: movdqu %xmm3, 16(%rsi) ; SSE-NEXT: movdqu %xmm2, (%rsi) diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll index 8e4a50e..ae4d24f 100644 --- a/llvm/test/CodeGen/X86/combine-mul.ll +++ b/llvm/test/CodeGen/X86/combine-mul.ll @@ -81,7 +81,7 @@ define <4 x i64> @combine_vec_mul_pow2c(<4 x i64> %x) { ; SSE-LABEL: 
combine_vec_mul_pow2c: ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: paddq %xmm0, %xmm2 +; SSE-NEXT: paddq %xmm2, %xmm2 ; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7] ; SSE-NEXT: movdqa %xmm1, %xmm2 ; SSE-NEXT: psllq $4, %xmm2 diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll index 98187d6..6bcbfe1 100644 --- a/llvm/test/CodeGen/X86/combine-sdiv.ll +++ b/llvm/test/CodeGen/X86/combine-sdiv.ll @@ -2187,13 +2187,13 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; SSE41-NEXT: pxor %xmm0, %xmm0 ; SSE41-NEXT: pxor %xmm3, %xmm3 ; SSE41-NEXT: pcmpgtb %xmm1, %xmm3 -; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero ; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] ; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [256,2,2,2,2,128,2,128] ; SSE41-NEXT: psrlw $8, %xmm3 -; SSE41-NEXT: paddw %xmm4, %xmm4 -; SSE41-NEXT: pmovsxbw %xmm1, %xmm2 -; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4,5],xmm4[6],xmm2[7] +; SSE41-NEXT: pmovsxbw %xmm1, %xmm0 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; SSE41-NEXT: paddw %xmm2, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2],xmm0[3,4,5],xmm2[6],xmm0[7] ; SSE41-NEXT: psrlw $8, %xmm2 ; SSE41-NEXT: packuswb %xmm3, %xmm2 ; SSE41-NEXT: paddb %xmm1, %xmm2 @@ -2201,15 +2201,14 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] ; SSE41-NEXT: psraw $8, %xmm0 ; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: paddw %xmm0, %xmm3 -; SSE41-NEXT: psllw $7, %xmm0 -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5],xmm0[6],xmm3[7] -; SSE41-NEXT: psrlw $8, %xmm0 +; SSE41-NEXT: psllw $7, %xmm3 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm0[5],xmm3[6],xmm0[7] +; SSE41-NEXT: psrlw $8, %xmm3 ; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE41-NEXT: psraw $8, %xmm2 ; SSE41-NEXT: psllw $7, %xmm2 ; SSE41-NEXT: psrlw $8, %xmm2 -; SSE41-NEXT: packuswb %xmm0, %xmm2 +; SSE41-NEXT: packuswb %xmm3, %xmm2 ; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255] ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,255,255,255,0,255,255,0,0,0,0,255,0,255] @@ -2225,18 +2224,17 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [256,2,2,2,2,128,2,128] ; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpmovsxbw %xmm0, %xmm3 -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3,4,5],xmm2[6],xmm3[7] +; AVX1-NEXT: vpmovsxbw %xmm0, %xmm2 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = 
xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4,5],xmm3[6],xmm2[7] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm1 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; AVX1-NEXT: vpsraw $8, %xmm2, %xmm2 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 -; AVX1-NEXT: vpsllw $7, %xmm2, %xmm2 -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5],xmm2[6],xmm3[7] +; AVX1-NEXT: vpsllw $7, %xmm2, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5],xmm3[6],xmm2[7] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1 diff --git a/llvm/test/CodeGen/X86/dbg-distringtype-uint.ll index 638a65d..7542c1b 100644 --- a/llvm/test/CodeGen/X86/dbg-distringtype-uint.ll +++ b/llvm/test/CodeGen/X86/dbg-distringtype-uint.ll @@ -1,5 +1,13 @@ ; RUN: llc -mtriple=x86_64 -filetype=obj < %s | llvm-dwarfdump -debug-info - | FileCheck %s -; + +; Ensure that the static local variable 'elemnt' is placed in the abstract subprogram DIE. +; CHECK: DW_TAG_subprogram +; CHECK-NOT: DW_TAG +; CHECK: DW_AT_inline (DW_INL_inlined) +; CHECK-EMPTY: +; CHECK-NEXT: DW_TAG_variable +; CHECK-NEXT: DW_AT_name ("elemnt") + ; CHECK: [[SYM:[a-z0-9]+]]: DW_TAG_formal_parameter ; CHECK: DW_AT_name ("esym") ; CHECK: DW_AT_type ([[TYPE:[a-z0-9]+]] "CHARACTER_1") diff --git a/llvm/test/CodeGen/X86/dpbusd.ll index 3aa77c3..7bd22d5 100644 --- a/llvm/test/CodeGen/X86/dpbusd.ll +++ b/llvm/test/CodeGen/X86/dpbusd.ll @@ -1,40 +1,25 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=AVXVNNI -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=AVX512,AVX512VNNI -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVNNI +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX512 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VNNI +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VLVNNI define i32 @no_dpbusd(ptr%a, ptr%b, i32 %c, i32 %n) { -; AVXVNNI-LABEL: no_dpbusd: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVXVNNI-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; 
AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vmovd %xmm0, %eax -; AVXVNNI-NEXT: addl %edx, %eax -; AVXVNNI-NEXT: vzeroupper -; AVXVNNI-NEXT: retq -; -; AVX512-LABEL: no_dpbusd: -; AVX512: # %bb.0: # %entry -; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVX512-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0 -; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: vmovd %xmm0, %eax -; AVX512-NEXT: addl %edx, %eax -; AVX512-NEXT: vzeroupper -; AVX512-NEXT: retq +; CHECK-LABEL: no_dpbusd: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; CHECK-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vmovd %xmm0, %eax +; CHECK-NEXT: addl %edx, %eax +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq entry: %0 = load <16 x i8>, ptr %a, align 16 %1 = zext <16 x i8> %0 to <16 x i32> @@ -99,25 +84,44 @@ entry: } define i32 @mul_zext(ptr%a, ptr%b, i32 %c, i32 %n) { -; AVXVNNI-LABEL: mul_zext: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVXVNNI-NEXT: vpmovsxbw (%rsi), %ymm1 -; AVXVNNI-NEXT: vpmullw %ymm0, %ymm1, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; AVXVNNI-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVXVNNI-NEXT: vpaddd %ymm1, %ymm0, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vmovd %xmm0, %eax -; AVXVNNI-NEXT: addl %edx, %eax -; 
AVXVNNI-NEXT: vzeroupper -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_zext: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVXVNNI-AVX-NEXT: vpmovsxbw (%rsi), %ymm1 +; AVXVNNI-AVX-NEXT: vpmullw %ymm0, %ymm1, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVXVNNI-AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVXVNNI-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX-NEXT: addl %edx, %eax +; AVXVNNI-AVX-NEXT: vzeroupper +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_zext: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVXVNNI-AVX512-NEXT: vpmovsxbw (%rsi), %ymm1 +; AVXVNNI-AVX512-NEXT: vpmullw %ymm0, %ymm1, %ymm0 +; AVXVNNI-AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVXVNNI-AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX512-NEXT: addl %edx, %eax +; AVXVNNI-AVX512-NEXT: vzeroupper +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512-LABEL: mul_zext: ; AVX512: # %bb.0: # %entry @@ -153,25 +157,44 @@ entry: } define i32 @mul_sext(ptr%a, ptr%b, i32 %c, i32 %n) { -; AVXVNNI-LABEL: mul_sext: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVXVNNI-NEXT: vpmovsxbw (%rsi), %ymm1 -; AVXVNNI-NEXT: vpmullw %ymm0, %ymm1, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpmovsxwd %xmm1, %ymm1 -; AVXVNNI-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVXVNNI-NEXT: vpaddd %ymm1, %ymm0, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 
-; AVXVNNI-NEXT: vmovd %xmm0, %eax -; AVXVNNI-NEXT: addl %edx, %eax -; AVXVNNI-NEXT: vzeroupper -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_sext: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVXVNNI-AVX-NEXT: vpmovsxbw (%rsi), %ymm1 +; AVXVNNI-AVX-NEXT: vpmullw %ymm0, %ymm1, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpmovsxwd %xmm1, %ymm1 +; AVXVNNI-AVX-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVXVNNI-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX-NEXT: addl %edx, %eax +; AVXVNNI-AVX-NEXT: vzeroupper +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_sext: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVXVNNI-AVX512-NEXT: vpmovsxbw (%rsi), %ymm1 +; AVXVNNI-AVX512-NEXT: vpmullw %ymm0, %ymm1, %ymm0 +; AVXVNNI-AVX512-NEXT: vpmovsxwd %ymm0, %zmm0 +; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVXVNNI-AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX512-NEXT: addl %edx, %eax +; AVXVNNI-AVX512-NEXT: vzeroupper +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512-LABEL: mul_sext: ; AVX512: # %bb.0: # %entry @@ -312,17 +335,30 @@ entry: declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>) define i32 @vpdpbusd_128(ptr%a, ptr%b, i32 %c, i32 %n) { -; AVXVNNI-LABEL: vpdpbusd_128: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; AVXVNNI-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVXVNNI-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3] -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3] -; AVXVNNI-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2 -; AVXVNNI-NEXT: vmovd %xmm2, %eax -; AVXVNNI-NEXT: addl %edx, %eax -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: vpdpbusd_128: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVXVNNI-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVXVNNI-AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3] +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3] +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2 +; AVXVNNI-AVX-NEXT: vmovd %xmm2, %eax +; AVXVNNI-AVX-NEXT: addl %edx, %eax +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: vpdpbusd_128: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; 
AVXVNNI-AVX512-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3] +; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3] +; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2 +; AVXVNNI-AVX512-NEXT: vmovd %xmm2, %eax +; AVXVNNI-AVX512-NEXT: addl %edx, %eax +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512VNNI-LABEL: vpdpbusd_128: ; AVX512VNNI: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/X86/dpbusd_const.ll b/llvm/test/CodeGen/X86/dpbusd_const.ll index 456e6e8..bb47df5 100644 --- a/llvm/test/CodeGen/X86/dpbusd_const.ll +++ b/llvm/test/CodeGen/X86/dpbusd_const.ll @@ -1,20 +1,21 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=ALL,AVXVNNI -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VNNI -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VLVNNI +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX512 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VNNI +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VLVNNI define i32 @mul_4xi8_zc_exceed(<4 x i8> %a, i32 %c) { -; ALL-LABEL: mul_4xi8_zc_exceed: -; ALL: # %bb.0: # %entry -; ALL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; ALL-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,128,0] -; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; ALL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] -; ALL-NEXT: vpaddd %xmm0, %xmm1, %xmm0 -; ALL-NEXT: vmovd %xmm0, %eax -; ALL-NEXT: addl %edi, %eax -; ALL-NEXT: retq +; CHECK-LABEL: mul_4xi8_zc_exceed: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; CHECK-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,128,0] +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] +; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 +; CHECK-NEXT: vmovd %xmm0, %eax +; CHECK-NEXT: addl %edi, %eax +; CHECK-NEXT: retq entry: %0 = zext <4 x i8> %a to <4 x i32> %1 = mul nsw <4 x i32> %0, <i32 0, i32 1, i32 2, i32 128> @@ -24,14 +25,24 @@ entry: } define i32 @mul_4xi8_zc(<4 x i8> %a, i32 %c) { -; AVXVNNI-LABEL: mul_4xi8_zc: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] -; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 -; AVXVNNI-NEXT: vmovd %xmm1, %eax -; AVXVNNI-NEXT: addl %edi, %eax -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_4xi8_zc: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = 
xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX-NEXT: addl %edi, %eax +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_4xi8_zc: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX512-NEXT: addl %edi, %eax +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512VNNI-LABEL: mul_4xi8_zc: ; AVX512VNNI: # %bb.0: # %entry @@ -62,16 +73,26 @@ entry: } define i32 @mul_4xi4_cz(<4 x i4> %a, i32 %c) { -; AVXVNNI-LABEL: mul_4xi4_cz: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] -; AVXVNNI-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] -; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 -; AVXVNNI-NEXT: vmovd %xmm1, %eax -; AVXVNNI-NEXT: addl %edi, %eax -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_4xi4_cz: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVXVNNI-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX-NEXT: addl %edi, %eax +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_4xi4_cz: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpmovdb %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX512-NEXT: addl %edi, %eax +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512VNNI-LABEL: mul_4xi4_cz: ; AVX512VNNI: # %bb.0: # %entry @@ -104,15 +125,26 @@ entry: } define i32 @mul_4xi8_cs(<4 x i8> %a, i32 %c) { -; AVXVNNI-LABEL: mul_4xi8_cs: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] -; AVXVNNI-NEXT: vmovd {{.*#+}} xmm2 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0] -; AVXVNNI-NEXT: {vex} vpdpbusd %xmm0, %xmm2, %xmm1 -; AVXVNNI-NEXT: vmovd %xmm1, %eax -; AVXVNNI-NEXT: addl %edi, %eax -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_4xi8_cs: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX-NEXT: vmovd {{.*#+}} xmm2 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0] +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %xmm0, %xmm2, %xmm1 +; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX-NEXT: addl %edi, %eax +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_4xi8_cs: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX512-NEXT: vmovd {{.*#+}} xmm1 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0] +; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVXVNNI-AVX512-NEXT: {vex} 
vpdpbusd %xmm0, %xmm1, %xmm2 +; AVXVNNI-AVX512-NEXT: vmovd %xmm2, %eax +; AVXVNNI-AVX512-NEXT: addl %edi, %eax +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512VNNI-LABEL: mul_4xi8_cs: ; AVX512VNNI: # %bb.0: # %entry @@ -145,17 +177,17 @@ entry: } define i32 @mul_4xi8_cs_exceed(<4 x i8> %a, i32 %c) { -; ALL-LABEL: mul_4xi8_cs_exceed: -; ALL: # %bb.0: # %entry -; ALL-NEXT: vpmovsxbd %xmm0, %xmm0 -; ALL-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,256,0] -; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; ALL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] -; ALL-NEXT: vpaddd %xmm0, %xmm1, %xmm0 -; ALL-NEXT: vmovd %xmm0, %eax -; ALL-NEXT: addl %edi, %eax -; ALL-NEXT: retq +; CHECK-LABEL: mul_4xi8_cs_exceed: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vpmovsxbd %xmm0, %xmm0 +; CHECK-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,256,0] +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] +; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 +; CHECK-NEXT: vmovd %xmm0, %eax +; CHECK-NEXT: addl %edi, %eax +; CHECK-NEXT: retq entry: %0 = sext <4 x i8> %a to <4 x i32> %1 = mul nsw <4 x i32> <i32 0, i32 1, i32 2, i32 256>, %0 @@ -265,24 +297,44 @@ entry: } define i32 @mul_64xi8_zc(<64 x i8> %a, i32 %c) { -; AVXVNNI-LABEL: mul_64xi8_zc: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64] -; AVXVNNI-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; AVXVNNI-NEXT: vpxor %xmm4, %xmm4, %xmm4 -; AVXVNNI-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4 -; AVXVNNI-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3 -; AVXVNNI-NEXT: vpaddd %ymm4, %ymm3, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vmovd %xmm0, %eax -; AVXVNNI-NEXT: addl %edi, %eax -; AVXVNNI-NEXT: vzeroupper -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_64xi8_zc: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64] +; AVXVNNI-AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVXVNNI-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4 +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3 +; AVXVNNI-AVX-NEXT: vpaddd %ymm4, %ymm3, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX-NEXT: addl %edi, %eax +; AVXVNNI-AVX-NEXT: vzeroupper +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_64xi8_zc: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVXVNNI-AVX512-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64] +; AVXVNNI-AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVXVNNI-AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4 +; AVXVNNI-AVX512-NEXT: {vex} 
vpdpbusd %ymm2, %ymm0, %ymm3 +; AVXVNNI-AVX512-NEXT: vpaddd %ymm4, %ymm3, %ymm0 +; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX512-NEXT: addl %edi, %eax +; AVXVNNI-AVX512-NEXT: vzeroupper +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512-LABEL: mul_64xi8_zc: ; AVX512: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/X86/ftrunc.ll b/llvm/test/CodeGen/X86/ftrunc.ll index 3ed9858..9095fb1 100644 --- a/llvm/test/CodeGen/X86/ftrunc.ll +++ b/llvm/test/CodeGen/X86/ftrunc.ll @@ -243,7 +243,7 @@ define <4 x double> @trunc_unsigned_v4f64(<4 x double> %x) #0 { ret <4 x double> %r } -define float @trunc_signed_f32_no_fast_math(float %x) { +define float @trunc_signed_f32_no_fast_math(float %x) nounwind { ; SSE-LABEL: trunc_signed_f32_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttps2dq %xmm0, %xmm0 @@ -259,14 +259,12 @@ define float @trunc_signed_f32_no_fast_math(float %x) { ; X86-AVX1-LABEL: trunc_signed_f32_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %eax -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 ; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0 ; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX1-NEXT: vmovss %xmm0, (%esp) ; X86-AVX1-NEXT: flds (%esp) ; X86-AVX1-NEXT: popl %eax -; X86-AVX1-NEXT: .cfi_def_cfa_offset 4 ; X86-AVX1-NEXT: retl %i = fptosi float %x to i32 %r = sitofp i32 %i to float @@ -306,7 +304,7 @@ define float @trunc_signed_f32_nsz(float %x) #0 { ret float %r } -define double @trunc_signed32_f64_no_fast_math(double %x) { +define double @trunc_signed32_f64_no_fast_math(double %x) nounwind { ; SSE-LABEL: trunc_signed32_f64_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttpd2dq %xmm0, %xmm0 @@ -322,10 +320,7 @@ define double @trunc_signed32_f64_no_fast_math(double %x) { ; X86-AVX1-LABEL: trunc_signed32_f64_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 -; X86-AVX1-NEXT: .cfi_offset %ebp, -8 ; X86-AVX1-NEXT: movl %esp, %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp ; X86-AVX1-NEXT: andl $-8, %esp ; X86-AVX1-NEXT: subl $8, %esp ; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero @@ -335,7 +330,6 @@ define double @trunc_signed32_f64_no_fast_math(double %x) { ; X86-AVX1-NEXT: fldl (%esp) ; X86-AVX1-NEXT: movl %ebp, %esp ; X86-AVX1-NEXT: popl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4 ; X86-AVX1-NEXT: retl %i = fptosi double %x to i32 %r = sitofp i32 %i to double @@ -377,7 +371,7 @@ define double @trunc_signed32_f64_nsz(double %x) #0 { ret double %r } -define double @trunc_f32_signed32_f64_no_fast_math(float %x) { +define double @trunc_f32_signed32_f64_no_fast_math(float %x) nounwind { ; SSE-LABEL: trunc_f32_signed32_f64_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttps2dq %xmm0, %xmm0 @@ -393,10 +387,7 @@ define double @trunc_f32_signed32_f64_no_fast_math(float %x) { ; X86-AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 -; X86-AVX1-NEXT: .cfi_offset %ebp, -8 ; X86-AVX1-NEXT: movl %esp, %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp ; X86-AVX1-NEXT: andl $-8, %esp ; X86-AVX1-NEXT: subl $8, %esp ; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero @@ 
-406,7 +397,6 @@ define double @trunc_f32_signed32_f64_no_fast_math(float %x) { ; X86-AVX1-NEXT: fldl (%esp) ; X86-AVX1-NEXT: movl %ebp, %esp ; X86-AVX1-NEXT: popl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4 ; X86-AVX1-NEXT: retl %i = fptosi float %x to i32 %r = sitofp i32 %i to double @@ -445,7 +435,7 @@ define double @trunc_f32_signed32_f64_nsz(float %x) #0 { ret double %r } -define float @trunc_f64_signed32_f32_no_fast_math(double %x) { +define float @trunc_f64_signed32_f32_no_fast_math(double %x) nounwind { ; SSE-LABEL: trunc_f64_signed32_f32_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttpd2dq %xmm0, %xmm0 @@ -461,14 +451,12 @@ define float @trunc_f64_signed32_f32_no_fast_math(double %x) { ; X86-AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %eax -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 ; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; X86-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0 ; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX1-NEXT: vmovss %xmm0, (%esp) ; X86-AVX1-NEXT: flds (%esp) ; X86-AVX1-NEXT: popl %eax -; X86-AVX1-NEXT: .cfi_def_cfa_offset 4 ; X86-AVX1-NEXT: retl %i = fptosi double %x to i32 %r = sitofp i32 %i to float @@ -503,7 +491,7 @@ define float @trunc_f64_signed32_f32_nsz(double %x) #0 { ret float %r } -define double @trunc_signed_f64_no_fast_math(double %x) { +define double @trunc_signed_f64_no_fast_math(double %x) nounwind { ; SSE-LABEL: trunc_signed_f64_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttsd2si %xmm0, %rax @@ -520,10 +508,7 @@ define double @trunc_signed_f64_no_fast_math(double %x) { ; X86-AVX1-LABEL: trunc_signed_f64_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 -; X86-AVX1-NEXT: .cfi_offset %ebp, -8 ; X86-AVX1-NEXT: movl %esp, %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp ; X86-AVX1-NEXT: andl $-8, %esp ; X86-AVX1-NEXT: subl $24, %esp ; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero @@ -537,7 +522,6 @@ define double @trunc_signed_f64_no_fast_math(double %x) { ; X86-AVX1-NEXT: fldl {{[0-9]+}}(%esp) ; X86-AVX1-NEXT: movl %ebp, %esp ; X86-AVX1-NEXT: popl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4 ; X86-AVX1-NEXT: retl %i = fptosi double %x to i64 %r = sitofp i64 %i to double diff --git a/llvm/test/CodeGen/X86/isint.ll b/llvm/test/CodeGen/X86/isint.ll index 8a56f49..8c11fe1 100644 --- a/llvm/test/CodeGen/X86/isint.ll +++ b/llvm/test/CodeGen/X86/isint.ll @@ -1,29 +1,29 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=CHECK64 %s -; RUN: llc < %s -mtriple=i686-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=CHECK32 %s +; RUN: llc < %s -mtriple=x86_64-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=X64 %s +; RUN: llc < %s -mtriple=i686-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=X86 %s ; PR19059 define i32 @isint_return(double %d) nounwind { -; CHECK64-LABEL: isint_return: -; CHECK64: # %bb.0: -; CHECK64-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK64-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK64-NEXT: cmpeqsd %xmm0, %xmm1 -; CHECK64-NEXT: movq %xmm1, %rax -; CHECK64-NEXT: andl $1, %eax -; CHECK64-NEXT: # kill: def $eax killed $eax killed $rax -; CHECK64-NEXT: retq +; X64-LABEL: isint_return: +; X64: # %bb.0: +; X64-NEXT: cvttpd2dq %xmm0, %xmm1 +; X64-NEXT: cvtdq2pd %xmm1, %xmm1 +; X64-NEXT: cmpeqsd %xmm0, %xmm1 +; X64-NEXT: movq %xmm1, %rax +; X64-NEXT: andl $1, %eax +; X64-NEXT: # kill: def $eax killed $eax killed $rax +; X64-NEXT: retq ; -; 
CHECK32-LABEL: isint_return: -; CHECK32: # %bb.0: -; CHECK32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK32-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK32-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK32-NEXT: cmpeqsd %xmm0, %xmm1 -; CHECK32-NEXT: movd %xmm1, %eax -; CHECK32-NEXT: andl $1, %eax -; CHECK32-NEXT: retl +; X86-LABEL: isint_return: +; X86: # %bb.0: +; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; X86-NEXT: cvttpd2dq %xmm0, %xmm1 +; X86-NEXT: cvtdq2pd %xmm1, %xmm1 +; X86-NEXT: cmpeqsd %xmm0, %xmm1 +; X86-NEXT: movd %xmm1, %eax +; X86-NEXT: andl $1, %eax +; X86-NEXT: retl %i = fptosi double %d to i32 %e = sitofp i32 %i to double %c = fcmp oeq double %d, %e @@ -32,24 +32,24 @@ define i32 @isint_return(double %d) nounwind { } define i32 @isint_float_return(float %f) nounwind { -; CHECK64-LABEL: isint_float_return: -; CHECK64: # %bb.0: -; CHECK64-NEXT: cvttps2dq %xmm0, %xmm1 -; CHECK64-NEXT: cvtdq2ps %xmm1, %xmm1 -; CHECK64-NEXT: cmpeqss %xmm0, %xmm1 -; CHECK64-NEXT: movd %xmm1, %eax -; CHECK64-NEXT: andl $1, %eax -; CHECK64-NEXT: retq +; X64-LABEL: isint_float_return: +; X64: # %bb.0: +; X64-NEXT: cvttps2dq %xmm0, %xmm1 +; X64-NEXT: cvtdq2ps %xmm1, %xmm1 +; X64-NEXT: cmpeqss %xmm0, %xmm1 +; X64-NEXT: movd %xmm1, %eax +; X64-NEXT: andl $1, %eax +; X64-NEXT: retq ; -; CHECK32-LABEL: isint_float_return: -; CHECK32: # %bb.0: -; CHECK32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; CHECK32-NEXT: cvttps2dq %xmm0, %xmm1 -; CHECK32-NEXT: cvtdq2ps %xmm1, %xmm1 -; CHECK32-NEXT: cmpeqss %xmm0, %xmm1 -; CHECK32-NEXT: movd %xmm1, %eax -; CHECK32-NEXT: andl $1, %eax -; CHECK32-NEXT: retl +; X86-LABEL: isint_float_return: +; X86: # %bb.0: +; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; X86-NEXT: cvttps2dq %xmm0, %xmm1 +; X86-NEXT: cvtdq2ps %xmm1, %xmm1 +; X86-NEXT: cmpeqss %xmm0, %xmm1 +; X86-NEXT: movd %xmm1, %eax +; X86-NEXT: andl $1, %eax +; X86-NEXT: retl %i = fptosi float %f to i32 %g = sitofp i32 %i to float %c = fcmp oeq float %f, %g @@ -60,32 +60,32 @@ define i32 @isint_float_return(float %f) nounwind { declare void @foo() define void @isint_branch(double %d) nounwind { -; CHECK64-LABEL: isint_branch: -; CHECK64: # %bb.0: -; CHECK64-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK64-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK64-NEXT: ucomisd %xmm1, %xmm0 -; CHECK64-NEXT: jne .LBB2_2 -; CHECK64-NEXT: jp .LBB2_2 -; CHECK64-NEXT: # %bb.1: # %true -; CHECK64-NEXT: pushq %rax -; CHECK64-NEXT: callq foo@PLT -; CHECK64-NEXT: popq %rax -; CHECK64-NEXT: .LBB2_2: # %false -; CHECK64-NEXT: retq +; X64-LABEL: isint_branch: +; X64: # %bb.0: +; X64-NEXT: cvttpd2dq %xmm0, %xmm1 +; X64-NEXT: cvtdq2pd %xmm1, %xmm1 +; X64-NEXT: ucomisd %xmm1, %xmm0 +; X64-NEXT: jne .LBB2_2 +; X64-NEXT: jp .LBB2_2 +; X64-NEXT: # %bb.1: # %true +; X64-NEXT: pushq %rax +; X64-NEXT: callq foo@PLT +; X64-NEXT: popq %rax +; X64-NEXT: .LBB2_2: # %false +; X64-NEXT: retq ; -; CHECK32-LABEL: isint_branch: -; CHECK32: # %bb.0: -; CHECK32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK32-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK32-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK32-NEXT: ucomisd %xmm1, %xmm0 -; CHECK32-NEXT: jne .LBB2_2 -; CHECK32-NEXT: jp .LBB2_2 -; CHECK32-NEXT: # %bb.1: # %true -; CHECK32-NEXT: calll foo@PLT -; CHECK32-NEXT: .LBB2_2: # %false -; CHECK32-NEXT: retl +; X86-LABEL: isint_branch: +; X86: # %bb.0: +; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; X86-NEXT: cvttpd2dq %xmm0, %xmm1 +; X86-NEXT: cvtdq2pd %xmm1, %xmm1 +; X86-NEXT: ucomisd %xmm1, %xmm0 +; X86-NEXT: jne .LBB2_2 +; X86-NEXT: jp .LBB2_2 +; X86-NEXT: # %bb.1: # %true +; 
X86-NEXT: calll foo@PLT +; X86-NEXT: .LBB2_2: # %false +; X86-NEXT: retl %i = fptosi double %d to i32 %e = sitofp i32 %i to double %c = fcmp oeq double %d, %e diff --git a/llvm/test/CodeGen/X86/known-signbits-shl.ll b/llvm/test/CodeGen/X86/known-signbits-shl.ll index 473fecc..57d557d 100644 --- a/llvm/test/CodeGen/X86/known-signbits-shl.ll +++ b/llvm/test/CodeGen/X86/known-signbits-shl.ll @@ -137,7 +137,7 @@ define void @computeNumSignBits_shl_zext_vec_3(<2 x i8> %x, ptr %p) nounwind { ; X64-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; X64-NEXT: por %xmm2, %xmm1 ; X64-NEXT: movdqa %xmm0, %xmm2 -; X64-NEXT: paddw %xmm0, %xmm2 +; X64-NEXT: paddw %xmm2, %xmm2 ; X64-NEXT: movdqa %xmm2, %xmm3 ; X64-NEXT: psraw $1, %xmm3 ; X64-NEXT: pcmpeqw %xmm0, %xmm3 diff --git a/llvm/test/CodeGen/X86/lea-16bit.ll b/llvm/test/CodeGen/X86/lea-16bit.ll index cec29ab..40da01d 100644 --- a/llvm/test/CodeGen/X86/lea-16bit.ll +++ b/llvm/test/CodeGen/X86/lea-16bit.ll @@ -13,7 +13,8 @@ define i16 @lea16bit(i16 %in) { ; NDD-LABEL: lea16bit: ; NDD: # %bb.0: ; NDD-NEXT: # kill: def $edi killed $edi def $rdi -; NDD-NEXT: leaw 1(%rdi,%rdi), %ax +; NDD-NEXT: leal 1(%rdi,%rdi), %eax +; NDD-NEXT: # kill: def $ax killed $ax killed $eax ; NDD-NEXT: retq %shl = shl i16 %in, 1 %or = or i16 %shl, 1 diff --git a/llvm/test/CodeGen/X86/lea-8bit.ll b/llvm/test/CodeGen/X86/lea-8bit.ll index 98222df..fc295f7 100644 --- a/llvm/test/CodeGen/X86/lea-8bit.ll +++ b/llvm/test/CodeGen/X86/lea-8bit.ll @@ -14,7 +14,8 @@ define i8 @lea8bit(i8 %in) { ; NDD-LABEL: lea8bit: ; NDD: # %bb.0: ; NDD-NEXT: # kill: def $edi killed $edi def $rdi -; NDD-NEXT: leab 1(%rdi,%rdi), %al +; NDD-NEXT: leal 1(%rdi,%rdi), %eax +; NDD-NEXT: # kill: def $al killed $al killed $eax ; NDD-NEXT: retq %shl = shl i8 %in, 1 %or = or i8 %shl, 1 diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll index 4e6f666..4cde581 100644 --- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll +++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll @@ -4806,9 +4806,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16 ; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 -; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-KNL-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1} ; X64-KNL-NEXT: vmovaps %zmm1, %zmm0 ; X64-KNL-NEXT: retq @@ -4830,9 +4829,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16 ; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-SMALL-NEXT: retq @@ -4842,10 +4840,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16 ; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-LARGE-NEXT: movabsq 
${{\.?LCPI[0-9]+_[0-9]+}}, %rax ; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0 -; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-LARGE-NEXT: retq @@ -4875,9 +4872,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a ; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 -; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-KNL-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1} ; X64-KNL-NEXT: vmovaps %zmm1, %zmm0 ; X64-KNL-NEXT: retq @@ -4899,9 +4895,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a ; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-SMALL-NEXT: retq @@ -4911,10 +4906,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a ; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax ; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0 -; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-LARGE-NEXT: retq @@ -4944,9 +4938,8 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair( ; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 -; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0 -; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm2 +; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0 +; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2 ; X64-KNL-NEXT: kmovw %k1, %k2 ; X64-KNL-NEXT: vmovaps %zmm1, %zmm0 ; X64-KNL-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2} @@ -4972,9 +4965,8 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair( ; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0 -; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm2 +; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0 +; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2 ; X64-SKX-SMALL-NEXT: kmovw %k1, %k2 ; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-SMALL-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2} @@ -4986,10 +4978,9 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair( ; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; 
X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax -; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0 -; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm2 +; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm2 ; X64-SKX-LARGE-NEXT: kmovw %k1, %k2 ; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-LARGE-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2} diff --git a/llvm/test/CodeGen/X86/negative-sin.ll b/llvm/test/CodeGen/X86/negative-sin.ll index f24507d..4836da2 100644 --- a/llvm/test/CodeGen/X86/negative-sin.ll +++ b/llvm/test/CodeGen/X86/negative-sin.ll @@ -82,18 +82,13 @@ define double @semi_strict2(double %e) nounwind { ret double %h } -; FIXME: -; Auto-upgrade function attribute to IR-level fast-math-flags. - -define double @fn_attr(double %e) nounwind #0 { -; CHECK-LABEL: fn_attr: +define double @nsz_flag(double %e) nounwind { +; CHECK-LABEL: nsz_flag: ; CHECK: # %bb.0: ; CHECK-NEXT: jmp sin@PLT # TAILCALL - %f = fsub double 0.0, %e - %g = call double @sin(double %f) readonly - %h = fsub double 0.0, %g + %f = fsub nsz double 0.0, %e + %g = call nsz double @sin(double %f) readonly + %h = fsub nsz double 0.0, %g ret double %h } -attributes #0 = { "unsafe-fp-math"="true" "no-signed-zeros-fp-math"="true" } - diff --git a/llvm/test/CodeGen/X86/oddsubvector.ll b/llvm/test/CodeGen/X86/oddsubvector.ll index f539830..5df1867 100644 --- a/llvm/test/CodeGen/X86/oddsubvector.ll +++ b/llvm/test/CodeGen/X86/oddsubvector.ll @@ -155,10 +155,10 @@ define <16 x i32> @PR42819(ptr %a0) { define void @PR42833() { ; SSE2-LABEL: PR42833: ; SSE2: # %bb.0: +; SSE2-NEXT: movl b(%rip), %eax ; SSE2-NEXT: movdqa c+144(%rip), %xmm2 ; SSE2-NEXT: movdqa c+128(%rip), %xmm0 -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: addl b(%rip), %eax +; SSE2-NEXT: addl c+128(%rip), %eax ; SSE2-NEXT: movd %eax, %xmm1 ; SSE2-NEXT: movd %eax, %xmm3 ; SSE2-NEXT: paddd %xmm0, %xmm3 @@ -166,7 +166,7 @@ define void @PR42833() { ; SSE2-NEXT: psubd %xmm2, %xmm4 ; SSE2-NEXT: paddd %xmm2, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: paddd %xmm0, %xmm5 +; SSE2-NEXT: paddd %xmm5, %xmm5 ; SSE2-NEXT: movss {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3] ; SSE2-NEXT: movdqa %xmm2, c+144(%rip) ; SSE2-NEXT: movaps %xmm5, c+128(%rip) @@ -191,17 +191,17 @@ define void @PR42833() { ; ; SSE42-LABEL: PR42833: ; SSE42: # %bb.0: +; SSE42-NEXT: movl b(%rip), %eax ; SSE42-NEXT: movdqa c+144(%rip), %xmm1 ; SSE42-NEXT: movdqa c+128(%rip), %xmm0 -; SSE42-NEXT: movd %xmm0, %eax -; SSE42-NEXT: addl b(%rip), %eax +; SSE42-NEXT: addl c+128(%rip), %eax ; SSE42-NEXT: movd %eax, %xmm2 ; SSE42-NEXT: paddd %xmm0, %xmm2 ; SSE42-NEXT: movdqa d+144(%rip), %xmm3 ; SSE42-NEXT: psubd %xmm1, %xmm3 ; SSE42-NEXT: paddd %xmm1, %xmm1 ; SSE42-NEXT: movdqa %xmm0, %xmm4 -; SSE42-NEXT: paddd %xmm0, %xmm4 +; SSE42-NEXT: paddd %xmm4, %xmm4 ; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1],xmm4[2,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm1, c+144(%rip) ; SSE42-NEXT: movdqa %xmm4, c+128(%rip) diff --git a/llvm/test/CodeGen/X86/pr62286.ll b/llvm/test/CodeGen/X86/pr62286.ll index ce03f8f..161e965 100644 --- a/llvm/test/CodeGen/X86/pr62286.ll +++ b/llvm/test/CodeGen/X86/pr62286.ll @@ -26,27 +26,33 @@ define i64 @PR62286(i32 %a) { ; AVX1-LABEL: PR62286: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovd %edi, %xmm0 -; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = 
xmm0[0,0,0,0] +; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6,7] ; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7] -; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] -; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1 -; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] ; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: PR62286: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovd %edi, %xmm0 -; AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm1 -; AVX2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3] -; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0] +; AVX2-NEXT: vpaddd %ymm0, %ymm0, %ymm1 +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 @@ -59,12 +65,12 @@ define i64 @PR62286(i32 %a) { ; AVX512-LABEL: PR62286: ; AVX512: # %bb.0: ; AVX512-NEXT: vmovd %edi, %xmm0 -; AVX512-NEXT: movb $8, %al +; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0] +; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm1 +; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512-NEXT: movw $4369, %ax # imm = 0x1111 ; AVX512-NEXT: kmovd %eax, %k1 -; AVX512-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z} -; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm0 -; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] +; AVX512-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1} ; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0 ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0 diff --git a/llvm/test/CodeGen/X86/pr74736.ll b/llvm/test/CodeGen/X86/pr74736.ll index ceccee0..5895526 100644 --- a/llvm/test/CodeGen/X86/pr74736.ll +++ b/llvm/test/CodeGen/X86/pr74736.ll @@ -6,8 +6,8 @@ define void @main(<16 x i32> %0, i32 %1) { ; SSE-LABEL: main: ; SSE: # %bb.0: # %entry ; SSE-NEXT: movd %edi, %xmm4 -; SSE-NEXT: movss {{.*#+}} xmm0 = [1,0,0,0] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm4[1,0] +; SSE-NEXT: movsd {{.*#+}} xmm0 = [0,1,0,0] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,0] ; SSE-NEXT: paddd %xmm0, %xmm0 ; SSE-NEXT: paddd %xmm1, %xmm1 ; SSE-NEXT: paddd %xmm3, %xmm3 @@ -32,20 +32,20 @@ define void @main(<16 x i32> %0, i32 %1) { ; AVX-LABEL: main: ; AVX: # %bb.0: # %entry ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm0[1,2,3] ; AVX-NEXT: movl $1, %eax ; AVX-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2 ; AVX-NEXT: vpinsrd $3, %edi, %xmm2, 
%xmm2 -; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] -; AVX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 -; AVX-NEXT: vpaddd %ymm1, %ymm1, %ymm1 -; AVX-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,1,3,3,5,5,7] -; AVX-NEXT: vpermd %ymm0, %ymm2, %ymm2 +; AVX-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; AVX-NEXT: vpaddd %ymm2, %ymm2, %ymm2 +; AVX-NEXT: vpaddd %ymm1, %ymm1, %ymm3 ; AVX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] ; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,3,3,3,7,7,7,7] -; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,5,7] +; AVX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 +; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm3[0,1,1,3,4,5,5,7] ; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] -; AVX-NEXT: vpxor %ymm0, %ymm2, %ymm0 +; AVX-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,1,1,3,3,5,5,7] +; AVX-NEXT: vpermd %ymm2, %ymm1, %ymm1 +; AVX-NEXT: vpxor %ymm0, %ymm1, %ymm0 ; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] diff --git a/llvm/test/CodeGen/X86/setoeq.ll b/llvm/test/CodeGen/X86/setoeq.ll index f0addf4..131e279 100644 --- a/llvm/test/CodeGen/X86/setoeq.ll +++ b/llvm/test/CodeGen/X86/setoeq.ll @@ -1,40 +1,532 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s - -define zeroext i8 @t(double %x) nounwind readnone { -; CHECK-LABEL: t: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK-NEXT: cmpeqsd %xmm0, %xmm1 -; CHECK-NEXT: movd %xmm1, %eax -; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: # kill: def $al killed $al killed $eax -; CHECK-NEXT: retl -entry: - %0 = fptosi double %x to i32 ; <i32> [#uses=1] - %1 = sitofp i32 %0 to double ; <double> [#uses=1] - %2 = fcmp oeq double %1, %x ; <i1> [#uses=1] - %retval12 = zext i1 %2 to i8 ; <i8> [#uses=1] - ret i8 %retval12 -} - -define zeroext i8 @u(double %x) nounwind readnone { -; CHECK-LABEL: u: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK-NEXT: cmpneqsd %xmm0, %xmm1 -; CHECK-NEXT: movd %xmm1, %eax -; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: # kill: def $al killed $al killed $eax -; CHECK-NEXT: retl -entry: - %0 = fptosi double %x to i32 ; <i32> [#uses=1] - %1 = sitofp i32 %0 to double ; <double> [#uses=1] - %2 = fcmp une double %1, %x ; <i1> [#uses=1] - %retval12 = zext i1 %2 to i8 ; <i8> [#uses=1] +; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE +; RUN: llc < %s -mtriple=i686-- -mattr=+avx | FileCheck %s --check-prefixes=AVX +; RUN: llc < %s -mtriple=i686-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX512 + +define zeroext i8 @oeq_f64_i32(double %x) nounwind readnone { +; SSE-LABEL: oeq_f64_i32: +; SSE: # %bb.0: # %entry +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: cvttpd2dq %xmm0, %xmm1 +; SSE-NEXT: cvtdq2pd %xmm1, %xmm1 +; SSE-NEXT: cmpeqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: retl +; +; AVX-LABEL: oeq_f64_i32: +; AVX: # %bb.0: # %entry +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vcvttpd2dq %xmm0, %xmm1 +; AVX-NEXT: vcvtdq2pd %xmm1, %xmm1 +; AVX-NEXT: vcmpeqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; 
AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: retl +; +; AVX512-LABEL: oeq_f64_i32: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2dq %xmm0, %xmm1 +; AVX512-NEXT: vcvtdq2pd %xmm1, %xmm1 +; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: retl +entry: + %0 = fptosi double %x to i32 + %1 = sitofp i32 %0 to double + %2 = fcmp oeq double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @oeq_f64_u32(double %x) nounwind readnone { +; SSE-LABEL: oeq_f64_u32: +; SSE: # %bb.0: # %entry +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: cvttsd2si %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: movapd %xmm0, %xmm1 +; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: cvttsd2si %xmm1, %edx +; SSE-NEXT: andl %ecx, %edx +; SSE-NEXT: orl %eax, %edx +; SSE-NEXT: movd %edx, %xmm1 +; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: cmpeqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: retl +; +; AVX-LABEL: oeq_f64_u32: +; AVX: # %bb.0: # %entry +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vcvttsd2si %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarl $31, %ecx +; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 +; AVX-NEXT: vcvttsd2si %xmm1, %edx +; AVX-NEXT: andl %ecx, %edx +; AVX-NEXT: orl %eax, %edx +; AVX-NEXT: vmovd %edx, %xmm1 +; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vcmpeqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: retl +; +; AVX512-LABEL: oeq_f64_u32: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttsd2usi %xmm0, %eax +; AVX512-NEXT: vcvtusi2sd %eax, %xmm7, %xmm1 +; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: retl +entry: + %0 = fptoui double %x to i32 + %1 = uitofp i32 %0 to double + %2 = fcmp oeq double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @oeq_f64_i64(double %x) nounwind readnone { +; SSE-LABEL: oeq_f64_i64: +; SSE: # %bb.0: # %entry +; SSE-NEXT: pushl %ebp +; SSE-NEXT: movl %esp, %ebp +; SSE-NEXT: andl $-8, %esp +; SSE-NEXT: subl $32, %esp +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp) +; SSE-NEXT: fldl {{[0-9]+}}(%esp) +; SSE-NEXT: fnstcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; SSE-NEXT: orl $3072, %eax # imm = 0xC00 +; SSE-NEXT: movw %ax, {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: fistpll {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: movlps %xmm1, {{[0-9]+}}(%esp) +; SSE-NEXT: fildll {{[0-9]+}}(%esp) +; SSE-NEXT: fstpl {{[0-9]+}}(%esp) +; SSE-NEXT: cmpeqsd {{[0-9]+}}(%esp), %xmm0 +; SSE-NEXT: movd %xmm0, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: movl %ebp, %esp +; SSE-NEXT: popl %ebp +; SSE-NEXT: retl +; +; AVX-LABEL: oeq_f64_i64: +; AVX: # %bb.0: # %entry +; AVX-NEXT: pushl %ebp +; 
AVX-NEXT: movl %esp, %ebp +; AVX-NEXT: andl $-8, %esp +; AVX-NEXT: subl $24, %esp +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovsd %xmm0, (%esp) +; AVX-NEXT: fldl (%esp) +; AVX-NEXT: fisttpll (%esp) +; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp) +; AVX-NEXT: fildll {{[0-9]+}}(%esp) +; AVX-NEXT: fstpl {{[0-9]+}}(%esp) +; AVX-NEXT: vcmpeqsd {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: movl %ebp, %esp +; AVX-NEXT: popl %ebp +; AVX-NEXT: retl +; +; AVX512-LABEL: oeq_f64_i64: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2qq %xmm0, %xmm1 +; AVX512-NEXT: vcvtqq2pd %ymm1, %ymm1 +; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retl +entry: + %0 = fptosi double %x to i64 + %1 = sitofp i64 %0 to double + %2 = fcmp oeq double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @oeq_f64_u64(double %x) nounwind readnone { +; SSE-LABEL: oeq_f64_u64: +; SSE: # %bb.0: # %entry +; SSE-NEXT: pushl %ebp +; SSE-NEXT: movl %esp, %ebp +; SSE-NEXT: andl $-8, %esp +; SSE-NEXT: subl $16, %esp +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0] +; SSE-NEXT: ucomisd %xmm0, %xmm1 +; SSE-NEXT: jbe .LBB3_2 +; SSE-NEXT: # %bb.1: # %entry +; SSE-NEXT: xorpd %xmm1, %xmm1 +; SSE-NEXT: .LBB3_2: # %entry +; SSE-NEXT: movapd %xmm0, %xmm2 +; SSE-NEXT: subsd %xmm1, %xmm2 +; SSE-NEXT: movsd %xmm2, {{[0-9]+}}(%esp) +; SSE-NEXT: setbe %al +; SSE-NEXT: fldl {{[0-9]+}}(%esp) +; SSE-NEXT: fnstcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; SSE-NEXT: orl $3072, %ecx # imm = 0xC00 +; SSE-NEXT: movw %cx, {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: fistpll {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzbl %al, %eax +; SSE-NEXT: shll $31, %eax +; SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax +; SSE-NEXT: movd %eax, %xmm1 +; SSE-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1] +; SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 +; SSE-NEXT: movapd %xmm2, %xmm1 +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] +; SSE-NEXT: addsd %xmm2, %xmm1 +; SSE-NEXT: cmpeqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: movl %ebp, %esp +; SSE-NEXT: popl %ebp +; SSE-NEXT: retl +; +; AVX-LABEL: oeq_f64_u64: +; AVX: # %bb.0: # %entry +; AVX-NEXT: pushl %ebp +; AVX-NEXT: movl %esp, %ebp +; AVX-NEXT: andl $-8, %esp +; AVX-NEXT: subl $8, %esp +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0] +; AVX-NEXT: vucomisd %xmm0, %xmm1 +; AVX-NEXT: jbe .LBB3_2 +; AVX-NEXT: # %bb.1: # %entry +; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: .LBB3_2: # %entry +; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vmovsd %xmm1, (%esp) +; AVX-NEXT: fldl (%esp) +; AVX-NEXT: fisttpll (%esp) +; AVX-NEXT: setbe %al +; AVX-NEXT: movzbl %al, %eax +; AVX-NEXT: shll $31, %eax +; AVX-NEXT: xorl {{[0-9]+}}(%esp), %eax +; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-NEXT: vpinsrd $1, 
%eax, %xmm1, %xmm1 +; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] +; AVX-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0] +; AVX-NEXT: vaddsd %xmm1, %xmm2, %xmm1 +; AVX-NEXT: vcmpeqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: movl %ebp, %esp +; AVX-NEXT: popl %ebp +; AVX-NEXT: retl +; +; AVX512-LABEL: oeq_f64_u64: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2uqq %xmm0, %xmm1 +; AVX512-NEXT: vcvtuqq2pd %ymm1, %ymm1 +; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retl +entry: + %0 = fptoui double %x to i64 + %1 = uitofp i64 %0 to double + %2 = fcmp oeq double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @une_f64_i32(double %x) nounwind readnone { +; SSE-LABEL: une_f64_i32: +; SSE: # %bb.0: # %entry +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: cvttpd2dq %xmm0, %xmm1 +; SSE-NEXT: cvtdq2pd %xmm1, %xmm1 +; SSE-NEXT: cmpneqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: retl +; +; AVX-LABEL: une_f64_i32: +; AVX: # %bb.0: # %entry +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vcvttpd2dq %xmm0, %xmm1 +; AVX-NEXT: vcvtdq2pd %xmm1, %xmm1 +; AVX-NEXT: vcmpneqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: retl +; +; AVX512-LABEL: une_f64_i32: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2dq %xmm0, %xmm1 +; AVX512-NEXT: vcvtdq2pd %xmm1, %xmm1 +; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: retl +entry: + %0 = fptosi double %x to i32 + %1 = sitofp i32 %0 to double + %2 = fcmp une double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @une_f64_u32(double %x) nounwind readnone { +; SSE-LABEL: une_f64_u32: +; SSE: # %bb.0: # %entry +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: cvttsd2si %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: movapd %xmm0, %xmm1 +; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: cvttsd2si %xmm1, %edx +; SSE-NEXT: andl %ecx, %edx +; SSE-NEXT: orl %eax, %edx +; SSE-NEXT: movd %edx, %xmm1 +; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: cmpneqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: retl +; +; AVX-LABEL: une_f64_u32: +; AVX: # %bb.0: # %entry +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vcvttsd2si %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarl $31, %ecx +; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 +; AVX-NEXT: vcvttsd2si %xmm1, %edx +; AVX-NEXT: andl %ecx, %edx +; AVX-NEXT: orl %eax, %edx +; AVX-NEXT: vmovd %edx, %xmm1 +; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vcmpneqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; 
AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: retl +; +; AVX512-LABEL: une_f64_u32: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttsd2usi %xmm0, %eax +; AVX512-NEXT: vcvtusi2sd %eax, %xmm7, %xmm1 +; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: retl +entry: + %0 = fptoui double %x to i32 + %1 = uitofp i32 %0 to double + %2 = fcmp une double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @une_f64_i64(double %x) nounwind readnone { +; SSE-LABEL: une_f64_i64: +; SSE: # %bb.0: # %entry +; SSE-NEXT: pushl %ebp +; SSE-NEXT: movl %esp, %ebp +; SSE-NEXT: andl $-8, %esp +; SSE-NEXT: subl $32, %esp +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp) +; SSE-NEXT: fldl {{[0-9]+}}(%esp) +; SSE-NEXT: fnstcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; SSE-NEXT: orl $3072, %eax # imm = 0xC00 +; SSE-NEXT: movw %ax, {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: fistpll {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: movlps %xmm1, {{[0-9]+}}(%esp) +; SSE-NEXT: fildll {{[0-9]+}}(%esp) +; SSE-NEXT: fstpl {{[0-9]+}}(%esp) +; SSE-NEXT: cmpneqsd {{[0-9]+}}(%esp), %xmm0 +; SSE-NEXT: movd %xmm0, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: movl %ebp, %esp +; SSE-NEXT: popl %ebp +; SSE-NEXT: retl +; +; AVX-LABEL: une_f64_i64: +; AVX: # %bb.0: # %entry +; AVX-NEXT: pushl %ebp +; AVX-NEXT: movl %esp, %ebp +; AVX-NEXT: andl $-8, %esp +; AVX-NEXT: subl $24, %esp +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovsd %xmm0, (%esp) +; AVX-NEXT: fldl (%esp) +; AVX-NEXT: fisttpll (%esp) +; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp) +; AVX-NEXT: fildll {{[0-9]+}}(%esp) +; AVX-NEXT: fstpl {{[0-9]+}}(%esp) +; AVX-NEXT: vcmpneqsd {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: movl %ebp, %esp +; AVX-NEXT: popl %ebp +; AVX-NEXT: retl +; +; AVX512-LABEL: une_f64_i64: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2qq %xmm0, %xmm1 +; AVX512-NEXT: vcvtqq2pd %ymm1, %ymm1 +; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retl +entry: + %0 = fptosi double %x to i64 + %1 = sitofp i64 %0 to double + %2 = fcmp une double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @une_f64_u64(double %x) nounwind readnone { +; SSE-LABEL: une_f64_u64: +; SSE: # %bb.0: # %entry +; SSE-NEXT: pushl %ebp +; SSE-NEXT: movl %esp, %ebp +; SSE-NEXT: andl $-8, %esp +; SSE-NEXT: subl $16, %esp +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0] +; SSE-NEXT: ucomisd %xmm0, %xmm1 +; SSE-NEXT: jbe .LBB7_2 +; SSE-NEXT: # %bb.1: # %entry +; SSE-NEXT: xorpd %xmm1, %xmm1 +; SSE-NEXT: .LBB7_2: # %entry +; SSE-NEXT: movapd %xmm0, %xmm2 +; SSE-NEXT: subsd %xmm1, %xmm2 +; SSE-NEXT: movsd %xmm2, {{[0-9]+}}(%esp) +; SSE-NEXT: setbe %al +; SSE-NEXT: fldl {{[0-9]+}}(%esp) +; SSE-NEXT: fnstcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; 
SSE-NEXT: orl $3072, %ecx # imm = 0xC00 +; SSE-NEXT: movw %cx, {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: fistpll {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzbl %al, %eax +; SSE-NEXT: shll $31, %eax +; SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax +; SSE-NEXT: movd %eax, %xmm1 +; SSE-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1] +; SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 +; SSE-NEXT: movapd %xmm2, %xmm1 +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] +; SSE-NEXT: addsd %xmm2, %xmm1 +; SSE-NEXT: cmpneqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: movl %ebp, %esp +; SSE-NEXT: popl %ebp +; SSE-NEXT: retl +; +; AVX-LABEL: une_f64_u64: +; AVX: # %bb.0: # %entry +; AVX-NEXT: pushl %ebp +; AVX-NEXT: movl %esp, %ebp +; AVX-NEXT: andl $-8, %esp +; AVX-NEXT: subl $8, %esp +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0] +; AVX-NEXT: vucomisd %xmm0, %xmm1 +; AVX-NEXT: jbe .LBB7_2 +; AVX-NEXT: # %bb.1: # %entry +; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: .LBB7_2: # %entry +; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vmovsd %xmm1, (%esp) +; AVX-NEXT: fldl (%esp) +; AVX-NEXT: fisttpll (%esp) +; AVX-NEXT: setbe %al +; AVX-NEXT: movzbl %al, %eax +; AVX-NEXT: shll $31, %eax +; AVX-NEXT: xorl {{[0-9]+}}(%esp), %eax +; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] +; AVX-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0] +; AVX-NEXT: vaddsd %xmm1, %xmm2, %xmm1 +; AVX-NEXT: vcmpneqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: movl %ebp, %esp +; AVX-NEXT: popl %ebp +; AVX-NEXT: retl +; +; AVX512-LABEL: une_f64_u64: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2uqq %xmm0, %xmm1 +; AVX512-NEXT: vcvtuqq2pd %ymm1, %ymm1 +; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retl +entry: + %0 = fptoui double %x to i64 + %1 = uitofp i64 %0 to double + %2 = fcmp une double %1, %x + %retval12 = zext i1 %2 to i8 ret i8 %retval12 } diff --git a/llvm/test/CodeGen/X86/shift-i512.ll b/llvm/test/CodeGen/X86/shift-i512.ll index 756019d..03b61d9 100644 --- a/llvm/test/CodeGen/X86/shift-i512.ll +++ b/llvm/test/CodeGen/X86/shift-i512.ll @@ -10,7 +10,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) { ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: valignq {{.*#+}} zmm1 = zmm0[3,4,5,6,7,0,1,2] ; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX512VL-NEXT: vpsllq $1, %xmm0, %xmm3 +; AVX512VL-NEXT: vpaddq %xmm0, %xmm0, %xmm3 ; AVX512VL-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3] ; AVX512VL-NEXT: vpsrlq $63, %xmm4, %xmm4 ; AVX512VL-NEXT: vpaddq %xmm2, %xmm2, %xmm2 @@ -34,7 +34,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) { ; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm2 ; AVX512VBMI-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3] ; AVX512VBMI-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3 -; AVX512VBMI-NEXT: vpsllq $1, %xmm0, %xmm4 +; 
AVX512VBMI-NEXT: vpaddq %xmm0, %xmm0, %xmm4 ; AVX512VBMI-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 ; AVX512VBMI-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; AVX512VBMI-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7] @@ -51,7 +51,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) { ; ZNVER4-NEXT: vextracti32x4 $2, %zmm0, %xmm1 ; ZNVER4-NEXT: vextracti128 $1, %ymm0, %xmm2 ; ZNVER4-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3] -; ZNVER4-NEXT: vpsllq $1, %xmm0, %xmm4 +; ZNVER4-NEXT: vpaddq %xmm0, %xmm0, %xmm4 ; ZNVER4-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; ZNVER4-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3 ; ZNVER4-NEXT: vextracti64x4 $1, %zmm0, %ymm2 diff --git a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll index 3f48b22..a48be03 100644 --- a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll @@ -5791,20 +5791,20 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone define <2 x i64> @test_mm_slli_epi16(<2 x i64> %a0) { ; SSE-LABEL: test_mm_slli_epi16: ; SSE: # %bb.0: -; SSE-NEXT: psllw $1, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x01] +; SSE-NEXT: psllw $2, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x02] ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX1-LABEL: test_mm_slli_epi16: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsllw $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x01] +; AVX1-NEXT: vpsllw $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x02] ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX512-LABEL: test_mm_slli_epi16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x01] +; AVX512-NEXT: vpsllw $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x02] ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3] %arg0 = bitcast <2 x i64> %a0 to <8 x i16> - %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 1) + %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 2) %bc = bitcast <8 x i16> %res to <2 x i64> ret <2 x i64> %bc } @@ -5813,20 +5813,20 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone define <2 x i64> @test_mm_slli_epi32(<2 x i64> %a0) { ; SSE-LABEL: test_mm_slli_epi32: ; SSE: # %bb.0: -; SSE-NEXT: pslld $1, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x01] +; SSE-NEXT: pslld $2, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x02] ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX1-LABEL: test_mm_slli_epi32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpslld $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x01] +; AVX1-NEXT: vpslld $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x02] ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX512-LABEL: test_mm_slli_epi32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpslld $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x01] +; AVX512-NEXT: vpslld $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x02] ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3] %arg0 = bitcast <2 x i64> %a0 to <4 x i32> - %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 1) + %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 2) %bc = bitcast <4 x i32> %res to <2 x i64> ret <2 x i64> %bc } @@ -5835,19 +5835,19 @@ declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone define <2 x i64> @test_mm_slli_epi64(<2 x i64> %a0) { ; SSE-LABEL: test_mm_slli_epi64: ; SSE: # %bb.0: -; SSE-NEXT: psllq $1, %xmm0 # encoding: 
[0x66,0x0f,0x73,0xf0,0x01] +; SSE-NEXT: psllq $2, %xmm0 # encoding: [0x66,0x0f,0x73,0xf0,0x02] ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX1-LABEL: test_mm_slli_epi64: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsllq $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf0,0x01] +; AVX1-NEXT: vpsllq $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf0,0x02] ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX512-LABEL: test_mm_slli_epi64: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsllq $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x01] +; AVX512-NEXT: vpsllq $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x02] ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3] - %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 1) + %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 2) ret <2 x i64> %res } declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone diff --git a/llvm/test/CodeGen/X86/vec_shift6.ll b/llvm/test/CodeGen/X86/vec_shift6.ll index 71e659c..219e32c 100644 --- a/llvm/test/CodeGen/X86/vec_shift6.ll +++ b/llvm/test/CodeGen/X86/vec_shift6.ll @@ -28,14 +28,14 @@ define <8 x i16> @test2(<8 x i16> %a) { ; SSE2-LABEL: test2: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddw %xmm0, %xmm1 +; SSE2-NEXT: paddw %xmm1, %xmm1 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] ; SSE2-NEXT: retq ; ; SSE41-LABEL: test2: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: paddw %xmm0, %xmm1 +; SSE41-NEXT: paddw %xmm1, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; SSE41-NEXT: retq ; @@ -56,7 +56,7 @@ define <4 x i32> @test3(<4 x i32> %a) { ; SSE2-LABEL: test3: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: paddd %xmm1, %xmm1 ; SSE2-NEXT: pslld $2, %xmm0 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE2-NEXT: retq @@ -81,14 +81,14 @@ define <4 x i32> @test4(<4 x i32> %a) { ; SSE2-LABEL: test4: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: paddd %xmm1, %xmm1 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] ; SSE2-NEXT: retq ; ; SSE41-LABEL: test4: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: paddd %xmm0, %xmm1 +; SSE41-NEXT: paddd %xmm1, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; SSE41-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll b/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll index 23d22e7..3f92d2b 100644 --- a/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll +++ b/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -enable-unsafe-fp-math -enable-no-signed-zeros-fp-math -mtriple=x86_64-unknown-unknown | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s ; Make sure that vectors get the same benefits as scalars when using unsafe-fp-math. 
@@ -18,7 +18,7 @@ define <4 x float> @vec_fneg(<4 x float> %x) { ; CHECK: # %bb.0: ; CHECK-NEXT: xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %sub = fsub <4 x float> zeroinitializer, %x + %sub = fsub nsz <4 x float> zeroinitializer, %x ret <4 x float> %sub } diff --git a/llvm/test/CodeGen/X86/vector-gep.ll b/llvm/test/CodeGen/X86/vector-gep.ll index 5c48559..b4cffcd 100644 --- a/llvm/test/CodeGen/X86/vector-gep.ll +++ b/llvm/test/CodeGen/X86/vector-gep.ll @@ -122,91 +122,87 @@ define <64 x ptr> @AGEP9(ptr %param, <64 x i32> %off) nounwind { ; CHECK-NEXT: movl %esp, %ebp ; CHECK-NEXT: andl $-32, %esp ; CHECK-NEXT: subl $160, %esp -; CHECK-NEXT: vmovdqa %ymm2, %ymm5 -; CHECK-NEXT: vmovdqa %ymm1, %ymm3 -; CHECK-NEXT: vmovdqa %ymm0, %ymm1 -; CHECK-NEXT: vmovdqa 72(%ebp), %ymm0 -; CHECK-NEXT: vmovdqa 40(%ebp), %ymm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm4 -; CHECK-NEXT: vbroadcastss 12(%ebp), %xmm7 -; CHECK-NEXT: vpaddd %xmm4, %xmm7, %xmm4 -; CHECK-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm3 +; CHECK-NEXT: vbroadcastss 12(%ebp), %xmm5 +; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3 +; CHECK-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vmovdqa 104(%ebp), %ymm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vmovdqa 136(%ebp), %ymm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vmovdqa 168(%ebp), %ymm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, (%esp) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vmovdqa 40(%ebp), %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm2 -; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 -; CHECK-NEXT: 
vextractf128 $1, %ymm1, %xmm1 -; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm1 -; CHECK-NEXT: vpaddd %xmm1, %xmm7, %xmm1 -; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm6 -; CHECK-NEXT: vpaddd %xmm6, %xmm7, %xmm6 -; CHECK-NEXT: vextractf128 $1, %ymm3, %xmm3 -; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3 -; CHECK-NEXT: vpaddd %xmm3, %xmm7, %xmm3 -; CHECK-NEXT: vmovdqa %ymm5, %ymm4 -; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm5 -; CHECK-NEXT: vpaddd %xmm5, %xmm7, %xmm5 -; CHECK-NEXT: vextractf128 $1, %ymm4, %xmm4 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vmovdqa 56(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vmovdqa 72(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, (%esp) # 16-byte Spill +; CHECK-NEXT: vmovdqa 88(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm2 +; CHECK-NEXT: vmovdqa 104(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm1 +; CHECK-NEXT: vmovdqa 120(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa 136(%ebp), %xmm6 +; CHECK-NEXT: vpaddd %xmm6, %xmm6, %xmm6 +; CHECK-NEXT: vpaddd %xmm6, %xmm5, %xmm6 +; CHECK-NEXT: vmovdqa 152(%ebp), %xmm7 +; CHECK-NEXT: vpaddd %xmm7, %xmm7, %xmm7 +; CHECK-NEXT: vpaddd %xmm7, %xmm5, %xmm7 +; CHECK-NEXT: vmovdqa 168(%ebp), %xmm4 ; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm4 -; CHECK-NEXT: vpaddd %xmm4, %xmm7, %xmm4 +; CHECK-NEXT: vpaddd %xmm4, %xmm5, %xmm4 +; CHECK-NEXT: vmovdqa 184(%ebp), %xmm3 +; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3 +; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3 ; CHECK-NEXT: movl 8(%ebp), %eax -; CHECK-NEXT: vmovdqa %xmm4, 80(%eax) -; CHECK-NEXT: vmovdqa %xmm5, 64(%eax) -; CHECK-NEXT: vmovdqa %xmm3, 48(%eax) -; CHECK-NEXT: vmovdqa %xmm6, 32(%eax) -; CHECK-NEXT: vmovdqa %xmm1, 16(%eax) -; CHECK-NEXT: vmovdqa %xmm0, (%eax) -; CHECK-NEXT: vmovdqa %xmm2, 240(%eax) +; CHECK-NEXT: vmovdqa %xmm3, 240(%eax) +; CHECK-NEXT: vmovdqa %xmm4, 224(%eax) +; CHECK-NEXT: vmovdqa %xmm7, 208(%eax) +; CHECK-NEXT: vmovdqa %xmm6, 192(%eax) +; CHECK-NEXT: vmovdqa %xmm0, 176(%eax) +; CHECK-NEXT: vmovdqa %xmm1, 160(%eax) +; CHECK-NEXT: vmovdqa %xmm2, 144(%eax) ; CHECK-NEXT: vmovaps (%esp), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 224(%eax) +; CHECK-NEXT: vmovaps %xmm0, 128(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 208(%eax) +; CHECK-NEXT: vmovaps %xmm0, 112(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 192(%eax) +; CHECK-NEXT: vmovaps %xmm0, 96(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 176(%eax) +; CHECK-NEXT: vmovaps %xmm0, 80(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 160(%eax) +; CHECK-NEXT: vmovaps %xmm0, 64(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 144(%eax) +; CHECK-NEXT: vmovaps %xmm0, 48(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 128(%eax) +; CHECK-NEXT: vmovaps %xmm0, 32(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), 
%xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 112(%eax) +; CHECK-NEXT: vmovaps %xmm0, 16(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 96(%eax) +; CHECK-NEXT: vmovaps %xmm0, (%eax) ; CHECK-NEXT: movl %ebp, %esp ; CHECK-NEXT: popl %ebp ; CHECK-NEXT: vzeroupper diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll index 13f7d68..33d80f6 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll @@ -652,7 +652,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE2-NEXT: paddb %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: psllw $1, %xmm2 +; SSE2-NEXT: paddw %xmm2, %xmm2 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 @@ -678,7 +678,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE41-NEXT: paddb %xmm3, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psllw $1, %xmm2 +; SSE41-NEXT: paddw %xmm2, %xmm2 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; SSE41-NEXT: psrlw $2, %xmm1 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 @@ -701,7 +701,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpsllw $1, %xmm1, %xmm2 +; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 @@ -720,7 +720,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; AVX2NOBW-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX2NOBW-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX2NOBW-NEXT: vpsllw $1, %xmm1, %xmm2 +; AVX2NOBW-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX2NOBW-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 @@ -739,7 +739,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; AVX512BW-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX512BW-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX512BW-NEXT: vpsllw $1, %xmm1, %xmm2 +; AVX512BW-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX512BW-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll index 1a5c373..e43108f 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll @@ -590,7 +590,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] ; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5 ; AVX1-NEXT: vpaddb %xmm3, %xmm5, %xmm3 -; AVX1-NEXT: vpsllw $1, %xmm3, %xmm5 +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm5 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248] ; AVX1-NEXT: vpand %xmm7, 
%xmm5, %xmm5 ; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3 @@ -609,7 +609,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3 ; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 ; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpsllw $1, %xmm2, %xmm3 +; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 ; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 ; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2 ; AVX1-NEXT: vpand %xmm2, %xmm8, %xmm2 @@ -633,7 +633,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX2NOBW-NEXT: vpsrlw $1, %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm2, %ymm1 -; AVX2NOBW-NEXT: vpsllw $1, %ymm1, %ymm2 +; AVX2NOBW-NEXT: vpaddw %ymm1, %ymm1, %ymm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpsrlw $2, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 @@ -651,7 +651,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX512BW-NEXT: vpsrlw $1, %ymm2, %ymm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX512BW-NEXT: vpaddb %ymm1, %ymm2, %ymm1 -; AVX512BW-NEXT: vpsllw $1, %ymm1, %ymm2 +; AVX512BW-NEXT: vpaddw %ymm1, %ymm1, %ymm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX512BW-NEXT: vpsrlw $2, %ymm1, %ymm1 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll index 9c56894..bf98bcc 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll @@ -485,7 +485,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpbroadcastb {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] ; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5 ; AVX512F-NEXT: vpaddb %ymm3, %ymm5, %ymm3 -; AVX512F-NEXT: vpsllw $1, %ymm3, %ymm5 +; AVX512F-NEXT: vpaddw %ymm3, %ymm3, %ymm5 ; AVX512F-NEXT: vpbroadcastb {{.*#+}} ymm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248] ; AVX512F-NEXT: vpand %ymm7, %ymm5, %ymm5 ; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3 @@ -504,7 +504,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3 ; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3 ; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2 -; AVX512F-NEXT: vpsllw $1, %ymm2, %ymm3 +; AVX512F-NEXT: vpaddw %ymm2, %ymm2, %ymm3 ; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3 ; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2 ; AVX512F-NEXT: vpand %ymm2, %ymm8, %ymm2 @@ -528,7 +528,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512BW-NEXT: vpsrlw $1, %zmm2, %zmm2 ; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2 ; AVX512BW-NEXT: vpaddb %zmm1, %zmm2, %zmm1 -; AVX512BW-NEXT: vpsllw $1, %zmm1, %zmm2 +; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm2 ; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2 ; AVX512BW-NEXT: vpsrlw $2, %zmm1, %zmm1 ; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1 diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll index 13b21a7..6e1bf25 100644 --- a/llvm/test/CodeGen/X86/vector-mul.ll +++ b/llvm/test/CodeGen/X86/vector-mul.ll @@ -821,10 +821,10 @@ define <16 x i16> @madd_v16i16_3(<16 x i16> %a0, 
<16 x i16> %a1) nounwind { ; X86-SSE-NEXT: andl $-16, %esp ; X86-SSE-NEXT: subl $16, %esp ; X86-SSE-NEXT: movdqa %xmm1, %xmm3 -; X86-SSE-NEXT: paddw %xmm1, %xmm3 +; X86-SSE-NEXT: paddw %xmm3, %xmm3 ; X86-SSE-NEXT: paddw %xmm3, %xmm1 ; X86-SSE-NEXT: movdqa %xmm0, %xmm3 -; X86-SSE-NEXT: paddw %xmm0, %xmm3 +; X86-SSE-NEXT: paddw %xmm3, %xmm3 ; X86-SSE-NEXT: paddw %xmm2, %xmm0 ; X86-SSE-NEXT: paddw %xmm3, %xmm0 ; X86-SSE-NEXT: paddw 8(%ebp), %xmm1 @@ -835,9 +835,9 @@ define <16 x i16> @madd_v16i16_3(<16 x i16> %a0, <16 x i16> %a1) nounwind { ; X64-SSE-LABEL: madd_v16i16_3: ; X64-SSE: # %bb.0: ; X64-SSE-NEXT: movdqa %xmm1, %xmm4 -; X64-SSE-NEXT: paddw %xmm1, %xmm4 +; X64-SSE-NEXT: paddw %xmm4, %xmm4 ; X64-SSE-NEXT: movdqa %xmm0, %xmm5 -; X64-SSE-NEXT: paddw %xmm0, %xmm5 +; X64-SSE-NEXT: paddw %xmm5, %xmm5 ; X64-SSE-NEXT: paddw %xmm2, %xmm0 ; X64-SSE-NEXT: paddw %xmm5, %xmm0 ; X64-SSE-NEXT: paddw %xmm3, %xmm1 diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll index 227e000..ab1feba 100644 --- a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll +++ b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll @@ -907,7 +907,7 @@ define i1 @mask_v8i32_2(<8 x i32> %a0) { ; SSE2-LABEL: mask_v8i32_2: ; SSE2: # %bb.0: ; SSE2-NEXT: por %xmm1, %xmm0 -; SSE2-NEXT: pslld $1, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm0 ; SSE2-NEXT: movmskps %xmm0, %eax ; SSE2-NEXT: testl %eax, %eax ; SSE2-NEXT: sete %al diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll index 2b1cf5b..99dac74 100644 --- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll @@ -927,7 +927,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind { ; SSE2-LABEL: constant_shift_v2i64: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddq %xmm0, %xmm1 +; SSE2-NEXT: paddq %xmm1, %xmm1 ; SSE2-NEXT: psllq $7, %xmm0 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE2-NEXT: retq @@ -975,7 +975,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind { ; X86-SSE-LABEL: constant_shift_v2i64: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: movdqa %xmm0, %xmm1 -; X86-SSE-NEXT: paddq %xmm0, %xmm1 +; X86-SSE-NEXT: paddq %xmm1, %xmm1 ; X86-SSE-NEXT: psllq $7, %xmm0 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; X86-SSE-NEXT: retl diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll index bec3349..3590c4d 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll @@ -62,15 +62,12 @@ define <4 x i32> @combine_blend_of_permutes_v4i32(<2 x i64> %a0, <2 x i64> %a1) define <4 x float> @freeze_insertps(<4 x float> %a0, <4 x float> %a1) { ; SSE-LABEL: freeze_insertps: ; SSE: # %bb.0: -; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] -; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm0[1],xmm1[1,2,3] ; SSE-NEXT: movaps %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: freeze_insertps: ; AVX: # %bb.0: -; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] -; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],xmm1[1,2,3] +; AVX-NEXT: vmovaps %xmm1, %xmm0 ; AVX-NEXT: retq %s0 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 16) %f0 = freeze <4 x float> %s0 diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll index 5b61de5..ee9d8a5 100644 --- 
a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll @@ -3550,14 +3550,14 @@ define <8 x i16> @PR141475(i32 %in) { ; SSE-LABEL: PR141475: ; SSE: # %bb.0: ; SSE-NEXT: movd %edi, %xmm0 -; SSE-NEXT: pslld $1, %xmm0 +; SSE-NEXT: paddd %xmm0, %xmm0 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] ; SSE-NEXT: retq ; ; AVX-LABEL: PR141475: ; AVX: # %bb.0: ; AVX-NEXT: vmovd %edi, %xmm0 -; AVX-NEXT: vpslld $1, %xmm0, %xmm0 +; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] ; AVX-NEXT: retq %mul = shl i32 %in, 1 diff --git a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll index 54dc107..3b93734 100644 --- a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll +++ b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll @@ -1438,26 +1438,26 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_10(<8 x i16> %a0) { define <8 x i16> @test_128_i16_x_8_127_mask_shl_1(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddw %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddw %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <8 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127> %t1 = shl <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> @@ -1656,26 +1656,26 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_6(<8 x i16> %a0) { define <8 x i16> @test_128_i16_x_8_65024_mask_shl_1(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddw %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddw %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 
65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024> %t1 = shl <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> @@ -2373,40 +2373,40 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_18(<4 x i32> %a0) { define <4 x i32> @test_128_i32_x_4_32767_mask_shl_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddd %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; ; X86-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767] -; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65534,65534,65534,65534] +; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddd %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X64-AVX1: # %bb.0: -; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: retq ; ; X64-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X64-AVX2: # %bb.0: -; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767] -; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65534,65534,65534,65534] +; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: retq %t0 = and <4 x i32> %a0, <i32 32767, i32 32767, i32 32767, i32 32767> %t1 = shl <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1> @@ -2675,40 +2675,40 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_10(<4 x i32> %a0) { define <4 x i32> @test_128_i32_x_4_4294836224_mask_shl_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddd %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; ; X86-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224] -; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294705152,4294705152,4294705152,4294705152] +; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddd %xmm0, %xmm0 +; X64-SSE2-NEXT: pand 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X64-AVX1: # %bb.0: -; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: retq ; ; X64-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X64-AVX2: # %bb.0: -; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224] -; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294705152,4294705152,4294705152,4294705152] +; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: retq %t0 = and <4 x i32> %a0, <i32 4294836224, i32 4294836224, i32 4294836224, i32 4294836224> %t1 = shl <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1> @@ -3325,26 +3325,26 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_34(<2 x i64> % define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_1(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddq %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddq %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647> %t1 = shl <2 x i64> %t0, <i64 1, i64 1> @@ -3543,26 +3543,26 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_18(<2 x i64> %a0) { define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_shl_1(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddq %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddq %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <2 x i64> %a0, <i64 
18446744065119617024, i64 18446744065119617024> %t1 = shl <2 x i64> %t0, <i64 1, i64 1> diff --git a/llvm/test/DebugInfo/AArch64/abstract-sp-unit.ll b/llvm/test/DebugInfo/AArch64/abstract-sp-unit.ll new file mode 100644 index 0000000..559f201 --- /dev/null +++ b/llvm/test/DebugInfo/AArch64/abstract-sp-unit.ll @@ -0,0 +1,43 @@ +; RUN: llc --filetype=obj -O0 -o - %s | llvm-dwarfdump --verify - + +; Check that the abstract DIE for a subprogram referenced from another compile unit +; is emitted in the correct CU. + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64" + +define void @a() !dbg !10 { + br label %for.b.c.c, !dbg !13 + for.b.c.c: + br label %for.b.c.c +} + +!llvm.dbg.cu = !{!0, !6} +!llvm.module.flags = !{!8} + +!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_20, file: !1, emissionKind: FullDebug, globals: !2) +!1 = !DIFile(filename: "foo.cpp", directory: "") +!2 = !{!3} +!3 = !DIGlobalVariableExpression(var: !4, expr: !DIExpression()) +!4 = !DIGlobalVariable(type: !5) +!5 = !DICompositeType(tag: DW_TAG_class_type) +!6 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_20, file: !7, emissionKind: FullDebug) +!7 = !DIFile(filename: "bar.cpp", directory: "") +!8 = !{i32 2, !"Debug Info Version", i32 3} +!10 = distinct !DISubprogram(type: !11, unit: !6) +!11 = !DISubroutineType(types: !12) +!12 = !{} +!13 = !DILocation(scope: !14, inlinedAt: !15) +!14 = distinct !DISubprogram(unit: !6) +!15 = !DILocation(scope: !16, inlinedAt: !25) +!16 = distinct !DISubprogram(type: !11, unit: !6, declaration: !17) +!17 = !DISubprogram(scope: !5, type: !11, spFlags: DISPFlagOptimized, templateParams: !18) +!18 = !{!19} +!19 = !DITemplateTypeParameter(type: !20) +!20 = !DICompositeType(tag: DW_TAG_class_type, scope: !21) +!21 = distinct !DISubprogram(unit: !6, retainedNodes: !22) +!22 = !{!23} +!23 = !DILocalVariable(scope: !21, type: !24) +!24 = !DIBasicType() +!25 = !DILocation(scope: !21, inlinedAt: !26) +!26 = !DILocation(scope: !10) diff --git a/llvm/test/DebugInfo/AArch64/debug-types.ll b/llvm/test/DebugInfo/AArch64/debug-types.ll new file mode 100644 index 0000000..0d0fd33 --- /dev/null +++ b/llvm/test/DebugInfo/AArch64/debug-types.ll @@ -0,0 +1,59 @@ +; Check that composite type DIEs go to the debug_types section.
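+;
+; A plausible C++ source shape for the checks and metadata below (a
+; hypothetical reconstruction for illustration; the original repro.ii is
+; not part of this test):
+;
+;   class A {                      // expected type unit "A" (SIG_A below)
+;   public:
+;     void Append() {
+;       auto raw_append = [] {};   // lambda class gives the second type
+;       raw_append();              // unit (SIG_LAMBDA, decl_line 7)
+;     }
+;   };
+;   void f(A a) { a.Append(); }    // Append() is inlined into f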
+ +; RUN: llc -generate-type-units -filetype=obj %s -o - | llvm-dwarfdump -debug-info -debug-types - | FileCheck %s + +; CHECK: .debug_info contents: +; CHECK: DW_TAG_compile_unit +; CHECK: DW_TAG_class_type +; CHECK: DW_AT_signature ([[SIG_A:0x[0-9a-f]+]]) +; CHECK: DW_TAG_subprogram +; CHECK: NULL +; CHECK: DW_TAG_subprogram +; CHECK: "_ZN1A6AppendEv" +; CHECK: DW_TAG_class_type +; CHECK: DW_AT_signature ([[SIG_LAMBDA:0x[0-9a-f]+]]) +; CHECK: DW_TAG_variable +; CHECK: NULL +; CHECK: DW_TAG_subprogram +; CHECK: DW_TAG_inlined_subroutine +; CHECK: NULL +; CHECK: NULL + +; CHECK: .debug_types contents: +; CHECK: Type Unit: {{.*}} type_signature = [[SIG_A]] +; CHECK: DW_TAG_class_type +; CHECK-NOT: DW_TAG +; CHECK: DW_AT_name ("A") +; CHECK: Type Unit: {{.*}} type_signature = [[SIG_LAMBDA]] +; CHECK: DW_TAG_class_type +; CHECK: DW_TAG_class_type +; CHECK-NOT: DW_TAG +; CHECK: DW_AT_decl_line (7) + +target triple = "aarch64-unknown-linux-gnu" + +define void @_Z1f1A() !dbg !4 { +entry: + ret void, !dbg !8 +} + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!3} + +!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, emissionKind: FullDebug, globals: !2) +!1 = !DIFile(filename: "<stdin>", directory: "") +!2 = !{} +!3 = !{i32 2, !"Debug Info Version", i32 3} +!4 = distinct !DISubprogram(name: "f", linkageName: "_Z1f1A", scope: !5, file: !5, line: 14, type: !6, scopeLine: 14, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2) +!5 = !DIFile(filename: "repro.ii", directory: "") +!6 = distinct !DISubroutineType(types: !7) +!7 = !{null} +!8 = !DILocation(line: 8, column: 12, scope: !9, inlinedAt: !16) +!9 = distinct !DISubprogram(name: "Append", linkageName: "_ZN1A6AppendEv", scope: !10, file: !5, line: 6, type: !11, scopeLine: 6, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, declaration: !12, retainedNodes: !13) +!10 = distinct !DICompositeType(tag: DW_TAG_class_type, name: "A", file: !5, line: 3, size: 32, flags: DIFlagTypePassByValue, elements: !2, identifier: "_ZTS1A") +!11 = distinct !DISubroutineType(types: !7) +!12 = !DISubprogram(name: "Append", linkageName: "_ZN1A6AppendEv", scope: !10, file: !5, line: 6, type: !11, scopeLine: 6, flags: DIFlagPublic | DIFlagPrototyped, spFlags: DISPFlagOptimized) +!13 = !{!14} +!14 = !DILocalVariable(name: "raw_append", scope: !9, file: !5, line: 7, type: !15) +!15 = distinct !DICompositeType(tag: DW_TAG_class_type, scope: !9, file: !5, line: 7, size: 8, flags: DIFlagTypePassByValue | DIFlagNonTrivial, elements: !2, identifier: "_ZTSZN1A6AppendEvEUlvE_") +!16 = distinct !DILocation(line: 14, column: 15, scope: !4) diff --git a/llvm/test/DebugInfo/AArch64/populate-abstract-sp-once.ll b/llvm/test/DebugInfo/AArch64/populate-abstract-sp-once.ll new file mode 100644 index 0000000..20cc98a --- /dev/null +++ b/llvm/test/DebugInfo/AArch64/populate-abstract-sp-once.ll @@ -0,0 +1,67 @@ +; Check that abstract DIEs for inlined subprograms and lexical scopes +; are populated only once. 
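+;
+; A plausible C++ shape for the metadata below (a hypothetical, heavily
+; simplified reconstruction of clang's CodeGenPGO.cpp; assumes the
+; compiler inlines combine() into both callers, e.g. at -O2):
+;
+;   namespace llvm { int x; }
+;   struct PGOHash {
+;     void combine(int t) {       // the abstract DW_TAG_subprogram
+;       if (t) {                  // lexical block owning the
+;         using namespace llvm;   // DW_TAG_imported_module child
+;         (void)x;
+;       }
+;     }
+;   };
+;   void TraverseIfStmt(PGOHash &h) { h.combine(1); }
+;   void VisitStmt(PGOHash &h) { h.combine(2); }
+;
+; Each DW_TAG_inlined_subroutine must point at the one abstract origin;
+; the --implicit-check-not flags in the RUN line below catch any extra
+; subprogram or lexical-scope DIEs from a second population.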
+ +; RUN: llc -filetype=obj %s -o - | llvm-dwarfdump - -o - | FileCheck --implicit-check-not=DW_TAG_lexical_scope --implicit-check-not DW_TAG_subprogram %s + +; CHECK: DW_TAG_compile_unit +; CHECK: DW_TAG_namespace +; CHECK: DW_TAG_subprogram +; CHECK: DW_AT_declaration (true) +; CHECK: DW_TAG_subprogram +; CHECK: DW_AT_declaration (true) +; CHECK: DW_TAG_subprogram +; CHECK: DW_AT_declaration (true) +; CHECK: NULL + +; CHECK: [[ABSTRACT_SP:0x[0-9a-f]+]]: DW_TAG_subprogram +; CHECK: DW_AT_inline (DW_INL_inlined) + +; CHECK: DW_TAG_lexical_block +; CHECK: DW_TAG_imported_module +; CHECK: NULL + +; CHECK: NULL + +; CHECK: DW_TAG_subprogram +; CHECK: DW_TAG_inlined_subroutine +; CHECK: DW_AT_abstract_origin ([[ABSTRACT_SP]] +; CHECK: NULL +; CHECK: DW_TAG_subprogram +; CHECK: DW_TAG_inlined_subroutine +; CHECK: DW_AT_abstract_origin ([[ABSTRACT_SP]] +; CHECK: NULL + +target triple = "aarch64-unknown-linux-gnu" + +define void @_ZN12_GLOBAL__N_117MapRegionCounters14TraverseIfStmtEPN5clang6IfStmtE() !dbg !4 { +entry: + ret void, !dbg !8 +} + +define void @_ZN12_GLOBAL__N_117MapRegionCounters9VisitStmtEPN5clang4StmtE() !dbg !15 { +entry: + ret void, !dbg !17 +} + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!3} + +!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !2, globals: !2, imports: !2) +!1 = !DIFile(filename: "CodeGenPGO.cpp", directory: "/") +!2 = !{} +!3 = !{i32 2, !"Debug Info Version", i32 3} +!4 = distinct !DISubprogram(name: "TraverseIfStmt", linkageName: "_ZN12_GLOBAL__N_117MapRegionCounters14TraverseIfStmtEPN5clang6IfStmtE", scope: !5, file: !1, line: 364, type: !6, scopeLine: 364, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: !0, declaration: !7, retainedNodes: !2, keyInstructions: true) +!5 = !DINamespace(name: "llvm", scope: null) +!6 = distinct !DISubroutineType(types: !2) +!7 = !DISubprogram(name: "TraverseIfStmt", linkageName: "_ZN12_GLOBAL__N_117MapRegionCounters14TraverseIfStmtEPN5clang6IfStmtE", scope: !5, file: !1, line: 364, type: !6, scopeLine: 364, flags: DIFlagPrototyped, spFlags: DISPFlagLocalToUnit | DISPFlagOptimized) +!8 = !DILocation(line: 982, column: 39, scope: !9, inlinedAt: !14, atomGroup: 6, atomRank: 2) +!9 = distinct !DISubprogram(name: "combine", linkageName: "_ZN12_GLOBAL__N_17PGOHash7combineENS0_8HashTypeE", scope: !5, file: !1, line: 966, type: !6, scopeLine: 966, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: !0, declaration: !10, retainedNodes: !11, keyInstructions: true) +!10 = !DISubprogram(name: "combine", linkageName: "_ZN12_GLOBAL__N_17PGOHash7combineENS0_8HashTypeE", scope: !5, file: !1, line: 140, type: !6, scopeLine: 140, flags: DIFlagPublic | DIFlagPrototyped, spFlags: DISPFlagLocalToUnit | DISPFlagOptimized) +!11 = !{!12} +!12 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !13, entity: !5, file: !1, line: 973) +!13 = distinct !DILexicalBlock(scope: !9, file: !1, line: 972, column: 7) +!14 = distinct !DILocation(line: 393, column: 10, scope: !4) +!15 = distinct !DISubprogram(name: "VisitStmt", linkageName: "_ZN12_GLOBAL__N_117MapRegionCounters9VisitStmtEPN5clang4StmtE", scope: !5, file: !1, line: 355, type: !6, scopeLine: 355, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: !0, 
declaration: !16, retainedNodes: !2, keyInstructions: true) +!16 = !DISubprogram(name: "VisitStmt", linkageName: "_ZN12_GLOBAL__N_117MapRegionCounters9VisitStmtEPN5clang4StmtE", scope: !5, file: !1, line: 355, type: !6, scopeLine: 355, flags: DIFlagPrototyped, spFlags: DISPFlagLocalToUnit | DISPFlagOptimized) +!17 = !DILocation(line: 982, column: 13, scope: !9, inlinedAt: !18) +!18 = distinct !DILocation(line: 360, column: 12, scope: !15) diff --git a/llvm/test/DebugInfo/Generic/inlined-static-var.ll b/llvm/test/DebugInfo/Generic/inlined-static-var.ll new file mode 100644 index 0000000..1d24646 --- /dev/null +++ b/llvm/test/DebugInfo/Generic/inlined-static-var.ll @@ -0,0 +1,93 @@ +; RUN: %llc_dwarf -O0 -filetype=obj < %s | llvm-dwarfdump -debug-info - | FileCheck --implicit-check-not "{{DW_TAG|NULL}}" %s + +; inline __attribute__((always_inline)) +; int removed() { static int A; return A++; } +; +; __attribute__((always_inline)) +; int not_removed() { static int B; return B++; } +; +; int foo() { return removed() + not_removed(); } + +; Ensure that global variables belong to the correct subprograms even if those +; subprograms are inlined. + +; CHECK: DW_TAG_compile_unit +; CHECK: DW_TAG_subprogram +; CHECK: DW_AT_abstract_origin {{.*}} "_Z11not_removedv" +; TODO: This variable should be emitted in abstract subprogram DIE. +; CHECK: DW_TAG_variable +; CHECK: DW_AT_name ("B") +; CHECK: NULL +; CHECK: DW_TAG_base_type +; CHECK: DW_TAG_subprogram +; CHECK: DW_AT_name ("removed") +; CHECK: DW_TAG_variable +; CHECK: DW_AT_name ("A") +; CHECK: NULL +; CHECK: DW_TAG_subprogram +; CHECK: DW_AT_name ("not_removed") +; CHECK: DW_TAG_subprogram +; CHECK: DW_AT_name ("foo") +; CHECK: DW_TAG_inlined_subroutine +; CHECK: DW_TAG_inlined_subroutine +; CHECK: NULL +; CHECK: NULL + +@_ZZ11not_removedvE1A = internal global i32 0, align 4, !dbg !0 +@_ZZ7removedvE1A = linkonce_odr dso_local global i32 0, align 4, !dbg !10 + +define dso_local i32 @_Z11not_removedv() !dbg !2 { + %1 = load i32, i32* @_ZZ11not_removedvE1A, align 4, !dbg !24 + %2 = add nsw i32 %1, 1, !dbg !24 + store i32 %2, i32* @_ZZ11not_removedvE1A, align 4, !dbg !24 + ret i32 %1, !dbg !25 +} + +define dso_local i32 @_Z3foov() !dbg !26 { + %1 = load i32, i32* @_ZZ7removedvE1A, align 4, !dbg !27 + %2 = add nsw i32 %1, 1, !dbg !27 + store i32 %2, i32* @_ZZ7removedvE1A, align 4, !dbg !27 + %3 = load i32, i32* @_ZZ11not_removedvE1A, align 4, !dbg !29 + %4 = add nsw i32 %3, 1, !dbg !29 + store i32 %4, i32* @_ZZ11not_removedvE1A, align 4, !dbg !29 + %5 = add nsw i32 %1, %3, !dbg !31 + ret i32 %5, !dbg !32 +} + +!llvm.dbg.cu = !{!7} +!llvm.module.flags = !{!14, !15, !16, !17, !18, !19, !20, !21, !22} +!llvm.ident = !{!23} + +!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression()) +!1 = distinct !DIGlobalVariable(name: "B", scope: !2, file: !3, line: 5, type: !6, isLocal: true, isDefinition: true) +!2 = distinct !DISubprogram(name: "not_removed", linkageName: "_Z11not_removedv", scope: !3, file: !3, line: 5, type: !4, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !7, retainedNodes: !13) +!3 = !DIFile(filename: "example.cpp", directory: "") +!4 = !DISubroutineType(types: !5) +!5 = !{!6} +!6 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) +!7 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !8, producer: "clang version 14.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, globals: !9, splitDebugInlining: false, nameTableKind: None) +!8 = !DIFile(filename: 
"example.cpp", directory: "") +!9 = !{!0, !10} +!10 = !DIGlobalVariableExpression(var: !11, expr: !DIExpression()) +!11 = distinct !DIGlobalVariable(name: "A", scope: !12, file: !3, line: 2, type: !6, isLocal: false, isDefinition: true) +!12 = distinct !DISubprogram(name: "removed", linkageName: "_Z7removedv", scope: !3, file: !3, line: 2, type: !4, scopeLine: 2, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !7, retainedNodes: !13) +!13 = !{} +!14 = !{i32 7, !"Dwarf Version", i32 4} +!15 = !{i32 2, !"Debug Info Version", i32 3} +!16 = !{i32 1, !"wchar_size", i32 4} +!17 = !{i32 1, !"branch-target-enforcement", i32 0} +!18 = !{i32 1, !"sign-return-address", i32 0} +!19 = !{i32 1, !"sign-return-address-all", i32 0} +!20 = !{i32 1, !"sign-return-address-with-bkey", i32 0} +!21 = !{i32 7, !"uwtable", i32 1} +!22 = !{i32 7, !"frame-pointer", i32 1} +!23 = !{!"clang version 14.0.0"} +!24 = !DILocation(line: 5, column: 43, scope: !2) +!25 = !DILocation(line: 5, column: 35, scope: !2) +!26 = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov", scope: !3, file: !3, line: 7, type: !4, scopeLine: 7, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !7, retainedNodes: !13) +!27 = !DILocation(line: 2, column: 39, scope: !12, inlinedAt: !28) +!28 = distinct !DILocation(line: 7, column: 20, scope: !26) +!29 = !DILocation(line: 5, column: 43, scope: !2, inlinedAt: !30) +!30 = distinct !DILocation(line: 7, column: 32, scope: !26) +!31 = !DILocation(line: 7, column: 30, scope: !26) +!32 = !DILocation(line: 7, column: 13, scope: !26) diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_salu_lit64.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_salu_lit64.txt index d2ec213..7064479 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_salu_lit64.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_salu_lit64.txt @@ -1,55 +1,56 @@ +# NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5 # RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s -# GFX1250: s_mov_b64 s[2:3], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x01,0x82,0xbe,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x01,0x82,0xbe,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_mov_b64 s[2:3], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x01,0x82,0xbe,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_add_nc_u64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0xa9,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0xa9,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_add_nc_u64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0xa9,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_and_b64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0x8b,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0x8b,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_and_b64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0x8b,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_and_not1_b64 s[2:3], lit64(0x10abcdef12345678), lit64(0x10abcdef12345678) ; encoding: [0xfe,0xfe,0x82,0x91,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfe,0x82,0x91,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_and_not1_b64 s[2:3], lit64(0x10abcdef12345678), lit64(0x10abcdef12345678) ; encoding: [0xfe,0xfe,0x82,0x91,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_and_not1_b64 s[2:3], lit64(0x10abcdef12345678), 
s[4:5] ; encoding: [0xfe,0x04,0x82,0x91,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0x91,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_and_not1_b64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0x91,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_ashr_i64 s[2:3], lit64(0x10abcdef12345678), s4 ; encoding: [0xfe,0x04,0x82,0x86,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0x86,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_ashr_i64 s[2:3], lit64(0x10abcdef12345678), s4 ; encoding: [0xfe,0x04,0x82,0x86,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_bfe_i64 s[2:3], lit64(0x80abcdef12345678), 5 ; encoding: [0xfe,0x85,0x82,0x94,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x80] 0xfe,0x85,0x82,0x94,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x80 +# GFX1250: s_bfe_i64 s[2:3], lit64(0x80abcdef12345678), 5 ; encoding: [0xfe,0x85,0x82,0x94,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x80] -# GFX1250: s_bfe_u64 s[2:3], lit64(0x10abcdef12345678), 5 ; encoding: [0xfe,0x85,0x02,0x94,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x85,0x02,0x94,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_bfe_u64 s[2:3], lit64(0x10abcdef12345678), 5 ; encoding: [0xfe,0x85,0x02,0x94,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_cselect_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x98,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0x98,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_cselect_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x98,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_lshl_b64 s[2:3], lit64(0x10abcdef12345678), s4 ; encoding: [0xfe,0x04,0x82,0x84,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0x84,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_lshl_b64 s[2:3], lit64(0x10abcdef12345678), s4 ; encoding: [0xfe,0x04,0x82,0x84,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_lshr_b64 s[2:3], lit64(0x10abcdef12345678), s4 ; encoding: [0xfe,0x04,0x82,0x85,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0x85,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_lshr_b64 s[2:3], lit64(0x10abcdef12345678), s4 ; encoding: [0xfe,0x04,0x82,0x85,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_mul_u64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0xaa,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0xaa,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_mul_u64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0xaa,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_nand_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x8e,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0x8e,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_nand_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x8e,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_nor_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x8f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0x8f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_nor_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x8f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_or_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x8c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0x8c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_or_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: 
[0x04,0xfe,0x82,0x8c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_or_not1_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x92,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0x92,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_or_not1_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x92,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_xnor_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_xnor_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_xor_b64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0x8d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0x8d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_xor_b64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0x8d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt index 963e693..227e1c4 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt @@ -1,34 +1,35 @@ +# NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5 # RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s -# GFX1250: s_add_pc_i64 lit64(0x12345678abcd0) ; encoding: [0xfe,0x4b,0x80,0xbe,0xd0,0xbc,0x8a,0x67,0x45,0x23,0x01,0x00] 0xfe,0x4b,0x80,0xbe,0xd0,0xbc,0x8a,0x67,0x45,0x23,0x01,0x00 +# GFX1250: s_add_pc_i64 lit64(0x12345678abcd0) ; encoding: [0xfe,0x4b,0x80,0xbe,0xd0,0xbc,0x8a,0x67,0x45,0x23,0x01,0x00] -# GFX1250: s_add_pc_i64 0x64 ; encoding: [0xff,0x4b,0x80,0xbe,0x64,0x00,0x00,0x00] 0xff,0x4b,0x80,0xbe,0x64,0x00,0x00,0x00 +# GFX1250: s_add_pc_i64 0x64 ; encoding: [0xff,0x4b,0x80,0xbe,0x64,0x00,0x00,0x00] -# GFX1250: s_add_pc_i64 4 ; encoding: [0x84,0x4b,0x80,0xbe] 0x84,0x4b,0x80,0xbe +# GFX1250: s_add_pc_i64 4 ; encoding: [0x84,0x4b,0x80,0xbe] -# GFX1250: s_add_pc_i64 s[2:3] ; encoding: [0x02,0x4b,0x80,0xbe] 0x02,0x4b,0x80,0xbe +# GFX1250: s_add_pc_i64 s[2:3] ; encoding: [0x02,0x4b,0x80,0xbe] -# GFX1250: s_sendmsg_rtn_b32 s2, sendmsg(MSG_RTN_GET_CLUSTER_BARRIER_STATE) ; encoding: [0x88,0x4c,0x82,0xbe] 0x88,0x4c,0x82,0xbe +# GFX1250: s_sendmsg_rtn_b32 s2, sendmsg(MSG_RTN_GET_CLUSTER_BARRIER_STATE) ; encoding: [0x88,0x4c,0x82,0xbe] -# GFX1250: s_sendmsg_rtn_b64 s[2:3], sendmsg(MSG_RTN_GET_CLUSTER_BARRIER_STATE) ; encoding: [0x88,0x4d,0x82,0xbe] 0x88,0x4d,0x82,0xbe +# GFX1250: s_sendmsg_rtn_b64 s[2:3], sendmsg(MSG_RTN_GET_CLUSTER_BARRIER_STATE) ; encoding: [0x88,0x4d,0x82,0xbe] -# GFX1250: s_get_shader_cycles_u64 s[2:3] ; encoding: [0x00,0x06,0x82,0xbe] 0x00,0x06,0x82,0xbe +# GFX1250: s_get_shader_cycles_u64 s[2:3] ; encoding: [0x00,0x06,0x82,0xbe] -# GFX1250: s_barrier_signal -3 ; encoding: [0xc3,0x4e,0x80,0xbe] 0xc3,0x4e,0x80,0xbe +# GFX1250: s_barrier_signal -3 ; encoding: [0xc3,0x4e,0x80,0xbe] -# GFX1250: s_get_barrier_state s3, -3 ; encoding: [0xc3,0x50,0x83,0xbe] 0xc3,0x50,0x83,0xbe +# GFX1250: s_get_barrier_state s3, -3 ; encoding: [0xc3,0x50,0x83,0xbe] -# GFX1250: s_get_barrier_state s3, -4 ; encoding: [0xc4,0x50,0x83,0xbe] 0xc4,0x50,0x83,0xbe +# GFX1250: s_get_barrier_state s3, -4 ; encoding: [0xc4,0x50,0x83,0xbe] -# 
GFX1250: s_get_barrier_state s3, m0 ; encoding: [0x7d,0x50,0x83,0xbe] 0x7d,0x50,0x83,0xbe +# GFX1250: s_get_barrier_state s3, m0 ; encoding: [0x7d,0x50,0x83,0xbe] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_valu_lit64.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_valu_lit64.txt index 30650b4..1571fb9 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_valu_lit64.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_valu_lit64.txt @@ -1,232 +1,233 @@ +# NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5 # RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s -# GFX1250: v_add_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x05,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xfd,0x05,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_add_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x05,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x30,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x30,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x30,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_class_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v255 ; encoding: [0xfe,0xfe,0xff,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfe,0xff,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_class_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v255 ; encoding: [0xfe,0xfe,0xff,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_eq_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x45,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x45,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_eq_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x45,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_ge_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4d,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x4d,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_ge_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4d,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_gt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x49,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x49,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_gt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x49,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_gt_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa9,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa9,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_gt_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa9,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_gt_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb9,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb9,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_gt_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb9,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: 
v_cmp_le_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x47,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x47,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_le_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x47,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_le_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa7,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa7,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_le_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa7,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_le_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb7,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb7,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_le_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb7,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_lg_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4b,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x4b,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_lg_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4b,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_lt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x43,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x43,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_lt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x43,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_lt_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa3,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa3,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_lt_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa3,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_lt_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb3,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb3,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_lt_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb3,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_ne_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xab,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xab,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_ne_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xab,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_ne_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xbb,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xbb,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_ne_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xbb,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_neq_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5b,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x5b,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_neq_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: 
[0xfe,0xfc,0x5b,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_nge_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x53,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x53,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_nge_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x53,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_ngt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x57,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x57,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_ngt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x57,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_nle_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x59,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x59,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_nle_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x59,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_nlg_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x55,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x55,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_nlg_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x55,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_nlt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5d,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x5d,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_nlt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5d,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_o_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4f,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x4f,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_o_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4f,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_u_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x51,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x51,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_u_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x51,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_class_f64_e32 lit64(0x10abcdef12345678), v255 ; encoding: [0xfe,0xfe,0xff,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfe,0xff,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_class_f64_e32 lit64(0x10abcdef12345678), v255 ; encoding: [0xfe,0xfe,0xff,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_eq_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x45,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x45,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_eq_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x45,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_eq_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa5,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa5,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_eq_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: 
[0xfe,0xfc,0xa5,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_eq_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb5,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb5,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_eq_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb5,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_ge_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4d,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x4d,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_ge_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4d,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_ge_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xad,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xad,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_ge_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xad,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_ge_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xbd,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xbd,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_ge_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xbd,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_gt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x49,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x49,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_gt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x49,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_gt_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa9,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa9,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_gt_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa9,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_gt_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb9,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb9,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_gt_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb9,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_le_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x47,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x47,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_le_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x47,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_le_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa7,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa7,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_le_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa7,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_le_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb7,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb7,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_le_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb7,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_lg_f64_e32 
lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4b,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x4b,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_lg_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4b,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_lt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x43,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x43,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_lt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x43,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_lt_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa3,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa3,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_lt_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa3,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_lt_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb3,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb3,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_lt_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb3,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_ne_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xab,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xab,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_ne_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xab,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_ne_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xbb,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xbb,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_ne_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xbb,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_neq_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5b,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x5b,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_neq_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5b,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_nge_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x53,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x53,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_nge_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x53,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_ngt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x57,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x57,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_ngt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x57,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_nle_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x59,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x59,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_nle_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x59,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_nlg_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: 
[0xfe,0xfc,0x55,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x55,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_nlg_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x55,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_nlt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5d,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x5d,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_nlt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5d,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_o_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4f,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x4f,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_o_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4f,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_u_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x51,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x51,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_u_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x51,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cvt_f32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x1e,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x1e,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cvt_f32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x1e,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cvt_i32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x06,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x06,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cvt_i32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x06,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cvt_u32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x2a,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x2a,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cvt_u32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x2a,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_floor_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x34,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x34,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_floor_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x34,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_fract_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x7c,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x7c,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_fract_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x7c,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_frexp_exp_i32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x78,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x78,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_frexp_exp_i32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x78,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_frexp_mant_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x7a,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x7a,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_frexp_mant_f64_e32 
v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x7a,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_max_num_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x1d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xfd,0x1d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_max_num_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x1d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_min_num_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x1b,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xfd,0x1b,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_min_num_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x1b,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_mul_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x0d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xfd,0x0d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_mul_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x0d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_rcp_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x5e,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x5e,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_rcp_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x5e,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_rndne_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x32,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x32,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_rndne_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x32,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_rsq_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x62,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x62,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_rsq_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x62,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_sqrt_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x68,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x68,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_sqrt_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x68,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_trunc_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x2e,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x2e,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_trunc_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x2e,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x4063233333333333) ; encoding: [0xfe,0x30,0xfc,0x7f,0x33,0x33,0x33,0x33,0x33,0x23,0x63,0x40] 0xfe,0x30,0xfc,0x7f,0x33,0x33,0x33,0x33,0x33,0x23,0x63,0x40 +# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x4063233333333333) ; encoding: [0xfe,0x30,0xfc,0x7f,0x33,0x33,0x33,0x33,0x33,0x23,0x63,0x40] -# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x448969368974c05b) ; encoding: [0xfe,0x30,0xfc,0x7f,0x5b,0xc0,0x74,0x89,0x36,0x69,0x89,0x44] 0xfe,0x30,0xfc,0x7f,0x5b,0xc0,0x74,0x89,0x36,0x69,0x89,0x44 +# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x448969368974c05b) ; encoding: 
[0xfe,0x30,0xfc,0x7f,0x5b,0xc0,0x74,0x89,0x36,0x69,0x89,0x44] -# GFX1250: v_ceil_f64_e32 v[254:255], 0x40632000 ; encoding: [0xff,0x30,0xfc,0x7f,0x00,0x20,0x63,0x40] 0xff,0x30,0xfc,0x7f,0x00,0x20,0x63,0x40 +# GFX1250: v_ceil_f64_e32 v[254:255], 0x40632000 ; encoding: [0xff,0x30,0xfc,0x7f,0x00,0x20,0x63,0x40] -# GFX1250: v_mov_b64_e32 v[0:1], 0x12345678 ; encoding: [0xff,0x3a,0x00,0x7e,0x78,0x56,0x34,0x12] 0xff,0x3a,0x00,0x7e,0x78,0x56,0x34,0x12 +# GFX1250: v_mov_b64_e32 v[0:1], 0x12345678 ; encoding: [0xff,0x3a,0x00,0x7e,0x78,0x56,0x34,0x12] -# GFX1250: v_ceil_f64_e32 v[254:255], 0.15915494309189532 ; encoding: [0xf8,0x30,0xfc,0x7f] 0xf8,0x30,0xfc,0x7f +# GFX1250: v_ceil_f64_e32 v[254:255], 0.15915494309189532 ; encoding: [0xf8,0x30,0xfc,0x7f] -# GFX1250: v_ceil_f64_e32 v[254:255], -4.0 ; encoding: [0xf7,0x30,0xfc,0x7f] 0xf7,0x30,0xfc,0x7f +# GFX1250: v_ceil_f64_e32 v[254:255], -4.0 ; encoding: [0xf7,0x30,0xfc,0x7f] -# GFX1250: v_ceil_f64_e32 v[254:255], 2.0 ; encoding: [0xf4,0x30,0xfc,0x7f] 0xf4,0x30,0xfc,0x7f +# GFX1250: v_ceil_f64_e32 v[254:255], 2.0 ; encoding: [0xf4,0x30,0xfc,0x7f] -# GFX1250: v_ceil_f64_e32 v[254:255], 0 ; encoding: [0x80,0x30,0xfc,0x7f] 0x80,0x30,0xfc,0x7f +# GFX1250: v_ceil_f64_e32 v[254:255], 0 ; encoding: [0x80,0x30,0xfc,0x7f] -# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x7b) ; encoding: [0xfe,0x30,0xfc,0x7f,0x7b,0x00,0x00,0x00,0x00,0x00,0x00,0x00] 0xfe,0x30,0xfc,0x7f,0x7b,0x00,0x00,0x00,0x00,0x00,0x00,0x00 +# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x7b) ; encoding: [0xfe,0x30,0xfc,0x7f,0x7b,0x00,0x00,0x00,0x00,0x00,0x00,0x00] -# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x109a) ; encoding: [0xfe,0x30,0xfc,0x7f,0x9a,0x10,0x00,0x00,0x00,0x00,0x00,0x00] 0xfe,0x30,0xfc,0x7f,0x9a,0x10,0x00,0x00,0x00,0x00,0x00,0x00 +# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x109a) ; encoding: [0xfe,0x30,0xfc,0x7f,0x9a,0x10,0x00,0x00,0x00,0x00,0x00,0x00] diff --git a/llvm/test/MC/LoongArch/Macros/macros-la.s b/llvm/test/MC/LoongArch/Macros/macros-la.s index a732988..8022d5b 100644 --- a/llvm/test/MC/LoongArch/Macros/macros-la.s +++ b/llvm/test/MC/LoongArch/Macros/macros-la.s @@ -26,6 +26,7 @@ la.abs $a0, sym_abs # ABS-NEXT: lu32i.d $a0, %abs64_lo20(sym_abs) # ABS-NEXT: lu52i.d $a0, $a0, %abs64_hi12(sym_abs) # ABS-EMPTY: +# RELOC-NEXT: R_LARCH_MARK_LA - 0x0 # RELOC-NEXT: R_LARCH_ABS_HI20 sym_abs 0x0 # RELOC-NEXT: R_LARCH_ABS_LO12 sym_abs 0x0 # RELOC-NEXT: R_LARCH_ABS64_LO20 sym_abs 0x0 diff --git a/llvm/test/MachineVerifier/test_g_build_vector.mir b/llvm/test/MachineVerifier/test_g_build_vector.mir index 50b9801..9857306 100644 --- a/llvm/test/MachineVerifier/test_g_build_vector.mir +++ b/llvm/test/MachineVerifier/test_g_build_vector.mir @@ -16,17 +16,17 @@ body: | ; CHECK: Bad machine code: G_BUILD_VECTOR must produce a vector from scalar operands %3:_(<2 x s32>) = G_BUILD_VECTOR %2 - ; CHECK: Bad machine code: G_BUILD_VECTOR must have an operand for each elemement + ; CHECK: Bad machine code: G_BUILD_VECTOR must have an operand for each element %4:_(<2 x s32>) = G_BUILD_VECTOR %0, %0, %0, %0 ; CHECK: Bad machine code: G_BUILD_VECTOR result element type must match source type - ; CHECK: Bad machine code: G_BUILD_VECTOR must have an operand for each elemement + ; CHECK: Bad machine code: G_BUILD_VECTOR must have an operand for each element %5:_(<4 x s16>) = G_BUILD_VECTOR %0, %0 %6:_(s16) = IMPLICIT_DEF ; CHECK: Bad machine code: G_BUILD_VECTOR result element type must match source type - ; CHECK: Bad machine code: G_BUILD_VECTOR must have an operand for each elemement + ; CHECK: 
Bad machine code: G_BUILD_VECTOR must have an operand for each element %7:_(<2 x s32>) = G_BUILD_VECTOR %6, %6, %6, %6 %8:_(p0) = IMPLICIT_DEF diff --git a/llvm/test/Transforms/AggressiveInstCombine/memchr.ll b/llvm/test/Transforms/AggressiveInstCombine/memchr.ll index b26320b..6fbe960 100644 --- a/llvm/test/Transforms/AggressiveInstCombine/memchr.ll +++ b/llvm/test/Transforms/AggressiveInstCombine/memchr.ll @@ -6,9 +6,10 @@ declare ptr @memchr(ptr, i32, i64) -define i1 @test_memchr_null(i32 %x) { +define i1 @test_memchr_null(i32 %x) !prof !0 { ; CHECK-LABEL: define i1 @test_memchr_null( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0:![0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8 ; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [ @@ -40,9 +41,10 @@ entry: ret i1 %isnull } -define ptr @test_memchr(i32 %x) { +define ptr @test_memchr(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8 ; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [ @@ -72,16 +74,17 @@ entry: ret ptr %memchr } -define ptr @test_memchr_smaller_n(i32 %x) { +define ptr @test_memchr_smaller_n(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_smaller_n( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8 ; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [ ; CHECK-NEXT: i8 48, label %[[MEMCHR_CASE:.*]] ; CHECK-NEXT: i8 49, label %[[MEMCHR_CASE1:.*]] ; CHECK-NEXT: i8 0, label %[[MEMCHR_CASE2:.*]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF_1:![0-9]+]] ; CHECK: [[MEMCHR_CASE]]: ; CHECK-NEXT: br label %[[MEMCHR_SUCCESS:.*]] ; CHECK: [[MEMCHR_CASE1]]: @@ -103,9 +106,10 @@ entry: ; negative tests -define ptr @test_memchr_larger_n(i32 %x) { +define ptr @test_memchr_larger_n(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_larger_n( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i64 6) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -115,9 +119,10 @@ entry: ret ptr %memchr } -define ptr @test_memchr_non_constant(i32 %x, ptr %str) { +define ptr @test_memchr_non_constant(i32 %x, ptr %str) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_non_constant( -; CHECK-SAME: i32 [[X:%.*]], ptr [[STR:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]], ptr [[STR:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr [[STR]], i32 [[X]], i64 5) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -127,8 +132,9 @@ entry: ret ptr %memchr } -define ptr @test_memchr_constant_ch() { -; CHECK-LABEL: define ptr @test_memchr_constant_ch() { +define ptr @test_memchr_constant_ch() !prof !0 { +; CHECK-LABEL: define ptr @test_memchr_constant_ch() +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 49, i64 5) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -138,9 +144,10 @@ entry: ret ptr %memchr } -define ptr @test_memchr_dynamic_n(i32 %x, i32 %y) { +define ptr @test_memchr_dynamic_n(i32 %x, i32 %y) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_dynamic_n( -; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-SAME: i32 
[[X:%.*]], i32 [[Y:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i32 [[Y]]) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -150,9 +157,10 @@ entry: ret ptr %memchr } -define ptr @test_memchr_long(i32 %x) { +define ptr @test_memchr_long(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_long( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str_long, i32 [[X]], i64 8) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -163,9 +171,10 @@ entry: } ; We want to check that the compiler still calls memchr if the length is non-constant: -define ptr @test_memchr_non_constant_length2(i32 %x, i64 %len) { +define ptr @test_memchr_non_constant_length2(i32 %x, i64 %len) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_non_constant_length2( -; CHECK-SAME: i32 [[X:%.*]], i64 [[LEN:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]], i64 [[LEN:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i64 [[LEN]]) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -174,3 +183,7 @@ entry: %memchr = call ptr @memchr(ptr @str, i32 %x, i64 %len) ret ptr %memchr } + +!0 = !{!"function_entry_count", i64 1000} +; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000} +; CHECK: [[PROF_1]] = !{!"unknown", !"aggressive-instcombine"}
\ No newline at end of file diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/track-predecessor-ranges.ll b/llvm/test/Transforms/CorrelatedValuePropagation/track-predecessor-ranges.ll new file mode 100644 index 0000000..b5f68842 --- /dev/null +++ b/llvm/test/Transforms/CorrelatedValuePropagation/track-predecessor-ranges.ll @@ -0,0 +1,98 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes="correlated-propagation" -S 2>&1 | FileCheck %s +; RUN: opt < %s -passes="correlated-propagation" -lvi-per-pred-ranges -S 2>&1 | FileCheck %s -check-prefix=LVI-PRED-RANGES + +@global = external local_unnamed_addr global [4338 x i32], align 16 + +define dso_local noundef zeroext i1 @bar(i64 noundef %arg, ptr noundef writeonly captures(none) %arg1) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef zeroext i1 @bar( +; CHECK-SAME: i64 noundef [[ARG:%.*]], ptr noundef writeonly captures(none) [[ARG1:%.*]]) local_unnamed_addr { +; CHECK-NEXT: [[BB:.*]]: +; CHECK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[ARG]], 1025 +; CHECK-NEXT: br i1 [[ICMP]], label %[[BB4:.*]], label %[[BB2:.*]] +; CHECK: [[BB2]]: +; CHECK-NEXT: [[ICMP3:%.*]] = icmp ult i64 [[ARG]], 262145 +; CHECK-NEXT: br i1 [[ICMP3]], label %[[BB4]], label %[[BB9:.*]] +; CHECK: [[BB4]]: +; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ 7, %[[BB]] ], [ 15487, %[[BB2]] ] +; CHECK-NEXT: [[PHI5:%.*]] = phi i64 [ 3, %[[BB]] ], [ 7, %[[BB2]] ] +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i64 [[PHI]], [[ARG]] +; CHECK-NEXT: [[LSHR:%.*]] = lshr i64 [[ADD]], [[PHI5]] +; CHECK-NEXT: [[ICMP6:%.*]] = icmp samesign ult i64 [[LSHR]], 4338 +; CHECK-NEXT: br i1 [[ICMP6]], label %[[BB8:.*]], label %[[BB7:.*]] +; CHECK: [[BB7]]: +; CHECK-NEXT: tail call void @llvm.ubsantrap(i8 18) +; CHECK-NEXT: unreachable +; CHECK: [[BB8]]: +; CHECK-NEXT: [[GETELEMENTPTR:%.*]] = getelementptr inbounds nuw [4338 x i32], ptr @global, i64 0, i64 [[LSHR]] +; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[GETELEMENTPTR]], align 4 +; CHECK-NEXT: [[SEXT:%.*]] = sext i32 [[LOAD]] to i64 +; CHECK-NEXT: store i64 [[SEXT]], ptr [[ARG1]], align 8 +; CHECK-NEXT: br label %[[BB9]] +; CHECK: [[BB9]]: +; CHECK-NEXT: [[PHI10:%.*]] = phi i1 [ true, %[[BB8]] ], [ false, %[[BB2]] ] +; CHECK-NEXT: ret i1 [[PHI10]] +; +; LVI-PRED-RANGES-LABEL: define dso_local noundef zeroext i1 @bar( +; LVI-PRED-RANGES-SAME: i64 noundef [[ARG:%.*]], ptr noundef writeonly captures(none) [[ARG1:%.*]]) local_unnamed_addr { +; LVI-PRED-RANGES-NEXT: [[BB:.*]]: +; LVI-PRED-RANGES-NEXT: [[ICMP:%.*]] = icmp ult i64 [[ARG]], 1025 +; LVI-PRED-RANGES-NEXT: br i1 [[ICMP]], label %[[BB4:.*]], label %[[BB2:.*]] +; LVI-PRED-RANGES: [[BB2]]: +; LVI-PRED-RANGES-NEXT: [[ICMP3:%.*]] = icmp ult i64 [[ARG]], 262145 +; LVI-PRED-RANGES-NEXT: br i1 [[ICMP3]], label %[[BB4]], label %[[BB9:.*]] +; LVI-PRED-RANGES: [[BB4]]: +; LVI-PRED-RANGES-NEXT: [[PHI:%.*]] = phi i64 [ 7, %[[BB]] ], [ 15487, %[[BB2]] ] +; LVI-PRED-RANGES-NEXT: [[PHI5:%.*]] = phi i64 [ 3, %[[BB]] ], [ 7, %[[BB2]] ] +; LVI-PRED-RANGES-NEXT: [[ADD:%.*]] = add nuw nsw i64 [[PHI]], [[ARG]] +; LVI-PRED-RANGES-NEXT: [[LSHR:%.*]] = lshr i64 [[ADD]], [[PHI5]] +; LVI-PRED-RANGES-NEXT: br i1 true, label %[[BB8:.*]], label %[[BB7:.*]] +; LVI-PRED-RANGES: [[BB7]]: +; LVI-PRED-RANGES-NEXT: tail call void @llvm.ubsantrap(i8 18) +; LVI-PRED-RANGES-NEXT: unreachable +; LVI-PRED-RANGES: [[BB8]]: +; LVI-PRED-RANGES-NEXT: [[GETELEMENTPTR:%.*]] = getelementptr inbounds nuw [4338 x i32], ptr @global, i64 0, i64 [[LSHR]] +; 
LVI-PRED-RANGES-NEXT: [[LOAD:%.*]] = load i32, ptr [[GETELEMENTPTR]], align 4 +; LVI-PRED-RANGES-NEXT: [[SEXT:%.*]] = sext i32 [[LOAD]] to i64 +; LVI-PRED-RANGES-NEXT: store i64 [[SEXT]], ptr [[ARG1]], align 8 +; LVI-PRED-RANGES-NEXT: br label %[[BB9]] +; LVI-PRED-RANGES: [[BB9]]: +; LVI-PRED-RANGES-NEXT: [[PHI10:%.*]] = phi i1 [ true, %[[BB8]] ], [ false, %[[BB2]] ] +; LVI-PRED-RANGES-NEXT: ret i1 [[PHI10]] +; +bb: + %icmp = icmp ult i64 %arg, 1025 + br i1 %icmp, label %bb4, label %bb2 + +bb2: ; preds = %bb + %icmp3 = icmp ult i64 %arg, 262145 + br i1 %icmp3, label %bb4, label %bb9 + +bb4: ; preds = %bb2, %bb + %phi = phi i64 [ 7, %bb ], [ 15487, %bb2 ] + %phi5 = phi i64 [ 3, %bb ], [ 7, %bb2 ] + %add = add nuw nsw i64 %phi, %arg + %lshr = lshr i64 %add, %phi5 + %icmp6 = icmp samesign ult i64 %lshr, 4338 + br i1 %icmp6, label %bb8, label %bb7 + +bb7: ; preds = %bb4 + tail call void @llvm.ubsantrap(i8 18) + unreachable + +bb8: ; preds = %bb4 + %getelementptr = getelementptr inbounds nuw [4338 x i32], ptr @global, i64 0, i64 %lshr + %load = load i32, ptr %getelementptr, align 4 + %sext = sext i32 %load to i64 + store i64 %sext, ptr %arg1, align 8 + br label %bb9 + +bb9: ; preds = %bb8, %bb2 + %phi10 = phi i1 [ true, %bb8 ], [ false, %bb2 ] + ret i1 %phi10 +} + +; Function Attrs: cold noreturn nounwind +declare void @llvm.ubsantrap(i8 immarg) #0 + +attributes #0 = { cold noreturn nounwind } diff --git a/llvm/test/Transforms/DropUnnecessaryAssumes/basic.ll b/llvm/test/Transforms/DropUnnecessaryAssumes/basic.ll index e2a9b4e..8a6f60b 100644 --- a/llvm/test/Transforms/DropUnnecessaryAssumes/basic.ll +++ b/llvm/test/Transforms/DropUnnecessaryAssumes/basic.ll @@ -1,6 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -S -passes=drop-unnecessary-assumes < %s | FileCheck %s +declare void @use(i32 %x) +declare i32 @get() + define void @basic_dead(i32 %x) { ; CHECK-LABEL: define void @basic_dead( ; CHECK-SAME: i32 [[X:%.*]]) { @@ -180,3 +183,136 @@ define void @type_test(ptr %x) { call void @llvm.assume(i1 %test) ret void } + +define void @multiple_dead_conds(i32 %x) { +; CHECK-LABEL: define void @multiple_dead_conds( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: ret void +; + %cond1 = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond1) + %cond2 = icmp ne i32 %x, 64 + call void @llvm.assume(i1 %cond2) + ret void +} + +define void @multiple_dead_bundles(ptr %x) { +; CHECK-LABEL: define void @multiple_dead_bundles( +; CHECK-SAME: ptr [[X:%.*]]) { +; CHECK-NEXT: ret void +; + call void @llvm.assume(i1 true) ["align"(ptr %x, i64 8), "nonnull"(ptr %x)] + ret void +} + +; The assume is eliminated, but currently leaves behind a dead cycle. 
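; (Editorial aside, not part of the patch: as the CHECK lines below show, the
; icmp feeding the assume and the assume itself are removed, while the %iv
; phi/add chain survives in the loop; cleaning up that dead induction cycle
; would presumably fall to a later pass, which this test does not assert.)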
+define void @dead_cycle(i1 %loop.cond) { +; CHECK-LABEL: define void @dead_cycle( +; CHECK-SAME: i1 [[LOOP_COND:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: br i1 [[LOOP_COND]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %cond = icmp ne i32 %iv, 64 + call void @llvm.assume(i1 %cond) + %iv.next = add i32 %iv, 1 + br i1 %loop.cond, label %loop, label %exit + +exit: + ret void +} + +define void @use_in_side_effect(i32 %x) { +; CHECK-LABEL: define void @use_in_side_effect( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[COND:%.*]] = icmp sge i32 [[X]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[COND]]) +; CHECK-NEXT: call void @use(i32 [[X]]) +; CHECK-NEXT: ret void +; + %cond = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond) + call void @use(i32 %x) + ret void +} + +define void @indirect_use_in_side_effect(i32 %x) { +; CHECK-LABEL: define void @indirect_use_in_side_effect( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[COND:%.*]] = icmp sge i32 [[X]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[COND]]) +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[X]], 1 +; CHECK-NEXT: call void @use(i32 [[ADD]]) +; CHECK-NEXT: ret void +; + %cond = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond) + %add = add i32 %x, 1 + call void @use(i32 %add) + ret void +} + +; The affected value itself has a side effect, but we can still drop the +; assume. +define void @affected_value_has_side_effect() { +; CHECK-LABEL: define void @affected_value_has_side_effect() { +; CHECK-NEXT: [[X:%.*]] = call i32 @get() +; CHECK-NEXT: ret void +; + %x = call i32 @get() + %cond = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond) + ret void +} + +define i32 @affected_value_has_side_effect_and_is_used() { +; CHECK-LABEL: define i32 @affected_value_has_side_effect_and_is_used() { +; CHECK-NEXT: [[X:%.*]] = call i32 @get() +; CHECK-NEXT: [[COND:%.*]] = icmp sge i32 [[X]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[COND]]) +; CHECK-NEXT: ret i32 [[X]] +; + %x = call i32 @get() + %cond = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond) + ret i32 %x +} + +@g = external global i8 +@g2 = external global i8 + +; Assumes on globals are currently not supported. 
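; (Editorial aside, not part of the patch: contrast with @multiple_dead_bundles
; above, where the same kind of "align" bundle on a function argument is
; dropped. A global stays visible outside the function, as @other_func below
; demonstrates for @g2, so the pass conservatively keeps these bundles.)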
+define void @assume_on_global() { +; CHECK-LABEL: define void @assume_on_global() { +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr @g, i64 8) ] +; CHECK-NEXT: ret void +; + call void @llvm.assume(i1 true) ["align"(ptr @g, i64 8)] + ret void +} + +define void @assume_on_global_used_in_other_func() { +; CHECK-LABEL: define void @assume_on_global_used_in_other_func() { +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr @g2, i64 8) ] +; CHECK-NEXT: ret void +; + call void @llvm.assume(i1 true) ["align"(ptr @g2, i64 8)] + ret void +} + +define ptr @other_func() { +; CHECK-LABEL: define ptr @other_func() { +; CHECK-NEXT: ret ptr @g2 +; + ret ptr @g2 +} diff --git a/llvm/test/Transforms/InstCombine/preserve-profile.ll b/llvm/test/Transforms/InstCombine/preserve-profile.ll index dd83805..8cb3e68 100644 --- a/llvm/test/Transforms/InstCombine/preserve-profile.ll +++ b/llvm/test/Transforms/InstCombine/preserve-profile.ll @@ -46,9 +46,59 @@ define i32 @NegBin(i1 %C) !prof !0 { ret i32 %V } +define i32 @select_C_minus_1_or_C_from_bool(i1 %x) !prof !0 { +; CHECK-LABEL: define i32 @select_C_minus_1_or_C_from_bool( +; CHECK-SAME: i1 [[X:%.*]]) !prof [[PROF0]] { +; CHECK-NEXT: [[ADD:%.*]] = select i1 [[X]], i32 41, i32 42, !prof [[PROF2:![0-9]+]] +; CHECK-NEXT: ret i32 [[ADD]] +; + %ext = sext i1 %x to i32 + %add = add i32 %ext, 42 + ret i32 %add +} + +define i5 @and_add(i1 %x, i1 %y) !prof !0 { +; CHECK-LABEL: define i5 @and_add( +; CHECK-SAME: i1 [[X:%.*]], i1 [[Y:%.*]]) !prof [[PROF0]] { +; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[X]], true +; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[Y]], [[TMP1]] +; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP2]], i5 -2, i5 0, !prof [[PROF2]] +; CHECK-NEXT: ret i5 [[R]] +; + %xz = zext i1 %x to i5 + %ys = sext i1 %y to i5 + %sub = add i5 %xz, %ys + %r = and i5 %sub, 30 + ret i5 %r +} + +define i32 @add_zext_zext_i1(i1 %a) !prof !0 { +; CHECK-LABEL: define i32 @add_zext_zext_i1( +; CHECK-SAME: i1 [[A:%.*]]) !prof [[PROF0]] { +; CHECK-NEXT: [[ADD:%.*]] = select i1 [[A]], i32 2, i32 0, !prof [[PROF2]] +; CHECK-NEXT: ret i32 [[ADD]] +; + %zext = zext i1 %a to i32 + %add = add i32 %zext, %zext + ret i32 %add +} + +define i32 @no_count_no_branch_weights(i1 %a) { +; CHECK-LABEL: define i32 @no_count_no_branch_weights( +; CHECK-SAME: i1 [[A:%.*]]) { +; CHECK-NEXT: [[ADD:%.*]] = select i1 [[A]], i32 2, i32 0 +; CHECK-NEXT: ret i32 [[ADD]] +; + %zext = zext i1 %a to i32 + %add = add i32 %zext, %zext + ret i32 %add +} + + !0 = !{!"function_entry_count", i64 1000} !1 = !{!"branch_weights", i32 2, i32 3} ;. ; CHECK: [[PROF0]] = !{!"function_entry_count", i64 1000} ; CHECK: [[PROF1]] = !{!"branch_weights", i32 2, i32 3} +; CHECK: [[PROF2]] = !{!"unknown", !"instcombine"} ;. 
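An editorial note before the next hunk: the new InstCombine/ptrtoaddr.ll tests below rely on their custom datalayout "p1:64:64:64:32", whose fourth field gives addrspace(1) pointers a 32-bit index (address) width despite their 64-bit storage size, so ptrtoaddr on such a pointer yields an i32. Here is a minimal sketch of the round-trip the tests expect to fold, restating only what the file's RUN and CHECK lines assert; the function name @roundtrip_sketch is invented for illustration:

    target datalayout = "p1:64:64:64:32"

    ; Width match: the inttoptr source type (i32) equals the address width,
    ; so ptrtoaddr(inttoptr(x)) is just x.
    define i32 @roundtrip_sketch() {
      ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i32 -1 to ptr addrspace(1)) to i32)
    }
    ; After opt -passes=instcombine -S: ret i32 -1

When the source integer is wider or narrower than the address width (the i64 and i16 diff_size variants), the constant expression is left untouched, as the corresponding CHECK lines confirm.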
diff --git a/llvm/test/Transforms/InstCombine/ptrtoaddr.ll b/llvm/test/Transforms/InstCombine/ptrtoaddr.ll new file mode 100644 index 0000000..61b1331 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/ptrtoaddr.ll @@ -0,0 +1,65 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < %s -passes=instcombine -S | FileCheck %s +target datalayout = "p1:64:64:64:32" + +define i32 @ptrtoaddr_inttoptr_arg(i32 %a) { +; CHECK-LABEL: define i32 @ptrtoaddr_inttoptr_arg( +; CHECK-SAME: i32 [[A:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[A]] to i64 +; CHECK-NEXT: [[TOPTR:%.*]] = inttoptr i64 [[TMP1]] to ptr addrspace(1) +; CHECK-NEXT: [[TOADDR:%.*]] = ptrtoaddr ptr addrspace(1) [[TOPTR]] to i32 +; CHECK-NEXT: ret i32 [[TOADDR]] +; + %toptr = inttoptr i32 %a to ptr addrspace(1) + %toaddr = ptrtoaddr ptr addrspace(1) %toptr to i32 + ret i32 %toaddr +} + +define i32 @ptrtoaddr_inttoptr() { +; CHECK-LABEL: define i32 @ptrtoaddr_inttoptr() { +; CHECK-NEXT: ret i32 -1 +; + ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i32 -1 to ptr addrspace(1)) to i32) +} + +define i32 @ptrtoaddr_inttoptr_diff_size1() { +; CHECK-LABEL: define i32 @ptrtoaddr_inttoptr_diff_size1() { +; CHECK-NEXT: ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i64 -1 to ptr addrspace(1)) to i32) +; + ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i64 -1 to ptr addrspace(1)) to i32) +} + +define i32 @ptrtoaddr_inttoptr_diff_size2() { +; CHECK-LABEL: define i32 @ptrtoaddr_inttoptr_diff_size2() { +; CHECK-NEXT: ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i16 -1 to ptr addrspace(1)) to i32) +; + ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i16 -1 to ptr addrspace(1)) to i32) +} + +define i64 @ptrtoaddr_inttoptr_noas1() { +; CHECK-LABEL: define i64 @ptrtoaddr_inttoptr_noas1() { +; CHECK-NEXT: ret i64 1 +; + ret i64 ptrtoaddr (ptr getelementptr (i8, ptr null, i64 1) to i64) +} + +define i64 @ptr2addr2_inttoptr_noas2() { +; CHECK-LABEL: define i64 @ptr2addr2_inttoptr_noas2() { +; CHECK-NEXT: ret i64 123 +; + ret i64 ptrtoaddr (ptr inttoptr (i64 123 to ptr) to i64) +} + +define i64 @ptrtoaddr_inttoptr_noas_diff_size1() { +; CHECK-LABEL: define i64 @ptrtoaddr_inttoptr_noas_diff_size1() { +; CHECK-NEXT: ret i64 ptrtoaddr (ptr inttoptr (i32 -1 to ptr) to i64) +; + ret i64 ptrtoaddr (ptr inttoptr (i32 -1 to ptr) to i64) +} + +define i64 @ptrtoaddr_inttoptr_noas_diff_size2() { +; CHECK-LABEL: define i64 @ptrtoaddr_inttoptr_noas_diff_size2() { +; CHECK-NEXT: ret i64 ptrtoaddr (ptr inttoptr (i128 -1 to ptr) to i64) +; + ret i64 ptrtoaddr (ptr inttoptr (i128 -1 to ptr) to i64) +} diff --git a/llvm/test/Transforms/InstCombine/vector-reductions.ll b/llvm/test/Transforms/InstCombine/vector-reductions.ll index 10f4aca..f1e0dd9 100644 --- a/llvm/test/Transforms/InstCombine/vector-reductions.ll +++ b/llvm/test/Transforms/InstCombine/vector-reductions.ll @@ -308,3 +308,174 @@ define i32 @diff_of_sums_type_mismatch2(<8 x i32> %v0, <4 x i32> %v1) { %r = sub i32 %r0, %r1 ret i32 %r } + +define i32 @constant_multiplied_4xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_4xi32( +; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP0:%.*]], 2 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <4 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %3) + ret i32 %4 +} + +define i32 @constant_multiplied_3xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_3xi32( +; CHECK-NEXT: [[TMP2:%.*]] = mul 
i32 [[TMP0:%.*]], 3 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <3 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <3 x i32> %2, <3 x i32> poison, <3 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v3i32(<3 x i32> %3) + ret i32 %4 +} + +define i64 @constant_multiplied_4xi64(i64 %0) { +; CHECK-LABEL: @constant_multiplied_4xi64( +; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP0:%.*]], 2 +; CHECK-NEXT: ret i64 [[TMP2]] +; + %2 = insertelement <4 x i64> poison, i64 %0, i64 0 + %3 = shufflevector <4 x i64> %2, <4 x i64> poison, <4 x i32> zeroinitializer + %4 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %3) + ret i64 %4 +} + +define i32 @constant_multiplied_8xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_8xi32( +; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP0:%.*]], 3 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <8 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %3) + ret i32 %4 +} + + +define i32 @constant_multiplied_16xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_16xi32( +; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP0:%.*]], 4 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <16 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %3) + ret i32 %4 +} + + +define i32 @constant_multiplied_4xi32_at_idx1(i32 %0) { +; CHECK-LABEL: @constant_multiplied_4xi32_at_idx1( +; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP0:%.*]], 2 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 1 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, + <4 x i32> <i32 1, i32 1, i32 1, i32 1> + %4 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %3) + ret i32 %4 +} + +define i32 @negative_constant_multiplied_4xi32(i32 %0) { +; CHECK-LABEL: @negative_constant_multiplied_4xi32( +; CHECK-NEXT: ret i32 poison +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 1 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <4 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %3) + ret i32 %4 +} + +define i32 @constant_multiplied_6xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_6xi32( +; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[TMP0:%.*]], 6 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <6 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v6i32(<6 x i32> %3) + ret i32 %4 +} + +define i64 @constant_multiplied_6xi64(i64 %0) { +; CHECK-LABEL: @constant_multiplied_6xi64( +; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP0:%.*]], 6 +; CHECK-NEXT: ret i64 [[TMP2]] +; + %2 = insertelement <4 x i64> poison, i64 %0, i64 0 + %3 = shufflevector <4 x i64> %2, <4 x i64> poison, <6 x i32> zeroinitializer + %4 = tail call i64 @llvm.vector.reduce.add.v6i64(<6 x i64> %3) + ret i64 %4 +} + +define i1 @constant_multiplied_8xi1(i1 %0) { +; CHECK-LABEL: @constant_multiplied_8xi1( +; CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i1> poison, i1 [[TMP0:%.*]], i64 0 +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> poison, <8 x i32> zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i1> [[TMP3]] to i8 +; CHECK-NEXT: [[TMP5:%.*]] = call range(i8 0, 9) i8 @llvm.ctpop.i8(i8 [[TMP4]]) +; CHECK-NEXT: [[TMP6:%.*]] = trunc i8 [[TMP5]] to i1 +; CHECK-NEXT: ret i1 
[[TMP6]] +; + %2 = insertelement <8 x i1> poison, i1 %0, i32 0 + %3 = shufflevector <8 x i1> %2, <8 x i1> poison, <8 x i32> zeroinitializer + %4 = tail call i1 @llvm.vector.reduce.add.v8i1(<8 x i1> %3) + ret i1 %4 +} + +define i2 @constant_multiplied_4xi2(i2 %0) { +; CHECK-LABEL: @constant_multiplied_4xi2( +; CHECK-NEXT: ret i2 0 +; + %2 = insertelement <4 x i2> poison, i2 %0, i32 0 + %3 = shufflevector <4 x i2> %2, <4 x i2> poison, <4 x i32> zeroinitializer + %4 = tail call i2 @llvm.vector.reduce.add.v4i2(<4 x i2> %3) + ret i2 %4 +} + +define i2 @constant_multiplied_5xi2(i2 %0) { +; CHECK-LABEL: @constant_multiplied_5xi2( +; CHECK-NEXT: ret i2 [[TMP0:%.*]] +; + %2 = insertelement <5 x i2> poison, i2 %0, i64 0 + %3 = shufflevector <5 x i2> %2, <5 x i2> poison, <5 x i32> zeroinitializer + %4 = tail call i2 @llvm.vector.reduce.add.v5i2(<5 x i2> %3) + ret i2 %4 +} + +define i2 @constant_multiplied_6xi2(i2 %0) { +; CHECK-LABEL: @constant_multiplied_6xi2( +; CHECK-NEXT: [[TMP2:%.*]] = shl i2 [[TMP0:%.*]], 1 +; CHECK-NEXT: ret i2 [[TMP2]] +; + %2 = insertelement <6 x i2> poison, i2 %0, i64 0 + %3 = shufflevector <6 x i2> %2, <6 x i2> poison, <6 x i32> zeroinitializer + %4 = tail call i2 @llvm.vector.reduce.add.v6i2(<6 x i2> %3) + ret i2 %4 +} + +define i2 @constant_multiplied_7xi2(i2 %0) { +; CHECK-LABEL: @constant_multiplied_7xi2( +; CHECK-NEXT: [[TMP2:%.*]] = sub i2 0, [[TMP0:%.*]] +; CHECK-NEXT: ret i2 [[TMP2]] +; + %2 = insertelement <7 x i2> poison, i2 %0, i64 0 + %3 = shufflevector <7 x i2> %2, <7 x i2> poison, <7 x i32> zeroinitializer + %4 = tail call i2 @llvm.vector.reduce.add.v7i2(<7 x i2> %3) + ret i2 %4 +} + +define i32 @negative_scalable_vector(i32 %0) { +; CHECK-LABEL: @negative_scalable_vector( +; CHECK-NEXT: [[TMP2:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP0:%.*]], i64 0 +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = tail call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP3]]) +; CHECK-NEXT: ret i32 [[TMP4]] +; + %2 = insertelement <vscale x 4 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <vscale x 4 x i32> %2, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %3) + ret i32 %4 +} diff --git a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll index b3338f47..75420d4 100644 --- a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll +++ b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph:" --version 6 ; RUN: opt -p loop-vectorize -force-vector-width=2 -S %s | FileCheck %s declare void @llvm.assume(i1) @@ -47,29 +47,8 @@ define void @deref_assumption_in_header_constant_trip_count(ptr noalias noundef ; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; 
CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -123,27 +102,8 @@ define void @align_deref_assumption_in_header_constant_trip_count_loop_invariant ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4) ] @@ -216,29 +176,8 @@ define void @deref_assumption_too_small_in_header_constant_trip_count(ptr noalia ; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ 
"align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 2) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -312,29 +251,8 @@ define void @deref_assumption_in_header_constant_trip_count_align_1(ptr noalias ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -408,29 +326,8 @@ define void @deref_assumption_in_header_constant_trip_count_align_via_arg_attrib ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: 
[[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -504,29 +401,8 @@ define void @deref_assumption_in_header_constant_trip_count_align_not_known(ptr ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -596,29 +472,8 @@ define void @deref_assumption_in_then_constant_trip_count(ptr noalias noundef %a ; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr 
[[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -692,29 +547,8 @@ define void @deref_assumption_in_latch_constant_trip_count(ptr noalias noundef % ; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -747,7 +581,7 @@ exit: define void @deref_assumption_in_header_variable_trip_count(ptr noalias noundef %a, ptr noalias %b, ptr noalias %c, i64 %N) nofree nosync{ ; CHECK-LABEL: define void @deref_assumption_in_header_variable_trip_count( ; CHECK-SAME: ptr noalias noundef [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 2 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: @@ -792,30 +626,8 @@ define void @deref_assumption_in_header_variable_trip_count(ptr noalias noundef ; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: 
[[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP11:![0-9]+]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void ; entry: br label %loop.header @@ -867,28 +679,8 @@ define void @deref_assumption_in_preheader_constant_trip_count_align_1(ptr noali ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 4000) ] @@ -958,28 +750,8 @@ define void @deref_assumption_too_small_in_preheader_constant_trip_count_align_1 ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, 
%[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 3999) ] @@ -1031,28 +803,8 @@ define void @align_and_deref_assumption_in_preheader_constant_trip_count_align_4 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] @@ -1105,28 +857,8 @@ define void @deref_assumption_in_preheader_constant_trip_count_align_4_known_via ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp 
sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 4000) ] @@ -1196,28 +928,8 @@ define void @deref_assumption_in_preheader_constant_trip_count_align_4_not_known ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 4000) ] @@ -1287,28 +999,8 @@ define void @deref_assumption_too_small_in_preheader_constant_trip_count_align_4 ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 
-; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 3999) ] @@ -1376,27 +1068,8 @@ define void @may_free_align_deref_assumption_in_header_constant_trip_count_loop_ ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4) ] @@ -1465,27 +1138,8 @@ define void @may_free_local_ptr_align_deref_assumption_in_header_constant_trip_c ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] 
= icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: %a = call ptr @get_ptr() @@ -1519,25 +1173,306 @@ exit: declare ptr @get_ptr() declare void @may_free() -;. -; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} -; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} -; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} -; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} -; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} -; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} -; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]} -; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]} -; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]} -; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]} -; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META2]]} -; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]} -; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]], [[META2]]} -; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]} -; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META2]]} -; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]} -; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]], [[META2]]} -;. +define void @deref_assumption_in_header_constant_trip_count_nofree_via_context(ptr noalias noundef %a, ptr noalias %b, ptr noalias %c) nosync { +; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count_nofree_via_context( +; CHECK-SAME: ptr noalias noundef [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR2:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 4), "dereferenceable"(ptr [[A]], i64 4000) ] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD1]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; CHECK-NEXT: br i1 [[TMP13]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP5]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; CHECK: [[PRED_LOAD_CONTINUE]]: +; CHECK-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; CHECK-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_IF1]]: +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1 +; 
CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP11]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_CONTINUE2]]: +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP12]], %[[PRED_LOAD_IF1]] ] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[WIDE_LOAD1]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP14]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 +; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %gep.a = getelementptr i32, ptr %a, i64 %iv + %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv + %l.b = load i32, ptr %gep.b, align 4 + %c.1 = icmp sge i32 %l.b, 0 + br i1 %c.1, label %loop.latch, label %loop.then + +loop.then: + %l.a = load i32, ptr %gep.a, align 4 + br label %loop.latch + +loop.latch: + %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop.header ] + %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv + store i32 %merge, ptr %gep.c, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 1000 + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + +define void @deref_assumption_in_header_constant_trip_count_may_free(ptr noalias noundef %a, ptr noalias %b, ptr noalias %c) nosync { +; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count_may_free( +; CHECK-SAME: ptr noalias noundef [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR2]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 4), "dereferenceable"(ptr [[A]], i64 4000) ] +; CHECK-NEXT: call void @may_free() +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP3]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; CHECK-NEXT: br i1 [[TMP5]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP17]] +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i32> poison, i32 [[TMP7]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; CHECK: [[PRED_LOAD_CONTINUE]]: +; CHECK-NEXT: [[TMP9:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP8]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; 
CHECK-NEXT: br i1 [[TMP10]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_IF1]]: +; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP18]] +; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> [[TMP9]], i32 [[TMP12]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_CONTINUE2]]: +; CHECK-NEXT: [[TMP14:%.*]] = phi <2 x i32> [ [[TMP9]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP13]], %[[PRED_LOAD_IF1]] ] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP4]], <2 x i32> [[TMP14]], <2 x i32> [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP15]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 +; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] + call void @may_free() + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %gep.a = getelementptr i32, ptr %a, i64 %iv + %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv + %l.b = load i32, ptr %gep.b, align 4 + %c.1 = icmp sge i32 %l.b, 0 + br i1 %c.1, label %loop.latch, label %loop.then + +loop.then: + %l.a = load i32, ptr %gep.a, align 4 + br label %loop.latch + +loop.latch: + %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop.header ] + %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv + store i32 %merge, ptr %gep.c, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 1000 + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + +define void @deref_assumption_in_header_constant_trip_count_nofree_via_context_but_missing_nosync(ptr noalias noundef %a, ptr noalias %b, ptr noalias %c) { +; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count_nofree_via_context_but_missing_nosync( +; CHECK-SAME: ptr noalias noundef [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 4), "dereferenceable"(ptr [[A]], i64 4000) ] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0 +; CHECK-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP5]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; CHECK: 
[[PRED_LOAD_CONTINUE]]: +; CHECK-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1 +; CHECK-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_IF1]]: +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP11]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_CONTINUE2]]: +; CHECK-NEXT: [[TMP13:%.*]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP12]], %[[PRED_LOAD_IF1]] ] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP1]], <2 x i32> [[TMP13]], <2 x i32> [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP14]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 +; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %gep.a = getelementptr i32, ptr %a, i64 %iv + %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv + %l.b = load i32, ptr %gep.b, align 4 + %c.1 = icmp sge i32 %l.b, 0 + br i1 %c.1, label %loop.latch, label %loop.then + +loop.then: + %l.a = load i32, ptr %gep.a, align 4 + br label %loop.latch + +loop.latch: + %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop.header ] + %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv + store i32 %merge, ptr %gep.c, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 1000 + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + +define void @deref_assumption_in_header_constant_trip_count_multiple_loop_predecessors(ptr noalias noundef %a, ptr noalias %b, ptr noalias %c, i1 %pre) nosync { +; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count_multiple_loop_predecessors( +; CHECK-SAME: ptr noalias noundef [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i1 [[PRE:%.*]]) #[[ATTR2]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 4), "dereferenceable"(ptr [[A]], i64 4000) ] +; CHECK-NEXT: br i1 [[PRE]], label %[[THEN:.*]], label %[[ELSE:.*]] +; CHECK: [[THEN]]: +; CHECK-NEXT: store i32 0, ptr [[A]], align 4 +; CHECK-NEXT: br label %[[LOOP_HEADER_PREHEADER:.*]] +; CHECK: [[ELSE]]: +; CHECK-NEXT: store i32 0, ptr [[B]], align 4 +; CHECK-NEXT: br label %[[LOOP_HEADER_PREHEADER]] +; CHECK: [[LOOP_HEADER_PREHEADER]]: +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD]], 
zeroinitializer +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0 +; CHECK-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP5]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; CHECK: [[PRED_LOAD_CONTINUE]]: +; CHECK-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1 +; CHECK-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_IF1]]: +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP11]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_CONTINUE2]]: +; CHECK-NEXT: [[TMP13:%.*]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP12]], %[[PRED_LOAD_IF1]] ] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP1]], <2 x i32> [[TMP13]], <2 x i32> [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP14]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 +; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] + br i1 %pre, label %then, label %else + +then: + store i32 0, ptr %a + br label %loop.header + +else: + store i32 0, ptr %b + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %then ], [ 0, %else ], [ %iv.next, %loop.latch ] + %gep.a = getelementptr i32, ptr %a, i64 %iv + %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv + %l.b = load i32, ptr %gep.b, align 4 + %c.1 = icmp sge i32 %l.b, 0 + br i1 %c.1, label %loop.latch, label %loop.then + +loop.then: + %l.a = load i32, ptr %gep.a, align 4 + br label %loop.latch + +loop.latch: + %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop.header ] + %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv + store i32 %merge, ptr %gep.c, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 1000 + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + + diff --git a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll index cb0c778..73d5e26 100644 --- a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll +++ b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll @@ -220,14 +220,18 @@ define void @expand_diff_scev_unknown(ptr %dst, i1 %invar.c, i32 %step) mustprog ; CHECK-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[STEP]], i32 1) ; CHECK-NEXT: [[TMP8:%.*]] = udiv i32 [[TMP7]], [[UMAX]] ; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TMP6]], [[TMP8]] -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 
[[TMP9]], 2 +; CHECK-NEXT: [[TMP12:%.*]] = add i32 [[INDVAR_LCSSA1]], 2 +; CHECK-NEXT: [[SMAX1:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP12]], i32 0) +; CHECK-NEXT: [[TMP14:%.*]] = add i32 [[TMP3]], -1 +; CHECK-NEXT: [[TMP15:%.*]] = add i32 [[SMAX1]], [[TMP14]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP15]], 2 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] ; CHECK: [[VECTOR_SCEVCHECK]]: ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[STEP]], 1 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP9]], 2 -; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP9]], [[N_MOD_VF]] +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP15]], 2 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP15]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[IV_1_LCSSA]], [[N_VEC]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: @@ -239,7 +243,7 @@ define void @expand_diff_scev_unknown(ptr %dst, i1 %invar.c, i32 %step) mustprog ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP9]], [[N_VEC]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP15]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ [[IV_1_LCSSA]], %[[LOOP_2_PREHEADER]] ], [ [[IV_1_LCSSA]], %[[VECTOR_SCEVCHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll b/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll index 0b86a22..027dcaf 100644 --- a/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll +++ b/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll @@ -22,13 +22,11 @@ define void @test_versioned_with_sext_use(i32 %offset, ptr %dst) { ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[OFFSET]], 1 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = mul i64 200, [[OFFSET_EXT]] -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], [[TMP0]] +; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], 200 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[INDEX]], [[OFFSET_EXT]] -; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[INDEX]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP3]] ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -94,13 +92,11 @@ define void @test_versioned_with_zext_use(i32 %offset, ptr %dst) { ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[OFFSET]], 1 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = mul i64 200, [[OFFSET_EXT]] -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], [[TMP0]] +; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], 200 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] 
], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[INDEX]], [[OFFSET_EXT]] -; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[INDEX]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP3]] ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -233,13 +229,11 @@ define void @test_versioned_with_different_uses(i32 %offset, ptr noalias %dst.1, ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[OFFSET]], 1 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = mul i64 200, [[OFFSET_EXT]] -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], [[TMP0]] +; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], 200 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[INDEX]], [[OFFSET_EXT]] -; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[INDEX]] ; CHECK-NEXT: [[OFFSET_IDX2:%.*]] = trunc i64 [[INDEX]] to i32 ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[OFFSET_IDX2]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[OFFSET_IDX2]], 1 @@ -414,26 +408,20 @@ define void @zext_of_i1_stride(i1 %g, ptr %dst) mustprogress { ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i1 [[G]], true ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 4 -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] -; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], [[G_64]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], [[G_64]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[INDEX]] ; CHECK-NEXT: store <4 x i16> splat (i16 1), ptr [[TMP4]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_SCEVCHECK]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[IV]] ; CHECK-NEXT: store i16 [[G_16]], ptr [[GEP]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], [[G_64]] diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll 
b/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll index b056f44..8d20a3b 100644 --- a/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll +++ b/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll @@ -14,16 +14,9 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef ; CHECK-NEXT: [[SUB:%.*]] = add i32 [[XA]], -1 ; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[SUB]] to i64 ; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[XB]] to i64 -; CHECK-NEXT: [[TMP2:%.*]] = add nsw i64 [[TMP1]], [[TMP0]] -; CHECK-NEXT: [[SMAX7:%.*]] = tail call i64 @llvm.smax.i64(i64 [[TMP2]], i64 32000) -; CHECK-NEXT: [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 32000 -; CHECK-NEXT: [[UMIN8:%.*]] = zext i1 [[TMP3]] to i64 -; CHECK-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP2]], [[UMIN8]] -; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[SMAX7]], [[TMP4]] -; CHECK-NEXT: [[UMAX9:%.*]] = tail call i64 @llvm.umax.i64(i64 [[TMP1]], i64 1) -; CHECK-NEXT: [[TMP6:%.*]] = udiv i64 [[TMP5]], [[UMAX9]] -; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP6]], [[UMIN8]] -; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], 1 +; CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.smax.i64(i64 [[TMP0]], i64 31999) +; CHECK-NEXT: [[SMAX10:%.*]] = add nuw nsw i64 [[TMP2]], 1 +; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[SMAX10]], [[TMP0]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP8]], 23 ; CHECK-NEXT: [[IDENT_CHECK_NOT:%.*]] = icmp eq i32 [[XB]], 1 ; CHECK-NEXT: [[OR_COND:%.*]] = and i1 [[MIN_ITERS_CHECK]], [[IDENT_CHECK_NOT]] @@ -50,13 +43,11 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[FOR_BODY_PREHEADER13]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP8]], -8 -; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[N_VEC]], [[TMP1]] -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[TMP18]], [[TMP0]] +; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[N_VEC]], [[TMP0]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[INDEX]], [[TMP1]] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP19]], [[TMP0]] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[INDEX]], [[TMP0]] ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP20]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP20]], align 4, !alias.scope [[META0:![0-9]+]] @@ -75,7 +66,7 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP8]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END]], label [[FOR_BODY_PREHEADER13]] -; CHECK: for.body.preheader13: +; CHECK: for.body.preheader14: ; CHECK-NEXT: [[INDVARS_IV_PH:%.*]] = phi i64 [ [[TMP0]], [[VECTOR_MEMCHECK]] ], [ [[TMP0]], [[FOR_BODY_PREHEADER]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: diff --git a/llvm/test/Transforms/SLPVectorizer/X86/xor-combined-opcode.ll b/llvm/test/Transforms/SLPVectorizer/X86/xor-combined-opcode.ll new file mode 100644 index 0000000..9cdcdf1 --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/X86/xor-combined-opcode.ll @@ -0,0 +1,21 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt 
-S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s -slp-threshold=-100 | FileCheck %s +define i1 @foo(i1 %v) { ; assume %v is 1 +; CHECK-LABEL: define i1 @foo( +; CHECK-SAME: i1 [[V:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i1> poison, i1 [[V]], i32 0 +; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x i1> [[TMP0]], <2 x i1> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i1> zeroinitializer, [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; CHECK-NEXT: [[SUB:%.*]] = sub i1 [[TMP3]], [[TMP4]] +; CHECK-NEXT: ret i1 [[SUB]] +; +entry: + %not = xor i1 %v, 1 ; 0 + %not1 = xor i1 %not, 1 ; 1 + %mul = mul i1 %v, 1 ; 1 + %sub = sub i1 %not1, %mul ; 0 + ret i1 %sub ; 0 +} diff --git a/llvm/test/Transforms/SimplifyCFG/switch-to-select-two-case.ll b/llvm/test/Transforms/SimplifyCFG/switch-to-select-two-case.ll index 39703e9..9d78b97 100644 --- a/llvm/test/Transforms/SimplifyCFG/switch-to-select-two-case.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch-to-select-two-case.ll @@ -755,6 +755,25 @@ bb3: ret i1 %phi } +define i32 @negative_constfold_select() { +; CHECK-LABEL: @negative_constfold_select( +; CHECK-NEXT: entry: +; CHECK-NEXT: ret i32 poison +; +entry: + switch i32 poison, label %default [ + i32 0, label %bb + i32 2, label %bb + ] + +bb: + br label %default + +default: + %ret = phi i32 [ poison, %entry ], [ poison, %bb ] + ret i32 %ret +} + !0 = !{!"function_entry_count", i64 1000} !1 = !{!"branch_weights", i32 3, i32 5, i32 7} !2 = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13} diff --git a/llvm/test/Transforms/VectorCombine/AArch64/combine-shuffle-ext.ll b/llvm/test/Transforms/VectorCombine/AArch64/combine-shuffle-ext.ll index 6341c89..1503a1b 100644 --- a/llvm/test/Transforms/VectorCombine/AArch64/combine-shuffle-ext.ll +++ b/llvm/test/Transforms/VectorCombine/AArch64/combine-shuffle-ext.ll @@ -14,9 +14,9 @@ define <4 x i32> @load_i32_zext_to_v4i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -36,9 +36,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_both_nneg(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext nneg <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> 
[[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[E_1:%.*]] = zext nneg <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -58,9 +58,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_inner_nneg(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext nneg <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = zext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[E_1:%.*]] = zext nneg <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -80,9 +80,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_outer_nneg(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -102,9 +102,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_inner_nneg_outer_sext(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext nneg <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[E_1:%.*]] = zext nneg <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -125,9 +125,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_clobber_after_load(ptr %di) { ; CHECK-NEXT: call void @use.i32(i32 0) ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: 
[[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -148,9 +148,9 @@ define <4 x i32> @load_i32_sext_zext_to_v4i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = sext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = zext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[E_1:%.*]] = sext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -170,9 +170,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_load_other_users(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: call void @use.i32(i32 [[L]]) ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; @@ -194,9 +194,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_ins_other_users(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: call void @use.v2i32(<2 x i32> [[VEC_INS]]) ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; @@ -218,9 +218,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_bc_other_users(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x 
i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: call void @use.v8i8(<8 x i8> [[VEC_BC]]) ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; @@ -266,10 +266,10 @@ define <4 x i32> @load_i32_zext_to_v4i32_shuffle_other_users(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> -; CHECK-NEXT: call void @use.v8i16(<4 x i16> [[VEC_SHUFFLE]]) +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> +; CHECK-NEXT: call void @use.v8i16(<4 x i16> [[E_1]]) ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -290,9 +290,9 @@ define <8 x i32> @load_i64_zext_to_v8i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[DI]], align 8 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i64> <i64 poison, i64 0>, i64 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i64> [[VEC_INS]] to <16 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = zext <16 x i8> [[VEC_BC]] to <16 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <16 x i16> [[EXT_1]], <16 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> -; CHECK-NEXT: [[OUTER_EXT:%.*]] = zext nneg <8 x i16> [[VEC_SHUFFLE]] to <8 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <16 x i8> [[VEC_BC]], <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> +; CHECK-NEXT: [[EXT_1:%.*]] = zext <8 x i8> [[VEC_SHUFFLE]] to <8 x i16> +; CHECK-NEXT: [[OUTER_EXT:%.*]] = zext nneg <8 x i16> [[EXT_1]] to <8 x i32> ; CHECK-NEXT: ret <8 x i32> [[OUTER_EXT]] ; entry: @@ -312,9 +312,9 @@ define <3 x i32> @load_i24_zext_to_v3i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i24, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i24> <i24 poison, i24 0>, i24 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i24> [[VEC_INS]] to <6 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = zext <6 x i8> [[VEC_BC]] to <6 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <6 x i16> [[EXT_1]], <6 x i16> poison, <3 x i32> <i32 0, i32 1, i32 2> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <3 x i16> [[VEC_SHUFFLE]] to <3 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <6 x i8> [[VEC_BC]], <6 x i8> poison, <3 x i32> <i32 0, i32 1, i32 2> +; CHECK-NEXT: [[EXT_1:%.*]] = zext <3 x i8> [[VEC_SHUFFLE]] to <3 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <3 x i16> [[EXT_1]] to <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[EXT_2]] ; entry: @@ -334,9 +334,9 @@ define <4 x i32> @load_i32_insert_idx_1_sext(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 
0, i32 poison>, i32 [[L]], i64 1 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[EXT_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[EXT_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[EXT_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -356,9 +356,9 @@ define <4 x i32> @mask_extracts_not_all_elements_1_sext(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[EXT_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 2> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 2> +; CHECK-NEXT: [[EXT_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[EXT_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -378,9 +378,9 @@ define <4 x i32> @mask_extracts_not_all_elements_2_sext(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[EXT_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 4> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 4> +; CHECK-NEXT: [[EXT_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[EXT_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -422,9 +422,9 @@ define <4 x i32> @load_i32_sext_to_v4i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = sext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[E_1:%.*]] = sext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -444,9 +444,9 @@ define <8 x i32> @load_i64_sext_to_v8i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[DI]], align 8 ; CHECK-NEXT: 
[[VEC_INS:%.*]] = insertelement <2 x i64> <i64 poison, i64 0>, i64 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i64> [[VEC_INS]] to <16 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = sext <16 x i8> [[VEC_BC]] to <16 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <16 x i16> [[EXT_1]], <16 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> -; CHECK-NEXT: [[OUTER_EXT:%.*]] = sext <8 x i16> [[VEC_SHUFFLE]] to <8 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <16 x i8> [[VEC_BC]], <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> +; CHECK-NEXT: [[EXT_1:%.*]] = sext <8 x i8> [[VEC_SHUFFLE]] to <8 x i16> +; CHECK-NEXT: [[OUTER_EXT:%.*]] = sext <8 x i16> [[EXT_1]] to <8 x i32> ; CHECK-NEXT: ret <8 x i32> [[OUTER_EXT]] ; entry: @@ -466,9 +466,9 @@ define <3 x i32> @load_i24_sext_to_v3i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i24, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i24> <i24 poison, i24 0>, i24 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i24> [[VEC_INS]] to <6 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = sext <6 x i8> [[VEC_BC]] to <6 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <6 x i16> [[EXT_1]], <6 x i16> poison, <3 x i32> <i32 0, i32 1, i32 2> -; CHECK-NEXT: [[EXT_2:%.*]] = sext <3 x i16> [[VEC_SHUFFLE]] to <3 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <6 x i8> [[VEC_BC]], <6 x i8> poison, <3 x i32> <i32 0, i32 1, i32 2> +; CHECK-NEXT: [[EXT_1:%.*]] = sext <3 x i8> [[VEC_SHUFFLE]] to <3 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = sext <3 x i16> [[EXT_1]] to <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[EXT_2]] ; entry: @@ -488,9 +488,9 @@ define <4 x i32> @load_i32_insert_idx_1(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 0, i32 poison>, i32 [[L]], i64 1 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = sext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[EXT_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> -; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[EXT_1:%.*]] = sext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[EXT_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -510,9 +510,9 @@ define <4 x i32> @mask_extracts_not_all_elements_1(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = sext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[EXT_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 2> -; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 2> +; CHECK-NEXT: [[EXT_1:%.*]] = sext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[EXT_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -532,9 +532,9 @@ define <4 x i32> @mask_extracts_not_all_elements_2(ptr %di) { 
; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = sext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[EXT_1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 4> -; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 4> +; CHECK-NEXT: [[EXT_1:%.*]] = sext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[EXT_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: diff --git a/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll b/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll index acbc836..ed29719 100644 --- a/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll +++ b/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll @@ -205,8 +205,8 @@ define <8 x i8> @abs_different(<8 x i8> %a) { define <4 x i32> @poison_intrinsic(<2 x i16> %l256) { ; CHECK-LABEL: @poison_intrinsic( ; CHECK-NEXT: [[L266:%.*]] = call <2 x i16> @llvm.abs.v2i16(<2 x i16> [[L256:%.*]], i1 false) -; CHECK-NEXT: [[L267:%.*]] = zext <2 x i16> [[L266]] to <2 x i32> -; CHECK-NEXT: [[L271:%.*]] = shufflevector <2 x i32> [[L267]], <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison> +; CHECK-NEXT: [[L267:%.*]] = shufflevector <2 x i16> [[L266]], <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison> +; CHECK-NEXT: [[L271:%.*]] = zext <4 x i16> [[L267]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[L271]] ; %l266 = call <2 x i16> @llvm.abs.v2i16(<2 x i16> %l256, i1 false) @@ -534,9 +534,9 @@ define <4 x i64> @single_zext(<4 x i32> %x) { define <4 x i64> @not_zext(<4 x i32> %x) { ; CHECK-LABEL: @not_zext( -; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[X:%.*]] to <4 x i64> -; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[ZEXT]], <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0> -; CHECK-NEXT: ret <4 x i64> [[REVSHUF]] +; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0> +; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[REVSHUF]] to <4 x i64> +; CHECK-NEXT: ret <4 x i64> [[ZEXT]] ; %zext = zext <4 x i32> %x to <4 x i64> %revshuf = shufflevector <4 x i64> %zext, <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0> @@ -922,10 +922,9 @@ define <4 x i8> @singleop(<4 x i8> %a, <4 x i8> %b) { define <4 x i64> @cast_mismatched_types(<4 x i32> %x) { ; CHECK-LABEL: @cast_mismatched_types( -; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <2 x i32> <i32 0, i32 2> -; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i32> [[SHUF]] to <2 x i64> -; CHECK-NEXT: [[EXTSHUF:%.*]] = shufflevector <2 x i64> [[ZEXT]], <2 x i64> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3> -; CHECK-NEXT: ret <4 x i64> [[EXTSHUF]] +; CHECK-SAME: <4 x i32> [[X:%.*]]) { +; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[X]] to <4 x i64> +; CHECK-NEXT: ret <4 x i64> [[ZEXT]] ; %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <2 x i32> <i32 0, i32 2> %zext = zext <2 x i32> %shuf to <2 x i64> diff --git a/llvm/test/Transforms/VectorCombine/AMDGPU/narrow-phi-of-shuffles.ll b/llvm/test/Transforms/VectorCombine/AMDGPU/narrow-phi-of-shuffles.ll index 
8c50484..b293976 100644 --- a/llvm/test/Transforms/VectorCombine/AMDGPU/narrow-phi-of-shuffles.ll +++ b/llvm/test/Transforms/VectorCombine/AMDGPU/narrow-phi-of-shuffles.ll @@ -392,7 +392,7 @@ define <4 x i32> @shuffle_v4i32(<3 x i32> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -427,7 +427,7 @@ define <8 x i32> @shuffle_v8i32(<3 x i32> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -462,7 +462,7 @@ define <16 x i32> @shuffle_v16i32(<3 x i32> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -497,7 +497,7 @@ define <32 x i32> @shuffle_v32i32(<3 x i32> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -1092,7 +1092,7 @@ define <4 x float> @shuffle_v4f32(<3 x float> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -1127,7 +1127,7 @@ define <6 x float> @shuffle_v6f32(<3 x float> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -1162,7 +1162,7 @@ define <8 x float> @shuffle_v8f32(<3 x float> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: 
[[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -1197,7 +1197,7 @@ define <16 x float> @shuffle_v16f32(<3 x float> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -1232,7 +1232,7 @@ define <32 x float> @shuffle_v32f32(<3 x float> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: diff --git a/llvm/test/Transforms/VectorCombine/X86/narrow-phi-of-shuffles.ll b/llvm/test/Transforms/VectorCombine/X86/narrow-phi-of-shuffles.ll index 59422e9..594017e 100644 --- a/llvm/test/Transforms/VectorCombine/X86/narrow-phi-of-shuffles.ll +++ b/llvm/test/Transforms/VectorCombine/X86/narrow-phi-of-shuffles.ll @@ -605,7 +605,7 @@ define <4 x bfloat> @shuffle_v4bf16(<3 x bfloat> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -640,7 +640,7 @@ define <6 x bfloat> @shuffle_v6bf16(<3 x bfloat> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -675,7 +675,7 @@ define <8 x bfloat> @shuffle_v8bf16(<3 x bfloat> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -710,7 +710,7 @@ define <16 x bfloat> @shuffle_v16bf16(<3 x bfloat> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: 
[[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -745,7 +745,7 @@ define <32 x bfloat> @shuffle_v32bf16(<3 x bfloat> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -850,7 +850,7 @@ define <4 x half> @shuffle_v4f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V1-NEXT: [[ENTRY:.*:]] ; CHECK-V1-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V1: [[THEN]]: -; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-V1-NEXT: tail call void @func0() ; CHECK-V1-NEXT: br label %[[FINALLY:.*]] ; CHECK-V1: [[ELSE]]: @@ -866,7 +866,7 @@ define <4 x half> @shuffle_v4f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V2-NEXT: [[ENTRY:.*:]] ; CHECK-V2-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V2: [[THEN]]: -; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-V2-NEXT: tail call void @func0() ; CHECK-V2-NEXT: br label %[[FINALLY:.*]] ; CHECK-V2: [[ELSE]]: @@ -933,7 +933,7 @@ define <6 x half> @shuffle_v6f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V1-NEXT: [[ENTRY:.*:]] ; CHECK-V1-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V1: [[THEN]]: -; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-V1-NEXT: tail call void @func0() ; CHECK-V1-NEXT: br label %[[FINALLY:.*]] ; CHECK-V1: [[ELSE]]: @@ -949,7 +949,7 @@ define <6 x half> @shuffle_v6f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V2-NEXT: [[ENTRY:.*:]] ; CHECK-V2-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V2: [[THEN]]: -; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-V2-NEXT: tail call void @func0() ; CHECK-V2-NEXT: br label %[[FINALLY:.*]] ; CHECK-V2: [[ELSE]]: @@ -1016,7 +1016,7 @@ define <8 x half> @shuffle_v8f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V1-NEXT: [[ENTRY:.*:]] ; CHECK-V1-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V1: [[THEN]]: -; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-V1-NEXT: tail call void 
@func0() ; CHECK-V1-NEXT: br label %[[FINALLY:.*]] ; CHECK-V1: [[ELSE]]: @@ -1032,7 +1032,7 @@ define <8 x half> @shuffle_v8f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V2-NEXT: [[ENTRY:.*:]] ; CHECK-V2-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V2: [[THEN]]: -; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-V2-NEXT: tail call void @func0() ; CHECK-V2-NEXT: br label %[[FINALLY:.*]] ; CHECK-V2: [[ELSE]]: @@ -1099,7 +1099,7 @@ define <16 x half> @shuffle_v16f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V1-NEXT: [[ENTRY:.*:]] ; CHECK-V1-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V1: [[THEN]]: -; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-V1-NEXT: tail call void @func0() ; CHECK-V1-NEXT: br label %[[FINALLY:.*]] ; CHECK-V1: [[ELSE]]: @@ -1115,7 +1115,7 @@ define <16 x half> @shuffle_v16f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V2-NEXT: [[ENTRY:.*:]] ; CHECK-V2-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V2: [[THEN]]: -; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-V2-NEXT: tail call void @func0() ; CHECK-V2-NEXT: br label %[[FINALLY:.*]] ; CHECK-V2: [[ELSE]]: @@ -1182,7 +1182,7 @@ define <32 x half> @shuffle_v32f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V1-NEXT: [[ENTRY:.*:]] ; CHECK-V1-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V1: [[THEN]]: -; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-V1-NEXT: tail call void @func0() ; CHECK-V1-NEXT: br label %[[FINALLY:.*]] ; CHECK-V1: [[ELSE]]: @@ -1198,7 +1198,7 @@ define <32 x half> @shuffle_v32f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V2-NEXT: [[ENTRY:.*:]] ; CHECK-V2-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V2: [[THEN]]: -; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 1, i32 2, i32 0> +; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> <i32 2, i32 0, i32 1> ; CHECK-V2-NEXT: tail call void @func0() ; CHECK-V2-NEXT: br label %[[FINALLY:.*]] ; CHECK-V2: [[ELSE]]: diff --git a/llvm/test/Transforms/VectorCombine/X86/shuffle-of-casts.ll b/llvm/test/Transforms/VectorCombine/X86/shuffle-of-casts.ll index fba4b60..82a7399 100644 --- a/llvm/test/Transforms/VectorCombine/X86/shuffle-of-casts.ll +++ b/llvm/test/Transforms/VectorCombine/X86/shuffle-of-casts.ll @@ -342,3 +342,59 @@ define <16 x i32> @concat_sext_zext_v8i16_v16i32(<8 x i16> %a0, <8 x i16> %a1) { %r = shufflevector <8 x i32> %x0, <8 x i32> %x1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> ret <16 x i32> %r } + +; Unary shuffles + +define <4 x i16> @unary_shuffle_zext_v8i8_v4i16(<8 x i8> %a0) { +; CHECK-LABEL: define <4 x 
i16> @unary_shuffle_zext_v8i8_v4i16( +; CHECK-SAME: <8 x i8> [[A0:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[A0]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[X1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: ret <4 x i16> [[X1]] +; + %x1 = zext <8 x i8> %a0 to <8 x i16> + %vec.shuffle = shufflevector <8 x i16> %x1, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + ret <4 x i16> %vec.shuffle +} + +define <4 x i16> @unary_shuffle_sext_v8i8_v4i16(<8 x i8> %a0) { +; CHECK-LABEL: define <4 x i16> @unary_shuffle_sext_v8i8_v4i16( +; CHECK-SAME: <8 x i8> [[A0:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[A0]], <8 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[X1:%.*]] = sext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: ret <4 x i16> [[X1]] +; + %x1 = sext <8 x i8> %a0 to <8 x i16> + %vec.shuffle = shufflevector <8 x i16> %x1, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + ret <4 x i16> %vec.shuffle +} + +; negative - avoid loop with foldBitcastOfShuffle + +define <2 x i32> @unary_shuffle_bitcast_v8i8_v2i32(<8 x i8> %a0) { +; CHECK-LABEL: define <2 x i32> @unary_shuffle_bitcast_v8i8_v2i32( +; CHECK-SAME: <8 x i8> [[A0:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[X1:%.*]] = bitcast <8 x i8> [[A0]] to <2 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <2 x i32> [[X1]], <2 x i32> poison, <2 x i32> <i32 0, i32 1> +; CHECK-NEXT: ret <2 x i32> [[VEC_SHUFFLE]] +; + %x1 = bitcast <8 x i8> %a0 to <2 x i32> + %vec.shuffle = shufflevector <2 x i32> %x1, <2 x i32> poison, <2 x i32> <i32 0, i32 1> + ret <2 x i32> %vec.shuffle +} + +; negative - multiuse + +define <4 x i16> @unary_shuffle_sext_v8i8_v4i16_multiuse(<8 x i8> %a0, ptr %a1) { +; CHECK-LABEL: define <4 x i16> @unary_shuffle_sext_v8i8_v4i16_multiuse( +; CHECK-SAME: <8 x i8> [[A0:%.*]], ptr [[A1:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[X1:%.*]] = sext <8 x i8> [[A0]] to <8 x i16> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[X1]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: store <8 x i16> [[X1]], ptr [[A1]], align 16 +; CHECK-NEXT: ret <4 x i16> [[VEC_SHUFFLE]] +; + %x1 = sext <8 x i8> %a0 to <8 x i16> + %vec.shuffle = shufflevector <8 x i16> %x1, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + store <8 x i16> %x1, ptr %a1, align 16 + ret <4 x i16> %vec.shuffle +} diff --git a/llvm/test/Verifier/preallocated-invalid.ll b/llvm/test/Verifier/preallocated-invalid.ll index 38ed106..2c5aff2 100644 --- a/llvm/test/Verifier/preallocated-invalid.ll +++ b/llvm/test/Verifier/preallocated-invalid.ll @@ -65,13 +65,21 @@ define void @preallocated_one_call() { ret void } -; CHECK: must be a constant +; CHECK: immarg operand has non-immediate parameter define void @preallocated_setup_constant() { %ac = call i32 @blackbox() %cs = call token @llvm.call.preallocated.setup(i32 %ac) ret void } +; CHECK: llvm.call.preallocated.alloc arg index must be a constant +define void @preallocated_arg_constant() { + %ac = call i32 @blackbox() + %cs = call token @llvm.call.preallocated.setup(i32 3) + call token @llvm.call.preallocated.arg(token %cs, i32 %ac) + ret void +} + ; CHECK: must be between 0 and corresponding define void @preallocated_setup_arg_index_in_bounds() { %cs = call token @llvm.call.preallocated.setup(i32 2) diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py index dd3f947..781240a 100644 --- a/llvm/test/lit.cfg.py +++ 
b/llvm/test/lit.cfg.py @@ -48,15 +48,17 @@ config.suffixes = [".ll", ".c", ".test", ".txt", ".s", ".mir", ".yaml", ".spv"] # directories. config.excludes = ["Inputs", "CMakeLists.txt", "README.txt", "LICENSE.txt"] -# Exclude llvm-reduce tests for profcheck because we substitute the FileCheck -# binary with a no-op command for profcheck, but llvm-reduce tests have RUN -# commands of the form llvm-reduce --test FileCheck, which explode if we -# substitute FileCheck because llvm-reduce expects FileCheck in these tests. -# It's not really possible to exclude these tests from the command substitution, -# so we just exclude llvm-reduce tests from this config altogether. This should -# be fine though as profcheck config tests are mostly concerned with opt. if config.enable_profcheck: - config.excludes = config.excludes + ["llvm-reduce"] + # Exclude llvm-reduce tests for profcheck because we substitute the FileCheck + # binary with a no-op command for profcheck, but llvm-reduce tests have RUN + # commands of the form llvm-reduce --test FileCheck, which explode if we + # substitute FileCheck because llvm-reduce expects FileCheck in these tests. + # It's not really possible to exclude these tests from the command substitution, + # so we just exclude llvm-reduce tests from this config altogether. This should + # be fine though as profcheck config tests are mostly concerned with opt. + config.excludes.append("llvm-reduce") + # (Issue #161235) Temporarily exclude LoopVectorize. + config.excludes.append("LoopVectorize") # test_source_root: The root path where tests are located. config.test_source_root = os.path.dirname(__file__) diff --git a/llvm/test/tools/llvm-dwarfdump/verify_stmt_seq.yaml b/llvm/test/tools/llvm-dwarfdump/verify_stmt_seq.yaml index 5312c25..17e91f1 100644 --- a/llvm/test/tools/llvm-dwarfdump/verify_stmt_seq.yaml +++ b/llvm/test/tools/llvm-dwarfdump/verify_stmt_seq.yaml @@ -2,9 +2,9 @@ # Then manually tampered with some of the values of the attribute # I hope there are easier ways to construct tests like this.
-# RUN: yaml2obj %s -o verify_stmt_seq.o -# RUN: not llvm-dwarfdump -verify -debug-info verify_stmt_seq.o | FileCheck %s --check-prefix=CHECK_INVALID --implicit-check-not=error: -# RUN: llvm-dwarfdump -debug-line -verbose -debug-info verify_stmt_seq.o | FileCheck %s --check-prefix=CHECK_DEBUG_LINE +# RUN: yaml2obj %s -o %t.o +# RUN: not llvm-dwarfdump -verify -debug-info %t.o | FileCheck %s --check-prefix=CHECK_INVALID --implicit-check-not=error: +# RUN: llvm-dwarfdump -debug-line -verbose -debug-info %t.o | FileCheck %s --check-prefix=CHECK_DEBUG_LINE # CHECK_INVALID: error: DW_AT_LLVM_stmt_sequence offset 0x00000000 is not within the line table bounds [0x00000034, 0x000000fd) # CHECK_INVALID: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] (0x00000000) diff --git a/llvm/test/tools/llvm-lib/sym64-threshold.test b/llvm/test/tools/llvm-lib/sym64-threshold.test new file mode 100644 index 0000000..76f0a03 --- /dev/null +++ b/llvm/test/tools/llvm-lib/sym64-threshold.test @@ -0,0 +1,71 @@ +# RUN: yaml2obj --docnum=1 %s -o %t01234567890234567789.obj +# RUN: yaml2obj --docnum=2 %s -o %t-ec.obj +# RUN: env SYM64_THRESHOLD=100 llvm-lib -machine:amd64 -out:%t.lib %t01234567890234567789.obj +# RUN: llvm-nm --print-armap %t.lib | FileCheck --check-prefix=ARMAP %s +# ARMAP: Archive map +# ARMAP-NEXT: sym + +# RUN: env SYM64_THRESHOLD=100 not llvm-lib -machine:arm64x -out:%t-ec.lib %t-ec.obj %t01234567890234567789.obj 2>&1 | FileCheck %s +# CHECK: Archive is too large: ARM64X does not support archives larger than 4GB + +--- !COFF +header: + Machine: IMAGE_FILE_MACHINE_AMD64 + Characteristics: [ ] +sections: + - Name: .text + Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ] + Alignment: 4 + SectionData: '' +symbols: + - Name: .text + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 0 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 1 + - !Symbol + Name: sym + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL # (0) + ComplexType: IMAGE_SYM_DTYPE_FUNCTION # (2) + StorageClass: IMAGE_SYM_CLASS_EXTERNAL # (2) +... + +--- !COFF +header: + Machine: IMAGE_FILE_MACHINE_ARM64 + Characteristics: [ ] +sections: + - Name: .text + Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ] + Alignment: 4 + SectionData: '' +symbols: + - Name: .text + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 0 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 1 + - !Symbol + Name: sym + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL # (0) + ComplexType: IMAGE_SYM_DTYPE_FUNCTION # (2) + StorageClass: IMAGE_SYM_CLASS_EXTERNAL # (2) +... diff --git a/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading-fail.test b/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading-fail.test new file mode 100644 index 0000000..391b7ee --- /dev/null +++ b/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading-fail.test @@ -0,0 +1,26 @@ +## Test that --offloading emits a warning on a malformed fatbin. +# REQUIRES: amdgpu-registered-target + +# RUN: yaml2obj %s -o %t.elf +# RUN: llvm-readobj --offloading %t.elf 2>&1 | \ +# RUN: FileCheck %s --check-prefix=WARN -DFILE_NAME=%t.elf + +# WARN: warning: '{{.*}}': Stream Error: The stream is too short to perform the requested operation.
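+## The .hip_fatbin payload below is intentionally malformed: its bundle-entry metadata promises more bytes than the section actually provides, so the offload parser runs out of stream and reports the warning checked above instead of listing entries.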
+ +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC +Sections: + - Name: .hip_fatbin + Type: SHT_PROGBITS + AddressAlign: 0x1000 + Content: 5F5F434C414E475F4F46464C4F41445F42554E444C455F5F0200000000000000001000000000000000000000000000001B0000000000000075782D2D0010000000000000D00F0000000000001F0000000000000068697076342D616D6467636E2D616D642D616D646873612D2D676678393038000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007F454C460201014003000000000000000300E0000100000000000000000000004000000000000000100C0000000000003005000040003800090040000F000D000600000004000000400000000000000040000000000000004000000000000000F801000000000000F80100000000000008000000000000000100000004000000000000000000000000000000000000000000000000000000C008000000000000C008000000000000001000000000000001000000050000000009000000000000001900000000000000190000000000006C000000000000006C00000000000000001000000000000001000000060000007009000000000000702900000000000070290000000000007000000000000000900600000000000000100000000000000100000006000000E009000000000000E039000000000000E039000000000000000000000000000001000000000000000010000000000000020000000600000070090000000000007029000000000000702900000000000070000000000000007000000000000000080000000000000052E574640400000070090000000000007029000000000000702900000000000070000000000000009006000000000000010000000000000051E57464060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000004000000380200000000000038020000000000003802000000000000340500000000000034050000000000000400000000000000070000001D05000020000000414D44475055000083AE616D646873612E6B65726E656C7391DE0012AB2E616770725F636F756E7400A52E61726773DC001085AE2E616464726573735F7370616365A6676C6F62616CA52E6E616D65AA415F642E636F65726365A72E6F666673657400A52E73697A6508AB2E76616C75655F6B696E64AD676C6F62616C5F62756666657285AE2E616464726573735F7370616365A6676C6F62616CA52E6E616D65AA425F642E636F65726365A72E6F666673657408A52E73697A6508AB2E76616C75655F6B696E64AD676C6F62616C5F62756666657284A52E6E616D65A14EA72E6F666673657410A52E73697A6508AB2E76616C75655F6B696E64A862795F76616C756583A72E6F666673657418A52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7883A72E6F66667365741CA52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7983A72E6F666673657420A52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7A83A72E6F666673657424A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7883A72E6F666673657426A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7983A72E6F666673657428A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7A83A72E6F66667365742AA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72
656D61696E6465725F7883A72E6F66667365742CA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72656D61696E6465725F7983A72E6F66667365742EA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72656D61696E6465725F7A83A72E6F666673657440A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7883A72E6F666673657448A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7983A72E6F666673657450A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7A83A72E6F666673657458A52E73697A6502AB2E76616C75655F6B696E64B068696464656E5F677269645F64696D73B92E67726F75705F7365676D656E745F66697865645F73697A6500B62E6B65726E6172675F7365676D656E745F616C69676E08B52E6B65726E6172675F7365676D656E745F73697A65CD0118A92E6C616E6775616765A84F70656E434C2043B12E6C616E67756167655F76657273696F6E920200B82E6D61785F666C61745F776F726B67726F75705F73697A65CD0400A52E6E616D65B25F5A3973696D706C65416464506A504B6A6DBB2E707269766174655F7365676D656E745F66697865645F73697A6500AB2E736770725F636F756E740CB12E736770725F7370696C6C5F636F756E7400A72E73796D626F6CB55F5A3973696D706C65416464506A504B6A6D2E6B64B82E756E69666F726D5F776F726B5F67726F75705F73697A6501B32E757365735F64796E616D69635F737461636BC2AB2E766770725F636F756E7404B12E766770725F7370696C6C5F636F756E7400AF2E7761766566726F6E745F73697A6540AD616D646873612E746172676574B9616D6467636E2D616D642D616D646873612D2D676678393038AE616D646873612E76657273696F6E92010200000000000000000000000000000000000000000000000000000000000000010000001203070000190000000000006C000000000000001400000011030600800800000000000040000000000000002A00000011000A00E03900000000000001000000000000000100000001000000010000001A000000000008400000D20001000000360A4A7A5238A4D3F113F4DD04000000040000000200000001000000000000000300000000000000000000000000000000000000005F5A3973696D706C65416464506A504B6A6D005F5A3973696D706C65416464506A504B6A6D2E6B64005F5F6869705F637569645F623730363264386333326134613933330000000000000000000000000000000000000000000000000000000000000000000000180100000000000080100000000000000000000000000000000000000000000000000000000000004000AF008C000000090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000C20102C02400000002000AC0000000008002027E7FC08CBF07FF0486FFFF0000060406920600006800008FD2820002000302067E0200043203030638008050DC02007F020102067E0000003203030238008050DC00007F03700F8CBF03050468008070DC00027F00000081BF00000000060000000000000070070000000000000B000000000000001800000000000000050000000000000020080000000000000A000000000000004600000000000000F5FEFF6F00000000D0070000000000000400000000000000F807000000000000000000000000000000000000000000004C696E6B65723A20414D44204C4C442031392E302E3000414D4420636C616E672076657273696F6E2031392E302E306769742028202032343231322063393630313665636534313337356462646438663037356266333762643666633333323230376233290000414D4420636C616E672076657273696F6E2031382E302E3067697420287373683A2F2F6765727269746769742F6C696768746E696E672F65632F6C6C766D2D70726F6A65637420616D642D6D61696E6C696E652D6F70656E20323431373620663935303039613166393032313232343865313036333964653837653635636163616338643961372900000000000000000000000000000000000000000000000000460000000002080070290000000000000000000000000000010000001203070000190000000000006C000000000000001400000011030600800800000000000040000000000000002A00000011000A00E0390000000000000100000000000000002E6E6F7465002E64796E73796D002E676E752E68617368002E68617368002E64796E737472002E726F64617461002E74657874002E64796E616D6963002E726
56C726F5F70616464696E67002E627373002E636F6D6D656E74002E73796D746162002E7368737472746162002E73747274616200005F5A3973696D706C65416464506A504B6A6D005F5A3973696D706C65416464506A504B6A6D2E6B64005F5F6869705F637569645F62373036326438633332613461393333005F44594E414D494300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000070000000200000000000000380200000000000038020000000000003405000000000000000000000000000004000000000000000000000000000000070000000B00000002000000000000007007000000000000700700000000000060000000000000000500000001000000080000000000000018000000000000000F000000F6FFFF6F0200000000000000D007000000000000D007000000000000280000000000000002000000000000000800000000000000000000000000000019000000050000000200000000000000F807000000000000F80700000000000028000000000000000200000000000000040000000000000004000000000000001F000000030000000200000000000000200800000000000020080000000000004600000000000000000000000000000001000000000000000000000000000000270000000100000002000000000000008008000000000000800800000000000040000000000000000000000000000000400000000000000000000000000000002F000000010000000600000000000000001900000000000000090000000000006C00000000000000000000000000000000010000000000000000000000000000350000000600000003000000000000007029000000000000700900000000000070000000000000000500000000000000080000000000000010000000000000003E000000080000000300000000000000E029000000000000E00900000000000020060000000000000000000000000000010000000000000000000000000000004D000000080000000300000000000000E039000000000000E0090000000000000100000000000000000000000000000001000000000000000000000000000000520000000100000030000000000000000000000000000000E009000000000000F0000000000000000000000000000000010000000000000001000000000000005B0000000200000000000000000000000000000000000000D00A00000000000078000000000000000E0000000200000008000000000000001800000000000000630000000300000000000000000000000000000000000000480B00000000000075000000000000000000000000000000010000000000000000000000000000006D0000000300000000000000000000000000000000000000BD0B0000000000004F00000000000000000000000000000001000000000000000000000000000000 + - Name: .hipFatBinSegment + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC ] + Address: 0x202FD0 + AddressAlign: 0x8 + Content: '465049480100000000102000000000000000000000000000' +... diff --git a/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading.test b/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading.test new file mode 100644 index 0000000..21ee60d --- /dev/null +++ b/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading.test @@ -0,0 +1,27 @@ +## Test that --offloading with a fatbin works correctly. 
+# REQUIRES: amdgpu-registered-target + +# RUN: yaml2obj %s -o %t.elf +# RUN: llvm-readobj --offloading %t.elf | \ +# RUN: FileCheck %s -DFILE_NAME=%t.elf + +# CHECK: host-x86_64-unknown-linux-- file://[[FILE_NAME]]#offset=8192&size=0 +# CHECK-NEXT: hipv4-amdgcn-amd-amdhsa--gfx908 file://[[FILE_NAME]]#offset=8192&size=4048 + +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC +Sections: + - Name: .hip_fatbin + Type: SHT_PROGBITS + AddressAlign: 0x1000 + Content: 5F5F434C414E475F4F46464C4F41445F42554E444C455F5F0200000000000000001000000000000000000000000000001B00000000000000686F73742D7838365F36342D756E6B6E6F776E2D6C696E75782D2D0010000000000000D00F0000000000001F0000000000000068697076342D616D6467636E2D616D642D616D646873612D2D6766783930380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007F454C460201014003000000000000000300E0000100000000000000000000004000000000000000100C0000000000003005000040003800090040000F000D000600000004000000400000000000000040000000000000004000000000000000F801000000000000F80100000000000008000000000000000100000004000000000000000000000000000000000000000000000000000000C008000000000000C008000000000000001000000000000001000000050000000009000000000000001900000000000000190000000000006C000000000000006C00000000000000001000000000000001000000060000007009000000000000702900000000000070290000000000007000000000000000900600000000000000100000000000000100000006000000E009000000000000E039000000000000E039000000000000000000000000000001000000000000000010000000000000020000000600000070090000000000007029000000000000702900000000000070000000000000007000000000000000080000000000000052E574640400000070090000000000007029000000000000702900000000000070000000000000009006000000000000010000000000000051E57464060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000004000000380200000000000038020000000000003802000000000000340500000000000034050000000000000400000000000000070000001D05000020000000414D44475055000083AE616D646873612E6B65726E656C7391DE0012AB2E616770725F636F756E7400A52E61726773DC001085AE2E616464726573735F7370616365A6676C6F62616CA52E6E616D65AA415F642E636F65726365A72E6F666673657400A52E73697A6508AB2E76616C75655F6B696E64AD676C6F62616C5F62756666657285AE2E616464726573735F7370616365A6676C6F62616CA52E6E616D65AA425F642E636F65726365A72E6F666673657408A52E73697A6508AB2E76616C75655F6B696E64AD676C6F62616C5F62756666657284A52E6E616D65A14EA72E6F666673657410A52E73697A6508AB2E76616C75655F6B696E64A862795F76616C756583A72E6F666673657418A52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7883A72E6F66667365741CA52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7983A72E6F666673657420A52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7
A83A72E6F666673657424A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7883A72E6F666673657426A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7983A72E6F666673657428A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7A83A72E6F66667365742AA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72656D61696E6465725F7883A72E6F66667365742CA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72656D61696E6465725F7983A72E6F66667365742EA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72656D61696E6465725F7A83A72E6F666673657440A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7883A72E6F666673657448A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7983A72E6F666673657450A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7A83A72E6F666673657458A52E73697A6502AB2E76616C75655F6B696E64B068696464656E5F677269645F64696D73B92E67726F75705F7365676D656E745F66697865645F73697A6500B62E6B65726E6172675F7365676D656E745F616C69676E08B52E6B65726E6172675F7365676D656E745F73697A65CD0118A92E6C616E6775616765A84F70656E434C2043B12E6C616E67756167655F76657273696F6E920200B82E6D61785F666C61745F776F726B67726F75705F73697A65CD0400A52E6E616D65B25F5A3973696D706C65416464506A504B6A6DBB2E707269766174655F7365676D656E745F66697865645F73697A6500AB2E736770725F636F756E740CB12E736770725F7370696C6C5F636F756E7400A72E73796D626F6CB55F5A3973696D706C65416464506A504B6A6D2E6B64B82E756E69666F726D5F776F726B5F67726F75705F73697A6501B32E757365735F64796E616D69635F737461636BC2AB2E766770725F636F756E7404B12E766770725F7370696C6C5F636F756E7400AF2E7761766566726F6E745F73697A6540AD616D646873612E746172676574B9616D6467636E2D616D642D616D646873612D2D676678393038AE616D646873612E76657273696F6E92010200000000000000000000000000000000000000000000000000000000000000010000001203070000190000000000006C000000000000001400000011030600800800000000000040000000000000002A00000011000A00E03900000000000001000000000000000100000001000000010000001A000000000008400000D20001000000360A4A7A5238A4D3F113F4DD04000000040000000200000001000000000000000300000000000000000000000000000000000000005F5A3973696D706C65416464506A504B6A6D005F5A3973696D706C65416464506A504B6A6D2E6B64005F5F6869705F637569645F623730363264386333326134613933330000000000000000000000000000000000000000000000000000000000000000000000180100000000000080100000000000000000000000000000000000000000000000000000000000004000AF008C000000090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000C20102C02400000002000AC0000000008002027E7FC08CBF07FF0486FFFF0000060406920600006800008FD2820002000302067E0200043203030638008050DC02007F020102067E0000003203030238008050DC00007F03700F8CBF03050468008070DC00027F00000081BF00000000060000000000000070070000000000000B000000000000001800000000000000050000000000000020080000000000000A000000000000004600000000000000F5FEFF6F00000000D0070000000000000400000000000000F807000000000000000000000000000000000000000000004C696E6B65723A20414D44204C4C442031392E302E3000414D4420636C616E672076657273696F6E2031392E302E306769742028202032343231322063393630313665636534313337356462646438663037356266333762643666633333323230376233290000414D4420636C616E672076657273696F6E2031382E302E3067697420287373683A2F2F6765727269746769742F6C696768746E696E672F65632F6C6C766D2D70726F6A65637420616D642D6D61696E6C696E652D6F70656E203234313736206639353030396131663930323132323438653130363339646538376536356361636163386439613729
00000000000000000000000000000000000000000000000000460000000002080070290000000000000000000000000000010000001203070000190000000000006C000000000000001400000011030600800800000000000040000000000000002A00000011000A00E0390000000000000100000000000000002E6E6F7465002E64796E73796D002E676E752E68617368002E68617368002E64796E737472002E726F64617461002E74657874002E64796E616D6963002E72656C726F5F70616464696E67002E627373002E636F6D6D656E74002E73796D746162002E7368737472746162002E73747274616200005F5A3973696D706C65416464506A504B6A6D005F5A3973696D706C65416464506A504B6A6D2E6B64005F5F6869705F637569645F62373036326438633332613461393333005F44594E414D494300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000070000000200000000000000380200000000000038020000000000003405000000000000000000000000000004000000000000000000000000000000070000000B00000002000000000000007007000000000000700700000000000060000000000000000500000001000000080000000000000018000000000000000F000000F6FFFF6F0200000000000000D007000000000000D007000000000000280000000000000002000000000000000800000000000000000000000000000019000000050000000200000000000000F807000000000000F80700000000000028000000000000000200000000000000040000000000000004000000000000001F000000030000000200000000000000200800000000000020080000000000004600000000000000000000000000000001000000000000000000000000000000270000000100000002000000000000008008000000000000800800000000000040000000000000000000000000000000400000000000000000000000000000002F000000010000000600000000000000001900000000000000090000000000006C00000000000000000000000000000000010000000000000000000000000000350000000600000003000000000000007029000000000000700900000000000070000000000000000500000000000000080000000000000010000000000000003E000000080000000300000000000000E029000000000000E00900000000000020060000000000000000000000000000010000000000000000000000000000004D000000080000000300000000000000E039000000000000E0090000000000000100000000000000000000000000000001000000000000000000000000000000520000000100000030000000000000000000000000000000E009000000000000F0000000000000000000000000000000010000000000000001000000000000005B0000000200000000000000000000000000000000000000D00A00000000000078000000000000000E0000000200000008000000000000001800000000000000630000000300000000000000000000000000000000000000480B00000000000075000000000000000000000000000000010000000000000000000000000000006D0000000300000000000000000000000000000000000000BD0B0000000000004F00000000000000000000000000000001000000000000000000000000000000 + - Name: .hipFatBinSegment + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC ] + Address: 0x202FD0 + AddressAlign: 0x8 + Content: '465049480100000000102000000000000000000000000000' +... diff --git a/llvm/test/tools/llvm-size/macho-pagezero.test b/llvm/test/tools/llvm-size/macho-pagezero.test new file mode 100644 index 0000000..db69fd0 --- /dev/null +++ b/llvm/test/tools/llvm-size/macho-pagezero.test @@ -0,0 +1,108 @@ +## Test the --exclude-pagezero option to skip __PAGEZERO segment in Mach-O files. 
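+## __PAGEZERO contributes only vmsize (4096 bytes here, counted under "others"), so excluding it drops the totals from 4216 to the 120 bytes of actual __TEXT and __DATA, as the NORMAL and SKIP prefixes below check for both the 64-bit and 32-bit layouts.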
+ +# RUN: yaml2obj %s --docnum=1 -o %t-pagezero.o +# RUN: llvm-size %t-pagezero.o | \ +# RUN: FileCheck %s --check-prefix=NORMAL --match-full-lines +# RUN: llvm-size --exclude-pagezero %t-pagezero.o | \ +# RUN: FileCheck %s --check-prefix=SKIP --match-full-lines + +# RUN: yaml2obj %s --docnum=2 -o %t-pagezero32.o +# RUN: llvm-size %t-pagezero32.o | \ +# RUN: FileCheck %s --check-prefix=NORMAL --match-full-lines +# RUN: llvm-size --exclude-pagezero %t-pagezero32.o | \ +# RUN: FileCheck %s --check-prefix=SKIP --match-full-lines + +# NORMAL:__TEXT __DATA __OBJC others dec hex +# NORMAL-NEXT:20 100 0 4096 4216 1078 + +# SKIP:__TEXT __DATA __OBJC others dec hex +# SKIP-NEXT:20 100 0 0 120 78 + +--- !mach-o +FileHeader: + magic: 0xFEEDFACF + cputype: 0x100000C + cpusubtype: 0x0 + filetype: 0x2 + ncmds: 3 + sizeofcmds: 216 + flags: 0x2000 + reserved: 0x0 +LoadCommands: + - cmd: LC_SEGMENT_64 + cmdsize: 72 + segname: __PAGEZERO + vmaddr: 0x0 + vmsize: 4096 + fileoff: 0 + filesize: 0 + maxprot: 0 + initprot: 0 + nsects: 0 + flags: 0 + - cmd: LC_SEGMENT_64 + cmdsize: 72 + segname: __TEXT + vmaddr: 0x100000000 + vmsize: 20 + fileoff: 248 + filesize: 20 + maxprot: 7 + initprot: 5 + nsects: 0 + flags: 0 + - cmd: LC_SEGMENT_64 + cmdsize: 72 + segname: __DATA + vmaddr: 0x100001000 + vmsize: 100 + fileoff: 268 + filesize: 100 + maxprot: 7 + initprot: 3 + nsects: 0 + flags: 0 + +--- !mach-o +FileHeader: + magic: 0xFEEDFACE + cputype: 0x7 + cpusubtype: 0x3 + filetype: 0x2 + ncmds: 3 + sizeofcmds: 168 + flags: 0x2000 +LoadCommands: + - cmd: LC_SEGMENT + cmdsize: 56 + segname: __PAGEZERO + vmaddr: 0x0 + vmsize: 4096 + fileoff: 0 + filesize: 0 + maxprot: 0 + initprot: 0 + nsects: 0 + flags: 0 + - cmd: LC_SEGMENT + cmdsize: 56 + segname: __TEXT + vmaddr: 0x1000 + vmsize: 20 + fileoff: 196 + filesize: 20 + maxprot: 7 + initprot: 5 + nsects: 0 + flags: 0 + - cmd: LC_SEGMENT + cmdsize: 56 + segname: __DATA + vmaddr: 0x2000 + vmsize: 100 + fileoff: 216 + filesize: 100 + maxprot: 7 + initprot: 3 + nsects: 0 + flags: 0