Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll | 40
-rw-r--r--  llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll | 16
-rw-r--r--  llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll | 732
-rw-r--r--  llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll | 1454
-rw-r--r--  llvm/test/CodeGen/AArch64/icmp.ll | 51
-rw-r--r--  llvm/test/CodeGen/AArch64/sme-za-exceptions.ll | 474
-rw-r--r--  llvm/test/CodeGen/AArch64/strict-fp-opt.ll | 150
-rw-r--r--  llvm/test/CodeGen/AArch64/vecreduce-bool.ll | 36
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fneg-combines.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll | 29
-rw-r--r--  llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-dead-intrinsics.ll | 9
-rw-r--r--  llvm/test/CodeGen/AMDGPU/lro-phi-samebb-nonlookthrough-store.ll | 46
-rw-r--r--  llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll | 341
-rw-r--r--  llvm/test/CodeGen/AMDGPU/v_mac.ll | 9
-rw-r--r--  llvm/test/CodeGen/AMDGPU/v_mac_f16.ll | 14
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll | 58
-rw-r--r--  llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll | 58
-rw-r--r--  llvm/test/CodeGen/PowerPC/scalar_cmp.ll | 226
-rw-r--r--  llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll | 106
-rw-r--r--  llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll | 7
-rw-r--r--  llvm/test/CodeGen/X86/combine-add.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/combine-mul.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/combine-sdiv.ll | 30
-rw-r--r--  llvm/test/CodeGen/X86/dbg-distringtype-uint.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/dpbusd.ll | 202
-rw-r--r--  llvm/test/CodeGen/X86/dpbusd_const.ll | 192
-rw-r--r--  llvm/test/CodeGen/X86/ftrunc.ll | 26
-rw-r--r--  llvm/test/CodeGen/X86/isint.ll | 124
-rw-r--r--  llvm/test/CodeGen/X86/known-signbits-shl.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/lea-16bit.ll | 3
-rw-r--r--  llvm/test/CodeGen/X86/lea-8bit.ll | 3
-rw-r--r--  llvm/test/CodeGen/X86/masked_gather_scatter.ll | 33
-rw-r--r--  llvm/test/CodeGen/X86/negative-sin.ll | 15
-rw-r--r--  llvm/test/CodeGen/X86/oddsubvector.ll | 12
-rw-r--r--  llvm/test/CodeGen/X86/pr62286.ll | 38
-rw-r--r--  llvm/test/CodeGen/X86/pr74736.ll | 20
-rw-r--r--  llvm/test/CodeGen/X86/setoeq.ll | 566
-rw-r--r--  llvm/test/CodeGen/X86/shift-i512.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll | 24
-rw-r--r--  llvm/test/CodeGen/X86/vec_shift6.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/vector-gep.ll | 128
-rw-r--r--  llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/vector-mul.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-shl-128.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll | 5
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-combining.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll | 64
51 files changed, 3992 insertions(+), 1431 deletions(-)
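
Judging from the AArch64 hunks shown below, the bulk of this change extends the fp16/bf16 tests to run under GlobalISel as well as SelectionDAG, regenerating the autogenerated FileCheck assertions and splitting the old CHECK-COMMON/CHECK-CVT/CHECK-FP16 prefixes into -SD and -GI variants wherever the two selectors produce different code. The new RUN-line pattern, copied from the updated tests, is:

; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-GI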
diff --git a/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll b/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll
index 1c216e7..e371748 100644
--- a/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll
@@ -11,6 +11,16 @@ entry:
ret <4 x i16> %1
}
+define <4 x half> @v4bf16_to_v4f16(float, <4 x bfloat> %a) nounwind {
+; CHECK-LABEL: v4bf16_to_v4f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ret
+entry:
+ %1 = bitcast <4 x bfloat> %a to <4 x half>
+ ret <4 x half> %1
+}
+
define <2 x i32> @v4bf16_to_v2i32(float, <4 x bfloat> %a) nounwind {
; CHECK-LABEL: v4bf16_to_v2i32:
; CHECK: // %bb.0: // %entry
@@ -82,6 +92,16 @@ entry:
ret <4 x bfloat> %1
}
+define <4 x bfloat> @v4f16_to_v4bf16(float, <4 x half> %a) nounwind {
+; CHECK-LABEL: v4f16_to_v4bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ret
+entry:
+ %1 = bitcast <4 x half> %a to <4 x bfloat>
+ ret <4 x bfloat> %1
+}
+
define <4 x bfloat> @v2i32_to_v4bf16(float, <2 x i32> %a) nounwind {
; CHECK-LABEL: v2i32_to_v4bf16:
; CHECK: // %bb.0: // %entry
@@ -152,6 +172,16 @@ entry:
ret <8 x i16> %1
}
+define <8 x half> @v8bf16_to_v8f16(float, <8 x bfloat> %a) nounwind {
+; CHECK-LABEL: v8bf16_to_v8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov v0.16b, v1.16b
+; CHECK-NEXT: ret
+entry:
+ %1 = bitcast <8 x bfloat> %a to <8 x half>
+ ret <8 x half> %1
+}
+
define <4 x i32> @v8bf16_to_v4i32(float, <8 x bfloat> %a) nounwind {
; CHECK-LABEL: v8bf16_to_v4i32:
; CHECK: // %bb.0: // %entry
@@ -202,6 +232,16 @@ entry:
ret <8 x bfloat> %1
}
+define <8 x bfloat> @v8f16_to_v8bf16(float, <8 x half> %a) nounwind {
+; CHECK-LABEL: v8f16_to_v8bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov v0.16b, v1.16b
+; CHECK-NEXT: ret
+entry:
+ %1 = bitcast <8 x half> %a to <8 x bfloat>
+ ret <8 x bfloat> %1
+}
+
define <8 x bfloat> @v4i32_to_v8bf16(float, <4 x i32> %a) nounwind {
; CHECK-LABEL: v4i32_to_v8bf16:
; CHECK: // %bb.0: // %entry
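
A bitcast between <N x half> and <N x bfloat> only reinterprets bits; both element types are 16 bits wide and occupy the same lanes, so the new tests above expect nothing more than a register copy (fmov for the 64-bit vectors, mov v0.16b for the 128-bit ones). As an illustrative sketch (not part of this diff), a round trip through bfloat should therefore be a semantic no-op:

define <4 x half> @v4f16_roundtrip(<4 x half> %a) nounwind {
entry:
  ; bitcast preserves the bit pattern, so casting there and back
  ; returns %a unchanged and can fold away entirely
  %b = bitcast <4 x half> %a to <4 x bfloat>
  %c = bitcast <4 x bfloat> %b to <4 x half>
  ret <4 x half> %c
}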
diff --git a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
index 0960c4c..a56d5b1 100644
--- a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
+++ b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
@@ -78,9 +78,8 @@ B:
define i32 @g_i8_sign_extend_inreg(i8 %in, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: g_i8_sign_extend_inreg:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w8, w1, w2, mi
+; CHECK-NEXT: tst w0, #0x80
+; CHECK-NEXT: csel w8, w1, w2, ne
; CHECK-NEXT: add w0, w8, w0, uxtb
; CHECK-NEXT: ret
entry:
@@ -100,9 +99,8 @@ B:
define i32 @g_i16_sign_extend_inreg(i16 %in, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: g_i16_sign_extend_inreg:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w8, w1, w2, mi
+; CHECK-NEXT: tst w0, #0x8000
+; CHECK-NEXT: csel w8, w1, w2, ne
; CHECK-NEXT: add w0, w8, w0, uxth
; CHECK-NEXT: ret
entry:
@@ -167,10 +165,8 @@ B:
define i64 @g_i32_sign_extend_i64(i32 %in, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: g_i32_sign_extend_i64:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
-; CHECK-NEXT: sxtw x8, w0
-; CHECK-NEXT: cmp x8, #0
-; CHECK-NEXT: csel x8, x1, x2, mi
+; CHECK-NEXT: tst w0, #0x80000000
+; CHECK-NEXT: csel x8, x1, x2, ne
; CHECK-NEXT: add x0, x8, w0, uxtw
; CHECK-NEXT: ret
entry:
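
The updated assertions above reflect a codegen improvement: when a compare against zero consumes only a sign-extended value, its result depends solely on the original sign bit, so the sxtb/sxth/sxtw plus cmp pair collapses into a single tst against that bit (0x80, 0x8000, 0x80000000), with the csel condition flipped from mi to ne. A minimal reproducer in the spirit of g_i8_sign_extend_inreg (the function name here is illustrative, not from the diff):

define i32 @select_on_i8_sign(i8 %in, i32 %a, i32 %b) nounwind {
entry:
  ; the icmp depends only on bit 7 of %in, so llc can now emit
  ;   tst w0, #0x80 ; csel w0, w1, w2, ne
  ; instead of sxtb w8, w0 ; cmp w8, #0 ; csel w0, w1, w2, mi
  %ext = sext i8 %in to i32
  %isneg = icmp slt i32 %ext, 0
  %sel = select i1 %isneg, i32 %a, i32 %b
  ret i32 %sel
}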
diff --git a/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll b/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll
index 8bc3497..6233ce7 100644
--- a/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll
@@ -1,20 +1,30 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK-COMMON,CHECK-CVT
-; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK-COMMON,CHECK-FP16
+; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-SD
+; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-SD
+; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-GI
+; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-GI
define <4 x half> @add_h(<4 x half> %a, <4 x half> %b) {
-; CHECK-CVT-LABEL: add_h:
-; CHECK-CVT: // %bb.0: // %entry
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fadd v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: add_h:
+; CHECK-CVT-SD: // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fadd v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: add_h:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fadd v0.4h, v0.4h, v1.4h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: add_h:
+; CHECK-CVT-GI: // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fadd v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
entry:
%0 = fadd <4 x half> %a, %b
@@ -22,28 +32,54 @@ entry:
}
define <4 x half> @build_h4(<4 x half> %a) {
-; CHECK-COMMON-LABEL: build_h4:
-; CHECK-COMMON: // %bb.0: // %entry
-; CHECK-COMMON-NEXT: mov w8, #15565 // =0x3ccd
-; CHECK-COMMON-NEXT: dup v0.4h, w8
-; CHECK-COMMON-NEXT: ret
+; CHECK-CVT-SD-LABEL: build_h4:
+; CHECK-CVT-SD: // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT: mov w8, #15565 // =0x3ccd
+; CHECK-CVT-SD-NEXT: dup v0.4h, w8
+; CHECK-CVT-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: build_h4:
+; CHECK-FP16-SD: // %bb.0: // %entry
+; CHECK-FP16-SD-NEXT: mov w8, #15565 // =0x3ccd
+; CHECK-FP16-SD-NEXT: dup v0.4h, w8
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: build_h4:
+; CHECK-CVT-GI: // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT: adrp x8, .LCPI1_0
+; CHECK-CVT-GI-NEXT: ldr d0, [x8, :lo12:.LCPI1_0]
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: build_h4:
+; CHECK-FP16-GI: // %bb.0: // %entry
+; CHECK-FP16-GI-NEXT: adrp x8, .LCPI1_0
+; CHECK-FP16-GI-NEXT: ldr d0, [x8, :lo12:.LCPI1_0]
+; CHECK-FP16-GI-NEXT: ret
entry:
ret <4 x half> <half 0xH3CCD, half 0xH3CCD, half 0xH3CCD, half 0xH3CCD>
}
define <4 x half> @sub_h(<4 x half> %a, <4 x half> %b) {
-; CHECK-CVT-LABEL: sub_h:
-; CHECK-CVT: // %bb.0: // %entry
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fsub v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: sub_h:
+; CHECK-CVT-SD: // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fsub v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: sub_h:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fsub v0.4h, v0.4h, v1.4h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: sub_h:
+; CHECK-CVT-GI: // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fsub v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
entry:
%0 = fsub <4 x half> %a, %b
@@ -51,18 +87,26 @@ entry:
}
define <4 x half> @mul_h(<4 x half> %a, <4 x half> %b) {
-; CHECK-CVT-LABEL: mul_h:
-; CHECK-CVT: // %bb.0: // %entry
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fmul v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: mul_h:
+; CHECK-CVT-SD: // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fmul v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: mul_h:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fmul v0.4h, v0.4h, v1.4h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: mul_h:
+; CHECK-CVT-GI: // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fmul v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
entry:
%0 = fmul <4 x half> %a, %b
@@ -70,18 +114,26 @@ entry:
}
define <4 x half> @div_h(<4 x half> %a, <4 x half> %b) {
-; CHECK-CVT-LABEL: div_h:
-; CHECK-CVT: // %bb.0: // %entry
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fdiv v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: div_h:
+; CHECK-CVT-SD: // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fdiv v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: div_h:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fdiv v0.4h, v0.4h, v1.4h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: div_h:
+; CHECK-CVT-GI: // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fdiv v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
entry:
%0 = fdiv <4 x half> %a, %b
@@ -89,92 +141,162 @@ entry:
}
define <4 x half> @load_h(ptr %a) {
-; CHECK-COMMON-LABEL: load_h:
-; CHECK-COMMON: // %bb.0: // %entry
-; CHECK-COMMON-NEXT: ldr d0, [x0]
-; CHECK-COMMON-NEXT: ret
+; CHECK-LABEL: load_h:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr d0, [x0]
+; CHECK-NEXT: ret
entry:
%0 = load <4 x half>, ptr %a, align 4
ret <4 x half> %0
}
define void @store_h(ptr %a, <4 x half> %b) {
-; CHECK-COMMON-LABEL: store_h:
-; CHECK-COMMON: // %bb.0: // %entry
-; CHECK-COMMON-NEXT: str d0, [x0]
-; CHECK-COMMON-NEXT: ret
+; CHECK-LABEL: store_h:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str d0, [x0]
+; CHECK-NEXT: ret
entry:
store <4 x half> %b, ptr %a, align 4
ret void
}
define <4 x half> @s_to_h(<4 x float> %a) {
-; CHECK-COMMON-LABEL: s_to_h:
-; CHECK-COMMON: // %bb.0:
-; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-COMMON-NEXT: ret
+; CHECK-LABEL: s_to_h:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-NEXT: ret
%1 = fptrunc <4 x float> %a to <4 x half>
ret <4 x half> %1
}
define <4 x half> @d_to_h(<4 x double> %a) {
-; CHECK-COMMON-LABEL: d_to_h:
-; CHECK-COMMON: // %bb.0:
-; CHECK-COMMON-NEXT: fcvtxn v0.2s, v0.2d
-; CHECK-COMMON-NEXT: fcvtxn2 v0.4s, v1.2d
-; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-COMMON-NEXT: ret
+; CHECK-CVT-SD-LABEL: d_to_h:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtxn v0.2s, v0.2d
+; CHECK-CVT-SD-NEXT: fcvtxn2 v0.4s, v1.2d
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: d_to_h:
+; CHECK-FP16-SD: // %bb.0:
+; CHECK-FP16-SD-NEXT: fcvtxn v0.2s, v0.2d
+; CHECK-FP16-SD-NEXT: fcvtxn2 v0.4s, v1.2d
+; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: d_to_h:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: mov d2, v0.d[1]
+; CHECK-CVT-GI-NEXT: fcvt h0, d0
+; CHECK-CVT-GI-NEXT: mov d3, v1.d[1]
+; CHECK-CVT-GI-NEXT: fcvt h1, d1
+; CHECK-CVT-GI-NEXT: fcvt h2, d2
+; CHECK-CVT-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-CVT-GI-NEXT: fcvt h2, d3
+; CHECK-CVT-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-CVT-GI-NEXT: mov v0.h[3], v2.h[0]
+; CHECK-CVT-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: d_to_h:
+; CHECK-FP16-GI: // %bb.0:
+; CHECK-FP16-GI-NEXT: mov d2, v0.d[1]
+; CHECK-FP16-GI-NEXT: fcvt h0, d0
+; CHECK-FP16-GI-NEXT: mov d3, v1.d[1]
+; CHECK-FP16-GI-NEXT: fcvt h1, d1
+; CHECK-FP16-GI-NEXT: fcvt h2, d2
+; CHECK-FP16-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-FP16-GI-NEXT: fcvt h2, d3
+; CHECK-FP16-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-FP16-GI-NEXT: mov v0.h[3], v2.h[0]
+; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-FP16-GI-NEXT: ret
%1 = fptrunc <4 x double> %a to <4 x half>
ret <4 x half> %1
}
define <4 x float> @h_to_s(<4 x half> %a) {
-; CHECK-COMMON-LABEL: h_to_s:
-; CHECK-COMMON: // %bb.0:
-; CHECK-COMMON-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-COMMON-NEXT: ret
+; CHECK-LABEL: h_to_s:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-NEXT: ret
%1 = fpext <4 x half> %a to <4 x float>
ret <4 x float> %1
}
define <4 x double> @h_to_d(<4 x half> %a) {
-; CHECK-COMMON-LABEL: h_to_d:
-; CHECK-COMMON: // %bb.0:
-; CHECK-COMMON-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-COMMON-NEXT: fcvtl2 v1.2d, v0.4s
-; CHECK-COMMON-NEXT: fcvtl v0.2d, v0.2s
-; CHECK-COMMON-NEXT: ret
+; CHECK-CVT-SD-LABEL: h_to_d:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtl2 v1.2d, v0.4s
+; CHECK-CVT-SD-NEXT: fcvtl v0.2d, v0.2s
+; CHECK-CVT-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: h_to_d:
+; CHECK-FP16-SD: // %bb.0:
+; CHECK-FP16-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-FP16-SD-NEXT: fcvtl2 v1.2d, v0.4s
+; CHECK-FP16-SD-NEXT: fcvtl v0.2d, v0.2s
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: h_to_d:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-CVT-GI-NEXT: mov h1, v0.h[1]
+; CHECK-CVT-GI-NEXT: mov h2, v0.h[2]
+; CHECK-CVT-GI-NEXT: mov h3, v0.h[3]
+; CHECK-CVT-GI-NEXT: fcvt d0, h0
+; CHECK-CVT-GI-NEXT: fcvt d4, h1
+; CHECK-CVT-GI-NEXT: fcvt d1, h2
+; CHECK-CVT-GI-NEXT: fcvt d2, h3
+; CHECK-CVT-GI-NEXT: mov v0.d[1], v4.d[0]
+; CHECK-CVT-GI-NEXT: mov v1.d[1], v2.d[0]
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: h_to_d:
+; CHECK-FP16-GI: // %bb.0:
+; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-FP16-GI-NEXT: mov h1, v0.h[1]
+; CHECK-FP16-GI-NEXT: mov h2, v0.h[2]
+; CHECK-FP16-GI-NEXT: mov h3, v0.h[3]
+; CHECK-FP16-GI-NEXT: fcvt d0, h0
+; CHECK-FP16-GI-NEXT: fcvt d4, h1
+; CHECK-FP16-GI-NEXT: fcvt d1, h2
+; CHECK-FP16-GI-NEXT: fcvt d2, h3
+; CHECK-FP16-GI-NEXT: mov v0.d[1], v4.d[0]
+; CHECK-FP16-GI-NEXT: mov v1.d[1], v2.d[0]
+; CHECK-FP16-GI-NEXT: ret
%1 = fpext <4 x half> %a to <4 x double>
ret <4 x double> %1
}
define <4 x half> @bitcast_i_to_h(float, <4 x i16> %a) {
-; CHECK-COMMON-LABEL: bitcast_i_to_h:
-; CHECK-COMMON: // %bb.0:
-; CHECK-COMMON-NEXT: fmov d0, d1
-; CHECK-COMMON-NEXT: ret
+; CHECK-LABEL: bitcast_i_to_h:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ret
%2 = bitcast <4 x i16> %a to <4 x half>
ret <4 x half> %2
}
define <4 x i16> @bitcast_h_to_i(float, <4 x half> %a) {
-; CHECK-COMMON-LABEL: bitcast_h_to_i:
-; CHECK-COMMON: // %bb.0:
-; CHECK-COMMON-NEXT: fmov d0, d1
-; CHECK-COMMON-NEXT: ret
+; CHECK-LABEL: bitcast_h_to_i:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ret
%2 = bitcast <4 x half> %a to <4 x i16>
ret <4 x i16> %2
}
define <4 x half> @sitofp_i8(<4 x i8> %a) #0 {
-; CHECK-CVT-LABEL: sitofp_i8:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: shl v0.4h, v0.4h, #8
-; CHECK-CVT-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-CVT-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-CVT-NEXT: scvtf v0.4s, v0.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: sitofp_i8:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-CVT-SD-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-CVT-SD-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT: scvtf v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: sitofp_i8:
; CHECK-FP16: // %bb.0:
@@ -182,6 +304,15 @@ define <4 x half> @sitofp_i8(<4 x i8> %a) #0 {
; CHECK-FP16-NEXT: sshr v0.4h, v0.4h, #8
; CHECK-FP16-NEXT: scvtf v0.4h, v0.4h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: sitofp_i8:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT: shl v0.4s, v0.4s, #24
+; CHECK-CVT-GI-NEXT: sshr v0.4s, v0.4s, #24
+; CHECK-CVT-GI-NEXT: scvtf v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = sitofp <4 x i8> %a to <4 x half>
ret <4 x half> %1
}
@@ -204,43 +335,59 @@ define <4 x half> @sitofp_i16(<4 x i16> %a) #0 {
define <4 x half> @sitofp_i32(<4 x i32> %a) #0 {
-; CHECK-COMMON-LABEL: sitofp_i32:
-; CHECK-COMMON: // %bb.0:
-; CHECK-COMMON-NEXT: scvtf v0.4s, v0.4s
-; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-COMMON-NEXT: ret
+; CHECK-LABEL: sitofp_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: scvtf v0.4s, v0.4s
+; CHECK-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-NEXT: ret
%1 = sitofp <4 x i32> %a to <4 x half>
ret <4 x half> %1
}
define <4 x half> @sitofp_i64(<4 x i64> %a) #0 {
-; CHECK-COMMON-LABEL: sitofp_i64:
-; CHECK-COMMON: // %bb.0:
-; CHECK-COMMON-NEXT: scvtf v0.2d, v0.2d
-; CHECK-COMMON-NEXT: scvtf v1.2d, v1.2d
-; CHECK-COMMON-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-COMMON-NEXT: fcvtn2 v0.4s, v1.2d
-; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-COMMON-NEXT: ret
+; CHECK-LABEL: sitofp_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: scvtf v0.2d, v0.2d
+; CHECK-NEXT: scvtf v1.2d, v1.2d
+; CHECK-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-NEXT: ret
%1 = sitofp <4 x i64> %a to <4 x half>
ret <4 x half> %1
}
define <4 x half> @uitofp_i8(<4 x i8> %a) #0 {
-; CHECK-CVT-LABEL: uitofp_i8:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: bic v0.4h, #255, lsl #8
-; CHECK-CVT-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-CVT-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: uitofp_i8:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-CVT-SD-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
-; CHECK-FP16-LABEL: uitofp_i8:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: bic v0.4h, #255, lsl #8
-; CHECK-FP16-NEXT: ucvtf v0.4h, v0.4h
-; CHECK-FP16-NEXT: ret
+; CHECK-FP16-SD-LABEL: uitofp_i8:
+; CHECK-FP16-SD: // %bb.0:
+; CHECK-FP16-SD-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-FP16-SD-NEXT: ucvtf v0.4h, v0.4h
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: uitofp_i8:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: movi v1.2d, #0x0000ff000000ff
+; CHECK-CVT-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-CVT-GI-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: uitofp_i8:
+; CHECK-FP16-GI: // %bb.0:
+; CHECK-FP16-GI-NEXT: movi d1, #0xff00ff00ff00ff
+; CHECK-FP16-GI-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-FP16-GI-NEXT: ucvtf v0.4h, v0.4h
+; CHECK-FP16-GI-NEXT: ret
%1 = uitofp <4 x i8> %a to <4 x half>
ret <4 x half> %1
}
@@ -264,35 +411,35 @@ define <4 x half> @uitofp_i16(<4 x i16> %a) #0 {
define <4 x half> @uitofp_i32(<4 x i32> %a) #0 {
-; CHECK-COMMON-LABEL: uitofp_i32:
-; CHECK-COMMON: // %bb.0:
-; CHECK-COMMON-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-COMMON-NEXT: ret
+; CHECK-LABEL: uitofp_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-NEXT: ret
%1 = uitofp <4 x i32> %a to <4 x half>
ret <4 x half> %1
}
define <4 x half> @uitofp_i64(<4 x i64> %a) #0 {
-; CHECK-COMMON-LABEL: uitofp_i64:
-; CHECK-COMMON: // %bb.0:
-; CHECK-COMMON-NEXT: ucvtf v0.2d, v0.2d
-; CHECK-COMMON-NEXT: ucvtf v1.2d, v1.2d
-; CHECK-COMMON-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-COMMON-NEXT: fcvtn2 v0.4s, v1.2d
-; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-COMMON-NEXT: ret
+; CHECK-LABEL: uitofp_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ucvtf v0.2d, v0.2d
+; CHECK-NEXT: ucvtf v1.2d, v1.2d
+; CHECK-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-NEXT: ret
%1 = uitofp <4 x i64> %a to <4 x half>
ret <4 x half> %1
}
define void @test_insert_at_zero(half %a, ptr %b) #0 {
-; CHECK-COMMON-LABEL: test_insert_at_zero:
-; CHECK-COMMON: // %bb.0:
-; CHECK-COMMON-NEXT: // kill: def $h0 killed $h0 def $d0
-; CHECK-COMMON-NEXT: str d0, [x0]
-; CHECK-COMMON-NEXT: ret
+; CHECK-LABEL: test_insert_at_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: str d0, [x0]
+; CHECK-NEXT: ret
%1 = insertelement <4 x half> undef, half %a, i64 0
store <4 x half> %1, ptr %b, align 4
ret void
@@ -331,17 +478,29 @@ define <4 x i16> @fptosi_i16(<4 x half> %a) #0 {
}
define <4 x i8> @fptoui_i8(<4 x half> %a) #0 {
-; CHECK-CVT-LABEL: fptoui_i8:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: fptoui_i8:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
-; CHECK-FP16-LABEL: fptoui_i8:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: fcvtzs v0.4h, v0.4h
-; CHECK-FP16-NEXT: ret
+; CHECK-FP16-SD-LABEL: fptoui_i8:
+; CHECK-FP16-SD: // %bb.0:
+; CHECK-FP16-SD-NEXT: fcvtzs v0.4h, v0.4h
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: fptoui_i8:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: fptoui_i8:
+; CHECK-FP16-GI: // %bb.0:
+; CHECK-FP16-GI-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-FP16-GI-NEXT: ret
; NOTE: fcvtzs selected here because the xtn shaves the sign bit
%1 = fptoui <4 x half> %a to <4 x i8>
ret <4 x i8> %1
@@ -364,36 +523,45 @@ define <4 x i16> @fptoui_i16(<4 x half> %a) #0 {
}
define <4 x i1> @test_fcmp_une(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_une:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: mvn v0.16b, v0.16b
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_une:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_une:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcmeq v0.4h, v0.4h, v1.4h
; CHECK-FP16-NEXT: mvn v0.8b, v0.8b
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_une:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp une <4 x half> %a, %b
ret <4 x i1> %1
}
define <4 x i1> @test_fcmp_ueq(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ueq:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmgt v2.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v2.16b
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: mvn v0.8b, v0.8b
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ueq:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_ueq:
; CHECK-FP16: // %bb.0:
@@ -402,102 +570,149 @@ define <4 x i1> @test_fcmp_ueq(<4 x half> %a, <4 x half> %b) #0 {
; CHECK-FP16-NEXT: orr v0.8b, v0.8b, v2.8b
; CHECK-FP16-NEXT: mvn v0.8b, v0.8b
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ueq:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp ueq <4 x half> %a, %b
ret <4 x i1> %1
}
define <4 x i1> @test_fcmp_ugt(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ugt:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcmge v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: mvn v0.8b, v0.8b
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ugt:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcmge v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_ugt:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcmge v0.4h, v1.4h, v0.4h
; CHECK-FP16-NEXT: mvn v0.8b, v0.8b
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ugt:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcmge v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp ugt <4 x half> %a, %b
ret <4 x i1> %1
}
define <4 x i1> @test_fcmp_uge(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_uge:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: mvn v0.8b, v0.8b
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_uge:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_uge:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcmgt v0.4h, v1.4h, v0.4h
; CHECK-FP16-NEXT: mvn v0.8b, v0.8b
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_uge:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp uge <4 x half> %a, %b
ret <4 x i1> %1
}
define <4 x i1> @test_fcmp_ult(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ult:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: mvn v0.8b, v0.8b
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ult:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_ult:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcmge v0.4h, v0.4h, v1.4h
; CHECK-FP16-NEXT: mvn v0.8b, v0.8b
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ult:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp ult <4 x half> %a, %b
ret <4 x i1> %1
}
define <4 x i1> @test_fcmp_ule(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ule:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmgt v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: mvn v0.8b, v0.8b
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ule:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_ule:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcmgt v0.4h, v0.4h, v1.4h
; CHECK-FP16-NEXT: mvn v0.8b, v0.8b
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ule:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp ule <4 x half> %a, %b
ret <4 x i1> %1
}
define <4 x i1> @test_fcmp_uno(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_uno:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmge v2.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v2.16b
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: mvn v0.8b, v0.8b
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_uno:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmge v2.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_uno:
; CHECK-FP16: // %bb.0:
@@ -506,21 +721,32 @@ define <4 x i1> @test_fcmp_uno(<4 x half> %a, <4 x half> %b) #0 {
; CHECK-FP16-NEXT: orr v0.8b, v0.8b, v2.8b
; CHECK-FP16-NEXT: mvn v0.8b, v0.8b
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_uno:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcmge v2.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp uno <4 x half> %a, %b
ret <4 x i1> %1
}
define <4 x i1> @test_fcmp_one(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_one:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmgt v2.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v2.16b
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_one:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_one:
; CHECK-FP16: // %bb.0:
@@ -528,60 +754,94 @@ define <4 x i1> @test_fcmp_one(<4 x half> %a, <4 x half> %b) #0 {
; CHECK-FP16-NEXT: fcmgt v0.4h, v1.4h, v0.4h
; CHECK-FP16-NEXT: orr v0.8b, v0.8b, v2.8b
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_one:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp one <4 x half> %a, %b
ret <4 x i1> %1
}
define <4 x i1> @test_fcmp_oeq(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_oeq:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_oeq:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_oeq:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcmeq v0.4h, v0.4h, v1.4h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_oeq:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp oeq <4 x half> %a, %b
ret <4 x i1> %1
}
define <4 x i1> @test_fcmp_ogt(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ogt:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmgt v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ogt:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_ogt:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcmgt v0.4h, v0.4h, v1.4h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ogt:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp ogt <4 x half> %a, %b
ret <4 x i1> %1
}
define <4 x i1> @test_fcmp_oge(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_oge:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_oge:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_oge:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcmge v0.4h, v0.4h, v1.4h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_oge:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp oge <4 x half> %a, %b
ret <4 x i1> %1
@@ -624,15 +884,15 @@ define <4 x i1> @test_fcmp_ole(<4 x half> %a, <4 x half> %b) #0 {
}
define <4 x i1> @test_fcmp_ord(<4 x half> %a, <4 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ord:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmge v2.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v2.16b
-; CHECK-CVT-NEXT: xtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ord:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmge v2.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_ord:
; CHECK-FP16: // %bb.0:
@@ -640,6 +900,16 @@ define <4 x i1> @test_fcmp_ord(<4 x half> %a, <4 x half> %b) #0 {
; CHECK-FP16-NEXT: fcmgt v0.4h, v1.4h, v0.4h
; CHECK-FP16-NEXT: orr v0.8b, v0.8b, v2.8b
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ord:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcmge v2.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp ord <4 x half> %a, %b
ret <4 x i1> %1
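
Per the NOTE line, CHECK blocks like these are regenerated by utils/update_llc_test_checks.py rather than written by hand: the script reruns every RUN line and emits one block per configuration, falling back to a shared prefix when several configurations agree. That is why functions whose output is identical across all four RUN lines (e.g. load_h, h_to_s in the file above) collapse to a single CHECK block:

; CHECK-LABEL: load_h:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:  ldr d0, [x0]
; CHECK-NEXT:  ret

while functions where SelectionDAG and GlobalISel diverge get CHECK-CVT-SD/CHECK-CVT-GI (and, where needed, CHECK-FP16-SD/CHECK-FP16-GI) variants.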
diff --git a/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll b/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll
index fcb42a7..86763eb 100644
--- a/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll
@@ -1,24 +1,38 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT
-; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
+; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-SD
+; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-SD
+; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-GI
+; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-GI
define <8 x half> @add_h(<8 x half> %a, <8 x half> %b) {
-; CHECK-CVT-LABEL: add_h:
-; CHECK-CVT: // %bb.0: // %entry
-; CHECK-CVT-NEXT: fcvtl v2.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v3.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtl2 v1.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h
-; CHECK-CVT-NEXT: fadd v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fadd v1.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v2.4s
-; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: add_h:
+; CHECK-CVT-SD: // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT: fcvtl v2.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v3.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fadd v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fadd v1.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v2.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: add_h:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fadd v0.8h, v0.8h, v1.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: add_h:
+; CHECK-CVT-GI: // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fadd v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fadd v1.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-GI-NEXT: ret
entry:
%0 = fadd <8 x half> %a, %b
ret <8 x half> %0
@@ -26,22 +40,34 @@ entry:
define <8 x half> @sub_h(<8 x half> %a, <8 x half> %b) {
-; CHECK-CVT-LABEL: sub_h:
-; CHECK-CVT: // %bb.0: // %entry
-; CHECK-CVT-NEXT: fcvtl v2.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v3.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtl2 v1.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h
-; CHECK-CVT-NEXT: fsub v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fsub v1.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v2.4s
-; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: sub_h:
+; CHECK-CVT-SD: // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT: fcvtl v2.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v3.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fsub v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fsub v1.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v2.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: sub_h:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fsub v0.8h, v0.8h, v1.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: sub_h:
+; CHECK-CVT-GI: // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fsub v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fsub v1.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-GI-NEXT: ret
entry:
%0 = fsub <8 x half> %a, %b
ret <8 x half> %0
@@ -49,22 +75,34 @@ entry:
define <8 x half> @mul_h(<8 x half> %a, <8 x half> %b) {
-; CHECK-CVT-LABEL: mul_h:
-; CHECK-CVT: // %bb.0: // %entry
-; CHECK-CVT-NEXT: fcvtl v2.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v3.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtl2 v1.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h
-; CHECK-CVT-NEXT: fmul v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fmul v1.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v2.4s
-; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: mul_h:
+; CHECK-CVT-SD: // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT: fcvtl v2.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v3.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fmul v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fmul v1.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v2.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: mul_h:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fmul v0.8h, v0.8h, v1.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: mul_h:
+; CHECK-CVT-GI: // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fmul v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fmul v1.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-GI-NEXT: ret
entry:
%0 = fmul <8 x half> %a, %b
ret <8 x half> %0
@@ -72,22 +110,34 @@ entry:
define <8 x half> @div_h(<8 x half> %a, <8 x half> %b) {
-; CHECK-CVT-LABEL: div_h:
-; CHECK-CVT: // %bb.0: // %entry
-; CHECK-CVT-NEXT: fcvtl v2.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v3.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtl2 v1.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h
-; CHECK-CVT-NEXT: fdiv v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fdiv v1.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v2.4s
-; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: div_h:
+; CHECK-CVT-SD: // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT: fcvtl v2.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v3.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fdiv v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fdiv v1.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v2.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: div_h:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fdiv v0.8h, v0.8h, v1.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: div_h:
+; CHECK-CVT-GI: // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fdiv v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fdiv v1.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-GI-NEXT: ret
entry:
%0 = fdiv <8 x half> %a, %b
ret <8 x half> %0
@@ -126,39 +176,171 @@ define <8 x half> @s_to_h(<8 x float> %a) {
}
define <8 x half> @d_to_h(<8 x double> %a) {
-; CHECK-LABEL: d_to_h:
-; CHECK: // %bb.0:
-; CHECK-NEXT: fcvtxn v0.2s, v0.2d
-; CHECK-NEXT: fcvtxn v2.2s, v2.2d
-; CHECK-NEXT: fcvtxn2 v0.4s, v1.2d
-; CHECK-NEXT: fcvtxn2 v2.4s, v3.2d
-; CHECK-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-NEXT: fcvtn2 v0.8h, v2.4s
-; CHECK-NEXT: ret
+; CHECK-CVT-SD-LABEL: d_to_h:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtxn v0.2s, v0.2d
+; CHECK-CVT-SD-NEXT: fcvtxn v2.2s, v2.2d
+; CHECK-CVT-SD-NEXT: fcvtxn2 v0.4s, v1.2d
+; CHECK-CVT-SD-NEXT: fcvtxn2 v2.4s, v3.2d
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: d_to_h:
+; CHECK-FP16-SD: // %bb.0:
+; CHECK-FP16-SD-NEXT: fcvtxn v0.2s, v0.2d
+; CHECK-FP16-SD-NEXT: fcvtxn v2.2s, v2.2d
+; CHECK-FP16-SD-NEXT: fcvtxn2 v0.4s, v1.2d
+; CHECK-FP16-SD-NEXT: fcvtxn2 v2.4s, v3.2d
+; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-FP16-SD-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: d_to_h:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: mov d4, v0.d[1]
+; CHECK-CVT-GI-NEXT: fcvt h0, d0
+; CHECK-CVT-GI-NEXT: mov d5, v1.d[1]
+; CHECK-CVT-GI-NEXT: fcvt h1, d1
+; CHECK-CVT-GI-NEXT: fcvt h4, d4
+; CHECK-CVT-GI-NEXT: mov v0.h[1], v4.h[0]
+; CHECK-CVT-GI-NEXT: fcvt h4, d5
+; CHECK-CVT-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-CVT-GI-NEXT: mov d1, v2.d[1]
+; CHECK-CVT-GI-NEXT: fcvt h2, d2
+; CHECK-CVT-GI-NEXT: mov v0.h[3], v4.h[0]
+; CHECK-CVT-GI-NEXT: fcvt h1, d1
+; CHECK-CVT-GI-NEXT: mov v0.h[4], v2.h[0]
+; CHECK-CVT-GI-NEXT: mov d2, v3.d[1]
+; CHECK-CVT-GI-NEXT: fcvt h3, d3
+; CHECK-CVT-GI-NEXT: mov v0.h[5], v1.h[0]
+; CHECK-CVT-GI-NEXT: fcvt h1, d2
+; CHECK-CVT-GI-NEXT: mov v0.h[6], v3.h[0]
+; CHECK-CVT-GI-NEXT: mov v0.h[7], v1.h[0]
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: d_to_h:
+; CHECK-FP16-GI: // %bb.0:
+; CHECK-FP16-GI-NEXT: mov d4, v0.d[1]
+; CHECK-FP16-GI-NEXT: fcvt h0, d0
+; CHECK-FP16-GI-NEXT: mov d5, v1.d[1]
+; CHECK-FP16-GI-NEXT: fcvt h1, d1
+; CHECK-FP16-GI-NEXT: fcvt h4, d4
+; CHECK-FP16-GI-NEXT: mov v0.h[1], v4.h[0]
+; CHECK-FP16-GI-NEXT: fcvt h4, d5
+; CHECK-FP16-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-FP16-GI-NEXT: mov d1, v2.d[1]
+; CHECK-FP16-GI-NEXT: fcvt h2, d2
+; CHECK-FP16-GI-NEXT: mov v0.h[3], v4.h[0]
+; CHECK-FP16-GI-NEXT: fcvt h1, d1
+; CHECK-FP16-GI-NEXT: mov v0.h[4], v2.h[0]
+; CHECK-FP16-GI-NEXT: mov d2, v3.d[1]
+; CHECK-FP16-GI-NEXT: fcvt h3, d3
+; CHECK-FP16-GI-NEXT: mov v0.h[5], v1.h[0]
+; CHECK-FP16-GI-NEXT: fcvt h1, d2
+; CHECK-FP16-GI-NEXT: mov v0.h[6], v3.h[0]
+; CHECK-FP16-GI-NEXT: mov v0.h[7], v1.h[0]
+; CHECK-FP16-GI-NEXT: ret
%1 = fptrunc <8 x double> %a to <8 x half>
ret <8 x half> %1
}
define <8 x float> @h_to_s(<8 x half> %a) {
-; CHECK-LABEL: h_to_s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: fcvtl2 v1.4s, v0.8h
-; CHECK-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-NEXT: ret
+; CHECK-CVT-SD-LABEL: h_to_s:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: h_to_s:
+; CHECK-FP16-SD: // %bb.0:
+; CHECK-FP16-SD-NEXT: fcvtl2 v1.4s, v0.8h
+; CHECK-FP16-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: h_to_s:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v0.8h
+; CHECK-CVT-GI-NEXT: mov v0.16b, v2.16b
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: h_to_s:
+; CHECK-FP16-GI: // %bb.0:
+; CHECK-FP16-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-FP16-GI-NEXT: fcvtl2 v1.4s, v0.8h
+; CHECK-FP16-GI-NEXT: mov v0.16b, v2.16b
+; CHECK-FP16-GI-NEXT: ret
%1 = fpext <8 x half> %a to <8 x float>
ret <8 x float> %1
}
define <8 x double> @h_to_d(<8 x half> %a) {
-; CHECK-LABEL: h_to_d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: fcvtl v1.4s, v0.4h
-; CHECK-NEXT: fcvtl2 v2.4s, v0.8h
-; CHECK-NEXT: fcvtl v0.2d, v1.2s
-; CHECK-NEXT: fcvtl2 v3.2d, v2.4s
-; CHECK-NEXT: fcvtl2 v1.2d, v1.4s
-; CHECK-NEXT: fcvtl v2.2d, v2.2s
-; CHECK-NEXT: ret
+; CHECK-CVT-SD-LABEL: h_to_d:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v0.2d, v1.2s
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.2d, v2.4s
+; CHECK-CVT-SD-NEXT: fcvtl2 v1.2d, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtl v2.2d, v2.2s
+; CHECK-CVT-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: h_to_d:
+; CHECK-FP16-SD: // %bb.0:
+; CHECK-FP16-SD-NEXT: fcvtl v1.4s, v0.4h
+; CHECK-FP16-SD-NEXT: fcvtl2 v2.4s, v0.8h
+; CHECK-FP16-SD-NEXT: fcvtl v0.2d, v1.2s
+; CHECK-FP16-SD-NEXT: fcvtl2 v3.2d, v2.4s
+; CHECK-FP16-SD-NEXT: fcvtl2 v1.2d, v1.4s
+; CHECK-FP16-SD-NEXT: fcvtl v2.2d, v2.2s
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: h_to_d:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: mov h1, v0.h[1]
+; CHECK-CVT-GI-NEXT: mov h2, v0.h[2]
+; CHECK-CVT-GI-NEXT: mov h3, v0.h[3]
+; CHECK-CVT-GI-NEXT: mov h4, v0.h[4]
+; CHECK-CVT-GI-NEXT: mov h5, v0.h[5]
+; CHECK-CVT-GI-NEXT: mov h6, v0.h[6]
+; CHECK-CVT-GI-NEXT: mov h7, v0.h[7]
+; CHECK-CVT-GI-NEXT: fcvt d0, h0
+; CHECK-CVT-GI-NEXT: fcvt d16, h1
+; CHECK-CVT-GI-NEXT: fcvt d1, h2
+; CHECK-CVT-GI-NEXT: fcvt d17, h3
+; CHECK-CVT-GI-NEXT: fcvt d2, h4
+; CHECK-CVT-GI-NEXT: fcvt d4, h5
+; CHECK-CVT-GI-NEXT: fcvt d3, h6
+; CHECK-CVT-GI-NEXT: fcvt d5, h7
+; CHECK-CVT-GI-NEXT: mov v0.d[1], v16.d[0]
+; CHECK-CVT-GI-NEXT: mov v1.d[1], v17.d[0]
+; CHECK-CVT-GI-NEXT: mov v2.d[1], v4.d[0]
+; CHECK-CVT-GI-NEXT: mov v3.d[1], v5.d[0]
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: h_to_d:
+; CHECK-FP16-GI: // %bb.0:
+; CHECK-FP16-GI-NEXT: mov h1, v0.h[1]
+; CHECK-FP16-GI-NEXT: mov h2, v0.h[2]
+; CHECK-FP16-GI-NEXT: mov h3, v0.h[3]
+; CHECK-FP16-GI-NEXT: mov h4, v0.h[4]
+; CHECK-FP16-GI-NEXT: mov h5, v0.h[5]
+; CHECK-FP16-GI-NEXT: mov h6, v0.h[6]
+; CHECK-FP16-GI-NEXT: mov h7, v0.h[7]
+; CHECK-FP16-GI-NEXT: fcvt d0, h0
+; CHECK-FP16-GI-NEXT: fcvt d16, h1
+; CHECK-FP16-GI-NEXT: fcvt d1, h2
+; CHECK-FP16-GI-NEXT: fcvt d17, h3
+; CHECK-FP16-GI-NEXT: fcvt d2, h4
+; CHECK-FP16-GI-NEXT: fcvt d4, h5
+; CHECK-FP16-GI-NEXT: fcvt d3, h6
+; CHECK-FP16-GI-NEXT: fcvt d5, h7
+; CHECK-FP16-GI-NEXT: mov v0.d[1], v16.d[0]
+; CHECK-FP16-GI-NEXT: mov v1.d[1], v17.d[0]
+; CHECK-FP16-GI-NEXT: mov v2.d[1], v4.d[0]
+; CHECK-FP16-GI-NEXT: mov v3.d[1], v5.d[0]
+; CHECK-FP16-GI-NEXT: ret
%1 = fpext <8 x half> %a to <8 x double>
ret <8 x double> %1
}
@@ -183,14 +365,14 @@ define <8 x i16> @bitcast_h_to_i(float, <8 x half> %a) {
}
define <4 x half> @sitofp_v4i8(<4 x i8> %a) #0 {
-; CHECK-CVT-LABEL: sitofp_v4i8:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: shl v0.4h, v0.4h, #8
-; CHECK-CVT-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-CVT-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-CVT-NEXT: scvtf v0.4s, v0.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: sitofp_v4i8:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-CVT-SD-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-CVT-SD-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT: scvtf v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: sitofp_v4i8:
; CHECK-FP16: // %bb.0:
@@ -198,76 +380,132 @@ define <4 x half> @sitofp_v4i8(<4 x i8> %a) #0 {
; CHECK-FP16-NEXT: sshr v0.4h, v0.4h, #8
; CHECK-FP16-NEXT: scvtf v0.4h, v0.4h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: sitofp_v4i8:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT: shl v0.4s, v0.4s, #24
+; CHECK-CVT-GI-NEXT: sshr v0.4s, v0.4s, #24
+; CHECK-CVT-GI-NEXT: scvtf v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = sitofp <4 x i8> %a to <4 x half>
ret <4 x half> %1
}
define <8 x half> @sitofp_v8i8(<8 x i8> %a) #0 {
-; CHECK-CVT-LABEL: sitofp_v8i8:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-CVT-NEXT: sshll v1.4s, v0.4h, #0
-; CHECK-CVT-NEXT: sshll2 v2.4s, v0.8h, #0
-; CHECK-CVT-NEXT: scvtf v1.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s
-; CHECK-CVT-NEXT: scvtf v1.4s, v2.4s
-; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: sitofp_v8i8:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: sshll v0.8h, v0.8b, #0
+; CHECK-CVT-SD-NEXT: sshll v1.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT: sshll2 v2.4s, v0.8h, #0
+; CHECK-CVT-SD-NEXT: scvtf v1.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s
+; CHECK-CVT-SD-NEXT: scvtf v1.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: sitofp_v8i8:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: sshll v0.8h, v0.8b, #0
; CHECK-FP16-NEXT: scvtf v0.8h, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: sitofp_v8i8:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: sshll v0.8h, v0.8b, #0
+; CHECK-CVT-GI-NEXT: sshll v1.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT: sshll2 v0.4s, v0.8h, #0
+; CHECK-CVT-GI-NEXT: scvtf v1.4s, v1.4s
+; CHECK-CVT-GI-NEXT: scvtf v2.4s, v0.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = sitofp <8 x i8> %a to <8 x half>
ret <8 x half> %1
}
define <16 x half> @sitofp_v16i8(<16 x i8> %a) #0 {
-; CHECK-CVT-LABEL: sitofp_v16i8:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: sshll2 v1.8h, v0.16b, #0
-; CHECK-CVT-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-CVT-NEXT: sshll v2.4s, v1.4h, #0
-; CHECK-CVT-NEXT: sshll v3.4s, v0.4h, #0
-; CHECK-CVT-NEXT: sshll2 v4.4s, v1.8h, #0
-; CHECK-CVT-NEXT: sshll2 v5.4s, v0.8h, #0
-; CHECK-CVT-NEXT: scvtf v2.4s, v2.4s
-; CHECK-CVT-NEXT: scvtf v3.4s, v3.4s
-; CHECK-CVT-NEXT: fcvtn v1.4h, v2.4s
-; CHECK-CVT-NEXT: scvtf v2.4s, v4.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v3.4s
-; CHECK-CVT-NEXT: scvtf v3.4s, v5.4s
-; CHECK-CVT-NEXT: fcvtn2 v1.8h, v2.4s
-; CHECK-CVT-NEXT: fcvtn2 v0.8h, v3.4s
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: sitofp_v16i8:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: sshll2 v1.8h, v0.16b, #0
-; CHECK-FP16-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-FP16-NEXT: scvtf v1.8h, v1.8h
-; CHECK-FP16-NEXT: scvtf v0.8h, v0.8h
-; CHECK-FP16-NEXT: ret
+; CHECK-CVT-SD-LABEL: sitofp_v16i8:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: sshll2 v1.8h, v0.16b, #0
+; CHECK-CVT-SD-NEXT: sshll v0.8h, v0.8b, #0
+; CHECK-CVT-SD-NEXT: sshll v2.4s, v1.4h, #0
+; CHECK-CVT-SD-NEXT: sshll v3.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT: sshll2 v4.4s, v1.8h, #0
+; CHECK-CVT-SD-NEXT: sshll2 v5.4s, v0.8h, #0
+; CHECK-CVT-SD-NEXT: scvtf v2.4s, v2.4s
+; CHECK-CVT-SD-NEXT: scvtf v3.4s, v3.4s
+; CHECK-CVT-SD-NEXT: fcvtn v1.4h, v2.4s
+; CHECK-CVT-SD-NEXT: scvtf v2.4s, v4.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v3.4s
+; CHECK-CVT-SD-NEXT: scvtf v3.4s, v5.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v1.8h, v2.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v3.4s
+; CHECK-CVT-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: sitofp_v16i8:
+; CHECK-FP16-SD: // %bb.0:
+; CHECK-FP16-SD-NEXT: sshll2 v1.8h, v0.16b, #0
+; CHECK-FP16-SD-NEXT: sshll v0.8h, v0.8b, #0
+; CHECK-FP16-SD-NEXT: scvtf v1.8h, v1.8h
+; CHECK-FP16-SD-NEXT: scvtf v0.8h, v0.8h
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: sitofp_v16i8:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: sshll v1.8h, v0.8b, #0
+; CHECK-CVT-GI-NEXT: sshll2 v0.8h, v0.16b, #0
+; CHECK-CVT-GI-NEXT: sshll v2.4s, v1.4h, #0
+; CHECK-CVT-GI-NEXT: sshll v3.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT: sshll2 v1.4s, v1.8h, #0
+; CHECK-CVT-GI-NEXT: sshll2 v0.4s, v0.8h, #0
+; CHECK-CVT-GI-NEXT: scvtf v2.4s, v2.4s
+; CHECK-CVT-GI-NEXT: scvtf v3.4s, v3.4s
+; CHECK-CVT-GI-NEXT: scvtf v4.4s, v1.4s
+; CHECK-CVT-GI-NEXT: scvtf v5.4s, v0.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s
+; CHECK-CVT-GI-NEXT: fcvtn v1.4h, v3.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v4.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v1.8h, v5.4s
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: sitofp_v16i8:
+; CHECK-FP16-GI: // %bb.0:
+; CHECK-FP16-GI-NEXT: sshll v1.8h, v0.8b, #0
+; CHECK-FP16-GI-NEXT: sshll2 v2.8h, v0.16b, #0
+; CHECK-FP16-GI-NEXT: scvtf v0.8h, v1.8h
+; CHECK-FP16-GI-NEXT: scvtf v1.8h, v2.8h
+; CHECK-FP16-GI-NEXT: ret
%1 = sitofp <16 x i8> %a to <16 x half>
ret <16 x half> %1
}
define <8 x half> @sitofp_i16(<8 x i16> %a) #0 {
-; CHECK-CVT-LABEL: sitofp_i16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: sshll v1.4s, v0.4h, #0
-; CHECK-CVT-NEXT: sshll2 v2.4s, v0.8h, #0
-; CHECK-CVT-NEXT: scvtf v1.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s
-; CHECK-CVT-NEXT: scvtf v1.4s, v2.4s
-; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: sitofp_i16:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: sshll v1.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT: sshll2 v2.4s, v0.8h, #0
+; CHECK-CVT-SD-NEXT: scvtf v1.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s
+; CHECK-CVT-SD-NEXT: scvtf v1.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: sitofp_i16:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: scvtf v0.8h, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: sitofp_i16:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: sshll v1.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT: sshll2 v0.4s, v0.8h, #0
+; CHECK-CVT-GI-NEXT: scvtf v1.4s, v1.4s
+; CHECK-CVT-GI-NEXT: scvtf v2.4s, v0.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = sitofp <8 x i16> %a to <8 x half>
ret <8 x half> %1
}
@@ -286,108 +524,213 @@ define <8 x half> @sitofp_i32(<8 x i32> %a) #0 {
define <8 x half> @sitofp_i64(<8 x i64> %a) #0 {
-; CHECK-LABEL: sitofp_i64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: scvtf v0.2d, v0.2d
-; CHECK-NEXT: scvtf v2.2d, v2.2d
-; CHECK-NEXT: scvtf v1.2d, v1.2d
-; CHECK-NEXT: scvtf v3.2d, v3.2d
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: fcvtn v2.2s, v2.2d
-; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
-; CHECK-NEXT: fcvtn2 v2.4s, v3.2d
-; CHECK-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-NEXT: fcvtn2 v0.8h, v2.4s
-; CHECK-NEXT: ret
+; CHECK-CVT-SD-LABEL: sitofp_i64:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: scvtf v0.2d, v0.2d
+; CHECK-CVT-SD-NEXT: scvtf v2.2d, v2.2d
+; CHECK-CVT-SD-NEXT: scvtf v1.2d, v1.2d
+; CHECK-CVT-SD-NEXT: scvtf v3.2d, v3.2d
+; CHECK-CVT-SD-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-CVT-SD-NEXT: fcvtn v2.2s, v2.2d
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-CVT-SD-NEXT: fcvtn2 v2.4s, v3.2d
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: sitofp_i64:
+; CHECK-FP16-SD: // %bb.0:
+; CHECK-FP16-SD-NEXT: scvtf v0.2d, v0.2d
+; CHECK-FP16-SD-NEXT: scvtf v2.2d, v2.2d
+; CHECK-FP16-SD-NEXT: scvtf v1.2d, v1.2d
+; CHECK-FP16-SD-NEXT: scvtf v3.2d, v3.2d
+; CHECK-FP16-SD-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-FP16-SD-NEXT: fcvtn v2.2s, v2.2d
+; CHECK-FP16-SD-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-FP16-SD-NEXT: fcvtn2 v2.4s, v3.2d
+; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-FP16-SD-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: sitofp_i64:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: scvtf v0.2d, v0.2d
+; CHECK-CVT-GI-NEXT: scvtf v1.2d, v1.2d
+; CHECK-CVT-GI-NEXT: scvtf v2.2d, v2.2d
+; CHECK-CVT-GI-NEXT: scvtf v3.2d, v3.2d
+; CHECK-CVT-GI-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-CVT-GI-NEXT: fcvtn v2.2s, v2.2d
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-CVT-GI-NEXT: fcvtn2 v2.4s, v3.2d
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: sitofp_i64:
+; CHECK-FP16-GI: // %bb.0:
+; CHECK-FP16-GI-NEXT: scvtf v0.2d, v0.2d
+; CHECK-FP16-GI-NEXT: scvtf v1.2d, v1.2d
+; CHECK-FP16-GI-NEXT: scvtf v2.2d, v2.2d
+; CHECK-FP16-GI-NEXT: scvtf v3.2d, v3.2d
+; CHECK-FP16-GI-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-FP16-GI-NEXT: fcvtn v2.2s, v2.2d
+; CHECK-FP16-GI-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-FP16-GI-NEXT: fcvtn2 v2.4s, v3.2d
+; CHECK-FP16-GI-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-FP16-GI-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-FP16-GI-NEXT: ret
%1 = sitofp <8 x i64> %a to <8 x half>
ret <8 x half> %1
}
define <4 x half> @uitofp_v4i8(<4 x i8> %a) #0 {
-; CHECK-CVT-LABEL: uitofp_v4i8:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: bic v0.4h, #255, lsl #8
-; CHECK-CVT-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-CVT-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: uitofp_v4i8:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: bic v0.4h, #255, lsl #8
-; CHECK-FP16-NEXT: ucvtf v0.4h, v0.4h
-; CHECK-FP16-NEXT: ret
+; CHECK-CVT-SD-LABEL: uitofp_v4i8:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-CVT-SD-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: uitofp_v4i8:
+; CHECK-FP16-SD: // %bb.0:
+; CHECK-FP16-SD-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-FP16-SD-NEXT: ucvtf v0.4h, v0.4h
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: uitofp_v4i8:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: movi v1.2d, #0x0000ff000000ff
+; CHECK-CVT-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-CVT-GI-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: uitofp_v4i8:
+; CHECK-FP16-GI: // %bb.0:
+; CHECK-FP16-GI-NEXT: movi d1, #0xff00ff00ff00ff
+; CHECK-FP16-GI-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-FP16-GI-NEXT: ucvtf v0.4h, v0.4h
+; CHECK-FP16-GI-NEXT: ret
%1 = uitofp <4 x i8> %a to <4 x half>
ret <4 x half> %1
}
define <8 x half> @uitofp_v8i8(<8 x i8> %a) #0 {
-; CHECK-CVT-LABEL: uitofp_v8i8:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-CVT-NEXT: ushll v1.4s, v0.4h, #0
-; CHECK-CVT-NEXT: ushll2 v2.4s, v0.8h, #0
-; CHECK-CVT-NEXT: ucvtf v1.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s
-; CHECK-CVT-NEXT: ucvtf v1.4s, v2.4s
-; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: uitofp_v8i8:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-CVT-SD-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT: ushll2 v2.4s, v0.8h, #0
+; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s
+; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: uitofp_v8i8:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: ushll v0.8h, v0.8b, #0
; CHECK-FP16-NEXT: ucvtf v0.8h, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: uitofp_v8i8:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-CVT-GI-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT: ushll2 v0.4s, v0.8h, #0
+; CHECK-CVT-GI-NEXT: ucvtf v1.4s, v1.4s
+; CHECK-CVT-GI-NEXT: ucvtf v2.4s, v0.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = uitofp <8 x i8> %a to <8 x half>
ret <8 x half> %1
}
define <16 x half> @uitofp_v16i8(<16 x i8> %a) #0 {
-; CHECK-CVT-LABEL: uitofp_v16i8:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: ushll2 v1.8h, v0.16b, #0
-; CHECK-CVT-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-CVT-NEXT: ushll v2.4s, v1.4h, #0
-; CHECK-CVT-NEXT: ushll v3.4s, v0.4h, #0
-; CHECK-CVT-NEXT: ushll2 v4.4s, v1.8h, #0
-; CHECK-CVT-NEXT: ushll2 v5.4s, v0.8h, #0
-; CHECK-CVT-NEXT: ucvtf v2.4s, v2.4s
-; CHECK-CVT-NEXT: ucvtf v3.4s, v3.4s
-; CHECK-CVT-NEXT: fcvtn v1.4h, v2.4s
-; CHECK-CVT-NEXT: ucvtf v2.4s, v4.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v3.4s
-; CHECK-CVT-NEXT: ucvtf v3.4s, v5.4s
-; CHECK-CVT-NEXT: fcvtn2 v1.8h, v2.4s
-; CHECK-CVT-NEXT: fcvtn2 v0.8h, v3.4s
-; CHECK-CVT-NEXT: ret
-;
-; CHECK-FP16-LABEL: uitofp_v16i8:
-; CHECK-FP16: // %bb.0:
-; CHECK-FP16-NEXT: ushll2 v1.8h, v0.16b, #0
-; CHECK-FP16-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-FP16-NEXT: ucvtf v1.8h, v1.8h
-; CHECK-FP16-NEXT: ucvtf v0.8h, v0.8h
-; CHECK-FP16-NEXT: ret
+; CHECK-CVT-SD-LABEL: uitofp_v16i8:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: ushll2 v1.8h, v0.16b, #0
+; CHECK-CVT-SD-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-CVT-SD-NEXT: ushll v2.4s, v1.4h, #0
+; CHECK-CVT-SD-NEXT: ushll v3.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT: ushll2 v4.4s, v1.8h, #0
+; CHECK-CVT-SD-NEXT: ushll2 v5.4s, v0.8h, #0
+; CHECK-CVT-SD-NEXT: ucvtf v2.4s, v2.4s
+; CHECK-CVT-SD-NEXT: ucvtf v3.4s, v3.4s
+; CHECK-CVT-SD-NEXT: fcvtn v1.4h, v2.4s
+; CHECK-CVT-SD-NEXT: ucvtf v2.4s, v4.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v3.4s
+; CHECK-CVT-SD-NEXT: ucvtf v3.4s, v5.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v1.8h, v2.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v3.4s
+; CHECK-CVT-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: uitofp_v16i8:
+; CHECK-FP16-SD: // %bb.0:
+; CHECK-FP16-SD-NEXT: ushll2 v1.8h, v0.16b, #0
+; CHECK-FP16-SD-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-FP16-SD-NEXT: ucvtf v1.8h, v1.8h
+; CHECK-FP16-SD-NEXT: ucvtf v0.8h, v0.8h
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: uitofp_v16i8:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: ushll v1.8h, v0.8b, #0
+; CHECK-CVT-GI-NEXT: ushll2 v0.8h, v0.16b, #0
+; CHECK-CVT-GI-NEXT: ushll v2.4s, v1.4h, #0
+; CHECK-CVT-GI-NEXT: ushll v3.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT: ushll2 v1.4s, v1.8h, #0
+; CHECK-CVT-GI-NEXT: ushll2 v0.4s, v0.8h, #0
+; CHECK-CVT-GI-NEXT: ucvtf v2.4s, v2.4s
+; CHECK-CVT-GI-NEXT: ucvtf v3.4s, v3.4s
+; CHECK-CVT-GI-NEXT: ucvtf v4.4s, v1.4s
+; CHECK-CVT-GI-NEXT: ucvtf v5.4s, v0.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s
+; CHECK-CVT-GI-NEXT: fcvtn v1.4h, v3.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v4.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v1.8h, v5.4s
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: uitofp_v16i8:
+; CHECK-FP16-GI: // %bb.0:
+; CHECK-FP16-GI-NEXT: ushll v1.8h, v0.8b, #0
+; CHECK-FP16-GI-NEXT: ushll2 v2.8h, v0.16b, #0
+; CHECK-FP16-GI-NEXT: ucvtf v0.8h, v1.8h
+; CHECK-FP16-GI-NEXT: ucvtf v1.8h, v2.8h
+; CHECK-FP16-GI-NEXT: ret
%1 = uitofp <16 x i8> %a to <16 x half>
ret <16 x half> %1
}
define <8 x half> @uitofp_i16(<8 x i16> %a) #0 {
-; CHECK-CVT-LABEL: uitofp_i16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: ushll v1.4s, v0.4h, #0
-; CHECK-CVT-NEXT: ushll2 v2.4s, v0.8h, #0
-; CHECK-CVT-NEXT: ucvtf v1.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s
-; CHECK-CVT-NEXT: ucvtf v1.4s, v2.4s
-; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: uitofp_i16:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-CVT-SD-NEXT: ushll2 v2.4s, v0.8h, #0
+; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s
+; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: uitofp_i16:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: ucvtf v0.8h, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: uitofp_i16:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-CVT-GI-NEXT: ushll2 v0.4s, v0.8h, #0
+; CHECK-CVT-GI-NEXT: ucvtf v1.4s, v1.4s
+; CHECK-CVT-GI-NEXT: ucvtf v2.4s, v0.4s
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-GI-NEXT: ret
%1 = uitofp <8 x i16> %a to <8 x half>
ret <8 x half> %1
}
@@ -407,19 +750,61 @@ define <8 x half> @uitofp_i32(<8 x i32> %a) #0 {
define <8 x half> @uitofp_i64(<8 x i64> %a) #0 {
-; CHECK-LABEL: uitofp_i64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: ucvtf v2.2d, v2.2d
-; CHECK-NEXT: ucvtf v1.2d, v1.2d
-; CHECK-NEXT: ucvtf v3.2d, v3.2d
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: fcvtn v2.2s, v2.2d
-; CHECK-NEXT: fcvtn2 v0.4s, v1.2d
-; CHECK-NEXT: fcvtn2 v2.4s, v3.2d
-; CHECK-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-NEXT: fcvtn2 v0.8h, v2.4s
-; CHECK-NEXT: ret
+; CHECK-CVT-SD-LABEL: uitofp_i64:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: ucvtf v0.2d, v0.2d
+; CHECK-CVT-SD-NEXT: ucvtf v2.2d, v2.2d
+; CHECK-CVT-SD-NEXT: ucvtf v1.2d, v1.2d
+; CHECK-CVT-SD-NEXT: ucvtf v3.2d, v3.2d
+; CHECK-CVT-SD-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-CVT-SD-NEXT: fcvtn v2.2s, v2.2d
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-CVT-SD-NEXT: fcvtn2 v2.4s, v3.2d
+; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: uitofp_i64:
+; CHECK-FP16-SD: // %bb.0:
+; CHECK-FP16-SD-NEXT: ucvtf v0.2d, v0.2d
+; CHECK-FP16-SD-NEXT: ucvtf v2.2d, v2.2d
+; CHECK-FP16-SD-NEXT: ucvtf v1.2d, v1.2d
+; CHECK-FP16-SD-NEXT: ucvtf v3.2d, v3.2d
+; CHECK-FP16-SD-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-FP16-SD-NEXT: fcvtn v2.2s, v2.2d
+; CHECK-FP16-SD-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-FP16-SD-NEXT: fcvtn2 v2.4s, v3.2d
+; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-FP16-SD-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: uitofp_i64:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: ucvtf v0.2d, v0.2d
+; CHECK-CVT-GI-NEXT: ucvtf v1.2d, v1.2d
+; CHECK-CVT-GI-NEXT: ucvtf v2.2d, v2.2d
+; CHECK-CVT-GI-NEXT: ucvtf v3.2d, v3.2d
+; CHECK-CVT-GI-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-CVT-GI-NEXT: fcvtn v2.2s, v2.2d
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-CVT-GI-NEXT: fcvtn2 v2.4s, v3.2d
+; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: uitofp_i64:
+; CHECK-FP16-GI: // %bb.0:
+; CHECK-FP16-GI-NEXT: ucvtf v0.2d, v0.2d
+; CHECK-FP16-GI-NEXT: ucvtf v1.2d, v1.2d
+; CHECK-FP16-GI-NEXT: ucvtf v2.2d, v2.2d
+; CHECK-FP16-GI-NEXT: ucvtf v3.2d, v3.2d
+; CHECK-FP16-GI-NEXT: fcvtn v0.2s, v0.2d
+; CHECK-FP16-GI-NEXT: fcvtn v2.2s, v2.2d
+; CHECK-FP16-GI-NEXT: fcvtn2 v0.4s, v1.2d
+; CHECK-FP16-GI-NEXT: fcvtn2 v2.4s, v3.2d
+; CHECK-FP16-GI-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-FP16-GI-NEXT: fcvtn2 v0.8h, v2.4s
+; CHECK-FP16-GI-NEXT: ret
%1 = uitofp <8 x i64> %a to <8 x half>
ret <8 x half> %1
}
@@ -436,94 +821,132 @@ define void @test_insert_at_zero(half %a, ptr %b) #0 {
}
define <8 x i8> @fptosi_i8(<8 x half> %a) #0 {
-; CHECK-CVT-LABEL: fptosi_i8:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtzs v1.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: fptosi_i8:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtzs v1.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: fptosi_i8:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: fptosi_i8:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtzs v1.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fptosi <8 x half> %a to <8 x i8>
ret <8 x i8> %1
}
define <8 x i16> @fptosi_i16(<8 x half> %a) #0 {
-; CHECK-CVT-LABEL: fptosi_i16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtzs v1.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: fptosi_i16:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtzs v1.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: fptosi_i16:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: fptosi_i16:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtzs v1.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fptosi <8 x half> %a to <8 x i16>
ret <8 x i16> %1
}
define <8 x i8> @fptoui_i8(<8 x half> %a) #0 {
-; CHECK-CVT-LABEL: fptoui_i8:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtzu v1.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: fptoui_i8:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtzu v1.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: fptoui_i8:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: fptoui_i8:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtzu v1.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fptoui <8 x half> %a to <8 x i8>
ret <8 x i8> %1
}
define <8 x i16> @fptoui_i16(<8 x half> %a) #0 {
-; CHECK-CVT-LABEL: fptoui_i16:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtzu v1.4s, v1.4s
-; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: fptoui_i16:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtzu v1.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: fptoui_i16:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: fptoui_i16:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtzu v1.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fptoui <8 x half> %a to <8 x i16>
ret <8 x i16> %1
}
define <8 x i1> @test_fcmp_une(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_une:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmeq v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-CVT-NEXT: mvn v0.16b, v0.16b
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_une:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmeq v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_une:
; CHECK-FP16: // %bb.0:
@@ -531,27 +954,41 @@ define <8 x i1> @test_fcmp_une(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-FP16-NEXT: mvn v0.16b, v0.16b
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_une:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmeq v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp une <8 x half> %a, %b
ret <8 x i1> %1
}
define <8 x i1> @test_fcmp_ueq(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ueq:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmgt v4.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s
-; CHECK-CVT-NEXT: fcmgt v3.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b
-; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h
-; CHECK-CVT-NEXT: mvn v0.16b, v0.16b
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ueq:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmgt v4.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s
+; CHECK-CVT-SD-NEXT: fcmgt v3.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b
+; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_ueq:
; CHECK-FP16: // %bb.0:
@@ -561,23 +998,41 @@ define <8 x i1> @test_fcmp_ueq(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-FP16-NEXT: mvn v0.16b, v0.16b
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ueq:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmgt v4.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s
+; CHECK-CVT-GI-NEXT: fcmgt v3.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b
+; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b
+; CHECK-CVT-GI-NEXT: mvn v1.16b, v1.16b
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp ueq <8 x half> %a, %b
ret <8 x i1> %1
}
define <8 x i1> @test_fcmp_ugt(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ugt:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmge v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-CVT-NEXT: mvn v0.16b, v0.16b
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ugt:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmge v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_ugt:
; CHECK-FP16: // %bb.0:
@@ -585,23 +1040,37 @@ define <8 x i1> @test_fcmp_ugt(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-FP16-NEXT: mvn v0.16b, v0.16b
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ugt:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmge v2.4s, v3.4s, v2.4s
+; CHECK-CVT-GI-NEXT: fcmge v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp ugt <8 x half> %a, %b
ret <8 x i1> %1
}
define <8 x i1> @test_fcmp_uge(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_uge:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-CVT-NEXT: mvn v0.16b, v0.16b
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_uge:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_uge:
; CHECK-FP16: // %bb.0:
@@ -609,23 +1078,37 @@ define <8 x i1> @test_fcmp_uge(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-FP16-NEXT: mvn v0.16b, v0.16b
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_uge:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp uge <8 x half> %a, %b
ret <8 x i1> %1
}
define <8 x i1> @test_fcmp_ult(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ult:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-CVT-NEXT: mvn v0.16b, v0.16b
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ult:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_ult:
; CHECK-FP16: // %bb.0:
@@ -633,23 +1116,37 @@ define <8 x i1> @test_fcmp_ult(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-FP16-NEXT: mvn v0.16b, v0.16b
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ult:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmge v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp ult <8 x half> %a, %b
ret <8 x i1> %1
}
define <8 x i1> @test_fcmp_ule(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ule:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmgt v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-CVT-NEXT: mvn v0.16b, v0.16b
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ule:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_ule:
; CHECK-FP16: // %bb.0:
@@ -657,27 +1154,41 @@ define <8 x i1> @test_fcmp_ule(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-FP16-NEXT: mvn v0.16b, v0.16b
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ule:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp ule <8 x half> %a, %b
ret <8 x i1> %1
}
define <8 x i1> @test_fcmp_uno(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_uno:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmge v4.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s
-; CHECK-CVT-NEXT: fcmge v3.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b
-; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h
-; CHECK-CVT-NEXT: mvn v0.16b, v0.16b
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_uno:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmge v4.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s
+; CHECK-CVT-SD-NEXT: fcmge v3.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b
+; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_uno:
; CHECK-FP16: // %bb.0:
@@ -687,26 +1198,44 @@ define <8 x i1> @test_fcmp_uno(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-FP16-NEXT: mvn v0.16b, v0.16b
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_uno:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmge v4.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s
+; CHECK-CVT-GI-NEXT: fcmge v3.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b
+; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b
+; CHECK-CVT-GI-NEXT: mvn v1.16b, v1.16b
+; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp uno <8 x half> %a, %b
ret <8 x i1> %1
}
define <8 x i1> @test_fcmp_one(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_one:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmgt v4.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s
-; CHECK-CVT-NEXT: fcmgt v3.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b
-; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_one:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmgt v4.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s
+; CHECK-CVT-SD-NEXT: fcmgt v3.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b
+; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_one:
; CHECK-FP16: // %bb.0:
@@ -715,136 +1244,212 @@ define <8 x i1> @test_fcmp_one(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-FP16-NEXT: orr v0.16b, v0.16b, v2.16b
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_one:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmgt v4.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s
+; CHECK-CVT-GI-NEXT: fcmgt v3.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b
+; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp one <8 x half> %a, %b
ret <8 x i1> %1
}
define <8 x i1> @test_fcmp_oeq(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_oeq:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmeq v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_oeq:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmeq v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_oeq:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcmeq v0.8h, v0.8h, v1.8h
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_oeq:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmeq v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp oeq <8 x half> %a, %b
ret <8 x i1> %1
}
define <8 x i1> @test_fcmp_ogt(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ogt:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmgt v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ogt:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_ogt:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcmgt v0.8h, v0.8h, v1.8h
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ogt:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp ogt <8 x half> %a, %b
ret <8 x i1> %1
}
define <8 x i1> @test_fcmp_oge(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_oge:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_oge:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_oge:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcmge v0.8h, v0.8h, v1.8h
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_oge:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmge v2.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp oge <8 x half> %a, %b
ret <8 x i1> %1
}
define <8 x i1> @test_fcmp_olt(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_olt:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_olt:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_olt:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcmgt v0.8h, v1.8h, v0.8h
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_olt:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp olt <8 x half> %a, %b
ret <8 x i1> %1
}
define <8 x i1> @test_fcmp_ole(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ole:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmge v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ole:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmge v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_ole:
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcmge v0.8h, v1.8h, v0.8h
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ole:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmge v2.4s, v3.4s, v2.4s
+; CHECK-CVT-GI-NEXT: fcmge v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp ole <8 x half> %a, %b
ret <8 x i1> %1
}
define <8 x i1> @test_fcmp_ord(<8 x half> %a, <8 x half> %b) #0 {
-; CHECK-CVT-LABEL: test_fcmp_ord:
-; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h
-; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h
-; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-CVT-NEXT: fcmge v4.4s, v3.4s, v2.4s
-; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s
-; CHECK-CVT-NEXT: fcmge v3.4s, v0.4s, v1.4s
-; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b
-; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b
-; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h
-; CHECK-CVT-NEXT: xtn v0.8b, v0.8h
-; CHECK-CVT-NEXT: ret
+; CHECK-CVT-SD-LABEL: test_fcmp_ord:
+; CHECK-CVT-SD: // %bb.0:
+; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h
+; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h
+; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: fcmge v4.4s, v3.4s, v2.4s
+; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s
+; CHECK-CVT-SD-NEXT: fcmge v3.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b
+; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b
+; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-SD-NEXT: ret
;
; CHECK-FP16-LABEL: test_fcmp_ord:
; CHECK-FP16: // %bb.0:
@@ -853,8 +1458,27 @@ define <8 x i1> @test_fcmp_ord(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-FP16-NEXT: orr v0.16b, v0.16b, v2.16b
; CHECK-FP16-NEXT: xtn v0.8b, v0.8h
; CHECK-FP16-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: test_fcmp_ord:
+; CHECK-CVT-GI: // %bb.0:
+; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h
+; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h
+; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h
+; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h
+; CHECK-CVT-GI-NEXT: fcmge v4.4s, v2.4s, v3.4s
+; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s
+; CHECK-CVT-GI-NEXT: fcmge v3.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b
+; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b
+; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-CVT-GI-NEXT: ret
%1 = fcmp ord <8 x half> %a, %b
ret <8 x i1> %1
}
attributes #0 = { nounwind }
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-CVT: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/icmp.ll b/llvm/test/CodeGen/AArch64/icmp.ll
index 18665bc..7195e2b 100644
--- a/llvm/test/CodeGen/AArch64/icmp.ll
+++ b/llvm/test/CodeGen/AArch64/icmp.ll
@@ -2093,3 +2093,54 @@ define <2 x i1> @icmp_slt_v2i64_Zero_LHS(<2 x i64> %a) {
%c = icmp slt <2 x i64> <i64 0, i64 0>, %a
ret <2 x i1> %c
}
+
+; Test the TST optimization for i8 sign-bit testing with a cross-type select.
+; This tests the pattern: icmp slt i8 %val, 0; select i1 %cmp, i32 %a, i32 %b
+; The optimization should convert the sxtb+cmp sequence into a tst of the sign bit.
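+;
+; As a sketch of the rewrite (register numbers taken from the variable-select
+; test below), the sign-bit test
+;   sxtb w8, w8
+;   cmp  w8, #0
+;   csel w0, w2, w3, mi
+; can become
+;   tst  w8, #0x80
+;   csel w0, w2, w3, ne
+; because bit 7 of the i8 value is exactly its sign bit.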
+
+define i32 @i8_signbit_tst_constants(i8 %x, i8 %y) {
+; CHECK-SD-LABEL: i8_signbit_tst_constants:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w9, w0, w1
+; CHECK-SD-NEXT: mov w8, #42 // =0x2a
+; CHECK-SD-NEXT: tst w9, #0x80
+; CHECK-SD-NEXT: mov w9, #20894 // =0x519e
+; CHECK-SD-NEXT: csel w0, w9, w8, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: i8_signbit_tst_constants:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, w1
+; CHECK-GI-NEXT: mov w9, #42 // =0x2a
+; CHECK-GI-NEXT: mov w10, #20894 // =0x519e
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: csel w0, w10, w9, mi
+; CHECK-GI-NEXT: ret
+ %add = add i8 %x, %y
+ %cmp = icmp slt i8 %add, 0
+ %sel = select i1 %cmp, i32 20894, i32 42
+ ret i32 %sel
+}
+
+; Test i8 sign-bit testing with variable select values (the problematic case).
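+; A C equivalent (illustrative only; the function name is not part of the
+; original source) is:
+;   int32_t f(int8_t x, int8_t y, int32_t a, int32_t b) {
+;     return (int8_t)(x + y) < 0 ? a : b;
+;   }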
+define i32 @i8_signbit_variables(i8 %x, i8 %y, i32 %a, i32 %b) {
+; CHECK-SD-LABEL: i8_signbit_variables:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0x80
+; CHECK-SD-NEXT: csel w0, w2, w3, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: i8_signbit_variables:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, w1
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: csel w0, w2, w3, mi
+; CHECK-GI-NEXT: ret
+ %add = add i8 %x, %y
+ %cmp = icmp slt i8 %add, 0
+ %sel = select i1 %cmp, i32 %a, i32 %b
+ ret i32 %sel
+}
diff --git a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll
index fc43c71..b6dee97e 100644
--- a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll
+++ b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -aarch64-new-sme-abi -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -aarch64-new-sme-abi -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-SDAG
; A simple EH test case that corresponds to the following C++ source:
;
@@ -87,6 +88,90 @@ define void @za_with_raii(i1 %fail) "aarch64_inout_za" personality ptr @__gxx_pe
; CHECK-NEXT: mov x0, x19
; CHECK-NEXT: msr TPIDR2_EL0, x8
; CHECK-NEXT: bl _Unwind_Resume
+;
+; CHECK-SDAG-LABEL: za_with_raii:
+; CHECK-SDAG: .Lfunc_begin0:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception0
+; CHECK-SDAG-NEXT: // %bb.0:
+; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: mov x29, sp
+; CHECK-SDAG-NEXT: sub sp, sp, #16
+; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32
+; CHECK-SDAG-NEXT: .cfi_offset w19, -8
+; CHECK-SDAG-NEXT: .cfi_offset w20, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -24
+; CHECK-SDAG-NEXT: .cfi_offset w29, -32
+; CHECK-SDAG-NEXT: rdsvl x8, #1
+; CHECK-SDAG-NEXT: mov x9, sp
+; CHECK-SDAG-NEXT: msub x9, x8, x8, x9
+; CHECK-SDAG-NEXT: mov sp, x9
+; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16]
+; CHECK-SDAG-NEXT: tbnz w0, #0, .LBB0_2
+; CHECK-SDAG-NEXT: // %bb.1: // %return_normally
+; CHECK-SDAG-NEXT: mov sp, x29
+; CHECK-SDAG-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: b shared_za_call
+; CHECK-SDAG-NEXT: .LBB0_2: // %throw_exception
+; CHECK-SDAG-NEXT: sub x20, x29, #16
+; CHECK-SDAG-NEXT: mov w0, #8 // =0x8
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20
+; CHECK-SDAG-NEXT: bl __cxa_allocate_exception
+; CHECK-SDAG-NEXT: mov x8, x0
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x9, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x9, .LBB0_4
+; CHECK-SDAG-NEXT: // %bb.3: // %throw_exception
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB0_4: // %throw_exception
+; CHECK-SDAG-NEXT: adrp x9, .L.str
+; CHECK-SDAG-NEXT: add x9, x9, :lo12:.L.str
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: str x9, [x8]
+; CHECK-SDAG-NEXT: .Ltmp0: // EH_LABEL
+; CHECK-SDAG-NEXT: adrp x1, :got:typeinfo_for_char_const_ptr
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20
+; CHECK-SDAG-NEXT: mov x0, x8
+; CHECK-SDAG-NEXT: ldr x1, [x1, :got_lo12:typeinfo_for_char_const_ptr]
+; CHECK-SDAG-NEXT: mov x2, xzr
+; CHECK-SDAG-NEXT: bl __cxa_throw
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB0_6
+; CHECK-SDAG-NEXT: // %bb.5: // %throw_exception
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB0_6: // %throw_exception
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: .Ltmp1: // EH_LABEL
+; CHECK-SDAG-NEXT: // %bb.7: // %throw_fail
+; CHECK-SDAG-NEXT: .LBB0_8: // %unwind_dtors
+; CHECK-SDAG-NEXT: .Ltmp2: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x19, x0
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB0_10
+; CHECK-SDAG-NEXT: // %bb.9: // %unwind_dtors
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB0_10: // %unwind_dtors
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: bl shared_za_call
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20
+; CHECK-SDAG-NEXT: bl _Unwind_Resume
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB0_12
+; CHECK-SDAG-NEXT: // %bb.11: // %unwind_dtors
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB0_12: // %unwind_dtors
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
br i1 %fail, label %throw_exception, label %return_normally
throw_exception:
@@ -124,7 +209,7 @@ throw_fail:
; }
; shared_za_call();
; }
-define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_personality_v0 {
+define void @try_catch() "aarch64_inout_za" personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: try_catch:
; CHECK: .Lfunc_begin1:
; CHECK-NEXT: .cfi_startproc
@@ -142,11 +227,11 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per
; CHECK-NEXT: msub x9, x8, x8, x9
; CHECK-NEXT: mov sp, x9
; CHECK-NEXT: stp x9, x8, [x29, #-16]
-; CHECK-NEXT: .Ltmp3:
+; CHECK-NEXT: .Ltmp3: // EH_LABEL
; CHECK-NEXT: sub x8, x29, #16
; CHECK-NEXT: msr TPIDR2_EL0, x8
; CHECK-NEXT: bl may_throw
-; CHECK-NEXT: .Ltmp4:
+; CHECK-NEXT: .Ltmp4: // EH_LABEL
; CHECK-NEXT: .LBB1_1: // %after_catch
; CHECK-NEXT: smstart za
; CHECK-NEXT: mrs x8, TPIDR2_EL0
@@ -160,7 +245,7 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: b shared_za_call
; CHECK-NEXT: .LBB1_4: // %catch
-; CHECK-NEXT: .Ltmp5:
+; CHECK-NEXT: .Ltmp5: // EH_LABEL
; CHECK-NEXT: bl __cxa_begin_catch
; CHECK-NEXT: smstart za
; CHECK-NEXT: mrs x8, TPIDR2_EL0
@@ -175,6 +260,78 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per
; CHECK-NEXT: msr TPIDR2_EL0, x8
; CHECK-NEXT: bl __cxa_end_catch
; CHECK-NEXT: b .LBB1_1
+;
+; CHECK-SDAG-LABEL: try_catch:
+; CHECK-SDAG: .Lfunc_begin1:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception1
+; CHECK-SDAG-NEXT: // %bb.0:
+; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-SDAG-NEXT: mov x29, sp
+; CHECK-SDAG-NEXT: sub sp, sp, #16
+; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32
+; CHECK-SDAG-NEXT: .cfi_offset w19, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -24
+; CHECK-SDAG-NEXT: .cfi_offset w29, -32
+; CHECK-SDAG-NEXT: rdsvl x8, #1
+; CHECK-SDAG-NEXT: mov x9, sp
+; CHECK-SDAG-NEXT: msub x9, x8, x8, x9
+; CHECK-SDAG-NEXT: mov sp, x9
+; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16]
+; CHECK-SDAG-NEXT: .Ltmp3: // EH_LABEL
+; CHECK-SDAG-NEXT: sub x19, x29, #16
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl may_throw
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB1_2
+; CHECK-SDAG-NEXT: // %bb.1:
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB1_2:
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: .Ltmp4: // EH_LABEL
+; CHECK-SDAG-NEXT: .LBB1_3: // %after_catch
+; CHECK-SDAG-NEXT: mov sp, x29
+; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: b shared_za_call
+; CHECK-SDAG-NEXT: .LBB1_4: // %catch
+; CHECK-SDAG-NEXT: .Ltmp5: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x1, x0
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB1_6
+; CHECK-SDAG-NEXT: // %bb.5: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB1_6: // %catch
+; CHECK-SDAG-NEXT: mov x0, x1
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl __cxa_begin_catch
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB1_8
+; CHECK-SDAG-NEXT: // %bb.7: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB1_8: // %catch
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: bl shared_za_call
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl __cxa_end_catch
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB1_10
+; CHECK-SDAG-NEXT: // %bb.9: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB1_10: // %catch
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: b .LBB1_3
invoke void @may_throw()
to label %after_catch unwind label %catch
@@ -235,16 +392,16 @@ define void @try_catch_shared_za_callee() "aarch64_new_za" personality ptr @__gx
; CHECK-NEXT: zero {za}
; CHECK-NEXT: .LBB2_2:
; CHECK-NEXT: smstart za
-; CHECK-NEXT: .Ltmp6:
+; CHECK-NEXT: .Ltmp6: // EH_LABEL
; CHECK-NEXT: bl shared_za_call
-; CHECK-NEXT: .Ltmp7:
+; CHECK-NEXT: .Ltmp7: // EH_LABEL
; CHECK-NEXT: .LBB2_3: // %exit
; CHECK-NEXT: smstop za
; CHECK-NEXT: mov sp, x29
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB2_4: // %catch
-; CHECK-NEXT: .Ltmp8:
+; CHECK-NEXT: .Ltmp8: // EH_LABEL
; CHECK-NEXT: bl __cxa_begin_catch
; CHECK-NEXT: smstart za
; CHECK-NEXT: mrs x8, TPIDR2_EL0
@@ -260,6 +417,78 @@ define void @try_catch_shared_za_callee() "aarch64_new_za" personality ptr @__gx
; CHECK-NEXT: bl __cxa_end_catch
; CHECK-NEXT: msr TPIDR2_EL0, xzr
; CHECK-NEXT: b .LBB2_3
+;
+; CHECK-SDAG-LABEL: try_catch_shared_za_callee:
+; CHECK-SDAG: .Lfunc_begin2:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception2
+; CHECK-SDAG-NEXT: // %bb.0: // %prelude
+; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-SDAG-NEXT: mov x29, sp
+; CHECK-SDAG-NEXT: sub sp, sp, #16
+; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32
+; CHECK-SDAG-NEXT: .cfi_offset w19, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -24
+; CHECK-SDAG-NEXT: .cfi_offset w29, -32
+; CHECK-SDAG-NEXT: rdsvl x8, #1
+; CHECK-SDAG-NEXT: mov x9, sp
+; CHECK-SDAG-NEXT: msub x9, x8, x8, x9
+; CHECK-SDAG-NEXT: mov sp, x9
+; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16]
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: cbz x8, .LBB2_2
+; CHECK-SDAG-NEXT: // %bb.1: // %save.za
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_save
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: .LBB2_2:
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: zero {za}
+; CHECK-SDAG-NEXT: .Ltmp6: // EH_LABEL
+; CHECK-SDAG-NEXT: bl shared_za_call
+; CHECK-SDAG-NEXT: .Ltmp7: // EH_LABEL
+; CHECK-SDAG-NEXT: .LBB2_3: // %exit
+; CHECK-SDAG-NEXT: smstop za
+; CHECK-SDAG-NEXT: mov sp, x29
+; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: ret
+; CHECK-SDAG-NEXT: .LBB2_4: // %catch
+; CHECK-SDAG-NEXT: .Ltmp8: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x1, x0
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: sub x19, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB2_6
+; CHECK-SDAG-NEXT: // %bb.5: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB2_6: // %catch
+; CHECK-SDAG-NEXT: mov x0, x1
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl __cxa_begin_catch
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB2_8
+; CHECK-SDAG-NEXT: // %bb.7: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB2_8: // %catch
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: bl noexcept_shared_za_call
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl __cxa_end_catch
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB2_10
+; CHECK-SDAG-NEXT: // %bb.9: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB2_10: // %catch
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: b .LBB2_3
invoke void @shared_za_call() #4
to label %exit unwind label %catch
catch:
@@ -275,6 +504,234 @@ exit:
ret void
}
+; A simple ZT0 exception example that corresponds to:
+;
+; struct ZT0Resource {
+; ~ZT0Resource() __arm_inout("zt0") {
+; shared_zt0_call(); // simulate cleanup in destructor
+; }
+; };
+;
+; void za_with_raii() __arm_inout("zt0") {
+; ZT0Resource r;
+; may_throw();
+; }
+;
+; This code may require reloading ZT0 in the cleanup for ~ZT0Resource().
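+;
+; A condensed sketch of the unwind path this implies (illustrative only; the
+; spill slot and exact ordering are assumptions, the checked sequences below
+; are authoritative):
+;
+;   unwind_dtors:
+;     smstart za
+;     ldr zt0, [<spill slot>]  ; reload ZT0 before running the destructor body
+;     bl shared_zt0_call
+;     str zt0, [<spill slot>]  ; re-save ZT0 around _Unwind_Resume
+;     smstop za
+;     bl _Unwind_Resume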
+;
+; FIXME: Codegen with `-aarch64-new-sme-abi` is broken for ZT0 (support is not yet implemented).
+define void @try_catch_shared_zt0_callee() "aarch64_inout_zt0" personality ptr @__gxx_personality_v0 {
+; CHECK-LABEL: try_catch_shared_zt0_callee:
+; CHECK: .Lfunc_begin3:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-NEXT: .cfi_lsda 28, .Lexception3
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: .cfi_def_cfa w29, 32
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -24
+; CHECK-NEXT: .cfi_offset w29, -32
+; CHECK-NEXT: rdsvl x8, #1
+; CHECK-NEXT: mov x9, sp
+; CHECK-NEXT: msub x9, x8, x8, x9
+; CHECK-NEXT: mov sp, x9
+; CHECK-NEXT: stp x9, x8, [x29, #-80]
+; CHECK-NEXT: .Ltmp9: // EH_LABEL
+; CHECK-NEXT: sub x19, x29, #64
+; CHECK-NEXT: str zt0, [x19]
+; CHECK-NEXT: smstop za
+; CHECK-NEXT: bl may_throw
+; CHECK-NEXT: smstart za
+; CHECK-NEXT: ldr zt0, [x19]
+; CHECK-NEXT: .Ltmp10: // EH_LABEL
+; CHECK-NEXT: // %bb.1: // %return_normally
+; CHECK-NEXT: mov sp, x29
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB3_2: // %unwind_dtors
+; CHECK-NEXT: .Ltmp11: // EH_LABEL
+; CHECK-NEXT: sub x20, x29, #64
+; CHECK-NEXT: mov x19, x0
+; CHECK-NEXT: smstart za
+; CHECK-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-NEXT: sub x0, x29, #80
+; CHECK-NEXT: cbnz x8, .LBB3_4
+; CHECK-NEXT: // %bb.3: // %unwind_dtors
+; CHECK-NEXT: bl __arm_tpidr2_restore
+; CHECK-NEXT: .LBB3_4: // %unwind_dtors
+; CHECK-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-NEXT: bl shared_zt0_call
+; CHECK-NEXT: str zt0, [x20]
+; CHECK-NEXT: smstop za
+; CHECK-NEXT: mov x0, x19
+; CHECK-NEXT: bl _Unwind_Resume
+; CHECK-NEXT: smstart za
+; CHECK-NEXT: ldr zt0, [x20]
+;
+; CHECK-SDAG-LABEL: try_catch_shared_zt0_callee:
+; CHECK-SDAG: .Lfunc_begin3:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception3
+; CHECK-SDAG-NEXT: // %bb.0:
+; CHECK-SDAG-NEXT: sub sp, sp, #96
+; CHECK-SDAG-NEXT: str x30, [sp, #64] // 8-byte Folded Spill
+; CHECK-SDAG-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: .cfi_def_cfa_offset 96
+; CHECK-SDAG-NEXT: .cfi_offset w19, -8
+; CHECK-SDAG-NEXT: .cfi_offset w20, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -32
+; CHECK-SDAG-NEXT: .Ltmp9: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x19, sp
+; CHECK-SDAG-NEXT: str zt0, [x19]
+; CHECK-SDAG-NEXT: smstop za
+; CHECK-SDAG-NEXT: bl may_throw
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: ldr zt0, [x19]
+; CHECK-SDAG-NEXT: .Ltmp10: // EH_LABEL
+; CHECK-SDAG-NEXT: // %bb.1: // %return_normally
+; CHECK-SDAG-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload
+; CHECK-SDAG-NEXT: add sp, sp, #96
+; CHECK-SDAG-NEXT: ret
+; CHECK-SDAG-NEXT: .LBB3_2: // %unwind_dtors
+; CHECK-SDAG-NEXT: .Ltmp11: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x20, sp
+; CHECK-SDAG-NEXT: mov x19, x0
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: ldr zt0, [x20]
+; CHECK-SDAG-NEXT: bl shared_zt0_call
+; CHECK-SDAG-NEXT: str zt0, [x20]
+; CHECK-SDAG-NEXT: smstop za
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl _Unwind_Resume
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: ldr zt0, [x20]
+ invoke void @may_throw()
+ to label %return_normally unwind label %unwind_dtors
+
+unwind_dtors:
+ %5 = landingpad { ptr, i32 }
+ cleanup
+ tail call void @shared_zt0_call()
+ resume { ptr, i32 } %5
+
+return_normally:
+ ret void
+}
+
+; This example corresponds to:
+;
+; __arm_agnostic("sme_za_state") void try_catch_agnostic_za()
+; {
+; try {
+; may_throw();
+; } catch(...) {
+; }
+; }
+;
+; In this example we must execute __arm_sme_restore once we enter the catch block
+; (before executing __arm_sme_save again, which would invalidate the prior save).
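+;
+; A condensed sketch of the required catch-entry ordering (illustrative only;
+; the checked sequences below are authoritative):
+;
+;   catch:
+;     bl __arm_sme_restore ; reinstate the state saved before may_throw()
+;     bl __arm_sme_save    ; only now is it safe to start a new save
+;     bl __cxa_begin_catch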
+define void @try_catch_agnostic_za() "aarch64_za_state_agnostic" personality ptr @__gxx_personality_v0 {
+; CHECK-LABEL: try_catch_agnostic_za:
+; CHECK: .Lfunc_begin4:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-NEXT: .cfi_lsda 28, .Lexception4
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: .cfi_def_cfa w29, 32
+; CHECK-NEXT: .cfi_offset w19, -16
+; CHECK-NEXT: .cfi_offset w30, -24
+; CHECK-NEXT: .cfi_offset w29, -32
+; CHECK-NEXT: bl __arm_sme_state_size
+; CHECK-NEXT: sub sp, sp, x0
+; CHECK-NEXT: mov x19, sp
+; CHECK-NEXT: .Ltmp12: // EH_LABEL
+; CHECK-NEXT: mov x0, x19
+; CHECK-NEXT: bl __arm_sme_save
+; CHECK-NEXT: bl may_throw
+; CHECK-NEXT: .Ltmp13: // EH_LABEL
+; CHECK-NEXT: .LBB4_1: // %exit
+; CHECK-NEXT: mov x0, x19
+; CHECK-NEXT: bl __arm_sme_restore
+; CHECK-NEXT: mov sp, x29
+; CHECK-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB4_2: // %catch
+; CHECK-NEXT: .Ltmp14: // EH_LABEL
+; CHECK-NEXT: bl __cxa_begin_catch
+; CHECK-NEXT: bl __cxa_end_catch
+; CHECK-NEXT: b .LBB4_1
+;
+; CHECK-SDAG-LABEL: try_catch_agnostic_za:
+; CHECK-SDAG: .Lfunc_begin4:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception4
+; CHECK-SDAG-NEXT: // %bb.0:
+; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-SDAG-NEXT: mov x29, sp
+; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32
+; CHECK-SDAG-NEXT: .cfi_offset w19, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -24
+; CHECK-SDAG-NEXT: .cfi_offset w29, -32
+; CHECK-SDAG-NEXT: bl __arm_sme_state_size
+; CHECK-SDAG-NEXT: sub sp, sp, x0
+; CHECK-SDAG-NEXT: mov x19, sp
+; CHECK-SDAG-NEXT: .Ltmp12: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_save
+; CHECK-SDAG-NEXT: bl may_throw
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: .Ltmp13: // EH_LABEL
+; CHECK-SDAG-NEXT: .LBB4_1: // %exit
+; CHECK-SDAG-NEXT: mov sp, x29
+; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: ret
+; CHECK-SDAG-NEXT: .LBB4_2: // %catch
+; CHECK-SDAG-NEXT: .Ltmp14: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x1, x0
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_save
+; CHECK-SDAG-NEXT: mov x0, x1
+; CHECK-SDAG-NEXT: bl __cxa_begin_catch
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_save
+; CHECK-SDAG-NEXT: bl __cxa_end_catch
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: b .LBB4_1
+ invoke void @may_throw()
+ to label %exit unwind label %catch
+catch:
+ %eh_info = landingpad { ptr, i32 }
+ catch ptr null
+ %exception_ptr = extractvalue { ptr, i32 } %eh_info, 0
+ tail call ptr @__cxa_begin_catch(ptr %exception_ptr)
+ tail call void @__cxa_end_catch()
+ br label %exit
+
+exit:
+ ret void
+}
+
declare ptr @__cxa_allocate_exception(i64)
declare void @__cxa_throw(ptr, ptr, ptr)
declare ptr @__cxa_begin_catch(ptr)
@@ -284,3 +741,4 @@ declare i32 @__gxx_personality_v0(...)
declare void @may_throw()
declare void @shared_za_call() "aarch64_inout_za"
declare void @noexcept_shared_za_call() "aarch64_inout_za"
+declare void @shared_zt0_call() "aarch64_inout_zt0"
diff --git a/llvm/test/CodeGen/AArch64/strict-fp-opt.ll b/llvm/test/CodeGen/AArch64/strict-fp-opt.ll
index bb7cd22..c433291 100644
--- a/llvm/test/CodeGen/AArch64/strict-fp-opt.ll
+++ b/llvm/test/CodeGen/AArch64/strict-fp-opt.ll
@@ -1,31 +1,40 @@
-; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s
-; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 %s -o - | FileCheck %s
-
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+; CHECK-GI: warning: Instruction selection used fallback path for unused_div_fpexcept_strict
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for unused_div_round_dynamic
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for add_twice_fpexcept_strict
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for add_twice_round_dynamic
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for set_rounding
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for set_rounding_fpexcept_strict
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for set_rounding_round_dynamic
; Div whose result is unused should be removed unless we have strict exceptions
-; CHECK-LABEL: unused_div:
-; CHECK-NOT: fdiv
-; CHECK: ret
define void @unused_div(float %x, float %y) {
+; CHECK-LABEL: unused_div:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ret
entry:
%add = fdiv float %x, %y
ret void
}
-; CHECK-LABEL: unused_div_fpexcept_strict:
-; CHECK: fdiv s0, s0, s1
-; CHECK-NEXT: ret
define void @unused_div_fpexcept_strict(float %x, float %y) #0 {
+; CHECK-LABEL: unused_div_fpexcept_strict:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fdiv s0, s0, s1
+; CHECK-NEXT: ret
entry:
%add = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret void
}
-; CHECK-LABEL: unused_div_round_dynamic:
-; CHECK-NOT: fdiv
-; CHECK: ret
define void @unused_div_round_dynamic(float %x, float %y) #0 {
+; CHECK-LABEL: unused_div_round_dynamic:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ret
entry:
%add = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
ret void
@@ -33,14 +42,14 @@ entry:
; Machine CSE should eliminate the second add unless we have strict exceptions
-
-; CHECK-LABEL: add_twice:
-; CHECK: fadd [[ADD:s[0-9]+]], s0, s1
-; CHECK-NEXT: cmp w0, #0
-; CHECK-NEXT: fmul [[MUL:s[0-9]+]], [[ADD]], [[ADD]]
-; CHECK-NEXT: fcsel s0, [[ADD]], [[MUL]], eq
-; CHECK-NEXT: ret
define float @add_twice(float %x, float %y, i32 %n) {
+; CHECK-LABEL: add_twice:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fadd s0, s0, s1
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: fmul s1, s0, s0
+; CHECK-NEXT: fcsel s0, s0, s1, eq
+; CHECK-NEXT: ret
entry:
%add = fadd float %x, %y
%tobool.not = icmp eq i32 %n, 0
@@ -56,15 +65,17 @@ if.end:
ret float %a.0
}
-; CHECK-LABEL: add_twice_fpexcept_strict:
-; CHECK: fmov [[X:s[0-9]+]], s0
-; CHECK-NEXT: fadd s0, s0, s1
-; CHECK-NEXT: cbz w0, [[LABEL:.LBB[0-9_]+]]
-; CHECK: fadd [[ADD:s[0-9]+]], [[X]], s1
-; CHECK-NEXT: fmul s0, s0, [[ADD]]
-; CHECK: [[LABEL]]:
-; CHECK-NEXT: ret
define float @add_twice_fpexcept_strict(float %x, float %y, i32 %n) #0 {
+; CHECK-LABEL: add_twice_fpexcept_strict:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmov s2, s0
+; CHECK-NEXT: fadd s0, s0, s1
+; CHECK-NEXT: cbz w0, .LBB4_2
+; CHECK-NEXT: // %bb.1: // %if.then
+; CHECK-NEXT: fadd s1, s2, s1
+; CHECK-NEXT: fmul s0, s0, s1
+; CHECK-NEXT: .LBB4_2: // %if.end
+; CHECK-NEXT: ret
entry:
%add = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
%tobool.not = icmp eq i32 %n, 0
@@ -80,14 +91,15 @@ if.end:
ret float %a.0
}
-; CHECK-LABEL: add_twice_round_dynamic:
-; CHECK: fadd s0, s0, s1
-; CHECK-NEXT: cbz w0, [[LABEL:.LBB[0-9_]+]]
-; CHECK-NOT: fadd
-; CHECK: fmul s0, s0, s0
-; CHECK: [[LABEL]]:
-; CHECK-NEXT: ret
define float @add_twice_round_dynamic(float %x, float %y, i32 %n) #0 {
+; CHECK-LABEL: add_twice_round_dynamic:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fadd s0, s0, s1
+; CHECK-NEXT: cbz w0, .LBB5_2
+; CHECK-NEXT: // %bb.1: // %if.then
+; CHECK-NEXT: fmul s0, s0, s0
+; CHECK-NEXT: .LBB5_2: // %if.end
+; CHECK-NEXT: ret
entry:
%add = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%tobool.not = icmp eq i32 %n, 0
@@ -108,17 +120,18 @@ if.end:
; dynamic (as they may give different results) or when we have strict exceptions
; (the llvm.set.rounding is irrelevant, but both could trap).
-; CHECK-LABEL: set_rounding:
-; CHECK-DAG: fadd [[SREG:s[0-9]+]], s0, s1
-; CHECK-DAG: mrs [[XREG1:x[0-9]+]], FPCR
-; CHECK-DAG: orr [[XREG2:x[0-9]+]], [[XREG1]], #0xc00000
-; CHECK: msr FPCR, [[XREG2]]
-; CHECK-NEXT: mrs [[XREG3:x[0-9]+]], FPCR
-; CHECK-NEXT: and [[XREG4:x[0-9]+]], [[XREG3]], #0xffffffffff3fffff
-; CHECK-NEXT: msr FPCR, [[XREG4]]
-; CHECK-NEXT: fsub s0, [[SREG]], [[SREG]]
-; CHECK-NEXT: ret
define float @set_rounding(float %x, float %y) {
+; CHECK-LABEL: set_rounding:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mrs x8, FPCR
+; CHECK-NEXT: fadd s0, s0, s1
+; CHECK-NEXT: orr x8, x8, #0xc00000
+; CHECK-NEXT: msr FPCR, x8
+; CHECK-NEXT: mrs x8, FPCR
+; CHECK-NEXT: and x8, x8, #0xffffffffff3fffff
+; CHECK-NEXT: msr FPCR, x8
+; CHECK-NEXT: fsub s0, s0, s0
+; CHECK-NEXT: ret
entry:
%add1 = fadd float %x, %y
call void @llvm.set.rounding(i32 0)
@@ -128,18 +141,19 @@ entry:
ret float %sub
}
-; CHECK-LABEL: set_rounding_fpexcept_strict:
-; CHECK-DAG: fadd [[SREG1:s[0-9]+]], s0, s1
-; CHECK-DAG: mrs [[XREG1:x[0-9]+]], FPCR
-; CHECK-DAG: orr [[XREG2:x[0-9]+]], [[XREG1]], #0xc00000
-; CHECK: msr FPCR, [[XREG2]]
-; CHECK-DAG: fadd [[SREG2:s[0-9]+]], s0, s1
-; CHECK-DAG: mrs [[XREG3:x[0-9]+]], FPCR
-; CHECK-DAG: and [[XREG4:x[0-9]+]], [[XREG3]], #0xffffffffff3fffff
-; CHECK-NEXT: msr FPCR, [[XREG4]]
-; CHECK-NEXT: fsub s0, [[SREG1]], [[SREG2]]
-; CHECK-NEXT: ret
define float @set_rounding_fpexcept_strict(float %x, float %y) #0 {
+; CHECK-LABEL: set_rounding_fpexcept_strict:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fadd s2, s0, s1
+; CHECK-NEXT: mrs x8, FPCR
+; CHECK-NEXT: orr x8, x8, #0xc00000
+; CHECK-NEXT: msr FPCR, x8
+; CHECK-NEXT: fadd s0, s0, s1
+; CHECK-NEXT: mrs x8, FPCR
+; CHECK-NEXT: and x8, x8, #0xffffffffff3fffff
+; CHECK-NEXT: msr FPCR, x8
+; CHECK-NEXT: fsub s0, s2, s0
+; CHECK-NEXT: ret
entry:
%add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
call void @llvm.set.rounding(i32 0) #0
@@ -149,18 +163,19 @@ entry:
ret float %sub
}
-; CHECK-LABEL: set_rounding_round_dynamic:
-; CHECK-DAG: fadd [[SREG1:s[0-9]+]], s0, s1
-; CHECK-DAG: mrs [[XREG1:x[0-9]+]], FPCR
-; CHECK-DAG: orr [[XREG2:x[0-9]+]], [[XREG1]], #0xc00000
-; CHECK: msr FPCR, [[XREG2]]
-; CHECK-DAG: fadd [[SREG2:s[0-9]+]], s0, s1
-; CHECK-DAG: mrs [[XREG3:x[0-9]+]], FPCR
-; CHECK-DAG: and [[XREG4:x[0-9]+]], [[XREG3]], #0xffffffffff3fffff
-; CHECK-NEXT: msr FPCR, [[XREG4]]
-; CHECK-NEXT: fsub s0, [[SREG1]], [[SREG2]]
-; CHECK-NEXT: ret
define float @set_rounding_round_dynamic(float %x, float %y) #0 {
+; CHECK-LABEL: set_rounding_round_dynamic:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mrs x8, FPCR
+; CHECK-NEXT: fadd s2, s0, s1
+; CHECK-NEXT: orr x8, x8, #0xc00000
+; CHECK-NEXT: msr FPCR, x8
+; CHECK-NEXT: fadd s0, s0, s1
+; CHECK-NEXT: mrs x8, FPCR
+; CHECK-NEXT: and x8, x8, #0xffffffffff3fffff
+; CHECK-NEXT: msr FPCR, x8
+; CHECK-NEXT: fsub s0, s2, s0
+; CHECK-NEXT: ret
entry:
%add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
call void @llvm.set.rounding(i32 0) #0
@@ -178,3 +193,6 @@ declare i32 @llvm.get.rounding()
declare void @llvm.set.rounding(i32)
attributes #0 = { strictfp }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-GI: {{.*}}
+; CHECK-SD: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
index 62d41fc..19e1aa5 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
@@ -26,9 +26,9 @@ define i32 @reduce_and_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: reduce_and_v1i8:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: smov w8, v0.b[0]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w0, w0, w1, mi
+; CHECK-NEXT: umov w8, v0.b[0]
+; CHECK-NEXT: tst w8, #0x80
+; CHECK-NEXT: csel w0, w0, w1, ne
; CHECK-NEXT: ret
%x = icmp slt <1 x i8> %a0, zeroinitializer
%y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x)
@@ -120,9 +120,9 @@ define i32 @reduce_and_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: reduce_and_v1i16:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: smov w8, v0.h[0]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w0, w0, w1, mi
+; CHECK-NEXT: umov w8, v0.h[0]
+; CHECK-NEXT: tst w8, #0x8000
+; CHECK-NEXT: csel w0, w0, w1, ne
; CHECK-NEXT: ret
%x = icmp slt <1 x i16> %a0, zeroinitializer
%y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x)
@@ -305,9 +305,9 @@ define i32 @reduce_or_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: reduce_or_v1i8:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: smov w8, v0.b[0]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w0, w0, w1, mi
+; CHECK-NEXT: umov w8, v0.b[0]
+; CHECK-NEXT: tst w8, #0x80
+; CHECK-NEXT: csel w0, w0, w1, ne
; CHECK-NEXT: ret
%x = icmp slt <1 x i8> %a0, zeroinitializer
%y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x)
@@ -399,9 +399,9 @@ define i32 @reduce_or_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: reduce_or_v1i16:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: smov w8, v0.h[0]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w0, w0, w1, mi
+; CHECK-NEXT: umov w8, v0.h[0]
+; CHECK-NEXT: tst w8, #0x8000
+; CHECK-NEXT: csel w0, w0, w1, ne
; CHECK-NEXT: ret
%x = icmp slt <1 x i16> %a0, zeroinitializer
%y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x)
@@ -584,9 +584,9 @@ define i32 @reduce_xor_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: reduce_xor_v1i8:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: smov w8, v0.b[0]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w0, w0, w1, mi
+; CHECK-NEXT: umov w8, v0.b[0]
+; CHECK-NEXT: tst w8, #0x80
+; CHECK-NEXT: csel w0, w0, w1, ne
; CHECK-NEXT: ret
%x = icmp slt <1 x i8> %a0, zeroinitializer
%y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x)
@@ -679,9 +679,9 @@ define i32 @reduce_xor_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: reduce_xor_v1i16:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: smov w8, v0.h[0]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w0, w0, w1, mi
+; CHECK-NEXT: umov w8, v0.h[0]
+; CHECK-NEXT: tst w8, #0x8000
+; CHECK-NEXT: csel w0, w0, w1, ne
; CHECK-NEXT: ret
%x = icmp slt <1 x i16> %a0, zeroinitializer
%y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x)
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
index 12e9888..aaea4f7 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
@@ -5015,7 +5015,7 @@ define amdgpu_kernel void @v_fneg_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out
%a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
%out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i64 %tid.ext
%a = load volatile double, ptr addrspace(1) %a.gep
- %fneg.a = fsub double -0.000000e+00, %a
+ %fneg.a = fsub nsz double -0.000000e+00, %a
%fpround = fptrunc double %fneg.a to float
%fneg = fneg float %fpround
store float %fneg, ptr addrspace(1) %out.gep
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
index c4ca79d..3de6df2 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
@@ -4441,25 +4441,40 @@ define float @v_fneg_fabs_select_infloop_regression(float %arg, i1 %arg1) {
ret float %i3
}
-define float @v_fmul_0_fsub_0_infloop_regression(float %arg) {
-; GCN-SAFE-LABEL: v_fmul_0_fsub_0_infloop_regression:
+define float @v_fmul_0_fsub_0_safe_infloop_regression(float %arg) {
+; GCN-SAFE-LABEL: v_fmul_0_fsub_0_safe_infloop_regression:
; GCN-SAFE: ; %bb.0: ; %bb
; GCN-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-SAFE-NEXT: v_mul_f32_e32 v0, 0, v0
; GCN-SAFE-NEXT: v_sub_f32_e32 v0, 0, v0
; GCN-SAFE-NEXT: s_setpc_b64 s[30:31]
;
-; GCN-NSZ-LABEL: v_fmul_0_fsub_0_infloop_regression:
-; GCN-NSZ: ; %bb.0: ; %bb
-; GCN-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NSZ-NEXT: v_mul_f32_e32 v0, 0x80000000, v0
-; GCN-NSZ-NEXT: s_setpc_b64 s[30:31]
+; SI-NSZ-LABEL: v_fmul_0_fsub_0_safe_infloop_regression:
+; SI-NSZ: ; %bb.0: ; %bb
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NSZ-NEXT: s_brev_b32 s4, 1
+; SI-NSZ-NEXT: v_fma_f32 v0, v0, s4, 0
+; SI-NSZ-NEXT: s_setpc_b64 s[30:31]
+; FIXME: utils/update_llc_test_checks.py generates redundant VI labels;
+; remove them manually, as they will cause the test to fail.
bb:
%i = fmul float %arg, 0.0
%i1 = fsub float 0.0, %i
ret float %i1
}
+define float @v_fmul_0_fsub_0_nsz_infloop_regression(float %arg) {
+; GCN-LABEL: v_fmul_0_fsub_0_nsz_infloop_regression:
+; GCN: ; %bb.0: ; %bb
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v0, 0x80000000, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+bb:
+ %i = fmul float %arg, 0.0
+ %i1 = fsub nsz float 0.0, %i
+ ret float %i1
+}
+
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare float @llvm.fma.f32(float, float, float) #1
declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-dead-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-dead-intrinsics.ll
new file mode 100644
index 0000000..d6198f5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-dead-intrinsics.ll
@@ -0,0 +1,9 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-lower-buffer-fat-pointers < %s | FileCheck %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=amdgpu-lower-buffer-fat-pointers < %s | FileCheck %s
+
+; CHECK: @arbitrary
+declare amdgpu_kernel void @arbitrary(ptr addrspace(1))
+
+; COM: This used to cause verifier errors when "lowered"
+declare <4 x i8> @llvm.masked.load.v4i8.p7(ptr addrspace(7) captures(none), i32 immarg, <4 x i1>, <4 x i8>)
+; CHECK-NOT: llvm.masked.load
diff --git a/llvm/test/CodeGen/AMDGPU/lro-phi-samebb-nonlookthrough-store.ll b/llvm/test/CodeGen/AMDGPU/lro-phi-samebb-nonlookthrough-store.ll
new file mode 100644
index 0000000..b508f73
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/lro-phi-samebb-nonlookthrough-store.ll
@@ -0,0 +1,46 @@
+; RUN: opt -S -passes=amdgpu-late-codegenprepare \
+; RUN: -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a %s | FileCheck %s
+
+; Goal: Given a loop-header PHI of an illegal vector type and a same-BB
+; non-lookthrough user (the vector add) in the header, LRO should still
+; coerce the PHI to i32 because a profitable sink (the store) exists in
+; another block.
+
+define amdgpu_kernel void @phi_samebb_nonlookthrough_store(
+ ptr addrspace(1) %out, <4 x i8> %v, i1 %exit) {
+; CHECK-LABEL: @phi_samebb_nonlookthrough_store(
+entry:
+ br label %loop
+
+loop: ; preds = %entry, %loop
+ ; Loop-carried PHI in illegal vector type.
+ %acc = phi <4 x i8> [ zeroinitializer, %entry ], [ %acc.next, %loop ]
+
+ ; Same-BB non-lookthrough use in header.
+ %acc.next = add <4 x i8> %acc, %v
+
+ ; Make it a real loop: either iterate or exit to the sink block.
+ br i1 %exit, label %store, label %loop
+
+store: ; preds = %loop
+ ; The across-BB sink: storing the PHI coerced to i32.
+ %acc.bc = bitcast <4 x i8> %acc to i32
+ store i32 %acc.bc, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+; After AMDGPULateCodeGenPrepare we expect:
+; - the PHI to be coerced to i32
+; - a bitcast to materialize in the header for the add
+; This proves that the same-BB non-lookthrough user (the add) is not pruned
+; when its def is a PHI.
+
+; CHECK: loop:
+; CHECK: %[[ACC_TC:[^ ]+]] = phi i32
+; CHECK: %[[ACC_TC_BC:[^ ]+]] = bitcast i32 %[[ACC_TC]] to <4 x i8>
+; CHECK: %[[ACC_NEXT:[^ ]+]] = add <4 x i8> %[[ACC_TC_BC]], %v
+; CHECK: br i1 %exit, label %store, label %loop
+; CHECK: store:
+; CHECK: %[[ACC_TC_BC2:[^ ]+]] = bitcast i32 %[[ACC_TC]] to <4 x i8>
+; CHECK: %[[ST_I32:[^ ]+]] = bitcast <4 x i8> %[[ACC_TC_BC2]] to i32
+; CHECK: store i32 %[[ST_I32]],
+
diff --git a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll
index 92d3277..bb22144 100644
--- a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll
@@ -4148,28 +4148,28 @@ define <2 x half> @mul_select_negk_negfabs_v2f16(<2 x i32> %c, <2 x half> %x, <2
; --------------------------------------------------------------------------------
define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, <2 x half> %y) {
-; CI-SAFE-LABEL: select_fneg_posk_src_add_v2f16:
-; CI-SAFE: ; %bb.0:
-; CI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; CI-SAFE-NEXT: v_add_f32_e32 v3, 4.0, v3
-; CI-SAFE-NEXT: v_add_f32_e32 v2, 4.0, v2
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; CI-SAFE-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; CI-SAFE-NEXT: v_or_b32_e32 v2, v2, v3
-; CI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80008000, v2
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v2
-; CI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; CI-SAFE-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc
-; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; CI-SAFE-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc
-; CI-SAFE-NEXT: s_setpc_b64 s[30:31]
+; CI-LABEL: select_fneg_posk_src_add_v2f16:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_add_f32_e32 v3, 4.0, v3
+; CI-NEXT: v_add_f32_e32 v2, 4.0, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; CI-NEXT: v_or_b32_e32 v2, v2, v3
+; CI-NEXT: v_xor_b32_e32 v2, 0x80008000, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v2
+; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc
+; CI-NEXT: s_setpc_b64 s[30:31]
;
; VI-SAFE-LABEL: select_fneg_posk_src_add_v2f16:
; VI-SAFE: ; %bb.0:
@@ -4229,21 +4229,6 @@ define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, <
; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
-; CI-NSZ-LABEL: select_fneg_posk_src_add_v2f16:
-; CI-NSZ: ; %bb.0:
-; CI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v2, v2
-; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v2, v2
-; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NSZ-NEXT: v_sub_f32_e32 v2, -4.0, v2
-; CI-NSZ-NEXT: v_sub_f32_e32 v3, -4.0, v3
-; CI-NSZ-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc
-; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; CI-NSZ-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc
-; CI-NSZ-NEXT: s_setpc_b64 s[30:31]
-;
; VI-NSZ-LABEL: select_fneg_posk_src_add_v2f16:
; VI-NSZ: ; %bb.0:
; VI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -4302,6 +4287,105 @@ define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, <
ret <2 x half> %select
}
+define <2 x half> @select_fneg_posk_src_add_v2f16_nsz(<2 x i32> %c, <2 x half> %x, <2 x half> %y) {
+; CI-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; CI-NEXT: v_sub_f32_e32 v2, -4.0, v2
+; CI-NEXT: v_sub_f32_e32 v3, -4.0, v3
+; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; VI-NEXT: v_mov_b32_e32 v1, 0xc400
+; VI-NEXT: v_sub_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_sub_f16_e32 v2, -4.0, v2
+; VI-NEXT: v_mov_b32_e32 v3, 0x4000
+; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[4:5]
+; VI-NEXT: v_cndmask_b32_sdwa v1, v3, v1, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX9-NEXT: v_pk_add_f16 v1, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x4000
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v1, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SAFE-TRUE16-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; GFX11-SAFE-TRUE16: ; %bb.0:
+; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-SAFE-TRUE16-NEXT: v_pk_add_f16 v0, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0]
+; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1
+; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo
+; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0
+; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SAFE-FAKE16-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; GFX11-SAFE-FAKE16: ; %bb.0:
+; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SAFE-FAKE16-NEXT: v_pk_add_f16 v2, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0]
+; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo
+; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo
+; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-NSZ-TRUE16-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; GFX11-NSZ-TRUE16: ; %bb.0:
+; GFX11-NSZ-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NSZ-TRUE16-NEXT: v_pk_add_f16 v0, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0]
+; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1
+; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo
+; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0
+; GFX11-NSZ-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-NSZ-FAKE16-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; GFX11-NSZ-FAKE16: ; %bb.0:
+; GFX11-NSZ-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NSZ-FAKE16-NEXT: v_pk_add_f16 v2, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0]
+; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NSZ-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo
+; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo
+; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NSZ-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX11-NSZ-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %cmp = icmp eq <2 x i32> %c, zeroinitializer
+ %add = fadd nsz <2 x half> %x, <half 4.0, half 4.0>
+ %fneg = fneg <2 x half> %add
+ %select = select <2 x i1> %cmp, <2 x half> %fneg, <2 x half> <half 2.0, half 2.0>
+ ret <2 x half> %select
+}
+
define <2 x half> @select_fneg_posk_src_sub_v2f16(<2 x i32> %c, <2 x half> %x) {
; CI-SAFE-LABEL: select_fneg_posk_src_sub_v2f16:
; CI-SAFE: ; %bb.0:
@@ -4704,34 +4788,34 @@ define <2 x half> @select_fneg_posk_src_fma_v2f16(<2 x i32> %c, <2 x half> %x, <
}
define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, <2 x half> %z) {
-; CI-SAFE-LABEL: select_fneg_posk_src_fmad_v2f16:
-; CI-SAFE: ; %bb.0:
-; CI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-SAFE-NEXT: v_mul_f32_e32 v3, 4.0, v3
-; CI-SAFE-NEXT: v_add_f32_e32 v3, v3, v5
-; CI-SAFE-NEXT: v_mul_f32_e32 v2, 4.0, v2
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-SAFE-NEXT: v_add_f32_e32 v2, v2, v4
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; CI-SAFE-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; CI-SAFE-NEXT: v_or_b32_e32 v2, v2, v3
-; CI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80008000, v2
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v2
-; CI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; CI-SAFE-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc
-; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; CI-SAFE-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc
-; CI-SAFE-NEXT: s_setpc_b64 s[30:31]
+; CI-LABEL: select_fneg_posk_src_fmad_v2f16:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; CI-NEXT: v_mul_f32_e32 v3, 4.0, v3
+; CI-NEXT: v_add_f32_e32 v3, v3, v5
+; CI-NEXT: v_mul_f32_e32 v2, 4.0, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_add_f32_e32 v2, v2, v4
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; CI-NEXT: v_or_b32_e32 v2, v2, v3
+; CI-NEXT: v_xor_b32_e32 v2, 0x80008000, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v2
+; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc
+; CI-NEXT: s_setpc_b64 s[30:31]
;
; VI-SAFE-LABEL: select_fneg_posk_src_fmad_v2f16:
; VI-SAFE: ; %bb.0:
@@ -4793,27 +4877,6 @@ define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x,
; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
-; CI-NSZ-LABEL: select_fneg_posk_src_fmad_v2f16:
-; CI-NSZ: ; %bb.0:
-; CI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v2, v2
-; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v5, v5
-; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v2, v2
-; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v5, v5
-; CI-NSZ-NEXT: v_mul_f32_e32 v2, -4.0, v2
-; CI-NSZ-NEXT: v_mul_f32_e32 v3, -4.0, v3
-; CI-NSZ-NEXT: v_sub_f32_e32 v2, v2, v4
-; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; CI-NSZ-NEXT: v_sub_f32_e32 v3, v3, v5
-; CI-NSZ-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc
-; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; CI-NSZ-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc
-; CI-NSZ-NEXT: s_setpc_b64 s[30:31]
-;
; VI-NSZ-LABEL: select_fneg_posk_src_fmad_v2f16:
; VI-NSZ: ; %bb.0:
; VI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -4873,6 +4936,112 @@ define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x,
ret <2 x half> %select
}
+define <2 x half> @select_fneg_posk_src_fmad_v2f16_nsz(<2 x i32> %c, <2 x half> %x, <2 x half> %z) {
+; CI-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_mul_f32_e32 v2, -4.0, v2
+; CI-NEXT: v_mul_f32_e32 v3, -4.0, v3
+; CI-NEXT: v_sub_f32_e32 v2, v2, v4
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; CI-NEXT: v_sub_f32_e32 v3, v3, v5
+; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; VI-NEXT: v_fma_f16 v1, v4, -4.0, -v1
+; VI-NEXT: v_fma_f16 v2, v2, -4.0, -v3
+; VI-NEXT: v_mov_b32_e32 v3, 0x4000
+; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[4:5]
+; VI-NEXT: v_cndmask_b32_sdwa v1, v3, v1, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX9-NEXT: v_pk_fma_f16 v1, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x4000
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v1, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SAFE-TRUE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; GFX11-SAFE-TRUE16: ; %bb.0:
+; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-SAFE-TRUE16-NEXT: v_pk_fma_f16 v0, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1
+; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo
+; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0
+; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SAFE-FAKE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; GFX11-SAFE-FAKE16: ; %bb.0:
+; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SAFE-FAKE16-NEXT: v_pk_fma_f16 v2, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo
+; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo
+; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-NSZ-TRUE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; GFX11-NSZ-TRUE16: ; %bb.0:
+; GFX11-NSZ-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NSZ-TRUE16-NEXT: v_pk_fma_f16 v0, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1
+; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo
+; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0
+; GFX11-NSZ-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-NSZ-FAKE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; GFX11-NSZ-FAKE16: ; %bb.0:
+; GFX11-NSZ-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NSZ-FAKE16-NEXT: v_pk_fma_f16 v2, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NSZ-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo
+; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo
+; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NSZ-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX11-NSZ-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %cmp = icmp eq <2 x i32> %c, zeroinitializer
+ %fmad = call nsz <2 x half> @llvm.fmuladd.v2f16(<2 x half> %x, <2 x half> <half 4.0, half 4.0>, <2 x half> %z)
+ %fneg = fneg <2 x half> %fmad
+ %select = select <2 x i1> %cmp, <2 x half> %fneg, <2 x half> <half 2.0, half 2.0>
+ ret <2 x half> %select
+}
+
declare <2 x half> @llvm.fabs.v2f16(<2 x half>) #0
declare <2 x half> @llvm.fma.v2f16(<2 x half>, <2 x half>, <2 x half>) #0
declare <2 x half> @llvm.fmuladd.v2f16(<2 x half>, <2 x half>, <2 x half>) #0
diff --git a/llvm/test/CodeGen/AMDGPU/v_mac.ll b/llvm/test/CodeGen/AMDGPU/v_mac.ll
index c128715..f5dc824 100644
--- a/llvm/test/CodeGen/AMDGPU/v_mac.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_mac.ll
@@ -116,7 +116,7 @@ entry:
; GCN-LABEL: {{^}}nsz_mad_sub0_src0:
; GCN-NOT: v_mac_f32
; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
-define amdgpu_kernel void @nsz_mad_sub0_src0(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+define amdgpu_kernel void @nsz_mad_sub0_src0(ptr addrspace(1) %out, ptr addrspace(1) %in) {
entry:
%b_ptr = getelementptr float, ptr addrspace(1) %in, i32 1
%c_ptr = getelementptr float, ptr addrspace(1) %in, i32 2
@@ -125,7 +125,7 @@ entry:
%b = load float, ptr addrspace(1) %b_ptr
%c = load float, ptr addrspace(1) %c_ptr
- %neg_a = fsub float 0.0, %a
+ %neg_a = fsub nsz float 0.0, %a
%tmp0 = fmul float %neg_a, %b
%tmp1 = fadd float %tmp0, %c
@@ -176,7 +176,7 @@ entry:
; GCN-LABEL: {{^}}nsz_mad_sub0_src1:
; GCN-NOT: v_mac_f32
; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
-define amdgpu_kernel void @nsz_mad_sub0_src1(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+define amdgpu_kernel void @nsz_mad_sub0_src1(ptr addrspace(1) %out, ptr addrspace(1) %in) {
entry:
%b_ptr = getelementptr float, ptr addrspace(1) %in, i32 1
%c_ptr = getelementptr float, ptr addrspace(1) %in, i32 2
@@ -185,7 +185,7 @@ entry:
%b = load float, ptr addrspace(1) %b_ptr
%c = load float, ptr addrspace(1) %c_ptr
- %neg_b = fsub float 0.0, %b
+ %neg_b = fsub nsz float 0.0, %b
%tmp0 = fmul float %a, %neg_b
%tmp1 = fadd float %tmp0, %c
@@ -310,6 +310,5 @@ define float @v_mac_f32_dynamic_ftz(float %a, float %b, float %c) "denormal-fp-m
declare i32 @llvm.amdgcn.workitem.id.x() #2
attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" }
-attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" }
attributes #2 = { nounwind readnone }
attributes #3 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll
index bcc60b0..8da6f23 100644
--- a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll
@@ -236,7 +236,7 @@ entry:
%b.val = load half, ptr addrspace(1) %b
%c.val = load half, ptr addrspace(1) %c
- %a.neg = fsub half 0.0, %a.val
+ %a.neg = fsub nsz half 0.0, %a.val
%t.val = fmul half %a.neg, %b.val
%r.val = fadd half %t.val, %c.val
@@ -263,7 +263,7 @@ entry:
%b.val = load half, ptr addrspace(1) %b
%c.val = load half, ptr addrspace(1) %c
- %b.neg = fsub half 0.0, %b.val
+ %b.neg = fsub nsz half 0.0, %b.val
%t.val = fmul half %a.val, %b.neg
%r.val = fadd half %t.val, %c.val
@@ -290,7 +290,7 @@ entry:
%b.val = load half, ptr addrspace(1) %b
%c.val = load half, ptr addrspace(1) %c
- %c.neg = fsub half 0.0, %c.val
+ %c.neg = fsub nsz half 0.0, %c.val
%t.val = fmul half %a.val, %b.val
%r.val = fadd half %t.val, %c.neg
@@ -601,7 +601,7 @@ entry:
%b.val = load <2 x half>, ptr addrspace(1) %b
%c.val = load <2 x half>, ptr addrspace(1) %c
- %a.neg = fsub <2 x half> <half 0.0, half 0.0>, %a.val
+ %a.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %a.val
%t.val = fmul <2 x half> %a.neg, %b.val
%r.val = fadd <2 x half> %t.val, %c.val
@@ -634,7 +634,7 @@ entry:
%b.val = load <2 x half>, ptr addrspace(1) %b
%c.val = load <2 x half>, ptr addrspace(1) %c
- %b.neg = fsub <2 x half> <half 0.0, half 0.0>, %b.val
+ %b.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %b.val
%t.val = fmul <2 x half> %a.val, %b.neg
%r.val = fadd <2 x half> %t.val, %c.val
@@ -667,7 +667,7 @@ entry:
%b.val = load <2 x half>, ptr addrspace(1) %b
%c.val = load <2 x half>, ptr addrspace(1) %c
- %c.neg = fsub <2 x half> <half 0.0, half 0.0>, %c.val
+ %c.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %c.val
%t.val = fmul <2 x half> %a.val, %b.val
%r.val = fadd <2 x half> %t.val, %c.neg
@@ -678,5 +678,5 @@ entry:
declare void @llvm.amdgcn.s.barrier() #2
attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" }
-attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" "denormal-fp-math"="preserve-sign,preserve-sign" }
+attributes #1 = { nounwind "denormal-fp-math"="preserve-sign,preserve-sign" }
attributes #2 = { nounwind convergent }
diff --git a/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll b/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll
new file mode 100644
index 0000000..39ac647
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
+
+define <8 x float> @fadd_elt0_v8f32(float %a) nounwind {
+; CHECK-LABEL: fadd_elt0_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -1168
+; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1
+; CHECK-NEXT: ret
+entry:
+ %b = insertelement <8 x float> poison, float %a, i32 0
+ %c = fadd <8 x float> %b, <float 1.0, float poison, float poison, float poison, float poison, float poison, float poison, float poison>
+ ret <8 x float> %c
+}
+
+define <4 x double> @fadd_elt0_v4f64(double %a) nounwind {
+; CHECK-LABEL: fadd_elt0_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -912
+; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1
+; CHECK-NEXT: ret
+entry:
+ %b = insertelement <4 x double> poison, double %a, i32 0
+ %c = fadd <4 x double> %b, <double 1.0, double poison, double poison, double poison>
+ ret <4 x double> %c
+}
+
+define <8 x float> @fsub_splat_v8f32(float %a, float %b) nounwind {
+; CHECK-LABEL: fsub_splat_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsub.s $fa0, $fa0, $fa1
+; CHECK-NEXT: xvreplve0.w $xr0, $xr0
+; CHECK-NEXT: ret
+entry:
+ %insa = insertelement <8 x float> poison, float %a, i32 0
+ %insb = insertelement <8 x float> poison, float %b, i32 0
+ %va = shufflevector <8 x float> %insa, <8 x float> poison, <8 x i32> zeroinitializer
+ %vb = shufflevector <8 x float> %insb, <8 x float> poison, <8 x i32> zeroinitializer
+ %c = fsub <8 x float> %va, %vb
+ ret <8 x float> %c
+}
+
+define <4 x double> @fsub_splat_v4f64(double %a) nounwind {
+; CHECK-LABEL: fsub_splat_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -784
+; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1
+; CHECK-NEXT: xvreplve0.d $xr0, $xr0
+; CHECK-NEXT: ret
+entry:
+ %insa = insertelement <4 x double> poison, double %a, i32 0
+ %insb = insertelement <4 x double> poison, double 1.0, i32 0
+ %va = shufflevector <4 x double> %insa, <4 x double> poison, <4 x i32> zeroinitializer
+ %vb = shufflevector <4 x double> %insb, <4 x double> poison, <4 x i32> zeroinitializer
+ %c = fsub <4 x double> %va, %vb
+ ret <4 x double> %c
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll b/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll
new file mode 100644
index 0000000..b651f11
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
+
+define <4 x float> @fadd_elt0_v4f32(float %a) nounwind {
+; CHECK-LABEL: fadd_elt0_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -1168
+; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1
+; CHECK-NEXT: ret
+entry:
+ %b = insertelement <4 x float> poison, float %a, i32 0
+ %c = fadd <4 x float> %b, <float 1.0, float poison, float poison, float poison>
+ ret <4 x float> %c
+}
+
+define <2 x double> @fadd_elt0_v2f64(double %a) nounwind {
+; CHECK-LABEL: fadd_elt0_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -912
+; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1
+; CHECK-NEXT: ret
+entry:
+ %b = insertelement <2 x double> poison, double %a, i32 0
+ %c = fadd <2 x double> %b, <double 1.0, double poison>
+ ret <2 x double> %c
+}
+
+define <4 x float> @fsub_splat_v4f32(float %b) nounwind {
+; CHECK-LABEL: fsub_splat_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -1168
+; CHECK-NEXT: fsub.s $fa0, $fa1, $fa0
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT: ret
+entry:
+ %insa = insertelement <4 x float> poison, float 1.0, i32 0
+ %insb = insertelement <4 x float> poison, float %b, i32 0
+ %va = shufflevector <4 x float> %insa, <4 x float> poison, <4 x i32> zeroinitializer
+ %vb = shufflevector <4 x float> %insb, <4 x float> poison, <4 x i32> zeroinitializer
+ %c = fsub <4 x float> %va, %vb
+ ret <4 x float> %c
+}
+
+define <2 x double> @fsub_splat_v2f64(double %a, double %b) nounwind {
+; CHECK-LABEL: fsub_splat_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsub.d $fa0, $fa0, $fa1
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: ret
+entry:
+ %insa = insertelement <2 x double> poison, double %a, i32 0
+ %insb = insertelement <2 x double> poison, double %b, i32 0
+ %va = shufflevector <2 x double> %insa, <2 x double> poison, <2 x i32> zeroinitializer
+ %vb = shufflevector <2 x double> %insb, <2 x double> poison, <2 x i32> zeroinitializer
+ %c = fsub <2 x double> %va, %vb
+ ret <2 x double> %c
+}
diff --git a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
index aaabd76e..fd0b494 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
@@ -20,18 +20,18 @@
define float @select_oeq_float(float %a, float %b, float %c, float %d) {
; FAST-P8-LABEL: select_oeq_float:
; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: xssubsp f1, f1, f2
-; FAST-P8-NEXT: fsel f1, f1, f3, f4
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
+; FAST-P8-NEXT: xssubsp f0, f1, f2
+; FAST-P8-NEXT: xsnegdp f1, f0
+; FAST-P8-NEXT: fsel f0, f0, f3, f4
+; FAST-P8-NEXT: fsel f1, f1, f0, f4
; FAST-P8-NEXT: blr
;
; FAST-P9-LABEL: select_oeq_float:
; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: xssubsp f1, f1, f2
-; FAST-P9-NEXT: fsel f1, f1, f3, f4
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
+; FAST-P9-NEXT: xssubsp f0, f1, f2
+; FAST-P9-NEXT: xsnegdp f1, f0
+; FAST-P9-NEXT: fsel f0, f0, f3, f4
+; FAST-P9-NEXT: fsel f1, f1, f0, f4
; FAST-P9-NEXT: blr
;
; NO-FAST-P8-LABEL: select_oeq_float:
@@ -59,6 +59,48 @@ entry:
ret float %cond
}
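+
+; As select_oeq_float, but with nsz on the fcmp: b - a and -(a - b) differ
+; only in the sign of a zero result, so nsz is what justifies keeping the
+; two-subtract fsel form here, while the non-nsz version above now needs an
+; explicit xsnegdp.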
+define float @select_oeq_float_nsz(float %a, float %b, float %c, float %d) {
+; FAST-P8-LABEL: select_oeq_float_nsz:
+; FAST-P8: # %bb.0: # %entry
+; FAST-P8-NEXT: xssubsp f0, f2, f1
+; FAST-P8-NEXT: xssubsp f1, f1, f2
+; FAST-P8-NEXT: fsel f1, f1, f3, f4
+; FAST-P8-NEXT: fsel f1, f0, f1, f4
+; FAST-P8-NEXT: blr
+;
+; FAST-P9-LABEL: select_oeq_float_nsz:
+; FAST-P9: # %bb.0: # %entry
+; FAST-P9-NEXT: xssubsp f0, f2, f1
+; FAST-P9-NEXT: xssubsp f1, f1, f2
+; FAST-P9-NEXT: fsel f1, f1, f3, f4
+; FAST-P9-NEXT: fsel f1, f0, f1, f4
+; FAST-P9-NEXT: blr
+;
+; NO-FAST-P8-LABEL: select_oeq_float_nsz:
+; NO-FAST-P8: # %bb.0: # %entry
+; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
+; NO-FAST-P8-NEXT: beq cr0, .LBB1_2
+; NO-FAST-P8-NEXT: # %bb.1: # %entry
+; NO-FAST-P8-NEXT: fmr f3, f4
+; NO-FAST-P8-NEXT: .LBB1_2: # %entry
+; NO-FAST-P8-NEXT: fmr f1, f3
+; NO-FAST-P8-NEXT: blr
+;
+; NO-FAST-P9-LABEL: select_oeq_float_nsz:
+; NO-FAST-P9: # %bb.0: # %entry
+; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
+; NO-FAST-P9-NEXT: beq cr0, .LBB1_2
+; NO-FAST-P9-NEXT: # %bb.1: # %entry
+; NO-FAST-P9-NEXT: fmr f3, f4
+; NO-FAST-P9-NEXT: .LBB1_2: # %entry
+; NO-FAST-P9-NEXT: fmr f1, f3
+; NO-FAST-P9-NEXT: blr
+entry:
+ %cmp = fcmp nsz oeq float %a, %b
+ %cond = select i1 %cmp, float %c, float %d
+ ret float %cond
+}
+
define double @select_oeq_double(double %a, double %b, double %c, double %d) {
; FAST-P8-LABEL: select_oeq_double:
; FAST-P8: # %bb.0: # %entry
@@ -79,20 +121,20 @@ define double @select_oeq_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8-LABEL: select_oeq_double:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: beq cr0, .LBB1_2
+; NO-FAST-P8-NEXT: beq cr0, .LBB2_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB1_2: # %entry
+; NO-FAST-P8-NEXT: .LBB2_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
; NO-FAST-P9-LABEL: select_oeq_double:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: beq cr0, .LBB1_2
+; NO-FAST-P9-NEXT: beq cr0, .LBB2_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB1_2: # %entry
+; NO-FAST-P9-NEXT: .LBB2_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -182,13 +224,57 @@ entry:
define float @select_one_float(float %a, float %b, float %c, float %d) {
; FAST-P8-LABEL: select_one_float:
; FAST-P8: # %bb.0: # %entry
+; FAST-P8-NEXT: xssubsp f0, f1, f2
+; FAST-P8-NEXT: xsnegdp f1, f0
+; FAST-P8-NEXT: fsel f0, f0, f4, f3
+; FAST-P8-NEXT: fsel f1, f1, f0, f3
+; FAST-P8-NEXT: blr
+;
+; FAST-P9-LABEL: select_one_float:
+; FAST-P9: # %bb.0: # %entry
+; FAST-P9-NEXT: xssubsp f0, f1, f2
+; FAST-P9-NEXT: xsnegdp f1, f0
+; FAST-P9-NEXT: fsel f0, f0, f4, f3
+; FAST-P9-NEXT: fsel f1, f1, f0, f3
+; FAST-P9-NEXT: blr
+;
+; NO-FAST-P8-LABEL: select_one_float:
+; NO-FAST-P8: # %bb.0: # %entry
+; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
+; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; NO-FAST-P8-NEXT: # %bb.1: # %entry
+; NO-FAST-P8-NEXT: fmr f3, f4
+; NO-FAST-P8-NEXT: .LBB5_2: # %entry
+; NO-FAST-P8-NEXT: fmr f1, f3
+; NO-FAST-P8-NEXT: blr
+;
+; NO-FAST-P9-LABEL: select_one_float:
+; NO-FAST-P9: # %bb.0: # %entry
+; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
+; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; NO-FAST-P9-NEXT: # %bb.1: # %entry
+; NO-FAST-P9-NEXT: fmr f3, f4
+; NO-FAST-P9-NEXT: .LBB5_2: # %entry
+; NO-FAST-P9-NEXT: fmr f1, f3
+; NO-FAST-P9-NEXT: blr
+entry:
+ %cmp = fcmp one float %a, %b
+ %cond = select i1 %cmp, float %c, float %d
+ ret float %cond
+}
+
+define float @select_one_float_nsz(float %a, float %b, float %c, float %d) {
+; FAST-P8-LABEL: select_one_float_nsz:
+; FAST-P8: # %bb.0: # %entry
; FAST-P8-NEXT: xssubsp f0, f2, f1
; FAST-P8-NEXT: xssubsp f1, f1, f2
; FAST-P8-NEXT: fsel f1, f1, f4, f3
; FAST-P8-NEXT: fsel f1, f0, f1, f3
; FAST-P8-NEXT: blr
;
-; FAST-P9-LABEL: select_one_float:
+; FAST-P9-LABEL: select_one_float_nsz:
; FAST-P9: # %bb.0: # %entry
; FAST-P9-NEXT: xssubsp f0, f2, f1
; FAST-P9-NEXT: xssubsp f1, f1, f2
@@ -196,29 +282,29 @@ define float @select_one_float(float %a, float %b, float %c, float %d) {
; FAST-P9-NEXT: fsel f1, f0, f1, f3
; FAST-P9-NEXT: blr
;
-; NO-FAST-P8-LABEL: select_one_float:
+; NO-FAST-P8-LABEL: select_one_float_nsz:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB4_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB6_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB4_2: # %entry
+; NO-FAST-P8-NEXT: .LBB6_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
-; NO-FAST-P9-LABEL: select_one_float:
+; NO-FAST-P9-LABEL: select_one_float_nsz:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB4_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB6_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB4_2: # %entry
+; NO-FAST-P9-NEXT: .LBB6_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
- %cmp = fcmp one float %a, %b
+ %cmp = fcmp nsz one float %a, %b
%cond = select i1 %cmp, float %c, float %d
ret float %cond
}
@@ -244,10 +330,10 @@ define double @select_one_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB7_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB5_2: # %entry
+; NO-FAST-P8-NEXT: .LBB7_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -255,10 +341,10 @@ define double @select_one_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB7_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB5_2: # %entry
+; NO-FAST-P9-NEXT: .LBB7_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -362,10 +448,10 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB8_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB10_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB8_2: # %entry
+; NO-FAST-P8-NEXT: .LBB10_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -373,10 +459,10 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB8_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB10_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB8_2: # %entry
+; NO-FAST-P9-NEXT: .LBB10_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -402,10 +488,10 @@ define double @select_oge_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB9_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB11_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB9_2: # %entry
+; NO-FAST-P8-NEXT: .LBB11_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -413,10 +499,10 @@ define double @select_oge_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB9_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB11_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB9_2: # %entry
+; NO-FAST-P9-NEXT: .LBB11_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -503,20 +589,20 @@ define float @select_olt_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P8-LABEL: select_olt_float:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: blt cr0, .LBB12_2
+; NO-FAST-P8-NEXT: blt cr0, .LBB14_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB12_2: # %entry
+; NO-FAST-P8-NEXT: .LBB14_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
; NO-FAST-P9-LABEL: select_olt_float:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: blt cr0, .LBB12_2
+; NO-FAST-P9-NEXT: blt cr0, .LBB14_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB12_2: # %entry
+; NO-FAST-P9-NEXT: .LBB14_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -541,20 +627,20 @@ define double @select_olt_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8-LABEL: select_olt_double:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: blt cr0, .LBB13_2
+; NO-FAST-P8-NEXT: blt cr0, .LBB15_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB13_2: # %entry
+; NO-FAST-P8-NEXT: .LBB15_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
; NO-FAST-P9-LABEL: select_olt_double:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: blt cr0, .LBB13_2
+; NO-FAST-P9-NEXT: blt cr0, .LBB15_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB13_2: # %entry
+; NO-FAST-P9-NEXT: .LBB15_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -641,20 +727,20 @@ define float @select_ogt_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P8-LABEL: select_ogt_float:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: bgt cr0, .LBB16_2
+; NO-FAST-P8-NEXT: bgt cr0, .LBB18_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB16_2: # %entry
+; NO-FAST-P8-NEXT: .LBB18_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
; NO-FAST-P9-LABEL: select_ogt_float:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: bgt cr0, .LBB16_2
+; NO-FAST-P9-NEXT: bgt cr0, .LBB18_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB16_2: # %entry
+; NO-FAST-P9-NEXT: .LBB18_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -679,20 +765,20 @@ define double @select_ogt_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8-LABEL: select_ogt_double:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: bgt cr0, .LBB17_2
+; NO-FAST-P8-NEXT: bgt cr0, .LBB19_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB17_2: # %entry
+; NO-FAST-P8-NEXT: .LBB19_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
; NO-FAST-P9-LABEL: select_ogt_double:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: bgt cr0, .LBB17_2
+; NO-FAST-P9-NEXT: bgt cr0, .LBB19_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB17_2: # %entry
+; NO-FAST-P9-NEXT: .LBB19_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -780,10 +866,10 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB20_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB22_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB20_2: # %entry
+; NO-FAST-P8-NEXT: .LBB22_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -791,10 +877,10 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB20_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB22_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB20_2: # %entry
+; NO-FAST-P9-NEXT: .LBB22_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -820,10 +906,10 @@ define double @select_ole_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB21_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB23_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB21_2: # %entry
+; NO-FAST-P8-NEXT: .LBB23_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -831,10 +917,10 @@ define double @select_ole_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB21_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB23_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB21_2: # %entry
+; NO-FAST-P9-NEXT: .LBB23_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -926,13 +1012,13 @@ define double @onecmp1(double %a, double %y, double %z) {
; NO-FAST-P8-NEXT: vspltisw v2, 1
; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f0
-; NO-FAST-P8-NEXT: bc 12, lt, .LBB24_3
+; NO-FAST-P8-NEXT: bc 12, lt, .LBB26_3
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f1
-; NO-FAST-P8-NEXT: bc 12, un, .LBB24_3
+; NO-FAST-P8-NEXT: bc 12, un, .LBB26_3
; NO-FAST-P8-NEXT: # %bb.2: # %entry
; NO-FAST-P8-NEXT: fmr f3, f2
-; NO-FAST-P8-NEXT: .LBB24_3: # %entry
+; NO-FAST-P8-NEXT: .LBB26_3: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -941,13 +1027,13 @@ define double @onecmp1(double %a, double %y, double %z) {
; NO-FAST-P9-NEXT: vspltisw v2, 1
; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f0
-; NO-FAST-P9-NEXT: bc 12, lt, .LBB24_3
+; NO-FAST-P9-NEXT: bc 12, lt, .LBB26_3
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f1
-; NO-FAST-P9-NEXT: bc 12, un, .LBB24_3
+; NO-FAST-P9-NEXT: bc 12, un, .LBB26_3
; NO-FAST-P9-NEXT: # %bb.2: # %entry
; NO-FAST-P9-NEXT: fmr f3, f2
-; NO-FAST-P9-NEXT: .LBB24_3: # %entry
+; NO-FAST-P9-NEXT: .LBB26_3: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -978,10 +1064,10 @@ define double @onecmp2(double %a, double %y, double %z) {
; NO-FAST-P8-NEXT: vspltisw v2, 1
; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34
; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P8-NEXT: bgt cr0, .LBB25_2
+; NO-FAST-P8-NEXT: bgt cr0, .LBB27_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f2, f3
-; NO-FAST-P8-NEXT: .LBB25_2: # %entry
+; NO-FAST-P8-NEXT: .LBB27_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f2
; NO-FAST-P8-NEXT: blr
;
@@ -990,10 +1076,10 @@ define double @onecmp2(double %a, double %y, double %z) {
; NO-FAST-P9-NEXT: vspltisw v2, 1
; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34
; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P9-NEXT: bgt cr0, .LBB25_2
+; NO-FAST-P9-NEXT: bgt cr0, .LBB27_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f2, f3
-; NO-FAST-P9-NEXT: .LBB25_2: # %entry
+; NO-FAST-P9-NEXT: .LBB27_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f2
; NO-FAST-P9-NEXT: blr
entry:
@@ -1028,10 +1114,10 @@ define double @onecmp3(double %a, double %y, double %z) {
; NO-FAST-P8-NEXT: vspltisw v2, 1
; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34
; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P8-NEXT: beq cr0, .LBB26_2
+; NO-FAST-P8-NEXT: beq cr0, .LBB28_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f2, f3
-; NO-FAST-P8-NEXT: .LBB26_2: # %entry
+; NO-FAST-P8-NEXT: .LBB28_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f2
; NO-FAST-P8-NEXT: blr
;
@@ -1040,10 +1126,10 @@ define double @onecmp3(double %a, double %y, double %z) {
; NO-FAST-P9-NEXT: vspltisw v2, 1
; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34
; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P9-NEXT: beq cr0, .LBB26_2
+; NO-FAST-P9-NEXT: beq cr0, .LBB28_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f2, f3
-; NO-FAST-P9-NEXT: .LBB26_2: # %entry
+; NO-FAST-P9-NEXT: .LBB28_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f2
; NO-FAST-P9-NEXT: blr
entry:
diff --git a/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll b/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll
index 55b0d1f..2a46a59 100644
--- a/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll
+++ b/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll
@@ -155,3 +155,109 @@ define i1 @test9(i64 %x) {
%b = icmp eq i64 %a, u0x08000000
ret i1 %b
}
+
+; Make sure the and constant doesn't get converted to an opaque constant by
+; ConstantHoisting. If it's an opaque constant, we'll have addi -16 and addi 15.
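+; With the immediate visible to isel, the compare folds to
+; (x - 1) & 0xfffffff0 != 0: one addi -1 plus an andi mask on RV32, or an
+; arithmetic word shift on RV64.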
+define i64 @test10(i64 %0) #0 {
+; RV32-LABEL: test10:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: andi a0, a0, -16
+; RV32-NEXT: snez a0, a0
+; RV32-NEXT: li a1, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: test10:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: sraiw a0, a0, 4
+; RV64-NEXT: snez a0, a0
+; RV64-NEXT: ret
+entry:
+ %1 = add nuw nsw i64 %0, u0xffffffff
+ %2 = and i64 %1, u0xfffffff0
+ %3 = icmp ne i64 %2, 0
+ %4 = zext i1 %3 to i64
+ ret i64 %4
+}
+
+; Make sure the and constant doesn't get converted to an opaque constant by
+; ConstantHoisting. If it's an opaque constant, we'll have addi -16 and addi 15.
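+; 0xffff9ab0 read as a signed 32-bit value is -25936, and -25936 >> 4 is
+; -1621, hence the srai/sraiw by 4 followed by addi 1621 + seqz.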
+define i64 @test11(i64 %0) #0 {
+; RV32-LABEL: test11:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: srai a0, a0, 4
+; RV32-NEXT: addi a0, a0, 1621
+; RV32-NEXT: seqz a0, a0
+; RV32-NEXT: li a1, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: test11:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: sraiw a0, a0, 4
+; RV64-NEXT: addi a0, a0, 1621
+; RV64-NEXT: seqz a0, a0
+; RV64-NEXT: ret
+entry:
+ %1 = add nuw nsw i64 %0, u0xffffffff
+ %2 = and i64 %1, u0xfffffff0
+ %3 = icmp eq i64 %2, u0xffff9ab0
+ %4 = zext i1 %3 to i64
+ ret i64 %4
+}
+
+; Make sure the and constant doesn't get converted to an opaque constant by
+; ConstantHoisting. If it's an opaque constant, we'll end up with constant
+; materialization sequences on RV64.
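+; 0xfffffff3 + 16 wraps to 3 in 32 bits, so on RV32 the whole check folds to
+; (x - 3) == 0.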
+define i64 @test12(i64 %0) #0 {
+; RV32-LABEL: test12:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi a0, a0, -3
+; RV32-NEXT: seqz a0, a0
+; RV32-NEXT: li a1, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: test12:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addiw a0, a0, -16
+; RV64-NEXT: addi a0, a0, 13
+; RV64-NEXT: seqz a0, a0
+; RV64-NEXT: ret
+entry:
+ %1 = add nuw nsw i64 %0, u0xfffffff0
+ %2 = and i64 %1, u0xffffffff
+ %3 = icmp eq i64 %2, u0xfffffff3
+ %4 = zext i1 %3 to i64
+ ret i64 %4
+}
+
+; Make sure the and constant doesn't get converted to an opaque constant by
+; ConstantHoisting.
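+; The and with 0x80000000 only tests bit 31 of x + 0x8000000f, which the
+; srli/sraiw by 31 below extracts directly.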
+define i64 @test13(i64 %0) #0 {
+; RV32-LABEL: test13:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: lui a1, 524288
+; RV32-NEXT: addi a1, a1, 15
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: srli a0, a0, 31
+; RV32-NEXT: seqz a0, a0
+; RV32-NEXT: li a1, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: test13:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: lui a1, 524288
+; RV64-NEXT: addi a1, a1, -15
+; RV64-NEXT: sub a0, a0, a1
+; RV64-NEXT: sraiw a0, a0, 31
+; RV64-NEXT: seqz a0, a0
+; RV64-NEXT: ret
+entry:
+ %1 = add nuw nsw i64 %0, u0x8000000f
+ %2 = and i64 %1, u0x80000000
+ %3 = icmp eq i64 %2, 0
+ %4 = zext i1 %3 to i64
+ ret i64 %4
+}
diff --git a/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll b/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll
index 6679b5f5..41fa346 100644
--- a/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll
+++ b/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll
@@ -8,7 +8,7 @@ define void @neg_8bit_1(i1 %cmp) {
; NDD-NEXT: andb $1, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x80,0xe7,0x01]
; NDD-NEXT: movzbl 0, %ecx # encoding: [0x0f,0xb6,0x0c,0x25,0x00,0x00,0x00,0x00]
; NDD-NEXT: negb %al, %al # encoding: [0x62,0xf4,0x7c,0x18,0xf6,0xd8]
-; NDD-NEXT: leab 2(%rcx,%rax), %al # encoding: [0x66,0x8d,0x44,0x01,0x02]
+; NDD-NEXT: leal 2(%rcx,%rax), %eax # encoding: [0x8d,0x44,0x01,0x02]
; NDD-NEXT: movb %al, 0 # encoding: [0x88,0x04,0x25,0x00,0x00,0x00,0x00]
; NDD-NEXT: retq # encoding: [0xc3]
entry:
@@ -25,7 +25,8 @@ define void @neg_8bit_2(i8 %int8) {
; NDD-NEXT: # kill: def $edi killed $edi def $rdi
; NDD-NEXT: addb %dil, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x00,0xff]
; NDD-NEXT: negb %al, %al # encoding: [0x62,0xf4,0x7c,0x18,0xf6,0xd8]
-; NDD-NEXT: leab 1(%rdi,%rax), %al # encoding: [0x66,0x8d,0x44,0x07,0x01]
+; NDD-NEXT: leal 1(%rdi,%rax), %eax # encoding: [0x8d,0x44,0x07,0x01]
+; NDD-NEXT: # kill: def $al killed $al killed $eax
; NDD-NEXT: mulb %dil # encoding: [0x40,0xf6,0xe7]
; NDD-NEXT: testb %al, %al # encoding: [0x84,0xc0]
; NDD-NEXT: retq # encoding: [0xc3]
@@ -55,7 +56,7 @@ define i32 @neg_16bit(i16 %0) {
; NDD-NEXT: cmovsl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x48,0xc1]
; NDD-NEXT: andw $-256, %ax # EVEX TO LEGACY Compression encoding: [0x66,0x25,0x00,0xff]
; NDD-NEXT: negw %ax, %ax # encoding: [0x62,0xf4,0x7d,0x18,0xf7,0xd8]
-; NDD-NEXT: leaw 1(%rdi,%rax), %ax # encoding: [0x66,0x8d,0x44,0x07,0x01]
+; NDD-NEXT: leal 1(%rdi,%rax), %eax # encoding: [0x8d,0x44,0x07,0x01]
; NDD-NEXT: movzwl %ax, %eax # encoding: [0x0f,0xb7,0xc0]
; NDD-NEXT: movq %rax, 0 # encoding: [0x48,0x89,0x04,0x25,0x00,0x00,0x00,0x00]
; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
diff --git a/llvm/test/CodeGen/X86/combine-add.ll b/llvm/test/CodeGen/X86/combine-add.ll
index ff9f995..51a8bf5 100644
--- a/llvm/test/CodeGen/X86/combine-add.ll
+++ b/llvm/test/CodeGen/X86/combine-add.ll
@@ -235,10 +235,10 @@ define void @PR52039(ptr %pa, ptr %pb) {
; SSE-NEXT: psubd %xmm1, %xmm3
; SSE-NEXT: psubd %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: paddd %xmm0, %xmm0
; SSE-NEXT: paddd %xmm2, %xmm0
; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: paddd %xmm3, %xmm1
+; SSE-NEXT: paddd %xmm1, %xmm1
; SSE-NEXT: paddd %xmm3, %xmm1
; SSE-NEXT: movdqu %xmm3, 16(%rsi)
; SSE-NEXT: movdqu %xmm2, (%rsi)
diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll
index 8e4a50e..ae4d24f 100644
--- a/llvm/test/CodeGen/X86/combine-mul.ll
+++ b/llvm/test/CodeGen/X86/combine-mul.ll
@@ -81,7 +81,7 @@ define <4 x i64> @combine_vec_mul_pow2c(<4 x i64> %x) {
; SSE-LABEL: combine_vec_mul_pow2c:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: paddq %xmm0, %xmm2
+; SSE-NEXT: paddq %xmm2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psllq $4, %xmm2
diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll
index 98187d6..6bcbfe1 100644
--- a/llvm/test/CodeGen/X86/combine-sdiv.ll
+++ b/llvm/test/CodeGen/X86/combine-sdiv.ll
@@ -2187,13 +2187,13 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: pcmpgtb %xmm1, %xmm3
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [256,2,2,2,2,128,2,128]
; SSE41-NEXT: psrlw $8, %xmm3
-; SSE41-NEXT: paddw %xmm4, %xmm4
-; SSE41-NEXT: pmovsxbw %xmm1, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4,5],xmm4[6],xmm2[7]
+; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE41-NEXT: paddw %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2],xmm0[3,4,5],xmm2[6],xmm0[7]
; SSE41-NEXT: psrlw $8, %xmm2
; SSE41-NEXT: packuswb %xmm3, %xmm2
; SSE41-NEXT: paddb %xmm1, %xmm2
@@ -2201,15 +2201,14 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) {
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; SSE41-NEXT: psraw $8, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: paddw %xmm0, %xmm3
-; SSE41-NEXT: psllw $7, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5],xmm0[6],xmm3[7]
-; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: psllw $7, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm0[5],xmm3[6],xmm0[7]
+; SSE41-NEXT: psrlw $8, %xmm3
; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: psraw $8, %xmm2
; SSE41-NEXT: psllw $7, %xmm2
; SSE41-NEXT: psrlw $8, %xmm2
-; SSE41-NEXT: packuswb %xmm0, %xmm2
+; SSE41-NEXT: packuswb %xmm3, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,255,255,255,0,255,255,0,0,0,0,255,0,255]
@@ -2225,18 +2224,17 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) {
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [256,2,2,2,2,128,2,128]
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3,4,5],xmm2[6],xmm3[7]
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm2
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4,5],xmm3[6],xmm2[7]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpsraw $8, %xmm2, %xmm2
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
-; AVX1-NEXT: vpsllw $7, %xmm2, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5],xmm2[6],xmm3[7]
+; AVX1-NEXT: vpsllw $7, %xmm2, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5],xmm3[6],xmm2[7]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
diff --git a/llvm/test/CodeGen/X86/dbg-distringtype-uint.ll b/llvm/test/CodeGen/X86/dbg-distringtype-uint.ll
index 638a65d..7542c1b 100644
--- a/llvm/test/CodeGen/X86/dbg-distringtype-uint.ll
+++ b/llvm/test/CodeGen/X86/dbg-distringtype-uint.ll
@@ -1,5 +1,13 @@
; RUN: llc -mtriple=x86_64 -filetype=obj < %s | llvm-dwarfdump -debug-info - | FileCheck %s
-;
+
+; Ensure that the static local variable "elemnt" is placed in the abstract subprogram DIE.
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_inline (DW_INL_inlined)
+; CHECK-EMPTY:
+; CHECK-NEXT: DW_TAG_variable
+; CHECK-NEXT: DW_AT_name ("elemnt")
+
; CHECK: [[SYM:[a-z0-9]+]]: DW_TAG_formal_parameter
; CHECK: DW_AT_name ("esym")
; CHECK: DW_AT_type ([[TYPE:[a-z0-9]+]] "CHARACTER_1")
diff --git a/llvm/test/CodeGen/X86/dpbusd.ll b/llvm/test/CodeGen/X86/dpbusd.ll
index 3aa77c3..7bd22d5 100644
--- a/llvm/test/CodeGen/X86/dpbusd.ll
+++ b/llvm/test/CodeGen/X86/dpbusd.ll
@@ -1,40 +1,25 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=AVXVNNI
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=AVX512,AVX512VNNI
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVNNI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VNNI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VLVNNI
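+
+; The AVXVNNI-AVX512 runs cover +avxvnni together with +avx512vl, where the
+; VNNI instructions keep their {vex} encoding but 512-bit vectors are legal.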
define i32 @no_dpbusd(ptr%a, ptr%b, i32 %c, i32 %n) {
-; AVXVNNI-LABEL: no_dpbusd:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVXVNNI-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0
-; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vmovd %xmm0, %eax
-; AVXVNNI-NEXT: addl %edx, %eax
-; AVXVNNI-NEXT: vzeroupper
-; AVXVNNI-NEXT: retq
-;
-; AVX512-LABEL: no_dpbusd:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: addl %edx, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; CHECK-LABEL: no_dpbusd:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; CHECK-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vmovd %xmm0, %eax
+; CHECK-NEXT: addl %edx, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
entry:
%0 = load <16 x i8>, ptr %a, align 16
%1 = zext <16 x i8> %0 to <16 x i32>
@@ -99,25 +84,44 @@ entry:
}
define i32 @mul_zext(ptr%a, ptr%b, i32 %c, i32 %n) {
-; AVXVNNI-LABEL: mul_zext:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVXVNNI-NEXT: vpmovsxbw (%rsi), %ymm1
-; AVXVNNI-NEXT: vpmullw %ymm0, %ymm1, %ymm0
-; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVXVNNI-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVXVNNI-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVXVNNI-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vmovd %xmm0, %eax
-; AVXVNNI-NEXT: addl %edx, %eax
-; AVXVNNI-NEXT: vzeroupper
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: mul_zext:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVXVNNI-AVX-NEXT: vpmovsxbw (%rsi), %ymm1
+; AVXVNNI-AVX-NEXT: vpmullw %ymm0, %ymm1, %ymm0
+; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVXVNNI-AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVXVNNI-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax
+; AVXVNNI-AVX-NEXT: addl %edx, %eax
+; AVXVNNI-AVX-NEXT: vzeroupper
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: mul_zext:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVXVNNI-AVX512-NEXT: vpmovsxbw (%rsi), %ymm1
+; AVXVNNI-AVX512-NEXT: vpmullw %ymm0, %ymm1, %ymm0
+; AVXVNNI-AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVXVNNI-AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax
+; AVXVNNI-AVX512-NEXT: addl %edx, %eax
+; AVXVNNI-AVX512-NEXT: vzeroupper
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512-LABEL: mul_zext:
; AVX512: # %bb.0: # %entry
@@ -153,25 +157,44 @@ entry:
}
define i32 @mul_sext(ptr%a, ptr%b, i32 %c, i32 %n) {
-; AVXVNNI-LABEL: mul_sext:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVXVNNI-NEXT: vpmovsxbw (%rsi), %ymm1
-; AVXVNNI-NEXT: vpmullw %ymm0, %ymm1, %ymm0
-; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVXVNNI-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVXVNNI-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVXVNNI-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vmovd %xmm0, %eax
-; AVXVNNI-NEXT: addl %edx, %eax
-; AVXVNNI-NEXT: vzeroupper
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: mul_sext:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVXVNNI-AVX-NEXT: vpmovsxbw (%rsi), %ymm1
+; AVXVNNI-AVX-NEXT: vpmullw %ymm0, %ymm1, %ymm0
+; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVXVNNI-AVX-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVXVNNI-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax
+; AVXVNNI-AVX-NEXT: addl %edx, %eax
+; AVXVNNI-AVX-NEXT: vzeroupper
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: mul_sext:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVXVNNI-AVX512-NEXT: vpmovsxbw (%rsi), %ymm1
+; AVXVNNI-AVX512-NEXT: vpmullw %ymm0, %ymm1, %ymm0
+; AVXVNNI-AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVXVNNI-AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax
+; AVXVNNI-AVX512-NEXT: addl %edx, %eax
+; AVXVNNI-AVX512-NEXT: vzeroupper
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512-LABEL: mul_sext:
; AVX512: # %bb.0: # %entry
@@ -312,17 +335,30 @@ entry:
declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
define i32 @vpdpbusd_128(ptr%a, ptr%b, i32 %c, i32 %n) {
-; AVXVNNI-LABEL: vpdpbusd_128:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVXVNNI-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVXVNNI-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
-; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
-; AVXVNNI-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2
-; AVXVNNI-NEXT: vmovd %xmm2, %eax
-; AVXVNNI-NEXT: addl %edx, %eax
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: vpdpbusd_128:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVXVNNI-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVXVNNI-AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
+; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2
+; AVXVNNI-AVX-NEXT: vmovd %xmm2, %eax
+; AVXVNNI-AVX-NEXT: addl %edx, %eax
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: vpdpbusd_128:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVXVNNI-AVX512-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
+; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2
+; AVXVNNI-AVX512-NEXT: vmovd %xmm2, %eax
+; AVXVNNI-AVX512-NEXT: addl %edx, %eax
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512VNNI-LABEL: vpdpbusd_128:
; AVX512VNNI: # %bb.0: # %entry
diff --git a/llvm/test/CodeGen/X86/dpbusd_const.ll b/llvm/test/CodeGen/X86/dpbusd_const.ll
index 456e6e8..bb47df5 100644
--- a/llvm/test/CodeGen/X86/dpbusd_const.ll
+++ b/llvm/test/CodeGen/X86/dpbusd_const.ll
@@ -1,20 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=ALL,AVXVNNI
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VNNI
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VLVNNI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VNNI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VLVNNI
define i32 @mul_4xi8_zc_exceed(<4 x i8> %a, i32 %c) {
-; ALL-LABEL: mul_4xi8_zc_exceed:
-; ALL: # %bb.0: # %entry
-; ALL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; ALL-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,128,0]
-; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; ALL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; ALL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; ALL-NEXT: vmovd %xmm0, %eax
-; ALL-NEXT: addl %edi, %eax
-; ALL-NEXT: retq
+; CHECK-LABEL: mul_4xi8_zc_exceed:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; CHECK-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,128,0]
+; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vmovd %xmm0, %eax
+; CHECK-NEXT: addl %edi, %eax
+; CHECK-NEXT: retq
entry:
%0 = zext <4 x i8> %a to <4 x i32>
%1 = mul nsw <4 x i32> %0, <i32 0, i32 1, i32 2, i32 128>
@@ -24,14 +25,24 @@ entry:
}
define i32 @mul_4xi8_zc(<4 x i8> %a, i32 %c) {
-; AVXVNNI-LABEL: mul_4xi8_zc:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; AVXVNNI-NEXT: vmovd %xmm1, %eax
-; AVXVNNI-NEXT: addl %edi, %eax
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: mul_4xi8_zc:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVXVNNI-AVX-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax
+; AVXVNNI-AVX-NEXT: addl %edi, %eax
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: mul_4xi8_zc:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVXVNNI-AVX512-NEXT: vmovd %xmm1, %eax
+; AVXVNNI-AVX512-NEXT: addl %edi, %eax
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512VNNI-LABEL: mul_4xi8_zc:
; AVX512VNNI: # %bb.0: # %entry
@@ -62,16 +73,26 @@ entry:
}
define i32 @mul_4xi4_cz(<4 x i4> %a, i32 %c) {
-; AVXVNNI-LABEL: mul_4xi4_cz:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVXVNNI-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; AVXVNNI-NEXT: vmovd %xmm1, %eax
-; AVXVNNI-NEXT: addl %edi, %eax
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: mul_4xi4_cz:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVXVNNI-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVXVNNI-AVX-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax
+; AVXVNNI-AVX-NEXT: addl %edi, %eax
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: mul_4xi4_cz:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vpmovdb %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVXVNNI-AVX512-NEXT: vmovd %xmm1, %eax
+; AVXVNNI-AVX512-NEXT: addl %edi, %eax
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512VNNI-LABEL: mul_4xi4_cz:
; AVX512VNNI: # %bb.0: # %entry
@@ -104,15 +125,26 @@ entry:
}
define i32 @mul_4xi8_cs(<4 x i8> %a, i32 %c) {
-; AVXVNNI-LABEL: mul_4xi8_cs:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; AVXVNNI-NEXT: vmovd {{.*#+}} xmm2 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVXVNNI-NEXT: {vex} vpdpbusd %xmm0, %xmm2, %xmm1
-; AVXVNNI-NEXT: vmovd %xmm1, %eax
-; AVXVNNI-NEXT: addl %edi, %eax
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: mul_4xi8_cs:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVXVNNI-AVX-NEXT: vmovd {{.*#+}} xmm2 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %xmm0, %xmm2, %xmm1
+; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax
+; AVXVNNI-AVX-NEXT: addl %edi, %eax
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: mul_4xi8_cs:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVXVNNI-AVX512-NEXT: vmovd {{.*#+}} xmm1 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %xmm0, %xmm1, %xmm2
+; AVXVNNI-AVX512-NEXT: vmovd %xmm2, %eax
+; AVXVNNI-AVX512-NEXT: addl %edi, %eax
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512VNNI-LABEL: mul_4xi8_cs:
; AVX512VNNI: # %bb.0: # %entry
@@ -145,17 +177,17 @@ entry:
}
define i32 @mul_4xi8_cs_exceed(<4 x i8> %a, i32 %c) {
-; ALL-LABEL: mul_4xi8_cs_exceed:
-; ALL: # %bb.0: # %entry
-; ALL-NEXT: vpmovsxbd %xmm0, %xmm0
-; ALL-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,256,0]
-; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; ALL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; ALL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; ALL-NEXT: vmovd %xmm0, %eax
-; ALL-NEXT: addl %edi, %eax
-; ALL-NEXT: retq
+; CHECK-LABEL: mul_4xi8_cs_exceed:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vpmovsxbd %xmm0, %xmm0
+; CHECK-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,256,0]
+; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vmovd %xmm0, %eax
+; CHECK-NEXT: addl %edi, %eax
+; CHECK-NEXT: retq
entry:
%0 = sext <4 x i8> %a to <4 x i32>
%1 = mul nsw <4 x i32> <i32 0, i32 1, i32 2, i32 256>, %0
@@ -265,24 +297,44 @@ entry:
}
define i32 @mul_64xi8_zc(<64 x i8> %a, i32 %c) {
-; AVXVNNI-LABEL: mul_64xi8_zc:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64]
-; AVXVNNI-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVXVNNI-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVXVNNI-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4
-; AVXVNNI-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3
-; AVXVNNI-NEXT: vpaddd %ymm4, %ymm3, %ymm0
-; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vmovd %xmm0, %eax
-; AVXVNNI-NEXT: addl %edi, %eax
-; AVXVNNI-NEXT: vzeroupper
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: mul_64xi8_zc:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64]
+; AVXVNNI-AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVXVNNI-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4
+; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3
+; AVXVNNI-AVX-NEXT: vpaddd %ymm4, %ymm3, %ymm0
+; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax
+; AVXVNNI-AVX-NEXT: addl %edi, %eax
+; AVXVNNI-AVX-NEXT: vzeroupper
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: mul_64xi8_zc:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVXVNNI-AVX512-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64]
+; AVXVNNI-AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVXVNNI-AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4
+; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3
+; AVXVNNI-AVX512-NEXT: vpaddd %ymm4, %ymm3, %ymm0
+; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax
+; AVXVNNI-AVX512-NEXT: addl %edi, %eax
+; AVXVNNI-AVX512-NEXT: vzeroupper
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512-LABEL: mul_64xi8_zc:
; AVX512: # %bb.0: # %entry
diff --git a/llvm/test/CodeGen/X86/ftrunc.ll b/llvm/test/CodeGen/X86/ftrunc.ll
index 3ed9858..9095fb1 100644
--- a/llvm/test/CodeGen/X86/ftrunc.ll
+++ b/llvm/test/CodeGen/X86/ftrunc.ll
@@ -243,7 +243,7 @@ define <4 x double> @trunc_unsigned_v4f64(<4 x double> %x) #0 {
ret <4 x double> %r
}
-define float @trunc_signed_f32_no_fast_math(float %x) {
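+; nounwind is added so the autogenerated checks stay free of .cfi directives.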
+define float @trunc_signed_f32_no_fast_math(float %x) nounwind {
; SSE-LABEL: trunc_signed_f32_no_fast_math:
; SSE: # %bb.0:
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
@@ -259,14 +259,12 @@ define float @trunc_signed_f32_no_fast_math(float %x) {
; X86-AVX1-LABEL: trunc_signed_f32_no_fast_math:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %eax
-; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
; X86-AVX1-NEXT: flds (%esp)
; X86-AVX1-NEXT: popl %eax
-; X86-AVX1-NEXT: .cfi_def_cfa_offset 4
; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to float
@@ -306,7 +304,7 @@ define float @trunc_signed_f32_nsz(float %x) #0 {
ret float %r
}
-define double @trunc_signed32_f64_no_fast_math(double %x) {
+define double @trunc_signed32_f64_no_fast_math(double %x) nounwind {
; SSE-LABEL: trunc_signed32_f64_no_fast_math:
; SSE: # %bb.0:
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
@@ -322,10 +320,7 @@ define double @trunc_signed32_f64_no_fast_math(double %x) {
; X86-AVX1-LABEL: trunc_signed32_f64_no_fast_math:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %ebp
-; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
-; X86-AVX1-NEXT: .cfi_offset %ebp, -8
; X86-AVX1-NEXT: movl %esp, %ebp
-; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp
; X86-AVX1-NEXT: andl $-8, %esp
; X86-AVX1-NEXT: subl $8, %esp
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -335,7 +330,6 @@ define double @trunc_signed32_f64_no_fast_math(double %x) {
; X86-AVX1-NEXT: fldl (%esp)
; X86-AVX1-NEXT: movl %ebp, %esp
; X86-AVX1-NEXT: popl %ebp
-; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4
; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to double
@@ -377,7 +371,7 @@ define double @trunc_signed32_f64_nsz(double %x) #0 {
ret double %r
}
-define double @trunc_f32_signed32_f64_no_fast_math(float %x) {
+define double @trunc_f32_signed32_f64_no_fast_math(float %x) nounwind {
; SSE-LABEL: trunc_f32_signed32_f64_no_fast_math:
; SSE: # %bb.0:
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
@@ -393,10 +387,7 @@ define double @trunc_f32_signed32_f64_no_fast_math(float %x) {
; X86-AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %ebp
-; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
-; X86-AVX1-NEXT: .cfi_offset %ebp, -8
; X86-AVX1-NEXT: movl %esp, %ebp
-; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp
; X86-AVX1-NEXT: andl $-8, %esp
; X86-AVX1-NEXT: subl $8, %esp
; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -406,7 +397,6 @@ define double @trunc_f32_signed32_f64_no_fast_math(float %x) {
; X86-AVX1-NEXT: fldl (%esp)
; X86-AVX1-NEXT: movl %ebp, %esp
; X86-AVX1-NEXT: popl %ebp
-; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4
; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to double
@@ -445,7 +435,7 @@ define double @trunc_f32_signed32_f64_nsz(float %x) #0 {
ret double %r
}
-define float @trunc_f64_signed32_f32_no_fast_math(double %x) {
+define float @trunc_f64_signed32_f32_no_fast_math(double %x) nounwind {
; SSE-LABEL: trunc_f64_signed32_f32_no_fast_math:
; SSE: # %bb.0:
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
@@ -461,14 +451,12 @@ define float @trunc_f64_signed32_f32_no_fast_math(double %x) {
; X86-AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %eax
-; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
; X86-AVX1-NEXT: flds (%esp)
; X86-AVX1-NEXT: popl %eax
-; X86-AVX1-NEXT: .cfi_def_cfa_offset 4
; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to float
@@ -503,7 +491,7 @@ define float @trunc_f64_signed32_f32_nsz(double %x) #0 {
ret float %r
}
-define double @trunc_signed_f64_no_fast_math(double %x) {
+define double @trunc_signed_f64_no_fast_math(double %x) nounwind {
; SSE-LABEL: trunc_signed_f64_no_fast_math:
; SSE: # %bb.0:
; SSE-NEXT: cvttsd2si %xmm0, %rax
@@ -520,10 +508,7 @@ define double @trunc_signed_f64_no_fast_math(double %x) {
; X86-AVX1-LABEL: trunc_signed_f64_no_fast_math:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %ebp
-; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
-; X86-AVX1-NEXT: .cfi_offset %ebp, -8
; X86-AVX1-NEXT: movl %esp, %ebp
-; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp
; X86-AVX1-NEXT: andl $-8, %esp
; X86-AVX1-NEXT: subl $24, %esp
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -537,7 +522,6 @@ define double @trunc_signed_f64_no_fast_math(double %x) {
; X86-AVX1-NEXT: fldl {{[0-9]+}}(%esp)
; X86-AVX1-NEXT: movl %ebp, %esp
; X86-AVX1-NEXT: popl %ebp
-; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4
; X86-AVX1-NEXT: retl
%i = fptosi double %x to i64
%r = sitofp i64 %i to double
diff --git a/llvm/test/CodeGen/X86/isint.ll b/llvm/test/CodeGen/X86/isint.ll
index 8a56f49..8c11fe1 100644
--- a/llvm/test/CodeGen/X86/isint.ll
+++ b/llvm/test/CodeGen/X86/isint.ll
@@ -1,29 +1,29 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=CHECK64 %s
-; RUN: llc < %s -mtriple=i686-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=CHECK32 %s
+; RUN: llc < %s -mtriple=x86_64-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=X64 %s
+; RUN: llc < %s -mtriple=i686-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=X86 %s
; PR19059
define i32 @isint_return(double %d) nounwind {
-; CHECK64-LABEL: isint_return:
-; CHECK64: # %bb.0:
-; CHECK64-NEXT: cvttpd2dq %xmm0, %xmm1
-; CHECK64-NEXT: cvtdq2pd %xmm1, %xmm1
-; CHECK64-NEXT: cmpeqsd %xmm0, %xmm1
-; CHECK64-NEXT: movq %xmm1, %rax
-; CHECK64-NEXT: andl $1, %eax
-; CHECK64-NEXT: # kill: def $eax killed $eax killed $rax
-; CHECK64-NEXT: retq
+; X64-LABEL: isint_return:
+; X64: # %bb.0:
+; X64-NEXT: cvttpd2dq %xmm0, %xmm1
+; X64-NEXT: cvtdq2pd %xmm1, %xmm1
+; X64-NEXT: cmpeqsd %xmm0, %xmm1
+; X64-NEXT: movq %xmm1, %rax
+; X64-NEXT: andl $1, %eax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: retq
;
-; CHECK32-LABEL: isint_return:
-; CHECK32: # %bb.0:
-; CHECK32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK32-NEXT: cvttpd2dq %xmm0, %xmm1
-; CHECK32-NEXT: cvtdq2pd %xmm1, %xmm1
-; CHECK32-NEXT: cmpeqsd %xmm0, %xmm1
-; CHECK32-NEXT: movd %xmm1, %eax
-; CHECK32-NEXT: andl $1, %eax
-; CHECK32-NEXT: retl
+; X86-LABEL: isint_return:
+; X86: # %bb.0:
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: cvttpd2dq %xmm0, %xmm1
+; X86-NEXT: cvtdq2pd %xmm1, %xmm1
+; X86-NEXT: cmpeqsd %xmm0, %xmm1
+; X86-NEXT: movd %xmm1, %eax
+; X86-NEXT: andl $1, %eax
+; X86-NEXT: retl
%i = fptosi double %d to i32
%e = sitofp i32 %i to double
%c = fcmp oeq double %d, %e
@@ -32,24 +32,24 @@ define i32 @isint_return(double %d) nounwind {
}
define i32 @isint_float_return(float %f) nounwind {
-; CHECK64-LABEL: isint_float_return:
-; CHECK64: # %bb.0:
-; CHECK64-NEXT: cvttps2dq %xmm0, %xmm1
-; CHECK64-NEXT: cvtdq2ps %xmm1, %xmm1
-; CHECK64-NEXT: cmpeqss %xmm0, %xmm1
-; CHECK64-NEXT: movd %xmm1, %eax
-; CHECK64-NEXT: andl $1, %eax
-; CHECK64-NEXT: retq
+; X64-LABEL: isint_float_return:
+; X64: # %bb.0:
+; X64-NEXT: cvttps2dq %xmm0, %xmm1
+; X64-NEXT: cvtdq2ps %xmm1, %xmm1
+; X64-NEXT: cmpeqss %xmm0, %xmm1
+; X64-NEXT: movd %xmm1, %eax
+; X64-NEXT: andl $1, %eax
+; X64-NEXT: retq
;
-; CHECK32-LABEL: isint_float_return:
-; CHECK32: # %bb.0:
-; CHECK32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK32-NEXT: cvttps2dq %xmm0, %xmm1
-; CHECK32-NEXT: cvtdq2ps %xmm1, %xmm1
-; CHECK32-NEXT: cmpeqss %xmm0, %xmm1
-; CHECK32-NEXT: movd %xmm1, %eax
-; CHECK32-NEXT: andl $1, %eax
-; CHECK32-NEXT: retl
+; X86-LABEL: isint_float_return:
+; X86: # %bb.0:
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: cvttps2dq %xmm0, %xmm1
+; X86-NEXT: cvtdq2ps %xmm1, %xmm1
+; X86-NEXT: cmpeqss %xmm0, %xmm1
+; X86-NEXT: movd %xmm1, %eax
+; X86-NEXT: andl $1, %eax
+; X86-NEXT: retl
%i = fptosi float %f to i32
%g = sitofp i32 %i to float
%c = fcmp oeq float %f, %g
@@ -60,32 +60,32 @@ define i32 @isint_float_return(float %f) nounwind {
declare void @foo()
define void @isint_branch(double %d) nounwind {
-; CHECK64-LABEL: isint_branch:
-; CHECK64: # %bb.0:
-; CHECK64-NEXT: cvttpd2dq %xmm0, %xmm1
-; CHECK64-NEXT: cvtdq2pd %xmm1, %xmm1
-; CHECK64-NEXT: ucomisd %xmm1, %xmm0
-; CHECK64-NEXT: jne .LBB2_2
-; CHECK64-NEXT: jp .LBB2_2
-; CHECK64-NEXT: # %bb.1: # %true
-; CHECK64-NEXT: pushq %rax
-; CHECK64-NEXT: callq foo@PLT
-; CHECK64-NEXT: popq %rax
-; CHECK64-NEXT: .LBB2_2: # %false
-; CHECK64-NEXT: retq
+; X64-LABEL: isint_branch:
+; X64: # %bb.0:
+; X64-NEXT: cvttpd2dq %xmm0, %xmm1
+; X64-NEXT: cvtdq2pd %xmm1, %xmm1
+; X64-NEXT: ucomisd %xmm1, %xmm0
+; X64-NEXT: jne .LBB2_2
+; X64-NEXT: jp .LBB2_2
+; X64-NEXT: # %bb.1: # %true
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq foo@PLT
+; X64-NEXT: popq %rax
+; X64-NEXT: .LBB2_2: # %false
+; X64-NEXT: retq
;
-; CHECK32-LABEL: isint_branch:
-; CHECK32: # %bb.0:
-; CHECK32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK32-NEXT: cvttpd2dq %xmm0, %xmm1
-; CHECK32-NEXT: cvtdq2pd %xmm1, %xmm1
-; CHECK32-NEXT: ucomisd %xmm1, %xmm0
-; CHECK32-NEXT: jne .LBB2_2
-; CHECK32-NEXT: jp .LBB2_2
-; CHECK32-NEXT: # %bb.1: # %true
-; CHECK32-NEXT: calll foo@PLT
-; CHECK32-NEXT: .LBB2_2: # %false
-; CHECK32-NEXT: retl
+; X86-LABEL: isint_branch:
+; X86: # %bb.0:
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: cvttpd2dq %xmm0, %xmm1
+; X86-NEXT: cvtdq2pd %xmm1, %xmm1
+; X86-NEXT: ucomisd %xmm1, %xmm0
+; X86-NEXT: jne .LBB2_2
+; X86-NEXT: jp .LBB2_2
+; X86-NEXT: # %bb.1: # %true
+; X86-NEXT: calll foo@PLT
+; X86-NEXT: .LBB2_2: # %false
+; X86-NEXT: retl
%i = fptosi double %d to i32
%e = sitofp i32 %i to double
%c = fcmp oeq double %d, %e
diff --git a/llvm/test/CodeGen/X86/known-signbits-shl.ll b/llvm/test/CodeGen/X86/known-signbits-shl.ll
index 473fecc..57d557d 100644
--- a/llvm/test/CodeGen/X86/known-signbits-shl.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-shl.ll
@@ -137,7 +137,7 @@ define void @computeNumSignBits_shl_zext_vec_3(<2 x i8> %x, ptr %p) nounwind {
; X64-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT: por %xmm2, %xmm1
; X64-NEXT: movdqa %xmm0, %xmm2
-; X64-NEXT: paddw %xmm0, %xmm2
+; X64-NEXT: paddw %xmm2, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm3
; X64-NEXT: psraw $1, %xmm3
; X64-NEXT: pcmpeqw %xmm0, %xmm3
diff --git a/llvm/test/CodeGen/X86/lea-16bit.ll b/llvm/test/CodeGen/X86/lea-16bit.ll
index cec29ab..40da01d 100644
--- a/llvm/test/CodeGen/X86/lea-16bit.ll
+++ b/llvm/test/CodeGen/X86/lea-16bit.ll
@@ -13,7 +13,8 @@ define i16 @lea16bit(i16 %in) {
; NDD-LABEL: lea16bit:
; NDD: # %bb.0:
; NDD-NEXT: # kill: def $edi killed $edi def $rdi
-; NDD-NEXT: leaw 1(%rdi,%rdi), %ax
+; NDD-NEXT: leal 1(%rdi,%rdi), %eax
+; NDD-NEXT: # kill: def $ax killed $ax killed $eax
; NDD-NEXT: retq
%shl = shl i16 %in, 1
%or = or i16 %shl, 1
diff --git a/llvm/test/CodeGen/X86/lea-8bit.ll b/llvm/test/CodeGen/X86/lea-8bit.ll
index 98222df..fc295f7 100644
--- a/llvm/test/CodeGen/X86/lea-8bit.ll
+++ b/llvm/test/CodeGen/X86/lea-8bit.ll
@@ -14,7 +14,8 @@ define i8 @lea8bit(i8 %in) {
; NDD-LABEL: lea8bit:
; NDD: # %bb.0:
; NDD-NEXT: # kill: def $edi killed $edi def $rdi
-; NDD-NEXT: leab 1(%rdi,%rdi), %al
+; NDD-NEXT: leal 1(%rdi,%rdi), %eax
+; NDD-NEXT: # kill: def $al killed $al killed $eax
; NDD-NEXT: retq
%shl = shl i8 %in, 1
%or = or i8 %shl, 1
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index 4e6f666..4cde581 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -4806,9 +4806,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0
; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
-; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0
; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; X64-KNL-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
; X64-KNL-NEXT: vmovaps %zmm1, %zmm0
; X64-KNL-NEXT: retq
@@ -4830,9 +4829,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0
; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0
; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; X64-SKX-SMALL-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0
; X64-SKX-SMALL-NEXT: retq
@@ -4842,10 +4840,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0
; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0
; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; X64-SKX-LARGE-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0
; X64-SKX-LARGE-NEXT: retq
@@ -4875,9 +4872,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0
; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
-; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0
; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; X64-KNL-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
; X64-KNL-NEXT: vmovaps %zmm1, %zmm0
; X64-KNL-NEXT: retq
@@ -4899,9 +4895,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0
; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0
; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; X64-SKX-SMALL-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0
; X64-SKX-SMALL-NEXT: retq
@@ -4911,10 +4906,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0
; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0
; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; X64-SKX-LARGE-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0
; X64-SKX-LARGE-NEXT: retq
@@ -4944,9 +4938,8 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair(
; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0
; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
-; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0
-; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm2
+; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0
+; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2
; X64-KNL-NEXT: kmovw %k1, %k2
; X64-KNL-NEXT: vmovaps %zmm1, %zmm0
; X64-KNL-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
@@ -4972,9 +4965,8 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair(
; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0
; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0
-; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm2
+; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0
+; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2
; X64-SKX-SMALL-NEXT: kmovw %k1, %k2
; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0
; X64-SKX-SMALL-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
@@ -4986,10 +4978,9 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair(
; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0
; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0
; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm2
+; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm2
; X64-SKX-LARGE-NEXT: kmovw %k1, %k2
; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0
; X64-SKX-LARGE-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
diff --git a/llvm/test/CodeGen/X86/negative-sin.ll b/llvm/test/CodeGen/X86/negative-sin.ll
index f24507d..4836da2 100644
--- a/llvm/test/CodeGen/X86/negative-sin.ll
+++ b/llvm/test/CodeGen/X86/negative-sin.ll
@@ -82,18 +82,13 @@ define double @semi_strict2(double %e) nounwind {
ret double %h
}
-; FIXME:
-; Auto-upgrade function attribute to IR-level fast-math-flags.
-
-define double @fn_attr(double %e) nounwind #0 {
-; CHECK-LABEL: fn_attr:
+define double @nsz_flag(double %e) nounwind {
+; CHECK-LABEL: nsz_flag:
; CHECK: # %bb.0:
; CHECK-NEXT: jmp sin@PLT # TAILCALL
- %f = fsub double 0.0, %e
- %g = call double @sin(double %f) readonly
- %h = fsub double 0.0, %g
+ %f = fsub nsz double 0.0, %e
+ %g = call nsz double @sin(double %f) readonly
+ %h = fsub nsz double 0.0, %g
ret double %h
}
-attributes #0 = { "unsafe-fp-math"="true" "no-signed-zeros-fp-math"="true" }
-
diff --git a/llvm/test/CodeGen/X86/oddsubvector.ll b/llvm/test/CodeGen/X86/oddsubvector.ll
index f539830..5df1867 100644
--- a/llvm/test/CodeGen/X86/oddsubvector.ll
+++ b/llvm/test/CodeGen/X86/oddsubvector.ll
@@ -155,10 +155,10 @@ define <16 x i32> @PR42819(ptr %a0) {
define void @PR42833() {
; SSE2-LABEL: PR42833:
; SSE2: # %bb.0:
+; SSE2-NEXT: movl b(%rip), %eax
; SSE2-NEXT: movdqa c+144(%rip), %xmm2
; SSE2-NEXT: movdqa c+128(%rip), %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: addl b(%rip), %eax
+; SSE2-NEXT: addl c+128(%rip), %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: paddd %xmm0, %xmm3
@@ -166,7 +166,7 @@ define void @PR42833() {
; SSE2-NEXT: psubd %xmm2, %xmm4
; SSE2-NEXT: paddd %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: paddd %xmm0, %xmm5
+; SSE2-NEXT: paddd %xmm5, %xmm5
; SSE2-NEXT: movss {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3]
; SSE2-NEXT: movdqa %xmm2, c+144(%rip)
; SSE2-NEXT: movaps %xmm5, c+128(%rip)
@@ -191,17 +191,17 @@ define void @PR42833() {
;
; SSE42-LABEL: PR42833:
; SSE42: # %bb.0:
+; SSE42-NEXT: movl b(%rip), %eax
; SSE42-NEXT: movdqa c+144(%rip), %xmm1
; SSE42-NEXT: movdqa c+128(%rip), %xmm0
-; SSE42-NEXT: movd %xmm0, %eax
-; SSE42-NEXT: addl b(%rip), %eax
+; SSE42-NEXT: addl c+128(%rip), %eax
; SSE42-NEXT: movd %eax, %xmm2
; SSE42-NEXT: paddd %xmm0, %xmm2
; SSE42-NEXT: movdqa d+144(%rip), %xmm3
; SSE42-NEXT: psubd %xmm1, %xmm3
; SSE42-NEXT: paddd %xmm1, %xmm1
; SSE42-NEXT: movdqa %xmm0, %xmm4
-; SSE42-NEXT: paddd %xmm0, %xmm4
+; SSE42-NEXT: paddd %xmm4, %xmm4
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1],xmm4[2,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm1, c+144(%rip)
; SSE42-NEXT: movdqa %xmm4, c+128(%rip)
diff --git a/llvm/test/CodeGen/X86/pr62286.ll b/llvm/test/CodeGen/X86/pr62286.ll
index ce03f8f..161e965 100644
--- a/llvm/test/CodeGen/X86/pr62286.ll
+++ b/llvm/test/CodeGen/X86/pr62286.ll
@@ -26,27 +26,33 @@ define i64 @PR62286(i32 %a) {
; AVX1-LABEL: PR62286:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
-; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
+; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6,7]
; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR62286:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
-; AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm1
-; AVX2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
+; AVX2-NEXT: vpaddd %ymm0, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
@@ -59,12 +65,12 @@ define i64 @PR62286(i32 %a) {
; AVX512-LABEL: PR62286:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovd %edi, %xmm0
-; AVX512-NEXT: movb $8, %al
+; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
+; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm1
+; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: movw $4369, %ax # imm = 0x1111
; AVX512-NEXT: kmovd %eax, %k1
-; AVX512-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z}
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm0
-; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX512-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
diff --git a/llvm/test/CodeGen/X86/pr74736.ll b/llvm/test/CodeGen/X86/pr74736.ll
index ceccee0..5895526 100644
--- a/llvm/test/CodeGen/X86/pr74736.ll
+++ b/llvm/test/CodeGen/X86/pr74736.ll
@@ -6,8 +6,8 @@ define void @main(<16 x i32> %0, i32 %1) {
; SSE-LABEL: main:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movd %edi, %xmm4
-; SSE-NEXT: movss {{.*#+}} xmm0 = [1,0,0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm4[1,0]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [0,1,0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,0]
; SSE-NEXT: paddd %xmm0, %xmm0
; SSE-NEXT: paddd %xmm1, %xmm1
; SSE-NEXT: paddd %xmm3, %xmm3
@@ -32,20 +32,20 @@ define void @main(<16 x i32> %0, i32 %1) {
; AVX-LABEL: main:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm0[1,2,3]
; AVX-NEXT: movl $1, %eax
; AVX-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
; AVX-NEXT: vpinsrd $3, %edi, %xmm2, %xmm2
-; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vpaddd %ymm0, %ymm0, %ymm0
-; AVX-NEXT: vpaddd %ymm1, %ymm1, %ymm1
-; AVX-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,1,3,3,5,5,7]
-; AVX-NEXT: vpermd %ymm0, %ymm2, %ymm2
+; AVX-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vpaddd %ymm2, %ymm2, %ymm2
+; AVX-NEXT: vpaddd %ymm1, %ymm1, %ymm3
; AVX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,3,3,3,7,7,7,7]
-; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,5,7]
+; AVX-NEXT: vpaddd %ymm0, %ymm0, %ymm0
+; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm3[0,1,1,3,4,5,5,7]
; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,1,1,3,3,5,5,7]
+; AVX-NEXT: vpermd %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vpxor %ymm0, %ymm1, %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
diff --git a/llvm/test/CodeGen/X86/setoeq.ll b/llvm/test/CodeGen/X86/setoeq.ll
index f0addf4..131e279 100644
--- a/llvm/test/CodeGen/X86/setoeq.ll
+++ b/llvm/test/CodeGen/X86/setoeq.ll
@@ -1,40 +1,532 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s
-
-define zeroext i8 @t(double %x) nounwind readnone {
-; CHECK-LABEL: t:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: cvttpd2dq %xmm0, %xmm1
-; CHECK-NEXT: cvtdq2pd %xmm1, %xmm1
-; CHECK-NEXT: cmpeqsd %xmm0, %xmm1
-; CHECK-NEXT: movd %xmm1, %eax
-; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: # kill: def $al killed $al killed $eax
-; CHECK-NEXT: retl
-entry:
- %0 = fptosi double %x to i32 ; <i32> [#uses=1]
- %1 = sitofp i32 %0 to double ; <double> [#uses=1]
- %2 = fcmp oeq double %1, %x ; <i1> [#uses=1]
- %retval12 = zext i1 %2 to i8 ; <i8> [#uses=1]
- ret i8 %retval12
-}
-
-define zeroext i8 @u(double %x) nounwind readnone {
-; CHECK-LABEL: u:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: cvttpd2dq %xmm0, %xmm1
-; CHECK-NEXT: cvtdq2pd %xmm1, %xmm1
-; CHECK-NEXT: cmpneqsd %xmm0, %xmm1
-; CHECK-NEXT: movd %xmm1, %eax
-; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: # kill: def $al killed $al killed $eax
-; CHECK-NEXT: retl
-entry:
- %0 = fptosi double %x to i32 ; <i32> [#uses=1]
- %1 = sitofp i32 %0 to double ; <double> [#uses=1]
- %2 = fcmp une double %1, %x ; <i1> [#uses=1]
- %retval12 = zext i1 %2 to i8 ; <i8> [#uses=1]
+; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=i686-- -mattr=+avx | FileCheck %s --check-prefixes=AVX
+; RUN: llc < %s -mtriple=i686-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX512
+
+define zeroext i8 @oeq_f64_i32(double %x) nounwind readnone {
+; SSE-LABEL: oeq_f64_i32:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: cvttpd2dq %xmm0, %xmm1
+; SSE-NEXT: cvtdq2pd %xmm1, %xmm1
+; SSE-NEXT: cmpeqsd %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %eax
+; SSE-NEXT: andl $1, %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
+; SSE-NEXT: retl
+;
+; AVX-LABEL: oeq_f64_i32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vcvttpd2dq %xmm0, %xmm1
+; AVX-NEXT: vcvtdq2pd %xmm1, %xmm1
+; AVX-NEXT: vcmpeqsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: andl $1, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: retl
+;
+; AVX512-LABEL: oeq_f64_i32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: vcvttpd2dq %xmm0, %xmm1
+; AVX512-NEXT: vcvtdq2pd %xmm1, %xmm1
+; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: retl
+entry:
+ %0 = fptosi double %x to i32
+ %1 = sitofp i32 %0 to double
+ %2 = fcmp oeq double %1, %x
+ %retval12 = zext i1 %2 to i8
+ ret i8 %retval12
+}
+
+define zeroext i8 @oeq_f64_u32(double %x) nounwind readnone {
+; SSE-LABEL: oeq_f64_u32:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: cvttsd2si %xmm0, %eax
+; SSE-NEXT: movl %eax, %ecx
+; SSE-NEXT: sarl $31, %ecx
+; SSE-NEXT: movapd %xmm0, %xmm1
+; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; SSE-NEXT: cvttsd2si %xmm1, %edx
+; SSE-NEXT: andl %ecx, %edx
+; SSE-NEXT: orl %eax, %edx
+; SSE-NEXT: movd %edx, %xmm1
+; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; SSE-NEXT: cmpeqsd %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %eax
+; SSE-NEXT: andl $1, %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
+; SSE-NEXT: retl
+;
+; AVX-LABEL: oeq_f64_u32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vcvttsd2si %xmm0, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: sarl $31, %ecx
+; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
+; AVX-NEXT: vcvttsd2si %xmm1, %edx
+; AVX-NEXT: andl %ecx, %edx
+; AVX-NEXT: orl %eax, %edx
+; AVX-NEXT: vmovd %edx, %xmm1
+; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
+; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
+; AVX-NEXT: vcmpeqsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: andl $1, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: retl
+;
+; AVX512-LABEL: oeq_f64_u32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: vcvttsd2usi %xmm0, %eax
+; AVX512-NEXT: vcvtusi2sd %eax, %xmm7, %xmm1
+; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: retl
+entry:
+ %0 = fptoui double %x to i32
+ %1 = uitofp i32 %0 to double
+ %2 = fcmp oeq double %1, %x
+ %retval12 = zext i1 %2 to i8
+ ret i8 %retval12
+}
+
+define zeroext i8 @oeq_f64_i64(double %x) nounwind readnone {
+; SSE-LABEL: oeq_f64_i64:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: pushl %ebp
+; SSE-NEXT: movl %esp, %ebp
+; SSE-NEXT: andl $-8, %esp
+; SSE-NEXT: subl $32, %esp
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
+; SSE-NEXT: fldl {{[0-9]+}}(%esp)
+; SSE-NEXT: fnstcw {{[0-9]+}}(%esp)
+; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE-NEXT: orl $3072, %eax # imm = 0xC00
+; SSE-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; SSE-NEXT: fldcw {{[0-9]+}}(%esp)
+; SSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; SSE-NEXT: fldcw {{[0-9]+}}(%esp)
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movlps %xmm1, {{[0-9]+}}(%esp)
+; SSE-NEXT: fildll {{[0-9]+}}(%esp)
+; SSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; SSE-NEXT: cmpeqsd {{[0-9]+}}(%esp), %xmm0
+; SSE-NEXT: movd %xmm0, %eax
+; SSE-NEXT: andl $1, %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
+; SSE-NEXT: movl %ebp, %esp
+; SSE-NEXT: popl %ebp
+; SSE-NEXT: retl
+;
+; AVX-LABEL: oeq_f64_i64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushl %ebp
+; AVX-NEXT: movl %esp, %ebp
+; AVX-NEXT: andl $-8, %esp
+; AVX-NEXT: subl $24, %esp
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd %xmm0, (%esp)
+; AVX-NEXT: fldl (%esp)
+; AVX-NEXT: fisttpll (%esp)
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
+; AVX-NEXT: fildll {{[0-9]+}}(%esp)
+; AVX-NEXT: fstpl {{[0-9]+}}(%esp)
+; AVX-NEXT: vcmpeqsd {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: andl $1, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: movl %ebp, %esp
+; AVX-NEXT: popl %ebp
+; AVX-NEXT: retl
+;
+; AVX512-LABEL: oeq_f64_i64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: vcvttpd2qq %xmm0, %xmm1
+; AVX512-NEXT: vcvtqq2pd %ymm1, %ymm1
+; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retl
+entry:
+ %0 = fptosi double %x to i64
+ %1 = sitofp i64 %0 to double
+ %2 = fcmp oeq double %1, %x
+ %retval12 = zext i1 %2 to i8
+ ret i8 %retval12
+}
+
+define zeroext i8 @oeq_f64_u64(double %x) nounwind readnone {
+; SSE-LABEL: oeq_f64_u64:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: pushl %ebp
+; SSE-NEXT: movl %esp, %ebp
+; SSE-NEXT: andl $-8, %esp
+; SSE-NEXT: subl $16, %esp
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
+; SSE-NEXT: ucomisd %xmm0, %xmm1
+; SSE-NEXT: jbe .LBB3_2
+; SSE-NEXT: # %bb.1: # %entry
+; SSE-NEXT: xorpd %xmm1, %xmm1
+; SSE-NEXT: .LBB3_2: # %entry
+; SSE-NEXT: movapd %xmm0, %xmm2
+; SSE-NEXT: subsd %xmm1, %xmm2
+; SSE-NEXT: movsd %xmm2, {{[0-9]+}}(%esp)
+; SSE-NEXT: setbe %al
+; SSE-NEXT: fldl {{[0-9]+}}(%esp)
+; SSE-NEXT: fnstcw {{[0-9]+}}(%esp)
+; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; SSE-NEXT: orl $3072, %ecx # imm = 0xC00
+; SSE-NEXT: movw %cx, {{[0-9]+}}(%esp)
+; SSE-NEXT: fldcw {{[0-9]+}}(%esp)
+; SSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; SSE-NEXT: fldcw {{[0-9]+}}(%esp)
+; SSE-NEXT: movzbl %al, %eax
+; SSE-NEXT: shll $31, %eax
+; SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
+; SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm1
+; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
+; SSE-NEXT: addsd %xmm2, %xmm1
+; SSE-NEXT: cmpeqsd %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %eax
+; SSE-NEXT: andl $1, %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
+; SSE-NEXT: movl %ebp, %esp
+; SSE-NEXT: popl %ebp
+; SSE-NEXT: retl
+;
+; AVX-LABEL: oeq_f64_u64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushl %ebp
+; AVX-NEXT: movl %esp, %ebp
+; AVX-NEXT: andl $-8, %esp
+; AVX-NEXT: subl $8, %esp
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
+; AVX-NEXT: vucomisd %xmm0, %xmm1
+; AVX-NEXT: jbe .LBB3_2
+; AVX-NEXT: # %bb.1: # %entry
+; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: .LBB3_2: # %entry
+; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vmovsd %xmm1, (%esp)
+; AVX-NEXT: fldl (%esp)
+; AVX-NEXT: fisttpll (%esp)
+; AVX-NEXT: setbe %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: shll $31, %eax
+; AVX-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+; AVX-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
+; AVX-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX-NEXT: vaddsd %xmm1, %xmm2, %xmm1
+; AVX-NEXT: vcmpeqsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: andl $1, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: movl %ebp, %esp
+; AVX-NEXT: popl %ebp
+; AVX-NEXT: retl
+;
+; AVX512-LABEL: oeq_f64_u64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: vcvttpd2uqq %xmm0, %xmm1
+; AVX512-NEXT: vcvtuqq2pd %ymm1, %ymm1
+; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retl
+entry:
+ %0 = fptoui double %x to i64
+ %1 = uitofp i64 %0 to double
+ %2 = fcmp oeq double %1, %x
+ %retval12 = zext i1 %2 to i8
+ ret i8 %retval12
+}
+
+define zeroext i8 @une_f64_i32(double %x) nounwind readnone {
+; SSE-LABEL: une_f64_i32:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: cvttpd2dq %xmm0, %xmm1
+; SSE-NEXT: cvtdq2pd %xmm1, %xmm1
+; SSE-NEXT: cmpneqsd %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %eax
+; SSE-NEXT: andl $1, %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
+; SSE-NEXT: retl
+;
+; AVX-LABEL: une_f64_i32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vcvttpd2dq %xmm0, %xmm1
+; AVX-NEXT: vcvtdq2pd %xmm1, %xmm1
+; AVX-NEXT: vcmpneqsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: andl $1, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: retl
+;
+; AVX512-LABEL: une_f64_i32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: vcvttpd2dq %xmm0, %xmm1
+; AVX512-NEXT: vcvtdq2pd %xmm1, %xmm1
+; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: retl
+entry:
+ %0 = fptosi double %x to i32
+ %1 = sitofp i32 %0 to double
+ %2 = fcmp une double %1, %x
+ %retval12 = zext i1 %2 to i8
+ ret i8 %retval12
+}
+
+define zeroext i8 @une_f64_u32(double %x) nounwind readnone {
+; SSE-LABEL: une_f64_u32:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: cvttsd2si %xmm0, %eax
+; SSE-NEXT: movl %eax, %ecx
+; SSE-NEXT: sarl $31, %ecx
+; SSE-NEXT: movapd %xmm0, %xmm1
+; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; SSE-NEXT: cvttsd2si %xmm1, %edx
+; SSE-NEXT: andl %ecx, %edx
+; SSE-NEXT: orl %eax, %edx
+; SSE-NEXT: movd %edx, %xmm1
+; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; SSE-NEXT: cmpneqsd %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %eax
+; SSE-NEXT: andl $1, %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
+; SSE-NEXT: retl
+;
+; AVX-LABEL: une_f64_u32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vcvttsd2si %xmm0, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: sarl $31, %ecx
+; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
+; AVX-NEXT: vcvttsd2si %xmm1, %edx
+; AVX-NEXT: andl %ecx, %edx
+; AVX-NEXT: orl %eax, %edx
+; AVX-NEXT: vmovd %edx, %xmm1
+; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
+; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
+; AVX-NEXT: vcmpneqsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: andl $1, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: retl
+;
+; AVX512-LABEL: une_f64_u32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: vcvttsd2usi %xmm0, %eax
+; AVX512-NEXT: vcvtusi2sd %eax, %xmm7, %xmm1
+; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: retl
+entry:
+ %0 = fptoui double %x to i32
+ %1 = uitofp i32 %0 to double
+ %2 = fcmp une double %1, %x
+ %retval12 = zext i1 %2 to i8
+ ret i8 %retval12
+}
+
+define zeroext i8 @une_f64_i64(double %x) nounwind readnone {
+; SSE-LABEL: une_f64_i64:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: pushl %ebp
+; SSE-NEXT: movl %esp, %ebp
+; SSE-NEXT: andl $-8, %esp
+; SSE-NEXT: subl $32, %esp
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
+; SSE-NEXT: fldl {{[0-9]+}}(%esp)
+; SSE-NEXT: fnstcw {{[0-9]+}}(%esp)
+; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE-NEXT: orl $3072, %eax # imm = 0xC00
+; SSE-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; SSE-NEXT: fldcw {{[0-9]+}}(%esp)
+; SSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; SSE-NEXT: fldcw {{[0-9]+}}(%esp)
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movlps %xmm1, {{[0-9]+}}(%esp)
+; SSE-NEXT: fildll {{[0-9]+}}(%esp)
+; SSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; SSE-NEXT: cmpneqsd {{[0-9]+}}(%esp), %xmm0
+; SSE-NEXT: movd %xmm0, %eax
+; SSE-NEXT: andl $1, %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
+; SSE-NEXT: movl %ebp, %esp
+; SSE-NEXT: popl %ebp
+; SSE-NEXT: retl
+;
+; AVX-LABEL: une_f64_i64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushl %ebp
+; AVX-NEXT: movl %esp, %ebp
+; AVX-NEXT: andl $-8, %esp
+; AVX-NEXT: subl $24, %esp
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd %xmm0, (%esp)
+; AVX-NEXT: fldl (%esp)
+; AVX-NEXT: fisttpll (%esp)
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
+; AVX-NEXT: fildll {{[0-9]+}}(%esp)
+; AVX-NEXT: fstpl {{[0-9]+}}(%esp)
+; AVX-NEXT: vcmpneqsd {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: andl $1, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: movl %ebp, %esp
+; AVX-NEXT: popl %ebp
+; AVX-NEXT: retl
+;
+; AVX512-LABEL: une_f64_i64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: vcvttpd2qq %xmm0, %xmm1
+; AVX512-NEXT: vcvtqq2pd %ymm1, %ymm1
+; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retl
+entry:
+ %0 = fptosi double %x to i64
+ %1 = sitofp i64 %0 to double
+ %2 = fcmp une double %1, %x
+ %retval12 = zext i1 %2 to i8
+ ret i8 %retval12
+}
+
+define zeroext i8 @une_f64_u64(double %x) nounwind readnone {
+; SSE-LABEL: une_f64_u64:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: pushl %ebp
+; SSE-NEXT: movl %esp, %ebp
+; SSE-NEXT: andl $-8, %esp
+; SSE-NEXT: subl $16, %esp
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
+; SSE-NEXT: ucomisd %xmm0, %xmm1
+; SSE-NEXT: jbe .LBB7_2
+; SSE-NEXT: # %bb.1: # %entry
+; SSE-NEXT: xorpd %xmm1, %xmm1
+; SSE-NEXT: .LBB7_2: # %entry
+; SSE-NEXT: movapd %xmm0, %xmm2
+; SSE-NEXT: subsd %xmm1, %xmm2
+; SSE-NEXT: movsd %xmm2, {{[0-9]+}}(%esp)
+; SSE-NEXT: setbe %al
+; SSE-NEXT: fldl {{[0-9]+}}(%esp)
+; SSE-NEXT: fnstcw {{[0-9]+}}(%esp)
+; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; SSE-NEXT: orl $3072, %ecx # imm = 0xC00
+; SSE-NEXT: movw %cx, {{[0-9]+}}(%esp)
+; SSE-NEXT: fldcw {{[0-9]+}}(%esp)
+; SSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; SSE-NEXT: fldcw {{[0-9]+}}(%esp)
+; SSE-NEXT: movzbl %al, %eax
+; SSE-NEXT: shll $31, %eax
+; SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
+; SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm1
+; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
+; SSE-NEXT: addsd %xmm2, %xmm1
+; SSE-NEXT: cmpneqsd %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %eax
+; SSE-NEXT: andl $1, %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
+; SSE-NEXT: movl %ebp, %esp
+; SSE-NEXT: popl %ebp
+; SSE-NEXT: retl
+;
+; AVX-LABEL: une_f64_u64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushl %ebp
+; AVX-NEXT: movl %esp, %ebp
+; AVX-NEXT: andl $-8, %esp
+; AVX-NEXT: subl $8, %esp
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
+; AVX-NEXT: vucomisd %xmm0, %xmm1
+; AVX-NEXT: jbe .LBB7_2
+; AVX-NEXT: # %bb.1: # %entry
+; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: .LBB7_2: # %entry
+; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vmovsd %xmm1, (%esp)
+; AVX-NEXT: fldl (%esp)
+; AVX-NEXT: fisttpll (%esp)
+; AVX-NEXT: setbe %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: shll $31, %eax
+; AVX-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+; AVX-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
+; AVX-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX-NEXT: vaddsd %xmm1, %xmm2, %xmm1
+; AVX-NEXT: vcmpneqsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: andl $1, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: movl %ebp, %esp
+; AVX-NEXT: popl %ebp
+; AVX-NEXT: retl
+;
+; AVX512-LABEL: une_f64_u64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: vcvttpd2uqq %xmm0, %xmm1
+; AVX512-NEXT: vcvtuqq2pd %ymm1, %ymm1
+; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retl
+entry:
+ %0 = fptoui double %x to i64
+ %1 = uitofp i64 %0 to double
+ %2 = fcmp une double %1, %x
+ %retval12 = zext i1 %2 to i8
ret i8 %retval12
}
diff --git a/llvm/test/CodeGen/X86/shift-i512.ll b/llvm/test/CodeGen/X86/shift-i512.ll
index 756019d..03b61d9 100644
--- a/llvm/test/CodeGen/X86/shift-i512.ll
+++ b/llvm/test/CodeGen/X86/shift-i512.ll
@@ -10,7 +10,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) {
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: valignq {{.*#+}} zmm1 = zmm0[3,4,5,6,7,0,1,2]
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512VL-NEXT: vpsllq $1, %xmm0, %xmm3
+; AVX512VL-NEXT: vpaddq %xmm0, %xmm0, %xmm3
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; AVX512VL-NEXT: vpsrlq $63, %xmm4, %xmm4
; AVX512VL-NEXT: vpaddq %xmm2, %xmm2, %xmm2
@@ -34,7 +34,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) {
; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512VBMI-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; AVX512VBMI-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3
-; AVX512VBMI-NEXT: vpsllq $1, %xmm0, %xmm4
+; AVX512VBMI-NEXT: vpaddq %xmm0, %xmm0, %xmm4
; AVX512VBMI-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
; AVX512VBMI-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; AVX512VBMI-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
@@ -51,7 +51,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) {
; ZNVER4-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; ZNVER4-NEXT: vextracti128 $1, %ymm0, %xmm2
; ZNVER4-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; ZNVER4-NEXT: vpsllq $1, %xmm0, %xmm4
+; ZNVER4-NEXT: vpaddq %xmm0, %xmm0, %xmm4
; ZNVER4-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; ZNVER4-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3
; ZNVER4-NEXT: vextracti64x4 $1, %zmm0, %ymm2
diff --git a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
index 3f48b22..a48be03 100644
--- a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
@@ -5791,20 +5791,20 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x i64> @test_mm_slli_epi16(<2 x i64> %a0) {
; SSE-LABEL: test_mm_slli_epi16:
; SSE: # %bb.0:
-; SSE-NEXT: psllw $1, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x01]
+; SSE-NEXT: psllw $2, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x02]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_slli_epi16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpsllw $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x01]
+; AVX1-NEXT: vpsllw $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x02]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_slli_epi16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpsllw $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x01]
+; AVX512-NEXT: vpsllw $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x02]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
- %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 1)
+ %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 2)
%bc = bitcast <8 x i16> %res to <2 x i64>
ret <2 x i64> %bc
}
@@ -5813,20 +5813,20 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone
define <2 x i64> @test_mm_slli_epi32(<2 x i64> %a0) {
; SSE-LABEL: test_mm_slli_epi32:
; SSE: # %bb.0:
-; SSE-NEXT: pslld $1, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x01]
+; SSE-NEXT: pslld $2, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x02]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_slli_epi32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpslld $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x01]
+; AVX1-NEXT: vpslld $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x02]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_slli_epi32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpslld $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x01]
+; AVX512-NEXT: vpslld $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x02]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
- %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 1)
+ %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 2)
%bc = bitcast <4 x i32> %res to <2 x i64>
ret <2 x i64> %bc
}
@@ -5835,19 +5835,19 @@ declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone
define <2 x i64> @test_mm_slli_epi64(<2 x i64> %a0) {
; SSE-LABEL: test_mm_slli_epi64:
; SSE: # %bb.0:
-; SSE-NEXT: psllq $1, %xmm0 # encoding: [0x66,0x0f,0x73,0xf0,0x01]
+; SSE-NEXT: psllq $2, %xmm0 # encoding: [0x66,0x0f,0x73,0xf0,0x02]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_slli_epi64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpsllq $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf0,0x01]
+; AVX1-NEXT: vpsllq $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf0,0x02]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_slli_epi64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpsllq $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x01]
+; AVX512-NEXT: vpsllq $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x02]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 1)
+ %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 2)
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/vec_shift6.ll b/llvm/test/CodeGen/X86/vec_shift6.ll
index 71e659c..219e32c 100644
--- a/llvm/test/CodeGen/X86/vec_shift6.ll
+++ b/llvm/test/CodeGen/X86/vec_shift6.ll
@@ -28,14 +28,14 @@ define <8 x i16> @test2(<8 x i16> %a) {
; SSE2-LABEL: test2:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: paddw %xmm0, %xmm1
+; SSE2-NEXT: paddw %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test2:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: paddw %xmm0, %xmm1
+; SSE41-NEXT: paddw %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
@@ -56,7 +56,7 @@ define <4 x i32> @test3(<4 x i32> %a) {
; SSE2-LABEL: test3:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm1
; SSE2-NEXT: pslld $2, %xmm0
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
@@ -81,14 +81,14 @@ define <4 x i32> @test4(<4 x i32> %a) {
; SSE2-LABEL: test4:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test4:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: paddd %xmm0, %xmm1
+; SSE41-NEXT: paddd %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll b/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll
index 23d22e7..3f92d2b 100644
--- a/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll
+++ b/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -enable-unsafe-fp-math -enable-no-signed-zeros-fp-math -mtriple=x86_64-unknown-unknown | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
; Make sure that vectors get the same benefits as scalars when using unsafe-fp-math.
@@ -18,7 +18,7 @@ define <4 x float> @vec_fneg(<4 x float> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT: retq
- %sub = fsub <4 x float> zeroinitializer, %x
+ %sub = fsub nsz <4 x float> zeroinitializer, %x
ret <4 x float> %sub
}
diff --git a/llvm/test/CodeGen/X86/vector-gep.ll b/llvm/test/CodeGen/X86/vector-gep.ll
index 5c48559..b4cffcd 100644
--- a/llvm/test/CodeGen/X86/vector-gep.ll
+++ b/llvm/test/CodeGen/X86/vector-gep.ll
@@ -122,91 +122,87 @@ define <64 x ptr> @AGEP9(ptr %param, <64 x i32> %off) nounwind {
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: andl $-32, %esp
; CHECK-NEXT: subl $160, %esp
-; CHECK-NEXT: vmovdqa %ymm2, %ymm5
-; CHECK-NEXT: vmovdqa %ymm1, %ymm3
-; CHECK-NEXT: vmovdqa %ymm0, %ymm1
-; CHECK-NEXT: vmovdqa 72(%ebp), %ymm0
-; CHECK-NEXT: vmovdqa 40(%ebp), %ymm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm4
-; CHECK-NEXT: vbroadcastss 12(%ebp), %xmm7
-; CHECK-NEXT: vpaddd %xmm4, %xmm7, %xmm4
-; CHECK-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2
-; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2
-; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm3
+; CHECK-NEXT: vbroadcastss 12(%ebp), %xmm5
+; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3
+; CHECK-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vmovdqa 104(%ebp), %ymm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2
-; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vmovdqa 136(%ebp), %ymm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2
-; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vmovdqa 168(%ebp), %ymm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2
-; CHECK-NEXT: vmovdqa %xmm2, (%esp) # 16-byte Spill
-; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vmovdqa 40(%ebp), %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm2
-; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0
-; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm1
-; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT: vpaddd %xmm1, %xmm7, %xmm1
-; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm6
-; CHECK-NEXT: vpaddd %xmm6, %xmm7, %xmm6
-; CHECK-NEXT: vextractf128 $1, %ymm3, %xmm3
-; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3
-; CHECK-NEXT: vpaddd %xmm3, %xmm7, %xmm3
-; CHECK-NEXT: vmovdqa %ymm5, %ymm4
-; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm5
-; CHECK-NEXT: vpaddd %xmm5, %xmm7, %xmm5
-; CHECK-NEXT: vextractf128 $1, %ymm4, %xmm4
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqa 56(%ebp), %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqa 72(%ebp), %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, (%esp) # 16-byte Spill
+; CHECK-NEXT: vmovdqa 88(%ebp), %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm2
+; CHECK-NEXT: vmovdqa 104(%ebp), %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm1
+; CHECK-NEXT: vmovdqa 120(%ebp), %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vmovdqa 136(%ebp), %xmm6
+; CHECK-NEXT: vpaddd %xmm6, %xmm6, %xmm6
+; CHECK-NEXT: vpaddd %xmm6, %xmm5, %xmm6
+; CHECK-NEXT: vmovdqa 152(%ebp), %xmm7
+; CHECK-NEXT: vpaddd %xmm7, %xmm7, %xmm7
+; CHECK-NEXT: vpaddd %xmm7, %xmm5, %xmm7
+; CHECK-NEXT: vmovdqa 168(%ebp), %xmm4
; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm4
-; CHECK-NEXT: vpaddd %xmm4, %xmm7, %xmm4
+; CHECK-NEXT: vpaddd %xmm4, %xmm5, %xmm4
+; CHECK-NEXT: vmovdqa 184(%ebp), %xmm3
+; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3
; CHECK-NEXT: movl 8(%ebp), %eax
-; CHECK-NEXT: vmovdqa %xmm4, 80(%eax)
-; CHECK-NEXT: vmovdqa %xmm5, 64(%eax)
-; CHECK-NEXT: vmovdqa %xmm3, 48(%eax)
-; CHECK-NEXT: vmovdqa %xmm6, 32(%eax)
-; CHECK-NEXT: vmovdqa %xmm1, 16(%eax)
-; CHECK-NEXT: vmovdqa %xmm0, (%eax)
-; CHECK-NEXT: vmovdqa %xmm2, 240(%eax)
+; CHECK-NEXT: vmovdqa %xmm3, 240(%eax)
+; CHECK-NEXT: vmovdqa %xmm4, 224(%eax)
+; CHECK-NEXT: vmovdqa %xmm7, 208(%eax)
+; CHECK-NEXT: vmovdqa %xmm6, 192(%eax)
+; CHECK-NEXT: vmovdqa %xmm0, 176(%eax)
+; CHECK-NEXT: vmovdqa %xmm1, 160(%eax)
+; CHECK-NEXT: vmovdqa %xmm2, 144(%eax)
; CHECK-NEXT: vmovaps (%esp), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 224(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 128(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 208(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 112(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 192(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 96(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 176(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 80(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 160(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 64(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 144(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 48(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 128(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 32(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 112(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 16(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 96(%eax)
+; CHECK-NEXT: vmovaps %xmm0, (%eax)
; CHECK-NEXT: movl %ebp, %esp
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
index 13f7d68..33d80f6 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
@@ -652,7 +652,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: paddb %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psllw $1, %xmm2
+; SSE2-NEXT: paddw %xmm2, %xmm2
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -678,7 +678,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: paddb %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psllw $1, %xmm2
+; SSE41-NEXT: paddw %xmm2, %xmm2
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT: psrlw $2, %xmm1
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -701,7 +701,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpsllw $1, %xmm1, %xmm2
+; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
@@ -720,7 +720,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX2NOBW-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2NOBW-NEXT: vpaddb %xmm1, %xmm2, %xmm1
-; AVX2NOBW-NEXT: vpsllw $1, %xmm1, %xmm2
+; AVX2NOBW-NEXT: vpaddw %xmm1, %xmm1, %xmm2
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2NOBW-NEXT: vpsrlw $2, %xmm1, %xmm1
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
@@ -739,7 +739,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX512BW-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpaddb %xmm1, %xmm2, %xmm1
-; AVX512BW-NEXT: vpsllw $1, %xmm1, %xmm2
+; AVX512BW-NEXT: vpaddw %xmm1, %xmm1, %xmm2
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpsrlw $2, %xmm1, %xmm1
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
index 1a5c373..e43108f 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
@@ -590,7 +590,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpaddb %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpsllw $1, %xmm3, %xmm5
+; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm5
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3
@@ -609,7 +609,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpsllw $1, %xmm2, %xmm3
+; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm8, %xmm2
@@ -633,7 +633,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; AVX2NOBW-NEXT: vpsllw $1, %ymm1, %ymm2
+; AVX2NOBW-NEXT: vpaddw %ymm1, %ymm1, %ymm2
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX2NOBW-NEXT: vpsrlw $2, %ymm1, %ymm1
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
@@ -651,7 +651,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX512BW-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512BW-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; AVX512BW-NEXT: vpsllw $1, %ymm1, %ymm2
+; AVX512BW-NEXT: vpaddw %ymm1, %ymm1, %ymm2
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512BW-NEXT: vpsrlw $2, %ymm1, %ymm1
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll
index 9c56894..bf98bcc 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll
@@ -485,7 +485,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vpbroadcastb {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpaddb %ymm3, %ymm5, %ymm3
-; AVX512F-NEXT: vpsllw $1, %ymm3, %ymm5
+; AVX512F-NEXT: vpaddw %ymm3, %ymm3, %ymm5
; AVX512F-NEXT: vpbroadcastb {{.*#+}} ymm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
; AVX512F-NEXT: vpand %ymm7, %ymm5, %ymm5
; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3
@@ -504,7 +504,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3
; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT: vpsllw $1, %ymm2, %ymm3
+; AVX512F-NEXT: vpaddw %ymm2, %ymm2, %ymm3
; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2
; AVX512F-NEXT: vpand %ymm2, %ymm8, %ymm2
@@ -528,7 +528,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512BW-NEXT: vpsrlw $1, %zmm2, %zmm2
; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
; AVX512BW-NEXT: vpaddb %zmm1, %zmm2, %zmm1
-; AVX512BW-NEXT: vpsllw $1, %zmm1, %zmm2
+; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm2
; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $2, %zmm1, %zmm1
; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1
diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
index 13b21a7..6e1bf25 100644
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -821,10 +821,10 @@ define <16 x i16> @madd_v16i16_3(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; X86-SSE-NEXT: andl $-16, %esp
; X86-SSE-NEXT: subl $16, %esp
; X86-SSE-NEXT: movdqa %xmm1, %xmm3
-; X86-SSE-NEXT: paddw %xmm1, %xmm3
+; X86-SSE-NEXT: paddw %xmm3, %xmm3
; X86-SSE-NEXT: paddw %xmm3, %xmm1
; X86-SSE-NEXT: movdqa %xmm0, %xmm3
-; X86-SSE-NEXT: paddw %xmm0, %xmm3
+; X86-SSE-NEXT: paddw %xmm3, %xmm3
; X86-SSE-NEXT: paddw %xmm2, %xmm0
; X86-SSE-NEXT: paddw %xmm3, %xmm0
; X86-SSE-NEXT: paddw 8(%ebp), %xmm1
@@ -835,9 +835,9 @@ define <16 x i16> @madd_v16i16_3(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; X64-SSE-LABEL: madd_v16i16_3:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movdqa %xmm1, %xmm4
-; X64-SSE-NEXT: paddw %xmm1, %xmm4
+; X64-SSE-NEXT: paddw %xmm4, %xmm4
; X64-SSE-NEXT: movdqa %xmm0, %xmm5
-; X64-SSE-NEXT: paddw %xmm0, %xmm5
+; X64-SSE-NEXT: paddw %xmm5, %xmm5
; X64-SSE-NEXT: paddw %xmm2, %xmm0
; X64-SSE-NEXT: paddw %xmm5, %xmm0
; X64-SSE-NEXT: paddw %xmm3, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
index 227e000..ab1feba 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
@@ -907,7 +907,7 @@ define i1 @mask_v8i32_2(<8 x i32> %a0) {
; SSE2-LABEL: mask_v8i32_2:
; SSE2: # %bb.0:
; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: pslld $1, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm0
; SSE2-NEXT: movmskps %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
; SSE2-NEXT: sete %al
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
index 2b1cf5b..99dac74 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
@@ -927,7 +927,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: constant_shift_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: paddq %xmm0, %xmm1
+; SSE2-NEXT: paddq %xmm1, %xmm1
; SSE2-NEXT: psllq $7, %xmm0
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
@@ -975,7 +975,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; X86-SSE-LABEL: constant_shift_v2i64:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movdqa %xmm0, %xmm1
-; X86-SSE-NEXT: paddq %xmm0, %xmm1
+; X86-SSE-NEXT: paddq %xmm1, %xmm1
; X86-SSE-NEXT: psllq $7, %xmm0
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X86-SSE-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
index bec3349..3590c4d 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
@@ -62,15 +62,12 @@ define <4 x i32> @combine_blend_of_permutes_v4i32(<2 x i64> %a0, <2 x i64> %a1)
define <4 x float> @freeze_insertps(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: freeze_insertps:
; SSE: # %bb.0:
-; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm0[1],xmm1[1,2,3]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: freeze_insertps:
; AVX: # %bb.0:
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],xmm1[1,2,3]
+; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%s0 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 16)
%f0 = freeze <4 x float> %s0
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index 5b61de5..ee9d8a5 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -3550,14 +3550,14 @@ define <8 x i16> @PR141475(i32 %in) {
; SSE-LABEL: PR141475:
; SSE: # %bb.0:
; SSE-NEXT: movd %edi, %xmm0
-; SSE-NEXT: pslld $1, %xmm0
+; SSE-NEXT: paddd %xmm0, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: PR141475:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %edi, %xmm0
-; AVX-NEXT: vpslld $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX-NEXT: retq
%mul = shl i32 %in, 1
diff --git a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
index 54dc107..3b93734 100644
--- a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
+++ b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
@@ -1438,26 +1438,26 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_10(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_127_mask_shl_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddw %xmm0, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: paddw %xmm0, %xmm0
+; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
+; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
%t1 = shl <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -1656,26 +1656,26 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_6(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_65024_mask_shl_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddw %xmm0, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: paddw %xmm0, %xmm0
+; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
+; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
%t1 = shl <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -2373,40 +2373,40 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_18(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_32767_mask_shl_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddd %xmm0, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767]
-; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65534,65534,65534,65534]
+; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: paddd %xmm0, %xmm0
+; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X64-AVX1: # %bb.0:
-; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767]
-; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65534,65534,65534,65534]
+; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%t0 = and <4 x i32> %a0, <i32 32767, i32 32767, i32 32767, i32 32767>
%t1 = shl <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1>
@@ -2675,40 +2675,40 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_10(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_4294836224_mask_shl_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddd %xmm0, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
-; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294705152,4294705152,4294705152,4294705152]
+; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: paddd %xmm0, %xmm0
+; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X64-AVX1: # %bb.0:
-; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
-; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294705152,4294705152,4294705152,4294705152]
+; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%t0 = and <4 x i32> %a0, <i32 4294836224, i32 4294836224, i32 4294836224, i32 4294836224>
%t1 = shl <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1>
@@ -3325,26 +3325,26 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_34(<2 x i64> %
define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddq %xmm0, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: paddq %xmm0, %xmm0
+; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
+; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647>
%t1 = shl <2 x i64> %t0, <i64 1, i64 1>
@@ -3543,26 +3543,26 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_18(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_shl_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddq %xmm0, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: paddq %xmm0, %xmm0
+; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
+; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
%t1 = shl <2 x i64> %t0, <i64 1, i64 1>