Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r--  llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/atomic-load-store.ll | 250
-rw-r--r--  llvm/test/CodeGen/X86/avx-minmax.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/bf16-fast-isel.ll | 66
-rw-r--r--  llvm/test/CodeGen/X86/bitcnt-big-integer.ll | 3021
-rw-r--r--  llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/call-graph-section-assembly.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/dag-fmf-cse.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/fabs.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll | 789
-rw-r--r--  llvm/test/CodeGen/X86/fp-undef.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/fp128-select.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/fsxor-alignment.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/neg_fp.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/negate-add-zero.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/recip-pic.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/sincos-opt.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/sincos.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll | 12
26 files changed, 3749 insertions, 439 deletions
diff --git a/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll b/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
index bea11e9..940fe8c 100644
--- a/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
+++ b/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -mtriple=i686-- -mattr=-sse | FileCheck %s -check-prefix=WITHNANS
-; RUN: llc < %s -mtriple=i686-- -mattr=-sse -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s -check-prefix=NONANS
+; RUN: llc < %s -mtriple=i686-- -mattr=-sse -enable-no-nans-fp-math | FileCheck %s -check-prefix=NONANS
; WITHNANS-LABEL: test:
; WITHNANS: setnp
diff --git a/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll b/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll
index 8411a40..ff7a99a 100644
--- a/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll
+++ b/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -enable-unsafe-fp-math -mtriple=i686-- | FileCheck %s
+; RUN: llc < %s -mtriple=i686-- | FileCheck %s
; rdar://5902801
declare void @test2()
diff --git a/llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll b/llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll
index 6ebbb2e..0e0e20f 100644
--- a/llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll
+++ b/llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -enable-unsafe-fp-math
+; RUN: llc < %s
; <rdar://problem/12180135>
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
target triple = "i386-apple-macosx10.8.0"
diff --git a/llvm/test/CodeGen/X86/atomic-load-store.ll b/llvm/test/CodeGen/X86/atomic-load-store.ll
index 45277ce..4f5cb5a 100644
--- a/llvm/test/CodeGen/X86/atomic-load-store.ll
+++ b/llvm/test/CodeGen/X86/atomic-load-store.ll
@@ -1,12 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64 | FileCheck %s --check-prefixes=CHECK,CHECK-O3
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-O3
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-O3
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-O3
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-SSE-O3
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX-O3
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX-O3
; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64 | FileCheck %s --check-prefixes=CHECK,CHECK-O0
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-O0
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-O0
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-O0
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-SSE-O0
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX-O0
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX-O0
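; The RUN lines above fan this file out across CPU levels: plain x86-64 keeps
; the generic CHECK-O3/CHECK-O0 prefixes, x86-64-v2 uses the CHECK-SSE-*
; prefixes, and x86-64-v3/v4 share the CHECK-AVX-* prefixes, so SSE and AVX
; instruction selection can be checked separately in the tests below.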
define void @test1(ptr %ptr, i32 %val1) {
; CHECK-LABEL: test1:
@@ -34,6 +34,238 @@ define i32 @test3(ptr %ptr) {
%val = load atomic i32, ptr %ptr seq_cst, align 4
ret i32 %val
}
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-O0: {{.*}}
-; CHECK-O3: {{.*}}
+
+define <1 x i32> @atomic_vec1_i32(ptr %x) {
+; CHECK-LABEL: atomic_vec1_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: retq
+ %ret = load atomic <1 x i32>, ptr %x acquire, align 4
+ ret <1 x i32> %ret
+}
+
+define <1 x i8> @atomic_vec1_i8(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_i8:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movzbl (%rdi), %eax
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_i8:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movzbl (%rdi), %eax
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_i8:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: movzbl (%rdi), %eax
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_i8:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movb (%rdi), %al
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_i8:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movb (%rdi), %al
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_i8:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: movb (%rdi), %al
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x i8>, ptr %x acquire, align 1
+ ret <1 x i8> %ret
+}
+
+define <1 x i16> @atomic_vec1_i16(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_i16:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_i16:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_i16:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_i16:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movw (%rdi), %ax
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_i16:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movw (%rdi), %ax
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_i16:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: movw (%rdi), %ax
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x i16>, ptr %x acquire, align 2
+ ret <1 x i16> %ret
+}
+
+define <1 x i32> @atomic_vec1_i8_zext(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_i8_zext:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movzbl (%rdi), %eax
+; CHECK-O3-NEXT: movzbl %al, %eax
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_i8_zext:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movzbl (%rdi), %eax
+; CHECK-SSE-O3-NEXT: movzbl %al, %eax
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_i8_zext:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: movzbl (%rdi), %eax
+; CHECK-AVX-O3-NEXT: movzbl %al, %eax
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_i8_zext:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movb (%rdi), %al
+; CHECK-O0-NEXT: movzbl %al, %eax
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_i8_zext:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movb (%rdi), %al
+; CHECK-SSE-O0-NEXT: movzbl %al, %eax
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_i8_zext:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: movb (%rdi), %al
+; CHECK-AVX-O0-NEXT: movzbl %al, %eax
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x i8>, ptr %x acquire, align 1
+ %zret = zext <1 x i8> %ret to <1 x i32>
+ ret <1 x i32> %zret
+}
+
+define <1 x i64> @atomic_vec1_i16_sext(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_i16_sext:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-O3-NEXT: movswq %ax, %rax
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_i16_sext:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-SSE-O3-NEXT: movswq %ax, %rax
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_i16_sext:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-AVX-O3-NEXT: movswq %ax, %rax
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_i16_sext:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movw (%rdi), %ax
+; CHECK-O0-NEXT: movswq %ax, %rax
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_i16_sext:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movw (%rdi), %ax
+; CHECK-SSE-O0-NEXT: movswq %ax, %rax
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_i16_sext:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: movw (%rdi), %ax
+; CHECK-AVX-O0-NEXT: movswq %ax, %rax
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x i16>, ptr %x acquire, align 2
+ %sret = sext <1 x i16> %ret to <1 x i64>
+ ret <1 x i64> %sret
+}
+
+define <1 x ptr addrspace(270)> @atomic_vec1_ptr270(ptr %x) {
+; CHECK-LABEL: atomic_vec1_ptr270:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: retq
+ %ret = load atomic <1 x ptr addrspace(270)>, ptr %x acquire, align 4
+ ret <1 x ptr addrspace(270)> %ret
+}
+
+define <1 x bfloat> @atomic_vec1_bfloat(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_bfloat:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-O3-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_bfloat:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-SSE-O3-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_bfloat:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-AVX-O3-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_bfloat:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movw (%rdi), %cx
+; CHECK-O0-NEXT: # implicit-def: $eax
+; CHECK-O0-NEXT: movw %cx, %ax
+; CHECK-O0-NEXT: # implicit-def: $xmm0
+; CHECK-O0-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_bfloat:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movw (%rdi), %cx
+; CHECK-SSE-O0-NEXT: # implicit-def: $eax
+; CHECK-SSE-O0-NEXT: movw %cx, %ax
+; CHECK-SSE-O0-NEXT: # implicit-def: $xmm0
+; CHECK-SSE-O0-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_bfloat:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: movw (%rdi), %cx
+; CHECK-AVX-O0-NEXT: # implicit-def: $eax
+; CHECK-AVX-O0-NEXT: movw %cx, %ax
+; CHECK-AVX-O0-NEXT: # implicit-def: $xmm0
+; CHECK-AVX-O0-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x bfloat>, ptr %x acquire, align 2
+ ret <1 x bfloat> %ret
+}
+
+define <1 x ptr> @atomic_vec1_ptr_align(ptr %x) nounwind {
+; CHECK-LABEL: atomic_vec1_ptr_align:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: retq
+ %ret = load atomic <1 x ptr>, ptr %x acquire, align 8
+ ret <1 x ptr> %ret
+}
+
+define <1 x i64> @atomic_vec1_i64_align(ptr %x) nounwind {
+; CHECK-LABEL: atomic_vec1_i64_align:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: retq
+ %ret = load atomic <1 x i64>, ptr %x acquire, align 8
+ ret <1 x i64> %ret
+}
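; A minimal IR sketch of the scalarization the <1 x iN> checks above rely on
; (the function name is illustrative, not from the test): a single-element
; vector atomic load legalizes to the equivalent scalar atomic load, which is
; why the integer and pointer cases show plain movl/movq and no vector code.
define i64 @atomic_vec1_scalar_sketch(ptr %x) {
  %ret = load atomic i64, ptr %x acquire, align 8  ; lowers to: movq (%rdi), %rax
  ret i64 %ret
}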
diff --git a/llvm/test/CodeGen/X86/avx-minmax.ll b/llvm/test/CodeGen/X86/avx-minmax.ll
index 6da04c5..8e4b6c6 100644
--- a/llvm/test/CodeGen/X86/avx-minmax.ll
+++ b/llvm/test/CodeGen/X86/avx-minmax.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -enable-no-nans-fp-math | FileCheck %s
define <2 x double> @maxpd(<2 x double> %x, <2 x double> %y) {
; CHECK-LABEL: maxpd:
diff --git a/llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll b/llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll
index f827998..eb9de8a 100644
--- a/llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll
+++ b/llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -mtriple=x86_64 -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mattr=+avx512f | FileCheck %s --check-prefix=CHECK_UNSAFE
; RUN: llc < %s -mtriple=x86_64 -enable-no-nans-fp-math -mattr=+avx512f | FileCheck %s
; RUN: llc < %s -mtriple=x86_64 -enable-no-signed-zeros-fp-math -mattr=+avx512f | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512f | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64 -mattr=+avx512f | FileCheck %s
; RUN: llc < %s -mtriple=x86_64 -mattr=+avx512f | FileCheck %s
define <16 x float> @test_max_v16f32(ptr %a_ptr, <16 x float> %b) {
diff --git a/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll b/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll
index 5d9784a..1147d79 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl --enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl | FileCheck %s
define dso_local <32 x half> @test1(<32 x half> %acc.coerce, <32 x half> %lhs.coerce, <32 x half> %rhs.coerce) {
; CHECK-LABEL: test1:
diff --git a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll
index b58bae9..1c4d9c6 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl --enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl | FileCheck %s
define dso_local <32 x half> @test1(<32 x half> %acc.coerce, <32 x half> %lhs.coerce.conj, <32 x half> %rhs.coerce) local_unnamed_addr #0 {
; CHECK-LABEL: test1:
diff --git a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll
index 92bdebb..a8ff969 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl --enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl | FileCheck %s
define dso_local <32 x half> @test1(<32 x half> %lhs.coerce.conj, <32 x half> %rhs.coerce) local_unnamed_addr #0 {
; CHECK-LABEL: test1:
diff --git a/llvm/test/CodeGen/X86/bf16-fast-isel.ll b/llvm/test/CodeGen/X86/bf16-fast-isel.ll
new file mode 100644
index 0000000..c659e0e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/bf16-fast-isel.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --fast-isel < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+define i8 @test_direct_call(ptr %f) nounwind {
+; CHECK-LABEL: test_direct_call:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo@PLT
+; CHECK-NEXT: callq bar@PLT
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: retq
+entry:
+ %call = call bfloat @foo(ptr %f)
+ %call2 = call zeroext i8 @bar(bfloat %call)
+ ret i8 %call2
+}
+
+define i8 @test_fast_direct_call(ptr %f) nounwind {
+; CHECK-LABEL: test_fast_direct_call:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo_fast@PLT
+; CHECK-NEXT: callq bar@PLT
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: retq
+entry:
+ %call = call fastcc bfloat @foo_fast(ptr %f)
+ %call2 = call zeroext i8 @bar(bfloat %call)
+ ret i8 %call2
+}
+
+define i8 @test_indirect_all(ptr %fptr, ptr %f) nounwind {
+; CHECK-LABEL: test_indirect_all:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movq %rsi, %rdi
+; CHECK-NEXT: callq foo@PLT
+; CHECK-NEXT: callq *%rbx
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+entry:
+ %call = call bfloat @foo(ptr %f)
+ %call2 = call zeroext i8 %fptr(bfloat %call)
+ ret i8 %call2
+}
+
+define i8 @test_fast_indirect_all(ptr %fptr, ptr %f) nounwind {
+; CHECK-LABEL: test_fast_indirect_all:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movq %rsi, %rdi
+; CHECK-NEXT: callq foo_fast@PLT
+; CHECK-NEXT: callq *%rbx
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+entry:
+ %call = call fastcc bfloat @foo_fast(ptr %f)
+ %call2 = call zeroext i8 %fptr(bfloat %call)
+ ret i8 %call2
+}
+
+declare bfloat @foo(ptr %f)
+declare zeroext i8 @bar(bfloat)
+declare fastcc bfloat @foo_fast(ptr %f)
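; Taken together, the four tests above pin down FastISel call lowering for
; bfloat values: direct, fastcc, and indirect calls that return or pass a
; bfloat are expected to compile to plain calls, with no extra conversion
; code between the producing and consuming call.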
diff --git a/llvm/test/CodeGen/X86/bitcnt-big-integer.ll b/llvm/test/CodeGen/X86/bitcnt-big-integer.ll
new file mode 100644
index 0000000..13149d7
--- /dev/null
+++ b/llvm/test/CodeGen/X86/bitcnt-big-integer.ll
@@ -0,0 +1,3021 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 -mattr=+avx512vpopcntdq | FileCheck %s --check-prefixes=CHECK,AVX512
+
+;
+; CTPOP
+;
+
+define i32 @test_ctpop_i128(i128 %a0) nounwind {
+; CHECK-LABEL: test_ctpop_i128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: popcntq %rsi, %rcx
+; CHECK-NEXT: popcntq %rdi, %rax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
+; CHECK-NEXT: retq
+ %cnt = call i128 @llvm.ctpop.i128(i128 %a0)
+ %res = trunc i128 %cnt to i32
+ ret i32 %res
+}
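; A minimal IR sketch of the legalization the CHECK lines above exercise,
; assuming the usual split of an illegal i128 into lo/hi halves (function and
; value names are illustrative, not from the test): each half gets its own
; i64 ctpop and the two counts are added.
define i32 @ctpop_i128_sketch(i64 %lo, i64 %hi) {
  %plo = call i64 @llvm.ctpop.i64(i64 %lo)   ; -> popcntq of the low half
  %phi = call i64 @llvm.ctpop.i64(i64 %hi)   ; -> popcntq of the high half
  %sum = add i64 %phi, %plo
  %res = trunc i64 %sum to i32
  ret i32 %res
}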
+
+define i32 @load_ctpop_i128(ptr %p0) nounwind {
+; CHECK-LABEL: load_ctpop_i128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: popcntq 8(%rdi), %rcx
+; CHECK-NEXT: popcntq (%rdi), %rax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
+; CHECK-NEXT: retq
+ %a0 = load i128, ptr %p0
+ %cnt = call i128 @llvm.ctpop.i128(i128 %a0)
+ %res = trunc i128 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_ctpop_i256(i256 %a0) nounwind {
+; CHECK-LABEL: test_ctpop_i256:
+; CHECK: # %bb.0:
+; CHECK-NEXT: popcntq %rcx, %rax
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: popcntq %rdx, %rcx
+; CHECK-NEXT: addl %eax, %ecx
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: popcntq %rsi, %rdx
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popcntq %rdi, %rax
+; CHECK-NEXT: addl %edx, %eax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
+; CHECK-NEXT: retq
+ %cnt = call i256 @llvm.ctpop.i256(i256 %a0)
+ %res = trunc i256 %cnt to i32
+ ret i32 %res
+}
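; Note the xorl zeroing ahead of most popcntq uses above: popcnt has a false
; output dependency on several Intel microarchitectures, so the backend
; breaks the dependency chain by clearing the destination register first.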
+
+define i32 @load_ctpop_i256(ptr %p0) nounwind {
+; SSE-LABEL: load_ctpop_i256:
+; SSE: # %bb.0:
+; SSE-NEXT: popcntq 24(%rdi), %rcx
+; SSE-NEXT: popcntq 16(%rdi), %rdx
+; SSE-NEXT: popcntq 8(%rdi), %rsi
+; SSE-NEXT: popcntq (%rdi), %rax
+; SSE-NEXT: addl %ecx, %edx
+; SSE-NEXT: addl %esi, %eax
+; SSE-NEXT: addl %edx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctpop_i256:
+; AVX2: # %bb.0:
+; AVX2-NEXT: popcntq 24(%rdi), %rax
+; AVX2-NEXT: popcntq 16(%rdi), %rcx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: popcntq 8(%rdi), %rdx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq (%rdi), %rax
+; AVX2-NEXT: addl %edx, %eax
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctpop_i256:
+; AVX512: # %bb.0:
+; AVX512-NEXT: popcntq 24(%rdi), %rax
+; AVX512-NEXT: popcntq 16(%rdi), %rcx
+; AVX512-NEXT: addl %eax, %ecx
+; AVX512-NEXT: popcntq 8(%rdi), %rdx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq (%rdi), %rax
+; AVX512-NEXT: addl %edx, %eax
+; AVX512-NEXT: addl %ecx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i256, ptr %p0
+ %cnt = call i256 @llvm.ctpop.i256(i256 %a0)
+ %res = trunc i256 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_ctpop_i512(i512 %a0) nounwind {
+; CHECK-LABEL: test_ctpop_i512:
+; CHECK: # %bb.0:
+; CHECK-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; CHECK-NEXT: addl %eax, %r10d
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popcntq %r9, %rax
+; CHECK-NEXT: popcntq %r8, %r8
+; CHECK-NEXT: addl %eax, %r8d
+; CHECK-NEXT: addl %r10d, %r8d
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popcntq %rcx, %rax
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: popcntq %rdx, %rcx
+; CHECK-NEXT: addl %eax, %ecx
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: popcntq %rsi, %rdx
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popcntq %rdi, %rax
+; CHECK-NEXT: addl %edx, %eax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: addl %r8d, %eax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
+; CHECK-NEXT: retq
+ %cnt = call i512 @llvm.ctpop.i512(i512 %a0)
+ %res = trunc i512 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_ctpop_i512(ptr %p0) nounwind {
+; SSE-LABEL: load_ctpop_i512:
+; SSE: # %bb.0:
+; SSE-NEXT: popcntq 56(%rdi), %rax
+; SSE-NEXT: popcntq 48(%rdi), %rcx
+; SSE-NEXT: popcntq 40(%rdi), %rdx
+; SSE-NEXT: popcntq 32(%rdi), %rsi
+; SSE-NEXT: addl %eax, %ecx
+; SSE-NEXT: addl %edx, %esi
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq 24(%rdi), %rax
+; SSE-NEXT: addl %ecx, %esi
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: popcntq 16(%rdi), %rcx
+; SSE-NEXT: addl %eax, %ecx
+; SSE-NEXT: xorl %edx, %edx
+; SSE-NEXT: popcntq 8(%rdi), %rdx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq (%rdi), %rax
+; SSE-NEXT: addl %edx, %eax
+; SSE-NEXT: addl %ecx, %eax
+; SSE-NEXT: addl %esi, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctpop_i512:
+; AVX2: # %bb.0:
+; AVX2-NEXT: popcntq 56(%rdi), %rax
+; AVX2-NEXT: popcntq 48(%rdi), %rcx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq 40(%rdi), %rax
+; AVX2-NEXT: popcntq 32(%rdi), %rdx
+; AVX2-NEXT: addl %eax, %edx
+; AVX2-NEXT: addl %ecx, %edx
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: popcntq 24(%rdi), %rcx
+; AVX2-NEXT: popcntq 16(%rdi), %rsi
+; AVX2-NEXT: popcntq 8(%rdi), %r8
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq (%rdi), %rax
+; AVX2-NEXT: addl %ecx, %esi
+; AVX2-NEXT: addl %r8d, %eax
+; AVX2-NEXT: addl %esi, %eax
+; AVX2-NEXT: addl %edx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctpop_i512:
+; AVX512: # %bb.0:
+; AVX512-NEXT: popcntq 56(%rdi), %rax
+; AVX512-NEXT: popcntq 48(%rdi), %rcx
+; AVX512-NEXT: addl %eax, %ecx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq 40(%rdi), %rax
+; AVX512-NEXT: popcntq 32(%rdi), %rdx
+; AVX512-NEXT: addl %eax, %edx
+; AVX512-NEXT: addl %ecx, %edx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq 24(%rdi), %rax
+; AVX512-NEXT: xorl %ecx, %ecx
+; AVX512-NEXT: popcntq 16(%rdi), %rcx
+; AVX512-NEXT: popcntq 8(%rdi), %rsi
+; AVX512-NEXT: addl %eax, %ecx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq (%rdi), %rax
+; AVX512-NEXT: addl %esi, %eax
+; AVX512-NEXT: addl %ecx, %eax
+; AVX512-NEXT: addl %edx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i512, ptr %p0
+ %cnt = call i512 @llvm.ctpop.i512(i512 %a0)
+ %res = trunc i512 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_ctpop_i1024(i1024 %a0) nounwind {
+; SSE-LABEL: test_ctpop_i1024:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: addl %eax, %r10d
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT: addl %r11d, %eax
+; SSE-NEXT: xorl %r11d, %r11d
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: xorl %ebx, %ebx
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %rbx
+; SSE-NEXT: addl %r10d, %eax
+; SSE-NEXT: addl %r11d, %ebx
+; SSE-NEXT: xorl %r11d, %r11d
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: xorl %r10d, %r10d
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; SSE-NEXT: addl %r11d, %r10d
+; SSE-NEXT: addl %ebx, %r10d
+; SSE-NEXT: xorl %r11d, %r11d
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: xorl %ebx, %ebx
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %rbx
+; SSE-NEXT: addl %eax, %r10d
+; SSE-NEXT: addl %r11d, %ebx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq %r9, %rax
+; SSE-NEXT: popcntq %r8, %r8
+; SSE-NEXT: addl %eax, %r8d
+; SSE-NEXT: addl %ebx, %r8d
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq %rcx, %rax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: popcntq %rdx, %rcx
+; SSE-NEXT: addl %eax, %ecx
+; SSE-NEXT: xorl %edx, %edx
+; SSE-NEXT: popcntq %rsi, %rdx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq %rdi, %rax
+; SSE-NEXT: addl %edx, %eax
+; SSE-NEXT: addl %ecx, %eax
+; SSE-NEXT: addl %r8d, %eax
+; SSE-NEXT: addl %r10d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_ctpop_i1024:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; AVX2-NEXT: addl %eax, %r10d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: addl %eax, %r11d
+; AVX2-NEXT: addl %r10d, %r11d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: xorl %ebx, %ebx
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %rbx
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %r14
+; AVX2-NEXT: addl %eax, %ebx
+; AVX2-NEXT: xorl %r10d, %r10d
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; AVX2-NEXT: addl %r14d, %r10d
+; AVX2-NEXT: addl %ebx, %r10d
+; AVX2-NEXT: addl %r11d, %r10d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: xorl %r11d, %r11d
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: addl %eax, %r11d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq %r9, %rax
+; AVX2-NEXT: popcntq %r8, %r8
+; AVX2-NEXT: addl %eax, %r8d
+; AVX2-NEXT: addl %r11d, %r8d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq %rcx, %rax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: popcntq %rdx, %rcx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: popcntq %rsi, %rdx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq %rdi, %rax
+; AVX2-NEXT: addl %edx, %eax
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: addl %r8d, %eax
+; AVX2-NEXT: addl %r10d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ctpop_i1024:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: addl %eax, %r10d
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: addl %eax, %r11d
+; AVX512-NEXT: addl %r10d, %r11d
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: xorl %ebx, %ebx
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %rbx
+; AVX512-NEXT: xorl %r14d, %r14d
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %r14
+; AVX512-NEXT: addl %eax, %ebx
+; AVX512-NEXT: xorl %r10d, %r10d
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: addl %r14d, %r10d
+; AVX512-NEXT: addl %ebx, %r10d
+; AVX512-NEXT: addl %r11d, %r10d
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: xorl %r11d, %r11d
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: addl %eax, %r11d
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq %r9, %rax
+; AVX512-NEXT: popcntq %r8, %r8
+; AVX512-NEXT: addl %eax, %r8d
+; AVX512-NEXT: addl %r11d, %r8d
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq %rcx, %rax
+; AVX512-NEXT: xorl %ecx, %ecx
+; AVX512-NEXT: popcntq %rdx, %rcx
+; AVX512-NEXT: addl %eax, %ecx
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: popcntq %rsi, %rdx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq %rdi, %rax
+; AVX512-NEXT: addl %edx, %eax
+; AVX512-NEXT: addl %ecx, %eax
+; AVX512-NEXT: addl %r8d, %eax
+; AVX512-NEXT: addl %r10d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: retq
+ %cnt = call i1024 @llvm.ctpop.i1024(i1024 %a0)
+ %res = trunc i1024 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_ctpop_i1024(ptr %p0) nounwind {
+; SSE-LABEL: load_ctpop_i1024:
+; SSE: # %bb.0:
+; SSE-NEXT: popcntq 120(%rdi), %rax
+; SSE-NEXT: popcntq 112(%rdi), %rcx
+; SSE-NEXT: popcntq 104(%rdi), %rdx
+; SSE-NEXT: popcntq 96(%rdi), %rsi
+; SSE-NEXT: addl %eax, %ecx
+; SSE-NEXT: addl %edx, %esi
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq 88(%rdi), %rax
+; SSE-NEXT: addl %ecx, %esi
+; SSE-NEXT: xorl %edx, %edx
+; SSE-NEXT: popcntq 80(%rdi), %rdx
+; SSE-NEXT: addl %eax, %edx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq 72(%rdi), %rax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: popcntq 64(%rdi), %rcx
+; SSE-NEXT: addl %eax, %ecx
+; SSE-NEXT: addl %edx, %ecx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq 56(%rdi), %rax
+; SSE-NEXT: addl %esi, %ecx
+; SSE-NEXT: xorl %edx, %edx
+; SSE-NEXT: popcntq 48(%rdi), %rdx
+; SSE-NEXT: addl %eax, %edx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq 40(%rdi), %rax
+; SSE-NEXT: xorl %esi, %esi
+; SSE-NEXT: popcntq 32(%rdi), %rsi
+; SSE-NEXT: addl %eax, %esi
+; SSE-NEXT: addl %edx, %esi
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq 24(%rdi), %rax
+; SSE-NEXT: xorl %edx, %edx
+; SSE-NEXT: popcntq 16(%rdi), %rdx
+; SSE-NEXT: popcntq 8(%rdi), %r8
+; SSE-NEXT: addl %eax, %edx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq (%rdi), %rax
+; SSE-NEXT: addl %r8d, %eax
+; SSE-NEXT: addl %edx, %eax
+; SSE-NEXT: addl %esi, %eax
+; SSE-NEXT: addl %ecx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctpop_i1024:
+; AVX2: # %bb.0:
+; AVX2-NEXT: popcntq 120(%rdi), %rax
+; AVX2-NEXT: popcntq 112(%rdi), %rcx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq 104(%rdi), %rax
+; AVX2-NEXT: popcntq 96(%rdi), %rdx
+; AVX2-NEXT: addl %eax, %edx
+; AVX2-NEXT: addl %ecx, %edx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq 88(%rdi), %rax
+; AVX2-NEXT: popcntq 80(%rdi), %rsi
+; AVX2-NEXT: popcntq 72(%rdi), %r8
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: popcntq 64(%rdi), %rcx
+; AVX2-NEXT: addl %eax, %esi
+; AVX2-NEXT: addl %r8d, %ecx
+; AVX2-NEXT: addl %esi, %ecx
+; AVX2-NEXT: addl %edx, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq 56(%rdi), %rax
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: popcntq 48(%rdi), %rdx
+; AVX2-NEXT: xorl %esi, %esi
+; AVX2-NEXT: popcntq 40(%rdi), %rsi
+; AVX2-NEXT: xorl %r8d, %r8d
+; AVX2-NEXT: popcntq 32(%rdi), %r8
+; AVX2-NEXT: addl %eax, %edx
+; AVX2-NEXT: addl %esi, %r8d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq 24(%rdi), %rax
+; AVX2-NEXT: addl %edx, %r8d
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: popcntq 16(%rdi), %rdx
+; AVX2-NEXT: addl %eax, %edx
+; AVX2-NEXT: xorl %esi, %esi
+; AVX2-NEXT: popcntq 8(%rdi), %rsi
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq (%rdi), %rax
+; AVX2-NEXT: addl %esi, %eax
+; AVX2-NEXT: addl %edx, %eax
+; AVX2-NEXT: addl %r8d, %eax
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctpop_i1024:
+; AVX512: # %bb.0:
+; AVX512-NEXT: popcntq 120(%rdi), %rax
+; AVX512-NEXT: popcntq 112(%rdi), %rcx
+; AVX512-NEXT: addl %eax, %ecx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq 104(%rdi), %rax
+; AVX512-NEXT: popcntq 96(%rdi), %rdx
+; AVX512-NEXT: addl %eax, %edx
+; AVX512-NEXT: addl %ecx, %edx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq 88(%rdi), %rax
+; AVX512-NEXT: popcntq 80(%rdi), %rsi
+; AVX512-NEXT: popcntq 72(%rdi), %r8
+; AVX512-NEXT: addl %eax, %esi
+; AVX512-NEXT: xorl %ecx, %ecx
+; AVX512-NEXT: popcntq 64(%rdi), %rcx
+; AVX512-NEXT: addl %r8d, %ecx
+; AVX512-NEXT: addl %esi, %ecx
+; AVX512-NEXT: addl %edx, %ecx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq 56(%rdi), %rax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: popcntq 48(%rdi), %rdx
+; AVX512-NEXT: xorl %esi, %esi
+; AVX512-NEXT: popcntq 40(%rdi), %rsi
+; AVX512-NEXT: addl %eax, %edx
+; AVX512-NEXT: xorl %r8d, %r8d
+; AVX512-NEXT: popcntq 32(%rdi), %r8
+; AVX512-NEXT: addl %esi, %r8d
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq 24(%rdi), %rax
+; AVX512-NEXT: addl %edx, %r8d
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: popcntq 16(%rdi), %rdx
+; AVX512-NEXT: addl %eax, %edx
+; AVX512-NEXT: xorl %esi, %esi
+; AVX512-NEXT: popcntq 8(%rdi), %rsi
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq (%rdi), %rax
+; AVX512-NEXT: addl %esi, %eax
+; AVX512-NEXT: addl %edx, %eax
+; AVX512-NEXT: addl %r8d, %eax
+; AVX512-NEXT: addl %ecx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i1024, ptr %p0
+ %cnt = call i1024 @llvm.ctpop.i1024(i1024 %a0)
+ %res = trunc i1024 %cnt to i32
+ ret i32 %res
+}
+
+;
+; CTLZ
+;
+
+define i32 @test_ctlz_i128(i128 %a0) nounwind {
+; SSE-LABEL: test_ctlz_i128:
+; SSE: # %bb.0:
+; SSE-NEXT: bsrq %rsi, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq %rdi, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rsi, %rsi
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_ctlz_i128:
+; AVX2: # %bb.0:
+; AVX2-NEXT: lzcntq %rsi, %rcx
+; AVX2-NEXT: lzcntq %rdi, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ctlz_i128:
+; AVX512: # %bb.0:
+; AVX512-NEXT: lzcntq %rsi, %rcx
+; AVX512-NEXT: lzcntq %rdi, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %cnt = call i128 @llvm.ctlz.i128(i128 %a0, i1 0)
+ %res = trunc i128 %cnt to i32
+ ret i32 %res
+}
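; A minimal IR sketch of the split the CHECK lines above exercise, assuming
; the usual lo/hi halving of an illegal i128 (names are illustrative): take
; the high half's leading-zero count when the high half is non-zero,
; otherwise 64 plus the low half's count. On SSE targets the count comes from
; bsrq, which reports a bit index, hence the extra xorl $63 to convert it.
define i32 @ctlz_i128_sketch(i64 %lo, i64 %hi) {
  %zhi = call i64 @llvm.ctlz.i64(i64 %hi, i1 false) ; leading zeros of high half
  %zlo = call i64 @llvm.ctlz.i64(i64 %lo, i1 false) ; leading zeros of low half
  %lo64 = add i64 %zlo, 64                          ; bias low count past the high half
  %hiz = icmp eq i64 %hi, 0
  %cnt = select i1 %hiz, i64 %lo64, i64 %zhi
  %res = trunc i64 %cnt to i32
  ret i32 %res
}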
+
+define i32 @load_ctlz_i128(ptr %p0) nounwind {
+; SSE-LABEL: load_ctlz_i128:
+; SSE: # %bb.0:
+; SSE-NEXT: movq 8(%rdi), %rcx
+; SSE-NEXT: bsrq %rcx, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq (%rdi), %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rcx, %rcx
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctlz_i128:
+; AVX2: # %bb.0:
+; AVX2-NEXT: movq 8(%rdi), %rcx
+; AVX2-NEXT: lzcntq %rcx, %rdx
+; AVX2-NEXT: lzcntq (%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %edx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctlz_i128:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movq 8(%rdi), %rcx
+; AVX512-NEXT: lzcntq %rcx, %rdx
+; AVX512-NEXT: lzcntq (%rdi), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %edx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i128, ptr %p0
+ %cnt = call i128 @llvm.ctlz.i128(i128 %a0, i1 0)
+ %res = trunc i128 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_ctlz_i256(i256 %a0) nounwind {
+; SSE-LABEL: test_ctlz_i256:
+; SSE: # %bb.0:
+; SSE-NEXT: bsrq %rcx, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %rdx, %r8
+; SSE-NEXT: xorl $63, %r8d
+; SSE-NEXT: orl $64, %r8d
+; SSE-NEXT: testq %rcx, %rcx
+; SSE-NEXT: cmovnel %eax, %r8d
+; SSE-NEXT: bsrq %rsi, %r9
+; SSE-NEXT: xorl $63, %r9d
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq %rdi, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rsi, %rsi
+; SSE-NEXT: cmovnel %r9d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rcx, %rdx
+; SSE-NEXT: cmovnel %r8d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_ctlz_i256:
+; AVX2: # %bb.0:
+; AVX2-NEXT: lzcntq %rcx, %rax
+; AVX2-NEXT: lzcntq %rdx, %r8
+; AVX2-NEXT: addl $64, %r8d
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %eax, %r8d
+; AVX2-NEXT: lzcntq %rsi, %r9
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rdi, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %r9d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rcx, %rdx
+; AVX2-NEXT: cmovnel %r8d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ctlz_i256:
+; AVX512: # %bb.0:
+; AVX512-NEXT: lzcntq %rcx, %rax
+; AVX512-NEXT: lzcntq %rdx, %r8
+; AVX512-NEXT: addl $64, %r8d
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %eax, %r8d
+; AVX512-NEXT: lzcntq %rsi, %r9
+; AVX512-NEXT: lzcntq %rdi, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %r9d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %rcx, %rdx
+; AVX512-NEXT: cmovnel %r8d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %cnt = call i256 @llvm.ctlz.i256(i256 %a0, i1 0)
+ %res = trunc i256 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_ctlz_i256(ptr %p0) nounwind {
+; SSE-LABEL: load_ctlz_i256:
+; SSE: # %bb.0:
+; SSE-NEXT: movq 16(%rdi), %rcx
+; SSE-NEXT: movq 24(%rdi), %rdx
+; SSE-NEXT: bsrq %rdx, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %rcx, %rsi
+; SSE-NEXT: xorl $63, %esi
+; SSE-NEXT: orl $64, %esi
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %esi
+; SSE-NEXT: movq 8(%rdi), %r8
+; SSE-NEXT: bsrq %r8, %r9
+; SSE-NEXT: xorl $63, %r9d
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq (%rdi), %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %r9d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rdx, %rcx
+; SSE-NEXT: cmovnel %esi, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctlz_i256:
+; AVX2: # %bb.0:
+; AVX2-NEXT: movq 16(%rdi), %rcx
+; AVX2-NEXT: movq 24(%rdi), %rdx
+; AVX2-NEXT: lzcntq %rdx, %rax
+; AVX2-NEXT: lzcntq %rcx, %rsi
+; AVX2-NEXT: addl $64, %esi
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %esi
+; AVX2-NEXT: movq 8(%rdi), %r8
+; AVX2-NEXT: lzcntq %r8, %r9
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq (%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %r9d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rdx, %rcx
+; AVX2-NEXT: cmovnel %esi, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctlz_i256:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movq 8(%rdi), %rcx
+; AVX512-NEXT: movq 16(%rdi), %rdx
+; AVX512-NEXT: movq 24(%rdi), %rsi
+; AVX512-NEXT: lzcntq %rsi, %rax
+; AVX512-NEXT: lzcntq %rdx, %r8
+; AVX512-NEXT: addl $64, %r8d
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %eax, %r8d
+; AVX512-NEXT: lzcntq %rcx, %r9
+; AVX512-NEXT: lzcntq (%rdi), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %r9d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %rsi, %rdx
+; AVX512-NEXT: cmovnel %r8d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i256, ptr %p0
+ %cnt = call i256 @llvm.ctlz.i256(i256 %a0, i1 0)
+ %res = trunc i256 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_ctlz_i512(i512 %a0) nounwind {
+; SSE-LABEL: test_ctlz_i512:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: bsrq %r11, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r10, %r14
+; SSE-NEXT: xorl $63, %r14d
+; SSE-NEXT: orl $64, %r14d
+; SSE-NEXT: testq %r11, %r11
+; SSE-NEXT: cmovnel %eax, %r14d
+; SSE-NEXT: bsrq %r9, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r8, %rbx
+; SSE-NEXT: xorl $63, %ebx
+; SSE-NEXT: orl $64, %ebx
+; SSE-NEXT: testq %r9, %r9
+; SSE-NEXT: cmovnel %eax, %ebx
+; SSE-NEXT: subl $-128, %ebx
+; SSE-NEXT: movq %r10, %rax
+; SSE-NEXT: orq %r11, %rax
+; SSE-NEXT: cmovnel %r14d, %ebx
+; SSE-NEXT: bsrq %rcx, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %rdx, %r14
+; SSE-NEXT: xorl $63, %r14d
+; SSE-NEXT: orl $64, %r14d
+; SSE-NEXT: testq %rcx, %rcx
+; SSE-NEXT: cmovnel %eax, %r14d
+; SSE-NEXT: bsrq %rsi, %r15
+; SSE-NEXT: xorl $63, %r15d
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq %rdi, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rsi, %rsi
+; SSE-NEXT: cmovnel %r15d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rcx, %rdx
+; SSE-NEXT: cmovnel %r14d, %eax
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r11, %r9
+; SSE-NEXT: orq %r10, %r8
+; SSE-NEXT: orq %r9, %r8
+; SSE-NEXT: cmovnel %ebx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_ctlz_i512:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: lzcntq %r11, %rax
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: lzcntq %r10, %r14
+; AVX2-NEXT: addl $64, %r14d
+; AVX2-NEXT: testq %r11, %r11
+; AVX2-NEXT: cmovnel %eax, %r14d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %r9, %rax
+; AVX2-NEXT: xorl %ebx, %ebx
+; AVX2-NEXT: lzcntq %r8, %rbx
+; AVX2-NEXT: addl $64, %ebx
+; AVX2-NEXT: testq %r9, %r9
+; AVX2-NEXT: cmovnel %eax, %ebx
+; AVX2-NEXT: subl $-128, %ebx
+; AVX2-NEXT: movq %r10, %rax
+; AVX2-NEXT: orq %r11, %rax
+; AVX2-NEXT: cmovnel %r14d, %ebx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rcx, %rax
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: lzcntq %rdx, %r14
+; AVX2-NEXT: addl $64, %r14d
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %eax, %r14d
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: lzcntq %rsi, %r15
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rdi, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %r15d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rcx, %rdx
+; AVX2-NEXT: cmovnel %r14d, %eax
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r11, %r9
+; AVX2-NEXT: orq %r10, %r8
+; AVX2-NEXT: orq %r9, %r8
+; AVX2-NEXT: cmovnel %ebx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ctlz_i512:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: lzcntq %r11, %rax
+; AVX512-NEXT: lzcntq %r10, %r14
+; AVX512-NEXT: addl $64, %r14d
+; AVX512-NEXT: testq %r11, %r11
+; AVX512-NEXT: cmovnel %eax, %r14d
+; AVX512-NEXT: lzcntq %r9, %rax
+; AVX512-NEXT: lzcntq %r8, %rbx
+; AVX512-NEXT: addl $64, %ebx
+; AVX512-NEXT: testq %r9, %r9
+; AVX512-NEXT: cmovnel %eax, %ebx
+; AVX512-NEXT: subl $-128, %ebx
+; AVX512-NEXT: movq %r10, %rax
+; AVX512-NEXT: orq %r11, %rax
+; AVX512-NEXT: cmovnel %r14d, %ebx
+; AVX512-NEXT: lzcntq %rcx, %rax
+; AVX512-NEXT: lzcntq %rdx, %r14
+; AVX512-NEXT: addl $64, %r14d
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %eax, %r14d
+; AVX512-NEXT: lzcntq %rsi, %r15
+; AVX512-NEXT: lzcntq %rdi, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %r15d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %rcx, %rdx
+; AVX512-NEXT: cmovnel %r14d, %eax
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r11, %r9
+; AVX512-NEXT: orq %r10, %r8
+; AVX512-NEXT: orq %r9, %r8
+; AVX512-NEXT: cmovnel %ebx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: retq
+ %cnt = call i512 @llvm.ctlz.i512(i512 %a0, i1 0)
+ %res = trunc i512 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_ctlz_i512(ptr %p0) nounwind {
+; SSE-LABEL: load_ctlz_i512:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq 8(%rdi), %r10
+; SSE-NEXT: movq 16(%rdi), %r9
+; SSE-NEXT: movq 32(%rdi), %rcx
+; SSE-NEXT: movq 40(%rdi), %rdx
+; SSE-NEXT: movq 48(%rdi), %rsi
+; SSE-NEXT: movq 56(%rdi), %r8
+; SSE-NEXT: bsrq %r8, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %rsi, %r14
+; SSE-NEXT: xorl $63, %r14d
+; SSE-NEXT: orl $64, %r14d
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %eax, %r14d
+; SSE-NEXT: bsrq %rdx, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %rcx, %r11
+; SSE-NEXT: xorl $63, %r11d
+; SSE-NEXT: orl $64, %r11d
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %r11d
+; SSE-NEXT: movq 24(%rdi), %rbx
+; SSE-NEXT: subl $-128, %r11d
+; SSE-NEXT: movq %rsi, %rax
+; SSE-NEXT: orq %r8, %rax
+; SSE-NEXT: cmovnel %r14d, %r11d
+; SSE-NEXT: bsrq %rbx, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r9, %r14
+; SSE-NEXT: xorl $63, %r14d
+; SSE-NEXT: orl $64, %r14d
+; SSE-NEXT: testq %rbx, %rbx
+; SSE-NEXT: cmovnel %eax, %r14d
+; SSE-NEXT: bsrq %r10, %r15
+; SSE-NEXT: xorl $63, %r15d
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq (%rdi), %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r10, %r10
+; SSE-NEXT: cmovnel %r15d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rbx, %r9
+; SSE-NEXT: cmovnel %r14d, %eax
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r8, %rdx
+; SSE-NEXT: orq %rsi, %rcx
+; SSE-NEXT: orq %rdx, %rcx
+; SSE-NEXT: cmovnel %r11d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctlz_i512:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq 8(%rdi), %r10
+; AVX2-NEXT: movq 16(%rdi), %r9
+; AVX2-NEXT: movq 32(%rdi), %rcx
+; AVX2-NEXT: movq 40(%rdi), %rdx
+; AVX2-NEXT: movq 48(%rdi), %rsi
+; AVX2-NEXT: movq 56(%rdi), %r8
+; AVX2-NEXT: lzcntq %r8, %rax
+; AVX2-NEXT: xorl %ebx, %ebx
+; AVX2-NEXT: lzcntq %rsi, %rbx
+; AVX2-NEXT: addl $64, %ebx
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %eax, %ebx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rdx, %rax
+; AVX2-NEXT: lzcntq %rcx, %r11
+; AVX2-NEXT: addl $64, %r11d
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %r11d
+; AVX2-NEXT: subl $-128, %r11d
+; AVX2-NEXT: movq %rsi, %rax
+; AVX2-NEXT: orq %r8, %rax
+; AVX2-NEXT: cmovnel %ebx, %r11d
+; AVX2-NEXT: movq 24(%rdi), %rbx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rbx, %rax
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: lzcntq %r9, %r14
+; AVX2-NEXT: addl $64, %r14d
+; AVX2-NEXT: testq %rbx, %rbx
+; AVX2-NEXT: cmovnel %eax, %r14d
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: lzcntq %r10, %r15
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq (%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %r15d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rbx, %r9
+; AVX2-NEXT: cmovnel %r14d, %eax
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r8, %rdx
+; AVX2-NEXT: orq %rsi, %rcx
+; AVX2-NEXT: orq %rdx, %rcx
+; AVX2-NEXT: cmovnel %r11d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctlz_i512:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq 8(%rdi), %r11
+; AVX512-NEXT: movq 16(%rdi), %r9
+; AVX512-NEXT: movq 24(%rdi), %r10
+; AVX512-NEXT: movq 32(%rdi), %rcx
+; AVX512-NEXT: movq 40(%rdi), %rdx
+; AVX512-NEXT: movq 48(%rdi), %rsi
+; AVX512-NEXT: movq 56(%rdi), %r8
+; AVX512-NEXT: lzcntq %r8, %rax
+; AVX512-NEXT: lzcntq %rsi, %r14
+; AVX512-NEXT: addl $64, %r14d
+; AVX512-NEXT: testq %r8, %r8
+; AVX512-NEXT: cmovnel %eax, %r14d
+; AVX512-NEXT: lzcntq %rdx, %rax
+; AVX512-NEXT: lzcntq %rcx, %rbx
+; AVX512-NEXT: addl $64, %ebx
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %ebx
+; AVX512-NEXT: subl $-128, %ebx
+; AVX512-NEXT: movq %rsi, %rax
+; AVX512-NEXT: orq %r8, %rax
+; AVX512-NEXT: cmovnel %r14d, %ebx
+; AVX512-NEXT: lzcntq %r10, %rax
+; AVX512-NEXT: lzcntq %r9, %r14
+; AVX512-NEXT: addl $64, %r14d
+; AVX512-NEXT: testq %r10, %r10
+; AVX512-NEXT: cmovnel %eax, %r14d
+; AVX512-NEXT: lzcntq (%rdi), %rax
+; AVX512-NEXT: lzcntq %r11, %rdi
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %r11, %r11
+; AVX512-NEXT: cmovnel %edi, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %r10, %r9
+; AVX512-NEXT: cmovnel %r14d, %eax
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r8, %rdx
+; AVX512-NEXT: orq %rsi, %rcx
+; AVX512-NEXT: orq %rdx, %rcx
+; AVX512-NEXT: cmovnel %ebx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: retq
+ %a0 = load i512, ptr %p0
+ %cnt = call i512 @llvm.ctlz.i512(i512 %a0, i1 0)
+ %res = trunc i512 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_ctlz_i1024(i1024 %a0) nounwind {
+; SSE-LABEL: test_ctlz_i1024:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %r13
+; SSE-NEXT: pushq %r12
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq %r9, %r11
+; SSE-NEXT: movq %r8, %r9
+; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: movq %rdx, %r12
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r15
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; SSE-NEXT: bsrq %r8, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r15, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: orl $64, %ecx
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: bsrq %r14, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: bsrq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: orl $64, %eax
+; SSE-NEXT: testq %r14, %r14
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: movq %r15, %rdx
+; SSE-NEXT: orq %r8, %rdx
+; SSE-NEXT: movq %r8, %r14
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: bsrq %r13, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: bsrq %rbx, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: orl $64, %edx
+; SSE-NEXT: testq %r13, %r13
+; SSE-NEXT: cmovnel %ecx, %edx
+; SSE-NEXT: bsrq %r10, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; SSE-NEXT: bsrq %r8, %rbp
+; SSE-NEXT: xorl $63, %ebp
+; SSE-NEXT: orl $64, %ebp
+; SSE-NEXT: testq %r10, %r10
+; SSE-NEXT: cmovnel %ecx, %ebp
+; SSE-NEXT: subl $-128, %ebp
+; SSE-NEXT: movq %rbx, %rcx
+; SSE-NEXT: orq %r13, %rcx
+; SSE-NEXT: cmovnel %edx, %ebp
+; SSE-NEXT: addl $256, %ebp # imm = 0x100
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE-NEXT: orq %r14, %rcx
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; SSE-NEXT: orq %r15, %rdx
+; SSE-NEXT: orq %rcx, %rdx
+; SSE-NEXT: cmovnel %eax, %ebp
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; SSE-NEXT: bsrq %r14, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r15
+; SSE-NEXT: bsrq %r15, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: orl $64, %ecx
+; SSE-NEXT: testq %r14, %r14
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: bsrq %r11, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r9, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: orl $64, %edx
+; SSE-NEXT: testq %r11, %r11
+; SSE-NEXT: cmovnel %eax, %edx
+; SSE-NEXT: subl $-128, %edx
+; SSE-NEXT: movq %r15, %rax
+; SSE-NEXT: orq %r14, %rax
+; SSE-NEXT: cmovnel %ecx, %edx
+; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; SSE-NEXT: bsrq %r15, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r12, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: orl $64, %ecx
+; SSE-NEXT: testq %r15, %r15
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq %rdi, %rax
+; SSE-NEXT: bsrq %rsi, %rdi
+; SSE-NEXT: xorl $63, %edi
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rsi, %rsi
+; SSE-NEXT: cmovnel %edi, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %r15, %r12
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: orq %r14, %r11
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r9
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r11, %r9
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r13
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r10
+; SSE-NEXT: orq %r13, %r10
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %rbx
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r8
+; SSE-NEXT: orq %rbx, %r8
+; SSE-NEXT: addl $512, %eax # imm = 0x200
+; SSE-NEXT: orq %r10, %r8
+; SSE-NEXT: cmovnel %ebp, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r12
+; SSE-NEXT: popq %r13
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: popq %rbp
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_ctlz_i1024:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq %r9, %r14
+; AVX2-NEXT: movq %r8, %r11
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r15
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %r12, %rcx
+; AVX2-NEXT: xorl %r9d, %r9d
+; AVX2-NEXT: lzcntq %r8, %r9
+; AVX2-NEXT: addl $64, %r9d
+; AVX2-NEXT: testq %r12, %r12
+; AVX2-NEXT: cmovnel %ecx, %r9d
+; AVX2-NEXT: xorl %esi, %esi
+; AVX2-NEXT: lzcntq %r10, %rsi
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %rax, %rcx
+; AVX2-NEXT: addl $64, %ecx
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %esi, %ecx
+; AVX2-NEXT: subl $-128, %ecx
+; AVX2-NEXT: movq %r8, %rsi
+; AVX2-NEXT: orq %r12, %rsi
+; AVX2-NEXT: cmovnel %r9d, %ecx
+; AVX2-NEXT: xorl %edi, %edi
+; AVX2-NEXT: lzcntq %rbx, %rdi
+; AVX2-NEXT: xorl %esi, %esi
+; AVX2-NEXT: lzcntq %r15, %rsi
+; AVX2-NEXT: addl $64, %esi
+; AVX2-NEXT: testq %rbx, %rbx
+; AVX2-NEXT: cmovnel %edi, %esi
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; AVX2-NEXT: xorl %ebp, %ebp
+; AVX2-NEXT: lzcntq %r13, %rbp
+; AVX2-NEXT: addl $64, %ebp
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r9
+; AVX2-NEXT: xorl %edi, %edi
+; AVX2-NEXT: lzcntq %r9, %rdi
+; AVX2-NEXT: testq %r9, %r9
+; AVX2-NEXT: cmovnel %edi, %ebp
+; AVX2-NEXT: subl $-128, %ebp
+; AVX2-NEXT: movq %r15, %rdi
+; AVX2-NEXT: orq %rbx, %rdi
+; AVX2-NEXT: cmovnel %esi, %ebp
+; AVX2-NEXT: addl $256, %ebp # imm = 0x100
+; AVX2-NEXT: movq %r10, %rdi
+; AVX2-NEXT: orq %r12, %rdi
+; AVX2-NEXT: movq %rax, %rsi
+; AVX2-NEXT: orq %r8, %rsi
+; AVX2-NEXT: orq %rdi, %rsi
+; AVX2-NEXT: cmovnel %ecx, %ebp
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdi
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rdi, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %r12, %rcx
+; AVX2-NEXT: testq %r12, %r12
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %r11, %rcx
+; AVX2-NEXT: addl $64, %ecx
+; AVX2-NEXT: xorl %esi, %esi
+; AVX2-NEXT: lzcntq %r14, %rsi
+; AVX2-NEXT: testq %r14, %r14
+; AVX2-NEXT: cmovnel %esi, %ecx
+; AVX2-NEXT: subl $-128, %ecx
+; AVX2-NEXT: movq %rdi, %rsi
+; AVX2-NEXT: orq %r12, %rsi
+; AVX2-NEXT: cmovnel %eax, %ecx
+; AVX2-NEXT: movq %rdx, %rdi
+; AVX2-NEXT: lzcntq %rdx, %rdx
+; AVX2-NEXT: addl $64, %edx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %r10, %rax
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %eax, %edx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT: lzcntq %rax, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX2-NEXT: lzcntq %rsi, %r8
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %r8d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %r10, %rdi
+; AVX2-NEXT: cmovnel %edx, %eax
+; AVX2-NEXT: orq %r12, %r14
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r14, %r11
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %rbx
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r9
+; AVX2-NEXT: orq %rbx, %r9
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r15
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r13
+; AVX2-NEXT: orq %r15, %r13
+; AVX2-NEXT: addl $512, %eax # imm = 0x200
+; AVX2-NEXT: orq %r9, %r13
+; AVX2-NEXT: cmovnel %ebp, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ctlz_i1024:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq %r9, %r14
+; AVX512-NEXT: movq %r8, %r11
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r15
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; AVX512-NEXT: lzcntq %r12, %rcx
+; AVX512-NEXT: lzcntq %r8, %r9
+; AVX512-NEXT: addl $64, %r9d
+; AVX512-NEXT: testq %r12, %r12
+; AVX512-NEXT: cmovnel %ecx, %r9d
+; AVX512-NEXT: lzcntq %r10, %rsi
+; AVX512-NEXT: lzcntq %rax, %rcx
+; AVX512-NEXT: addl $64, %ecx
+; AVX512-NEXT: testq %r10, %r10
+; AVX512-NEXT: cmovnel %esi, %ecx
+; AVX512-NEXT: subl $-128, %ecx
+; AVX512-NEXT: movq %r8, %rsi
+; AVX512-NEXT: orq %r12, %rsi
+; AVX512-NEXT: cmovnel %r9d, %ecx
+; AVX512-NEXT: lzcntq %rbx, %rdi
+; AVX512-NEXT: lzcntq %r15, %rsi
+; AVX512-NEXT: addl $64, %esi
+; AVX512-NEXT: testq %rbx, %rbx
+; AVX512-NEXT: cmovnel %edi, %esi
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; AVX512-NEXT: lzcntq %r13, %rbp
+; AVX512-NEXT: addl $64, %ebp
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r9
+; AVX512-NEXT: lzcntq %r9, %rdi
+; AVX512-NEXT: testq %r9, %r9
+; AVX512-NEXT: cmovnel %edi, %ebp
+; AVX512-NEXT: subl $-128, %ebp
+; AVX512-NEXT: movq %r15, %rdi
+; AVX512-NEXT: orq %rbx, %rdi
+; AVX512-NEXT: cmovnel %esi, %ebp
+; AVX512-NEXT: addl $256, %ebp # imm = 0x100
+; AVX512-NEXT: movq %r10, %rdi
+; AVX512-NEXT: orq %r12, %rdi
+; AVX512-NEXT: movq %rax, %rsi
+; AVX512-NEXT: orq %r8, %rsi
+; AVX512-NEXT: orq %rdi, %rsi
+; AVX512-NEXT: cmovnel %ecx, %ebp
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdi
+; AVX512-NEXT: lzcntq %rdi, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; AVX512-NEXT: lzcntq %r12, %rcx
+; AVX512-NEXT: testq %r12, %r12
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: lzcntq %r11, %rcx
+; AVX512-NEXT: addl $64, %ecx
+; AVX512-NEXT: lzcntq %r14, %rsi
+; AVX512-NEXT: testq %r14, %r14
+; AVX512-NEXT: cmovnel %esi, %ecx
+; AVX512-NEXT: subl $-128, %ecx
+; AVX512-NEXT: movq %rdi, %rsi
+; AVX512-NEXT: orq %r12, %rsi
+; AVX512-NEXT: cmovnel %eax, %ecx
+; AVX512-NEXT: movq %rdx, %rdi
+; AVX512-NEXT: lzcntq %rdx, %rdx
+; AVX512-NEXT: addl $64, %edx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX512-NEXT: lzcntq %r10, %rax
+; AVX512-NEXT: testq %r10, %r10
+; AVX512-NEXT: cmovnel %eax, %edx
+; AVX512-NEXT: lzcntq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX512-NEXT: lzcntq %rsi, %r8
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %r8d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %r10, %rdi
+; AVX512-NEXT: cmovnel %edx, %eax
+; AVX512-NEXT: orq %r12, %r14
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r14, %r11
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %rbx
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r9
+; AVX512-NEXT: orq %rbx, %r9
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r15
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r13
+; AVX512-NEXT: orq %r15, %r13
+; AVX512-NEXT: addl $512, %eax # imm = 0x200
+; AVX512-NEXT: orq %r9, %r13
+; AVX512-NEXT: cmovnel %ebp, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
+ %cnt = call i1024 @llvm.ctlz.i1024(i1024 %a0, i1 0)
+ %res = trunc i1024 %cnt to i32
+ ret i32 %res
+}
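+
+; Note on the CTLZ lowering above: without LZCNT, the SSE path builds each
+; 64-bit leading-zero count from "bsrq" plus "xorl $63" (BSR yields the index
+; of the highest set bit, and index ^ 63 == 63 - index for any index in
+; [0, 63]); the AVX paths use lzcntq directly. The "orl $64" seen in place of
+; "addl $64" is equivalent because the value being adjusted is at most 63.
+; A hedged C sketch of one merge step, with illustrative names (limbs in
+; little-endian order; _lzcnt_u64 from <immintrin.h> returns 64 on zero):
+;
+;   unsigned ctlz128(uint64_t lo, uint64_t hi) {
+;     return hi ? _lzcnt_u64(hi) : 64 + _lzcnt_u64(lo);
+;   }
+;
+; Wider counts repeat this step, adding 64/128/256/512 as the cmovne
+; selection climbs the tree.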
+
+define i32 @load_ctlz_i1024(ptr %p0) nounwind {
+; SSE-LABEL: load_ctlz_i1024:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %r13
+; SSE-NEXT: pushq %r12
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq 40(%rdi), %rbp
+; SSE-NEXT: movq 64(%rdi), %rbx
+; SSE-NEXT: movq 72(%rdi), %r11
+; SSE-NEXT: movq 80(%rdi), %r12
+; SSE-NEXT: movq 88(%rdi), %r14
+; SSE-NEXT: movq 96(%rdi), %rsi
+; SSE-NEXT: movq 104(%rdi), %r9
+; SSE-NEXT: movq 112(%rdi), %r10
+; SSE-NEXT: movq 120(%rdi), %r8
+; SSE-NEXT: bsrq %r8, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r10, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: orl $64, %ecx
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: bsrq %r9, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: bsrq %rsi, %rax
+; SSE-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: orl $64, %eax
+; SSE-NEXT: testq %r9, %r9
+; SSE-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: movq %r10, %rdx
+; SSE-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: orq %r8, %rdx
+; SSE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: bsrq %r14, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: bsrq %r12, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: orl $64, %edx
+; SSE-NEXT: testq %r14, %r14
+; SSE-NEXT: cmovnel %ecx, %edx
+; SSE-NEXT: bsrq %r11, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: bsrq %rbx, %r15
+; SSE-NEXT: xorl $63, %r15d
+; SSE-NEXT: orl $64, %r15d
+; SSE-NEXT: testq %r11, %r11
+; SSE-NEXT: cmovnel %ecx, %r15d
+; SSE-NEXT: subl $-128, %r15d
+; SSE-NEXT: movq %r12, %rcx
+; SSE-NEXT: orq %r14, %rcx
+; SSE-NEXT: cmovnel %edx, %r15d
+; SSE-NEXT: movq 48(%rdi), %r12
+; SSE-NEXT: addl $256, %r15d # imm = 0x100
+; SSE-NEXT: movq %r9, %rcx
+; SSE-NEXT: orq %r8, %rcx
+; SSE-NEXT: movq %rsi, %rdx
+; SSE-NEXT: orq %r10, %rdx
+; SSE-NEXT: orq %rcx, %rdx
+; SSE-NEXT: movq 56(%rdi), %r13
+; SSE-NEXT: cmovnel %eax, %r15d
+; SSE-NEXT: bsrq %r13, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r12, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: orl $64, %ecx
+; SSE-NEXT: testq %r13, %r13
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: movq %rbp, %r10
+; SSE-NEXT: bsrq %rbp, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: movq 32(%rdi), %r8
+; SSE-NEXT: bsrq %r8, %rbp
+; SSE-NEXT: xorl $63, %ebp
+; SSE-NEXT: orl $64, %ebp
+; SSE-NEXT: testq %r10, %r10
+; SSE-NEXT: cmovnel %eax, %ebp
+; SSE-NEXT: subl $-128, %ebp
+; SSE-NEXT: movq %r12, %rax
+; SSE-NEXT: orq %r13, %rax
+; SSE-NEXT: cmovnel %ecx, %ebp
+; SSE-NEXT: movq 24(%rdi), %r9
+; SSE-NEXT: bsrq %r9, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: movq 16(%rdi), %rsi
+; SSE-NEXT: bsrq %rsi, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: orl $64, %ecx
+; SSE-NEXT: testq %r9, %r9
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq (%rdi), %rax
+; SSE-NEXT: movq 8(%rdi), %rdi
+; SSE-NEXT: bsrq %rdi, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rdi, %rdi
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %r9, %rsi
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: orq %r13, %r10
+; SSE-NEXT: orq %r12, %r8
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r10, %r8
+; SSE-NEXT: cmovnel %ebp, %eax
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE-NEXT: orq %r14, %r11
+; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; SSE-NEXT: orq %rcx, %rbx
+; SSE-NEXT: addl $512, %eax # imm = 0x200
+; SSE-NEXT: orq %r11, %rbx
+; SSE-NEXT: cmovnel %r15d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r12
+; SSE-NEXT: popq %r13
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: popq %rbp
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctlz_i1024:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq 48(%rdi), %r9
+; AVX2-NEXT: movq 56(%rdi), %rbp
+; AVX2-NEXT: movq 64(%rdi), %r11
+; AVX2-NEXT: movq 72(%rdi), %r10
+; AVX2-NEXT: movq 80(%rdi), %r14
+; AVX2-NEXT: movq 88(%rdi), %rbx
+; AVX2-NEXT: movq 96(%rdi), %rdx
+; AVX2-NEXT: movq 104(%rdi), %r8
+; AVX2-NEXT: movq 112(%rdi), %rsi
+; AVX2-NEXT: movq 120(%rdi), %r15
+; AVX2-NEXT: lzcntq %r15, %rax
+; AVX2-NEXT: lzcntq %rsi, %rcx
+; AVX2-NEXT: addl $64, %ecx
+; AVX2-NEXT: testq %r15, %r15
+; AVX2-NEXT: cmovnel %eax, %ecx
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: lzcntq %r8, %r12
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rdx, %rax
+; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: cmovnel %r12d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: movq %rsi, %r12
+; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: orq %r15, %r12
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %rbx, %rcx
+; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: xorl %r13d, %r13d
+; AVX2-NEXT: lzcntq %r14, %r13
+; AVX2-NEXT: addl $64, %r13d
+; AVX2-NEXT: testq %rbx, %rbx
+; AVX2-NEXT: cmovnel %ecx, %r13d
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %r10, %rcx
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: lzcntq %r11, %r12
+; AVX2-NEXT: addl $64, %r12d
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %ecx, %r12d
+; AVX2-NEXT: subl $-128, %r12d
+; AVX2-NEXT: movq %r14, %rcx
+; AVX2-NEXT: orq %rbx, %rcx
+; AVX2-NEXT: cmovnel %r13d, %r12d
+; AVX2-NEXT: addl $256, %r12d # imm = 0x100
+; AVX2-NEXT: movq %r8, %rcx
+; AVX2-NEXT: orq %r15, %rcx
+; AVX2-NEXT: orq %rsi, %rdx
+; AVX2-NEXT: orq %rcx, %rdx
+; AVX2-NEXT: cmovnel %eax, %r12d
+; AVX2-NEXT: movq %rbp, %r14
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %rbp, %rcx
+; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %r9, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rbp, %rbp
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: movq 32(%rdi), %r13
+; AVX2-NEXT: xorl %ebp, %ebp
+; AVX2-NEXT: lzcntq %r13, %rbp
+; AVX2-NEXT: addl $64, %ebp
+; AVX2-NEXT: movq 40(%rdi), %r8
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: lzcntq %r8, %rdx
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %edx, %ebp
+; AVX2-NEXT: subl $-128, %ebp
+; AVX2-NEXT: movq %r9, %rdx
+; AVX2-NEXT: orq %r14, %rdx
+; AVX2-NEXT: cmovnel %eax, %ebp
+; AVX2-NEXT: movq 16(%rdi), %r9
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %r9, %rcx
+; AVX2-NEXT: addl $64, %ecx
+; AVX2-NEXT: movq 24(%rdi), %rdx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rdx, %rax
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %ecx
+; AVX2-NEXT: movq 8(%rdi), %rsi
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq (%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: lzcntq %rsi, %rdi
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %edi, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rdx, %r9
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: orq %r14, %r8
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r8, %r13
+; AVX2-NEXT: cmovnel %ebp, %eax
+; AVX2-NEXT: orq %r15, %rbx
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX2-NEXT: orq %rbx, %r10
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX2-NEXT: orq %rcx, %r11
+; AVX2-NEXT: addl $512, %eax # imm = 0x200
+; AVX2-NEXT: orq %r10, %r11
+; AVX2-NEXT: cmovnel %r12d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctlz_i1024:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq 32(%rdi), %r14
+; AVX512-NEXT: movq 48(%rdi), %rbp
+; AVX512-NEXT: movq 64(%rdi), %r11
+; AVX512-NEXT: movq 72(%rdi), %r10
+; AVX512-NEXT: movq 80(%rdi), %rdx
+; AVX512-NEXT: movq 88(%rdi), %rbx
+; AVX512-NEXT: movq 96(%rdi), %rsi
+; AVX512-NEXT: movq 104(%rdi), %r9
+; AVX512-NEXT: movq 112(%rdi), %r8
+; AVX512-NEXT: movq 120(%rdi), %r15
+; AVX512-NEXT: lzcntq %r15, %rax
+; AVX512-NEXT: lzcntq %r8, %rcx
+; AVX512-NEXT: addl $64, %ecx
+; AVX512-NEXT: testq %r15, %r15
+; AVX512-NEXT: cmovnel %eax, %ecx
+; AVX512-NEXT: lzcntq %r9, %r12
+; AVX512-NEXT: lzcntq %rsi, %rax
+; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %r9, %r9
+; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: cmovnel %r12d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: movq %r8, %r12
+; AVX512-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: orq %r15, %r12
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: lzcntq %rbx, %rcx
+; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: lzcntq %rdx, %r13
+; AVX512-NEXT: addl $64, %r13d
+; AVX512-NEXT: testq %rbx, %rbx
+; AVX512-NEXT: cmovnel %ecx, %r13d
+; AVX512-NEXT: lzcntq %r10, %rcx
+; AVX512-NEXT: lzcntq %r11, %r12
+; AVX512-NEXT: addl $64, %r12d
+; AVX512-NEXT: testq %r10, %r10
+; AVX512-NEXT: cmovnel %ecx, %r12d
+; AVX512-NEXT: subl $-128, %r12d
+; AVX512-NEXT: movq %rdx, %rcx
+; AVX512-NEXT: orq %rbx, %rcx
+; AVX512-NEXT: cmovnel %r13d, %r12d
+; AVX512-NEXT: addl $256, %r12d # imm = 0x100
+; AVX512-NEXT: movq %r9, %rcx
+; AVX512-NEXT: orq %r15, %rcx
+; AVX512-NEXT: orq %r8, %rsi
+; AVX512-NEXT: orq %rcx, %rsi
+; AVX512-NEXT: movq 56(%rdi), %r13
+; AVX512-NEXT: cmovnel %eax, %r12d
+; AVX512-NEXT: lzcntq %r13, %rcx
+; AVX512-NEXT: movq %rbp, %rsi
+; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: lzcntq %rbp, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %r13, %r13
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: lzcntq %r14, %rbp
+; AVX512-NEXT: addl $64, %ebp
+; AVX512-NEXT: movq 40(%rdi), %r8
+; AVX512-NEXT: lzcntq %r8, %rdx
+; AVX512-NEXT: testq %r8, %r8
+; AVX512-NEXT: cmovnel %edx, %ebp
+; AVX512-NEXT: subl $-128, %ebp
+; AVX512-NEXT: movq %rsi, %rdx
+; AVX512-NEXT: orq %r13, %rdx
+; AVX512-NEXT: cmovnel %eax, %ebp
+; AVX512-NEXT: movq 16(%rdi), %r9
+; AVX512-NEXT: lzcntq %r9, %rcx
+; AVX512-NEXT: addl $64, %ecx
+; AVX512-NEXT: movq 24(%rdi), %rdx
+; AVX512-NEXT: lzcntq %rdx, %rax
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %ecx
+; AVX512-NEXT: movq 8(%rdi), %rsi
+; AVX512-NEXT: lzcntq (%rdi), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: lzcntq %rsi, %rdi
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %edi, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %rdx, %r9
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: orq %r13, %r8
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r8, %r14
+; AVX512-NEXT: cmovnel %ebp, %eax
+; AVX512-NEXT: orq %r15, %rbx
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX512-NEXT: orq %rbx, %r10
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX512-NEXT: orq %rcx, %r11
+; AVX512-NEXT: addl $512, %eax # imm = 0x200
+; AVX512-NEXT: orq %r10, %r11
+; AVX512-NEXT: cmovnel %r12d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
+ %a0 = load i1024, ptr %p0
+ %cnt = call i1024 @llvm.ctlz.i1024(i1024 %a0, i1 0)
+ %res = trunc i1024 %cnt to i32
+ ret i32 %res
+}
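+
+; The "xorl %ecx, %ecx"-style zeroing that precedes most lzcntq instructions
+; on the AVX2 path appears to be LLVM's workaround for the false output
+; dependency of LZCNT/TZCNT on several Intel cores (the hardware treats the
+; destination register as an input); the AVX512 run line's tuning does not
+; request it, which is why those check lines are otherwise nearly identical
+; but shorter.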
+
+;
+; CTTZ
+;
+
+define i32 @test_cttz_i128(i128 %a0) nounwind {
+; SSE-LABEL: test_cttz_i128:
+; SSE: # %bb.0:
+; SSE-NEXT: rep bsfq %rdi, %rcx
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq %rsi, %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rdi, %rdi
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_cttz_i128:
+; AVX2: # %bb.0:
+; AVX2-NEXT: tzcntq %rdi, %rcx
+; AVX2-NEXT: tzcntq %rsi, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rdi, %rdi
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_cttz_i128:
+; AVX512: # %bb.0:
+; AVX512-NEXT: tzcntq %rdi, %rcx
+; AVX512-NEXT: tzcntq %rsi, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rdi, %rdi
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %cnt = call i128 @llvm.cttz.i128(i128 %a0, i1 0)
+ %res = trunc i128 %cnt to i32
+ ret i32 %res
+}
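+
+; This i128 expansion is the base case for every wider CTTZ below: count the
+; low limb, count the high limb plus 64, and select on whether the low limb
+; is zero. A hedged C sketch of what the cmov computes (illustrative names;
+; _tzcnt_u64 from <immintrin.h> returns 64 on zero, so an all-zero input
+; yields 128):
+;
+;   unsigned cttz128(uint64_t lo, uint64_t hi) {
+;     return lo ? _tzcnt_u64(lo) : 64 + _tzcnt_u64(hi);
+;   }
+;
+; On the SSE path the count is "rep bsfq": the REP-prefixed BSF encoding is
+; TZCNT on BMI hardware, while older cores ignore the prefix and execute
+; plain BSF, which in practice leaves the destination unmodified on a zero
+; source; the "movl $64, %eax" preload supplies that fallback.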
+
+define i32 @load_cttz_i128(ptr %p0) nounwind {
+; SSE-LABEL: load_cttz_i128:
+; SSE: # %bb.0:
+; SSE-NEXT: movq (%rdi), %rcx
+; SSE-NEXT: rep bsfq %rcx, %rdx
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq 8(%rdi), %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rcx, %rcx
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_cttz_i128:
+; AVX2: # %bb.0:
+; AVX2-NEXT: movq (%rdi), %rcx
+; AVX2-NEXT: tzcntq %rcx, %rdx
+; AVX2-NEXT: tzcntq 8(%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %edx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_cttz_i128:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movq (%rdi), %rcx
+; AVX512-NEXT: tzcntq %rcx, %rdx
+; AVX512-NEXT: tzcntq 8(%rdi), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %edx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i128, ptr %p0
+ %cnt = call i128 @llvm.cttz.i128(i128 %a0, i1 0)
+ %res = trunc i128 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_cttz_i256(i256 %a0) nounwind {
+; SSE-LABEL: test_cttz_i256:
+; SSE: # %bb.0:
+; SSE-NEXT: rep bsfq %rdi, %rax
+; SSE-NEXT: rep bsfq %rsi, %r8
+; SSE-NEXT: addl $64, %r8d
+; SSE-NEXT: testq %rdi, %rdi
+; SSE-NEXT: cmovnel %eax, %r8d
+; SSE-NEXT: rep bsfq %rdx, %r9
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq %rcx, %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %r9d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rsi, %rdi
+; SSE-NEXT: cmovnel %r8d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_cttz_i256:
+; AVX2: # %bb.0:
+; AVX2-NEXT: tzcntq %rdi, %rax
+; AVX2-NEXT: tzcntq %rsi, %r8
+; AVX2-NEXT: addl $64, %r8d
+; AVX2-NEXT: testq %rdi, %rdi
+; AVX2-NEXT: cmovnel %eax, %r8d
+; AVX2-NEXT: tzcntq %rdx, %r9
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rcx, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %r9d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rsi, %rdi
+; AVX2-NEXT: cmovnel %r8d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_cttz_i256:
+; AVX512: # %bb.0:
+; AVX512-NEXT: tzcntq %rdi, %rax
+; AVX512-NEXT: tzcntq %rsi, %r8
+; AVX512-NEXT: addl $64, %r8d
+; AVX512-NEXT: testq %rdi, %rdi
+; AVX512-NEXT: cmovnel %eax, %r8d
+; AVX512-NEXT: tzcntq %rdx, %r9
+; AVX512-NEXT: tzcntq %rcx, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %r9d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %rsi, %rdi
+; AVX512-NEXT: cmovnel %r8d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %cnt = call i256 @llvm.cttz.i256(i256 %a0, i1 0)
+ %res = trunc i256 %cnt to i32
+ ret i32 %res
+}
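+
+; The i256 version is two of the i128 combines glued by one more cmov. The
+; upper half's count gets 128 added via "subl $-128": subtracting -128 adds
+; 128 while still fitting the sign-extended 8-bit immediate encoding, where
+; "addl $128" would need a 32-bit immediate. A hedged sketch (illustrative
+; names, limbs little-endian):
+;
+;   unsigned cttz256(const uint64_t x[4]) {
+;     unsigned lo = cttz128(x[0], x[1]);
+;     unsigned hi = cttz128(x[2], x[3]) + 128;
+;     return (x[0] | x[1]) ? lo : hi;
+;   }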
+
+define i32 @load_cttz_i256(ptr %p0) nounwind {
+; SSE-LABEL: load_cttz_i256:
+; SSE: # %bb.0:
+; SSE-NEXT: movq 16(%rdi), %rcx
+; SSE-NEXT: movq (%rdi), %rdx
+; SSE-NEXT: movq 8(%rdi), %rsi
+; SSE-NEXT: rep bsfq %rdx, %rax
+; SSE-NEXT: rep bsfq %rsi, %r8
+; SSE-NEXT: addl $64, %r8d
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %r8d
+; SSE-NEXT: rep bsfq %rcx, %r9
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq 24(%rdi), %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rcx, %rcx
+; SSE-NEXT: cmovnel %r9d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rsi, %rdx
+; SSE-NEXT: cmovnel %r8d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_cttz_i256:
+; AVX2: # %bb.0:
+; AVX2-NEXT: movq (%rdi), %rcx
+; AVX2-NEXT: movq 8(%rdi), %rdx
+; AVX2-NEXT: tzcntq %rcx, %rax
+; AVX2-NEXT: tzcntq %rdx, %rsi
+; AVX2-NEXT: addl $64, %esi
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %eax, %esi
+; AVX2-NEXT: movq 16(%rdi), %r8
+; AVX2-NEXT: tzcntq %r8, %r9
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq 24(%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %r9d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rdx, %rcx
+; AVX2-NEXT: cmovnel %esi, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_cttz_i256:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movq 16(%rdi), %rcx
+; AVX512-NEXT: movq (%rdi), %rdx
+; AVX512-NEXT: movq 8(%rdi), %rsi
+; AVX512-NEXT: tzcntq %rdx, %rax
+; AVX512-NEXT: tzcntq %rsi, %r8
+; AVX512-NEXT: addl $64, %r8d
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %r8d
+; AVX512-NEXT: tzcntq %rcx, %r9
+; AVX512-NEXT: tzcntq 24(%rdi), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %r9d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %rsi, %rdx
+; AVX512-NEXT: cmovnel %r8d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i256, ptr %p0
+ %cnt = call i256 @llvm.cttz.i256(i256 %a0, i1 0)
+ %res = trunc i256 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_cttz_i512(i512 %a0) nounwind {
+; SSE-LABEL: test_cttz_i512:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: rep bsfq %rdi, %rax
+; SSE-NEXT: rep bsfq %rsi, %r11
+; SSE-NEXT: addl $64, %r11d
+; SSE-NEXT: testq %rdi, %rdi
+; SSE-NEXT: cmovnel %eax, %r11d
+; SSE-NEXT: rep bsfq %rdx, %rax
+; SSE-NEXT: rep bsfq %rcx, %r10
+; SSE-NEXT: addl $64, %r10d
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %r10d
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; SSE-NEXT: subl $-128, %r10d
+; SSE-NEXT: movq %rdi, %rax
+; SSE-NEXT: orq %rsi, %rax
+; SSE-NEXT: cmovnel %r11d, %r10d
+; SSE-NEXT: rep bsfq %r8, %rax
+; SSE-NEXT: rep bsfq %r9, %r11
+; SSE-NEXT: addl $64, %r11d
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %eax, %r11d
+; SSE-NEXT: rep bsfq %rbx, %r14
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rbx, %rbx
+; SSE-NEXT: cmovnel %r14d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %r9, %r8
+; SSE-NEXT: cmovnel %r11d, %eax
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %rcx, %rsi
+; SSE-NEXT: orq %rdx, %rdi
+; SSE-NEXT: orq %rsi, %rdi
+; SSE-NEXT: cmovnel %r10d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r14
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_cttz_i512:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: tzcntq %rdi, %rax
+; AVX2-NEXT: tzcntq %rsi, %r11
+; AVX2-NEXT: addl $64, %r11d
+; AVX2-NEXT: testq %rdi, %rdi
+; AVX2-NEXT: cmovnel %eax, %r11d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rdx, %rax
+; AVX2-NEXT: tzcntq %rcx, %r10
+; AVX2-NEXT: addl $64, %r10d
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %r10d
+; AVX2-NEXT: subl $-128, %r10d
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: orq %rsi, %rax
+; AVX2-NEXT: cmovnel %r11d, %r10d
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %r8, %rax
+; AVX2-NEXT: xorl %ebx, %ebx
+; AVX2-NEXT: tzcntq %r9, %rbx
+; AVX2-NEXT: addl $64, %ebx
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %eax, %ebx
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: tzcntq %r11, %r14
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r11, %r11
+; AVX2-NEXT: cmovnel %r14d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %r9, %r8
+; AVX2-NEXT: cmovnel %ebx, %eax
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %rcx, %rsi
+; AVX2-NEXT: orq %rdx, %rdi
+; AVX2-NEXT: orq %rsi, %rdi
+; AVX2-NEXT: cmovnel %r10d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_cttz_i512:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: tzcntq %rdi, %rax
+; AVX512-NEXT: tzcntq %rsi, %rbx
+; AVX512-NEXT: addl $64, %ebx
+; AVX512-NEXT: testq %rdi, %rdi
+; AVX512-NEXT: cmovnel %eax, %ebx
+; AVX512-NEXT: tzcntq %rdx, %rax
+; AVX512-NEXT: tzcntq %rcx, %r10
+; AVX512-NEXT: addl $64, %r10d
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %r10d
+; AVX512-NEXT: subl $-128, %r10d
+; AVX512-NEXT: movq %rdi, %rax
+; AVX512-NEXT: orq %rsi, %rax
+; AVX512-NEXT: cmovnel %ebx, %r10d
+; AVX512-NEXT: tzcntq %r8, %rax
+; AVX512-NEXT: tzcntq %r9, %rbx
+; AVX512-NEXT: addl $64, %ebx
+; AVX512-NEXT: testq %r8, %r8
+; AVX512-NEXT: cmovnel %eax, %ebx
+; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: tzcntq %r11, %r14
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %r11, %r11
+; AVX512-NEXT: cmovnel %r14d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %r9, %r8
+; AVX512-NEXT: cmovnel %ebx, %eax
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %rcx, %rsi
+; AVX512-NEXT: orq %rdx, %rdi
+; AVX512-NEXT: orq %rsi, %rdi
+; AVX512-NEXT: cmovnel %r10d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: retq
+ %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 0)
+ %res = trunc i512 %cnt to i32
+ ret i32 %res
+}
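+
+; i512 repeats the halving once more: two i256 counts, the upper one offset
+; by "addl $256", selected by an OR-reduction of the low four limbs
+; ("orq %rcx, %rsi; orq %rdx, %rdi; orq %rsi, %rdi"). The {{[0-9]+}}(%rsp)
+; operands appear because an i512 argument no longer fits in the six integer
+; argument registers, so the last two limbs arrive on the stack.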
+
+define i32 @load_cttz_i512(ptr %p0) nounwind {
+; SSE-LABEL: load_cttz_i512:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq 48(%rdi), %r10
+; SSE-NEXT: movq 40(%rdi), %r9
+; SSE-NEXT: movq 24(%rdi), %r8
+; SSE-NEXT: movq 16(%rdi), %rdx
+; SSE-NEXT: movq (%rdi), %rcx
+; SSE-NEXT: movq 8(%rdi), %rsi
+; SSE-NEXT: rep bsfq %rcx, %rax
+; SSE-NEXT: rep bsfq %rsi, %rbx
+; SSE-NEXT: addl $64, %ebx
+; SSE-NEXT: testq %rcx, %rcx
+; SSE-NEXT: cmovnel %eax, %ebx
+; SSE-NEXT: rep bsfq %rdx, %rax
+; SSE-NEXT: rep bsfq %r8, %r11
+; SSE-NEXT: addl $64, %r11d
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %r11d
+; SSE-NEXT: movq 32(%rdi), %r14
+; SSE-NEXT: subl $-128, %r11d
+; SSE-NEXT: movq %rcx, %rax
+; SSE-NEXT: orq %rsi, %rax
+; SSE-NEXT: cmovnel %ebx, %r11d
+; SSE-NEXT: rep bsfq %r14, %rax
+; SSE-NEXT: rep bsfq %r9, %rbx
+; SSE-NEXT: addl $64, %ebx
+; SSE-NEXT: testq %r14, %r14
+; SSE-NEXT: cmovnel %eax, %ebx
+; SSE-NEXT: rep bsfq %r10, %r15
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq 56(%rdi), %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r10, %r10
+; SSE-NEXT: cmovnel %r15d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %r9, %r14
+; SSE-NEXT: cmovnel %ebx, %eax
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r8, %rsi
+; SSE-NEXT: orq %rdx, %rcx
+; SSE-NEXT: orq %rsi, %rcx
+; SSE-NEXT: cmovnel %r11d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_cttz_i512:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq 48(%rdi), %r10
+; AVX2-NEXT: movq 40(%rdi), %r9
+; AVX2-NEXT: movq 24(%rdi), %r8
+; AVX2-NEXT: movq 16(%rdi), %rdx
+; AVX2-NEXT: movq (%rdi), %rcx
+; AVX2-NEXT: movq 8(%rdi), %rsi
+; AVX2-NEXT: tzcntq %rcx, %rax
+; AVX2-NEXT: xorl %ebx, %ebx
+; AVX2-NEXT: tzcntq %rsi, %rbx
+; AVX2-NEXT: addl $64, %ebx
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %eax, %ebx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rdx, %rax
+; AVX2-NEXT: tzcntq %r8, %r11
+; AVX2-NEXT: addl $64, %r11d
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %r11d
+; AVX2-NEXT: subl $-128, %r11d
+; AVX2-NEXT: movq %rcx, %rax
+; AVX2-NEXT: orq %rsi, %rax
+; AVX2-NEXT: cmovnel %ebx, %r11d
+; AVX2-NEXT: movq 32(%rdi), %rbx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rbx, %rax
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: tzcntq %r9, %r14
+; AVX2-NEXT: addl $64, %r14d
+; AVX2-NEXT: testq %rbx, %rbx
+; AVX2-NEXT: cmovnel %eax, %r14d
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: tzcntq %r10, %r15
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq 56(%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %r15d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %r9, %rbx
+; AVX2-NEXT: cmovnel %r14d, %eax
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r8, %rsi
+; AVX2-NEXT: orq %rdx, %rcx
+; AVX2-NEXT: orq %rsi, %rcx
+; AVX2-NEXT: cmovnel %r11d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_cttz_i512:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq 48(%rdi), %r11
+; AVX512-NEXT: movq 40(%rdi), %r9
+; AVX512-NEXT: movq 32(%rdi), %r10
+; AVX512-NEXT: movq 24(%rdi), %r8
+; AVX512-NEXT: movq 16(%rdi), %rdx
+; AVX512-NEXT: movq (%rdi), %rcx
+; AVX512-NEXT: movq 8(%rdi), %rsi
+; AVX512-NEXT: tzcntq %rcx, %rax
+; AVX512-NEXT: tzcntq %rsi, %r14
+; AVX512-NEXT: addl $64, %r14d
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %eax, %r14d
+; AVX512-NEXT: tzcntq %rdx, %rax
+; AVX512-NEXT: tzcntq %r8, %rbx
+; AVX512-NEXT: addl $64, %ebx
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %ebx
+; AVX512-NEXT: subl $-128, %ebx
+; AVX512-NEXT: movq %rcx, %rax
+; AVX512-NEXT: orq %rsi, %rax
+; AVX512-NEXT: cmovnel %r14d, %ebx
+; AVX512-NEXT: tzcntq %r10, %rax
+; AVX512-NEXT: tzcntq %r9, %r14
+; AVX512-NEXT: addl $64, %r14d
+; AVX512-NEXT: testq %r10, %r10
+; AVX512-NEXT: cmovnel %eax, %r14d
+; AVX512-NEXT: tzcntq 56(%rdi), %rax
+; AVX512-NEXT: tzcntq %r11, %rdi
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %r11, %r11
+; AVX512-NEXT: cmovnel %edi, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %r9, %r10
+; AVX512-NEXT: cmovnel %r14d, %eax
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r8, %rsi
+; AVX512-NEXT: orq %rdx, %rcx
+; AVX512-NEXT: orq %rsi, %rcx
+; AVX512-NEXT: cmovnel %ebx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: retq
+ %a0 = load i512, ptr %p0
+ %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 0)
+ %res = trunc i512 %cnt to i32
+ ret i32 %res
+}
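+
+; In the load_* variants the limbs come from memory, so the expansion can
+; fold one load straight into the count instruction (e.g. the
+; "rep bsfq 56(%rdi), %rax" / "tzcntq 56(%rdi), %rax" above) rather than
+; staging every limb through a register first.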
+
+define i32 @test_cttz_i1024(i1024 %a0) nounwind {
+; SSE-LABEL: test_cttz_i1024:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %r13
+; SSE-NEXT: pushq %r12
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq %r9, %r13
+; SSE-NEXT: movq %r8, %r14
+; SSE-NEXT: movq %rcx, %rbx
+; SSE-NEXT: movq %rdx, %r10
+; SSE-NEXT: movq %rsi, %r9
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; SSE-NEXT: rep bsfq %rdi, %rax
+; SSE-NEXT: rep bsfq %r9, %r15
+; SSE-NEXT: addl $64, %r15d
+; SSE-NEXT: testq %rdi, %rdi
+; SSE-NEXT: cmovnel %eax, %r15d
+; SSE-NEXT: rep bsfq %r10, %r12
+; SSE-NEXT: rep bsfq %rcx, %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r10, %r10
+; SSE-NEXT: cmovnel %r12d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: movq %rdi, %r12
+; SSE-NEXT: orq %r9, %r12
+; SSE-NEXT: cmovnel %r15d, %eax
+; SSE-NEXT: rep bsfq %r8, %r15
+; SSE-NEXT: movq %r13, %rcx
+; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: rep bsfq %r13, %r13
+; SSE-NEXT: addl $64, %r13d
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %r15d, %r13d
+; SSE-NEXT: rep bsfq %rdx, %r12
+; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %r15
+; SSE-NEXT: addl $64, %r15d
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %r12d, %r15d
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; SSE-NEXT: subl $-128, %r15d
+; SSE-NEXT: movq %r8, %rbp
+; SSE-NEXT: orq %rcx, %rbp
+; SSE-NEXT: cmovnel %r13d, %r15d
+; SSE-NEXT: addl $256, %r15d # imm = 0x100
+; SSE-NEXT: movq %r9, %r13
+; SSE-NEXT: orq %rbx, %r13
+; SSE-NEXT: movq %rdi, %rbp
+; SSE-NEXT: orq %r10, %rbp
+; SSE-NEXT: orq %r13, %rbp
+; SSE-NEXT: cmovnel %eax, %r15d
+; SSE-NEXT: rep bsfq %r11, %r13
+; SSE-NEXT: rep bsfq %r12, %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r11, %r11
+; SSE-NEXT: cmovnel %r13d, %eax
+; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %r13
+; SSE-NEXT: addl $64, %r13d
+; SSE-NEXT: rep bsfq %rsi, %rcx
+; SSE-NEXT: testq %rsi, %rsi
+; SSE-NEXT: cmovnel %ecx, %r13d
+; SSE-NEXT: subl $-128, %r13d
+; SSE-NEXT: movq %r11, %rcx
+; SSE-NEXT: orq %r12, %rcx
+; SSE-NEXT: cmovnel %eax, %r13d
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rbp
+; SSE-NEXT: rep bsfq %rbp, %rcx
+; SSE-NEXT: addl $64, %ecx
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; SSE-NEXT: rep bsfq %rdx, %rax
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; SSE-NEXT: rep bsfq %r8, %rsi
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %esi, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rbp, %rdx
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r12
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r12, %r11
+; SSE-NEXT: cmovnel %r13d, %eax
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %rbx
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
+; SSE-NEXT: orq %rbx, %r9
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r10
+; SSE-NEXT: orq %r14, %rdi
+; SSE-NEXT: orq %r10, %rdi
+; SSE-NEXT: addl $512, %eax # imm = 0x200
+; SSE-NEXT: orq %r9, %rdi
+; SSE-NEXT: cmovnel %r15d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r12
+; SSE-NEXT: popq %r13
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: popq %rbp
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_cttz_i1024:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq %r9, %rbx
+; AVX2-NEXT: movq %r8, %r14
+; AVX2-NEXT: movq %rcx, %r11
+; AVX2-NEXT: movq %rdx, %r10
+; AVX2-NEXT: movq %rsi, %r9
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: tzcntq %rdi, %rax
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: tzcntq %r9, %r15
+; AVX2-NEXT: addl $64, %r15d
+; AVX2-NEXT: testq %rdi, %rdi
+; AVX2-NEXT: cmovnel %eax, %r15d
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: tzcntq %r10, %r12
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %r11, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %r12d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: movq %rdi, %r12
+; AVX2-NEXT: orq %r9, %r12
+; AVX2-NEXT: cmovnel %r15d, %eax
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: tzcntq %r14, %r15
+; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: tzcntq %rbx, %r12
+; AVX2-NEXT: addl $64, %r12d
+; AVX2-NEXT: testq %r14, %r14
+; AVX2-NEXT: cmovnel %r15d, %r12d
+; AVX2-NEXT: xorl %r13d, %r13d
+; AVX2-NEXT: tzcntq %rcx, %r13
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: tzcntq %rdx, %r15
+; AVX2-NEXT: addl $64, %r15d
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %r13d, %r15d
+; AVX2-NEXT: subl $-128, %r15d
+; AVX2-NEXT: movq %r14, %r13
+; AVX2-NEXT: orq %rbx, %r13
+; AVX2-NEXT: cmovnel %r12d, %r15d
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; AVX2-NEXT: addl $256, %r15d # imm = 0x100
+; AVX2-NEXT: movq %r9, %r13
+; AVX2-NEXT: orq %r11, %r13
+; AVX2-NEXT: movq %rdi, %rbp
+; AVX2-NEXT: orq %r10, %rbp
+; AVX2-NEXT: orq %r13, %rbp
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; AVX2-NEXT: cmovnel %eax, %r15d
+; AVX2-NEXT: xorl %ebp, %ebp
+; AVX2-NEXT: tzcntq %r12, %rbp
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %r13, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r12, %r12
+; AVX2-NEXT: cmovnel %ebp, %eax
+; AVX2-NEXT: xorl %ebp, %ebp
+; AVX2-NEXT: tzcntq %r8, %rbp
+; AVX2-NEXT: addl $64, %ebp
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: tzcntq %rsi, %rcx
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %ecx, %ebp
+; AVX2-NEXT: subl $-128, %ebp
+; AVX2-NEXT: movq %r12, %rcx
+; AVX2-NEXT: orq %r13, %rcx
+; AVX2-NEXT: cmovnel %eax, %ebp
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: tzcntq %rbx, %rcx
+; AVX2-NEXT: addl $64, %ecx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rdx, %rax
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX2-NEXT: tzcntq %r8, %rsi
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %esi, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rbx, %rdx
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r13
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r12
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r13, %r12
+; AVX2-NEXT: cmovnel %ebp, %eax
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
+; AVX2-NEXT: orq %r11, %r9
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r10
+; AVX2-NEXT: orq %r14, %rdi
+; AVX2-NEXT: orq %r10, %rdi
+; AVX2-NEXT: addl $512, %eax # imm = 0x200
+; AVX2-NEXT: orq %r9, %rdi
+; AVX2-NEXT: cmovnel %r15d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_cttz_i1024:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq %r9, %r14
+; AVX512-NEXT: movq %r8, %r15
+; AVX512-NEXT: movq %rcx, %r11
+; AVX512-NEXT: movq %rdx, %r10
+; AVX512-NEXT: movq %rsi, %r9
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; AVX512-NEXT: tzcntq %rdi, %rax
+; AVX512-NEXT: tzcntq %r9, %r12
+; AVX512-NEXT: addl $64, %r12d
+; AVX512-NEXT: testq %rdi, %rdi
+; AVX512-NEXT: cmovnel %eax, %r12d
+; AVX512-NEXT: tzcntq %rdx, %r13
+; AVX512-NEXT: tzcntq %r11, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %r13d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: movq %rdi, %r13
+; AVX512-NEXT: orq %r9, %r13
+; AVX512-NEXT: cmovnel %r12d, %eax
+; AVX512-NEXT: tzcntq %r8, %r12
+; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: tzcntq %r14, %r13
+; AVX512-NEXT: addl $64, %r13d
+; AVX512-NEXT: testq %r8, %r8
+; AVX512-NEXT: cmovnel %r12d, %r13d
+; AVX512-NEXT: tzcntq %rcx, %rbp
+; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %r12
+; AVX512-NEXT: addl $64, %r12d
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %ebp, %r12d
+; AVX512-NEXT: subl $-128, %r12d
+; AVX512-NEXT: movq %r8, %rbp
+; AVX512-NEXT: orq %r14, %rbp
+; AVX512-NEXT: cmovnel %r13d, %r12d
+; AVX512-NEXT: addl $256, %r12d # imm = 0x100
+; AVX512-NEXT: movq %r9, %r13
+; AVX512-NEXT: orq %r11, %r13
+; AVX512-NEXT: movq %rdi, %rbp
+; AVX512-NEXT: orq %rdx, %rbp
+; AVX512-NEXT: orq %r13, %rbp
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; AVX512-NEXT: cmovnel %eax, %r12d
+; AVX512-NEXT: tzcntq %rbx, %rbp
+; AVX512-NEXT: tzcntq %r13, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rbx, %rbx
+; AVX512-NEXT: cmovnel %ebp, %eax
+; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %rbp
+; AVX512-NEXT: addl $64, %ebp
+; AVX512-NEXT: tzcntq %rsi, %rcx
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %ecx, %ebp
+; AVX512-NEXT: subl $-128, %ebp
+; AVX512-NEXT: movq %rbx, %rcx
+; AVX512-NEXT: orq %r13, %rcx
+; AVX512-NEXT: cmovnel %eax, %ebp
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; AVX512-NEXT: tzcntq %r14, %rcx
+; AVX512-NEXT: addl $64, %ecx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX512-NEXT: tzcntq %rdx, %rax
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %ecx
+; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX512-NEXT: tzcntq %r8, %rsi
+; AVX512-NEXT: testq %r8, %r8
+; AVX512-NEXT: cmovnel %esi, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %r14, %rdx
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r13
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %rbx
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r13, %rbx
+; AVX512-NEXT: cmovnel %ebp, %eax
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
+; AVX512-NEXT: orq %r11, %r9
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: orq %r15, %rdi
+; AVX512-NEXT: orq %r10, %rdi
+; AVX512-NEXT: addl $512, %eax # imm = 0x200
+; AVX512-NEXT: orq %r9, %rdi
+; AVX512-NEXT: cmovnel %r12d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
+ %cnt = call i1024 @llvm.cttz.i1024(i1024 %a0, i1 0)
+ %res = trunc i1024 %cnt to i32
+ ret i32 %res
+}
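+
+; With sixteen 64-bit limbs, i1024 exhausts the general-purpose registers:
+; besides pushing all six callee-saved registers, the expansion spills
+; incoming limbs to the stack and reloads them (the "# 8-byte Spill" and
+; "# 8-byte Reload" annotations), which accounts for most of the extra movq
+; traffic relative to the i512 tests.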
+
+define i32 @load_cttz_i1024(ptr %p0) nounwind {
+; SSE-LABEL: load_cttz_i1024:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %r13
+; SSE-NEXT: pushq %r12
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq 88(%rdi), %r10
+; SSE-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: movq 56(%rdi), %rcx
+; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: movq 40(%rdi), %rsi
+; SSE-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: movq 24(%rdi), %r9
+; SSE-NEXT: movq 16(%rdi), %r15
+; SSE-NEXT: movq (%rdi), %r8
+; SSE-NEXT: movq 8(%rdi), %r11
+; SSE-NEXT: rep bsfq %r8, %rax
+; SSE-NEXT: rep bsfq %r11, %rdx
+; SSE-NEXT: addl $64, %edx
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %eax, %edx
+; SSE-NEXT: rep bsfq %r15, %rbx
+; SSE-NEXT: rep bsfq %r9, %rax
+; SSE-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r15, %r15
+; SSE-NEXT: cmovnel %ebx, %eax
+; SSE-NEXT: movq 32(%rdi), %rbx
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: movq %r8, %r14
+; SSE-NEXT: orq %r11, %r14
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: rep bsfq %rbx, %rdx
+; SSE-NEXT: rep bsfq %rsi, %r12
+; SSE-NEXT: addl $64, %r12d
+; SSE-NEXT: testq %rbx, %rbx
+; SSE-NEXT: cmovnel %edx, %r12d
+; SSE-NEXT: movq 48(%rdi), %r13
+; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: rep bsfq %r13, %rdx
+; SSE-NEXT: rep bsfq %rcx, %r14
+; SSE-NEXT: addl $64, %r14d
+; SSE-NEXT: testq %r13, %r13
+; SSE-NEXT: cmovnel %edx, %r14d
+; SSE-NEXT: subl $-128, %r14d
+; SSE-NEXT: movq %rbx, %rdx
+; SSE-NEXT: orq %rsi, %rdx
+; SSE-NEXT: cmovnel %r12d, %r14d
+; SSE-NEXT: movq 72(%rdi), %r12
+; SSE-NEXT: addl $256, %r14d # imm = 0x100
+; SSE-NEXT: movq %r11, %rdx
+; SSE-NEXT: orq %r9, %rdx
+; SSE-NEXT: movq %r8, %r13
+; SSE-NEXT: orq %r15, %r13
+; SSE-NEXT: orq %rdx, %r13
+; SSE-NEXT: movq 64(%rdi), %r13
+; SSE-NEXT: cmovnel %eax, %r14d
+; SSE-NEXT: rep bsfq %r13, %rdx
+; SSE-NEXT: rep bsfq %r12, %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r13, %r13
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: rep bsfq %r10, %rbp
+; SSE-NEXT: addl $64, %ebp
+; SSE-NEXT: movq 80(%rdi), %r10
+; SSE-NEXT: rep bsfq %r10, %rcx
+; SSE-NEXT: testq %r10, %r10
+; SSE-NEXT: cmovnel %ecx, %ebp
+; SSE-NEXT: subl $-128, %ebp
+; SSE-NEXT: movq %r13, %rcx
+; SSE-NEXT: orq %r12, %rcx
+; SSE-NEXT: cmovnel %eax, %ebp
+; SSE-NEXT: movq 104(%rdi), %r9
+; SSE-NEXT: rep bsfq %r9, %rcx
+; SSE-NEXT: addl $64, %ecx
+; SSE-NEXT: movq 96(%rdi), %rdx
+; SSE-NEXT: rep bsfq %rdx, %rax
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq 120(%rdi), %rax
+; SSE-NEXT: movq 112(%rdi), %rdi
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: rep bsfq %rdi, %rsi
+; SSE-NEXT: testq %rdi, %rdi
+; SSE-NEXT: cmovnel %esi, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %r9, %rdx
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; SSE-NEXT: orq %r10, %r13
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r12, %r13
+; SSE-NEXT: cmovnel %ebp, %eax
+; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE-NEXT: orq %rcx, %r11
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; SSE-NEXT: orq %rbx, %r8
+; SSE-NEXT: orq %r15, %r8
+; SSE-NEXT: addl $512, %eax # imm = 0x200
+; SSE-NEXT: orq %r11, %r8
+; SSE-NEXT: cmovnel %r14d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r12
+; SSE-NEXT: popq %r13
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: popq %rbp
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_cttz_i1024:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq 72(%rdi), %r14
+; AVX2-NEXT: movq 64(%rdi), %r15
+; AVX2-NEXT: movq 56(%rdi), %r9
+; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq 48(%rdi), %rcx
+; AVX2-NEXT: movq 40(%rdi), %r10
+; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq 32(%rdi), %rsi
+; AVX2-NEXT: movq 24(%rdi), %rbp
+; AVX2-NEXT: movq 16(%rdi), %rbx
+; AVX2-NEXT: movq (%rdi), %r8
+; AVX2-NEXT: movq 8(%rdi), %r11
+; AVX2-NEXT: tzcntq %r8, %rax
+; AVX2-NEXT: tzcntq %r11, %rdx
+; AVX2-NEXT: addl $64, %edx
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %eax, %edx
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: tzcntq %rbx, %r12
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rbp, %rax
+; AVX2-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rbx, %rbx
+; AVX2-NEXT: cmovnel %r12d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: movq %r8, %r12
+; AVX2-NEXT: orq %r11, %r12
+; AVX2-NEXT: cmovnel %edx, %eax
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: tzcntq %rsi, %rdx
+; AVX2-NEXT: xorl %r13d, %r13d
+; AVX2-NEXT: tzcntq %r10, %r13
+; AVX2-NEXT: addl $64, %r13d
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: cmovnel %edx, %r13d
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: tzcntq %rcx, %rdx
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: tzcntq %r9, %r12
+; AVX2-NEXT: addl $64, %r12d
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %edx, %r12d
+; AVX2-NEXT: subl $-128, %r12d
+; AVX2-NEXT: movq %rsi, %rdx
+; AVX2-NEXT: orq %r10, %rdx
+; AVX2-NEXT: cmovnel %r13d, %r12d
+; AVX2-NEXT: addl $256, %r12d # imm = 0x100
+; AVX2-NEXT: movq %r11, %rdx
+; AVX2-NEXT: orq %rbp, %rdx
+; AVX2-NEXT: movq %r8, %r13
+; AVX2-NEXT: orq %rbx, %r13
+; AVX2-NEXT: orq %rdx, %r13
+; AVX2-NEXT: cmovnel %eax, %r12d
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: tzcntq %r15, %rdx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %r14, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r15, %r15
+; AVX2-NEXT: cmovnel %edx, %eax
+; AVX2-NEXT: movq 88(%rdi), %rbp
+; AVX2-NEXT: xorl %r13d, %r13d
+; AVX2-NEXT: tzcntq %rbp, %r13
+; AVX2-NEXT: addl $64, %r13d
+; AVX2-NEXT: movq 80(%rdi), %r10
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: tzcntq %r10, %rcx
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %ecx, %r13d
+; AVX2-NEXT: subl $-128, %r13d
+; AVX2-NEXT: movq %r15, %rcx
+; AVX2-NEXT: orq %r14, %rcx
+; AVX2-NEXT: cmovnel %eax, %r13d
+; AVX2-NEXT: movq 104(%rdi), %r9
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: tzcntq %r9, %rcx
+; AVX2-NEXT: addl $64, %ecx
+; AVX2-NEXT: movq 96(%rdi), %rdx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rdx, %rax
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %ecx
+; AVX2-NEXT: movq 112(%rdi), %rsi
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq 120(%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: tzcntq %rsi, %rdi
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %edi, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %r9, %rdx
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: orq %rbp, %r14
+; AVX2-NEXT: orq %r10, %r15
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r14, %r15
+; AVX2-NEXT: cmovnel %r13d, %eax
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX2-NEXT: orq %rcx, %r11
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; AVX2-NEXT: orq %rbx, %r8
+; AVX2-NEXT: addl $512, %eax # imm = 0x200
+; AVX2-NEXT: orq %r11, %r8
+; AVX2-NEXT: cmovnel %r12d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_cttz_i1024:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq 88(%rdi), %rbp
+; AVX512-NEXT: movq 72(%rdi), %r15
+; AVX512-NEXT: movq 56(%rdi), %r9
+; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq 48(%rdi), %rcx
+; AVX512-NEXT: movq 40(%rdi), %r10
+; AVX512-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq 32(%rdi), %rsi
+; AVX512-NEXT: movq 24(%rdi), %r14
+; AVX512-NEXT: movq 16(%rdi), %rbx
+; AVX512-NEXT: movq (%rdi), %r8
+; AVX512-NEXT: movq 8(%rdi), %r11
+; AVX512-NEXT: tzcntq %r8, %rax
+; AVX512-NEXT: tzcntq %r11, %rdx
+; AVX512-NEXT: addl $64, %edx
+; AVX512-NEXT: testq %r8, %r8
+; AVX512-NEXT: cmovnel %eax, %edx
+; AVX512-NEXT: tzcntq %rbx, %r12
+; AVX512-NEXT: tzcntq %r14, %rax
+; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rbx, %rbx
+; AVX512-NEXT: cmovnel %r12d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: movq %r8, %r12
+; AVX512-NEXT: orq %r11, %r12
+; AVX512-NEXT: cmovnel %edx, %eax
+; AVX512-NEXT: tzcntq %rsi, %rdx
+; AVX512-NEXT: tzcntq %r10, %r13
+; AVX512-NEXT: addl $64, %r13d
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: cmovnel %edx, %r13d
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: tzcntq %rcx, %rdx
+; AVX512-NEXT: tzcntq %r9, %r12
+; AVX512-NEXT: addl $64, %r12d
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %edx, %r12d
+; AVX512-NEXT: subl $-128, %r12d
+; AVX512-NEXT: movq %rsi, %rdx
+; AVX512-NEXT: orq %r10, %rdx
+; AVX512-NEXT: cmovnel %r13d, %r12d
+; AVX512-NEXT: addl $256, %r12d # imm = 0x100
+; AVX512-NEXT: movq %r11, %rdx
+; AVX512-NEXT: orq %r14, %rdx
+; AVX512-NEXT: movq %r8, %r13
+; AVX512-NEXT: orq %rbx, %r13
+; AVX512-NEXT: orq %rdx, %r13
+; AVX512-NEXT: movq 64(%rdi), %r13
+; AVX512-NEXT: cmovnel %eax, %r12d
+; AVX512-NEXT: tzcntq %r13, %rdx
+; AVX512-NEXT: tzcntq %r15, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %r13, %r13
+; AVX512-NEXT: cmovnel %edx, %eax
+; AVX512-NEXT: movq %rbp, %r14
+; AVX512-NEXT: tzcntq %rbp, %rbp
+; AVX512-NEXT: addl $64, %ebp
+; AVX512-NEXT: movq 80(%rdi), %r10
+; AVX512-NEXT: tzcntq %r10, %rcx
+; AVX512-NEXT: testq %r10, %r10
+; AVX512-NEXT: cmovnel %ecx, %ebp
+; AVX512-NEXT: subl $-128, %ebp
+; AVX512-NEXT: movq %r13, %rcx
+; AVX512-NEXT: orq %r15, %rcx
+; AVX512-NEXT: cmovnel %eax, %ebp
+; AVX512-NEXT: movq 104(%rdi), %r9
+; AVX512-NEXT: tzcntq %r9, %rcx
+; AVX512-NEXT: addl $64, %ecx
+; AVX512-NEXT: movq 96(%rdi), %rdx
+; AVX512-NEXT: tzcntq %rdx, %rax
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %ecx
+; AVX512-NEXT: movq 112(%rdi), %rsi
+; AVX512-NEXT: tzcntq 120(%rdi), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: tzcntq %rsi, %rdi
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %edi, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %r9, %rdx
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: orq %r14, %r15
+; AVX512-NEXT: orq %r10, %r13
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r15, %r13
+; AVX512-NEXT: cmovnel %ebp, %eax
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX512-NEXT: orq %rcx, %r11
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; AVX512-NEXT: orq %rbx, %r8
+; AVX512-NEXT: addl $512, %eax # imm = 0x200
+; AVX512-NEXT: orq %r11, %r8
+; AVX512-NEXT: cmovnel %r12d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
+ %a0 = load i1024, ptr %p0
+ %cnt = call i1024 @llvm.cttz.i1024(i1024 %a0, i1 0)
+ %res = trunc i1024 %cnt to i32
+ ret i32 %res
+}
diff --git a/llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll b/llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll
index 632d90d..f36baba 100644
--- a/llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll
+++ b/llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll
@@ -27,7 +27,7 @@ entry:
!1 = !{i64 0, !"_ZTSFivE.generalized"}
!2 = !{i64 0, !"_ZTSFviE.generalized"}
-; CHECK: .section .llvm.callgraph,"o",@progbits,.text
+; CHECK: .section .llvm.callgraph,"o",@llvm_call_graph,.text
;; Version
; CHECK-NEXT: .byte 0
;; Flags -- Potential indirect target so LSB is set to 1. Other bits are 0.
diff --git a/llvm/test/CodeGen/X86/call-graph-section-assembly.ll b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
index ed6849a..cdbad66 100644
--- a/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
+++ b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
@@ -36,7 +36,7 @@ entry:
!4 = !{!5}
!5 = !{i64 0, !"_ZTSFPvS_E.generalized"}
-; CHECK: .section .llvm.callgraph,"o",@progbits,.text
+; CHECK: .section .llvm.callgraph,"o",@llvm_call_graph,.text
;; Version
; CHECK-NEXT: .byte 0
;; Flags
diff --git a/llvm/test/CodeGen/X86/dag-fmf-cse.ll b/llvm/test/CodeGen/X86/dag-fmf-cse.ll
index 609ccdc..cdcc082 100644
--- a/llvm/test/CodeGen/X86/dag-fmf-cse.ll
+++ b/llvm/test/CodeGen/X86/dag-fmf-cse.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=fma -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=fma | FileCheck %s
; If fast-math-flags are propagated correctly, the mul1 expression
; should be recognized as a factor in the last fsub, so we should
diff --git a/llvm/test/CodeGen/X86/fabs.ll b/llvm/test/CodeGen/X86/fabs.ll
index 82c82ac..4e6da83 100644
--- a/llvm/test/CodeGen/X86/fabs.ll
+++ b/llvm/test/CodeGen/X86/fabs.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse,-sse2,-sse3 | FileCheck %s --check-prefix=X87
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse,-sse2,-sse3 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s --check-prefix=X87UNSAFE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse,-sse2,-sse3 -enable-no-nans-fp-math | FileCheck %s --check-prefix=X87UNSAFE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
declare float @fabsf(float)
diff --git a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll
index 0fe107c..aae6cda 100644
--- a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll
+++ b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll
@@ -22,25 +22,24 @@ declare <4 x bfloat> @llvm.maximumnum.v4bf16(<4 x bfloat>, <4 x bfloat>)
define float @test_fmaximumnum(float %x, float %y) nounwind {
; SSE2-LABEL: test_fmaximumnum:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: js .LBB0_2
-; SSE2-NEXT: # %bb.1:
+; SSE2-NEXT: js .LBB0_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: jmp .LBB0_3
+; SSE2-NEXT: .LBB0_1:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: .LBB0_3:
; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: .LBB0_2:
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: cmpordss %xmm3, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm4
-; SSE2-NEXT: andps %xmm3, %xmm4
-; SSE2-NEXT: js .LBB0_4
-; SSE2-NEXT: # %bb.3:
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: .LBB0_4:
-; SSE2-NEXT: maxss %xmm1, %xmm3
-; SSE2-NEXT: andnps %xmm3, %xmm0
-; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: maxss %xmm2, %xmm3
+; SSE2-NEXT: movaps %xmm3, %xmm0
+; SSE2-NEXT: cmpunordss %xmm3, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: andnps %xmm3, %xmm2
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fmaximumnum:
@@ -56,7 +55,7 @@ define float @test_fmaximumnum(float %x, float %y) nounwind {
; AVX1-NEXT: vmovdqa %xmm0, %xmm1
; AVX1-NEXT: .LBB0_3:
; AVX1-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX1-NEXT: vcmpordss %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
@@ -70,7 +69,7 @@ define float @test_fmaximumnum(float %x, float %y) nounwind {
; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: retq
;
@@ -95,7 +94,7 @@ define float @test_fmaximumnum(float %x, float %y) nounwind {
; X86-NEXT: vmovdqa %xmm2, %xmm0
; X86-NEXT: .LBB0_3:
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
@@ -371,26 +370,25 @@ define float @test_fmaximumnum_nsz(float %x, float %y) "no-signed-zeros-fp-math"
; SSE2-LABEL: test_fmaximumnum_nsz:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: cmpordss %xmm0, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm3
-; SSE2-NEXT: andps %xmm0, %xmm3
-; SSE2-NEXT: maxss %xmm1, %xmm0
-; SSE2-NEXT: andnps %xmm0, %xmm2
-; SSE2-NEXT: orps %xmm3, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: maxss %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm1
+; SSE2-NEXT: cmpunordss %xmm2, %xmm1
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: andnps %xmm2, %xmm1
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fmaximumnum_nsz:
; AVX1: # %bb.0:
; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX512-LABEL: test_fmaximumnum_nsz:
; AVX512: # %bb.0:
; AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vcmpordss %xmm0, %xmm0, %k1
+; AVX512-NEXT: vcmpunordss %xmm1, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmovaps %xmm1, %xmm0
; AVX512-NEXT: retq
@@ -404,9 +402,9 @@ define float @test_fmaximumnum_nsz(float %x, float %y) "no-signed-zeros-fp-math"
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm1
-; X86-NEXT: vmaxss {{[0-9]+}}(%esp), %xmm0, %xmm2
-; X86-NEXT: vblendvps %xmm1, %xmm0, %xmm2, %xmm0
+; X86-NEXT: vmaxss {{[0-9]+}}(%esp), %xmm0, %xmm1
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
; X86-NEXT: popl %eax
@@ -421,23 +419,22 @@ define float @test_fmaximumnum_combine_cmps(float %x, float %y) nounwind {
; SSE2-NEXT: divss %xmm0, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movaps %xmm0, %xmm3
-; SSE2-NEXT: js .LBB9_2
-; SSE2-NEXT: # %bb.1:
-; SSE2-NEXT: movaps %xmm1, %xmm3
-; SSE2-NEXT: .LBB9_2:
-; SSE2-NEXT: movaps %xmm3, %xmm2
-; SSE2-NEXT: cmpordss %xmm3, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm4
-; SSE2-NEXT: andps %xmm3, %xmm4
-; SSE2-NEXT: js .LBB9_4
-; SSE2-NEXT: # %bb.3:
+; SSE2-NEXT: js .LBB9_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: jmp .LBB9_3
+; SSE2-NEXT: .LBB9_1:
+; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm1
-; SSE2-NEXT: .LBB9_4:
-; SSE2-NEXT: maxss %xmm1, %xmm3
+; SSE2-NEXT: .LBB9_3:
+; SSE2-NEXT: movaps %xmm1, %xmm3
+; SSE2-NEXT: maxss %xmm2, %xmm3
+; SSE2-NEXT: movaps %xmm3, %xmm0
+; SSE2-NEXT: cmpunordss %xmm3, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: andnps %xmm3, %xmm2
-; SSE2-NEXT: orps %xmm4, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fmaximumnum_combine_cmps:
@@ -454,7 +451,7 @@ define float @test_fmaximumnum_combine_cmps(float %x, float %y) nounwind {
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: .LBB9_3:
; AVX1-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX1-NEXT: vcmpordss %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
@@ -469,7 +466,7 @@ define float @test_fmaximumnum_combine_cmps(float %x, float %y) nounwind {
; AVX512F-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512F-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512F-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX512F-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512F-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512F-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512F-NEXT: retq
;
@@ -507,7 +504,7 @@ define float @test_fmaximumnum_combine_cmps(float %x, float %y) nounwind {
; X86-NEXT: vmovaps %xmm1, %xmm0
; X86-NEXT: .LBB9_3:
; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
@@ -527,23 +524,23 @@ define float @test_fminimumnum(float %x, float %y) nounwind {
; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: js .LBB10_2
-; SSE2-NEXT: # %bb.1:
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: .LBB10_2:
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: cmpordss %xmm3, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm4
-; SSE2-NEXT: andps %xmm3, %xmm4
-; SSE2-NEXT: js .LBB10_4
-; SSE2-NEXT: # %bb.3:
+; SSE2-NEXT: js .LBB10_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: jmp .LBB10_3
+; SSE2-NEXT: .LBB10_1:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: .LBB10_4:
-; SSE2-NEXT: minss %xmm0, %xmm3
+; SSE2-NEXT: .LBB10_3:
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: minss %xmm2, %xmm3
+; SSE2-NEXT: movaps %xmm3, %xmm1
+; SSE2-NEXT: cmpunordss %xmm3, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: andnps %xmm3, %xmm2
-; SSE2-NEXT: orps %xmm4, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: andps %xmm0, %xmm1
+; SSE2-NEXT: orps %xmm2, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fminimumnum:
@@ -559,7 +556,7 @@ define float @test_fminimumnum(float %x, float %y) nounwind {
; AVX1-NEXT: vmovdqa %xmm1, %xmm0
; AVX1-NEXT: .LBB10_3:
; AVX1-NEXT: vminss %xmm2, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
@@ -573,7 +570,7 @@ define float @test_fminimumnum(float %x, float %y) nounwind {
; AVX512-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: vminss %xmm2, %xmm0, %xmm1
-; AVX512-NEXT: vcmpordss %xmm0, %xmm0, %k1
+; AVX512-NEXT: vcmpunordss %xmm1, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmovaps %xmm1, %xmm0
; AVX512-NEXT: retq
@@ -599,7 +596,7 @@ define float @test_fminimumnum(float %x, float %y) nounwind {
; X86-NEXT: vmovdqa %xmm1, %xmm0
; X86-NEXT: .LBB10_3:
; X86-NEXT: vminss %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
@@ -857,26 +854,25 @@ define float @test_fminimumnum_nsz(float %x, float %y) nounwind {
; SSE2-LABEL: test_fminimumnum_nsz:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: cmpordss %xmm0, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm3
-; SSE2-NEXT: andps %xmm0, %xmm3
-; SSE2-NEXT: minss %xmm1, %xmm0
-; SSE2-NEXT: andnps %xmm0, %xmm2
-; SSE2-NEXT: orps %xmm3, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: minss %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm1
+; SSE2-NEXT: cmpunordss %xmm2, %xmm1
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: andnps %xmm2, %xmm1
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fminimumnum_nsz:
; AVX1: # %bb.0:
; AVX1-NEXT: vminss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX512-LABEL: test_fminimumnum_nsz:
; AVX512: # %bb.0:
; AVX512-NEXT: vminss %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vcmpordss %xmm0, %xmm0, %k1
+; AVX512-NEXT: vcmpunordss %xmm1, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmovaps %xmm1, %xmm0
; AVX512-NEXT: retq
@@ -890,9 +886,9 @@ define float @test_fminimumnum_nsz(float %x, float %y) nounwind {
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm1
-; X86-NEXT: vminss {{[0-9]+}}(%esp), %xmm0, %xmm2
-; X86-NEXT: vblendvps %xmm1, %xmm0, %xmm2, %xmm0
+; X86-NEXT: vminss {{[0-9]+}}(%esp), %xmm0, %xmm1
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
; X86-NEXT: popl %eax
@@ -907,23 +903,23 @@ define float @test_fminimumnum_combine_cmps(float %x, float %y) nounwind {
; SSE2-NEXT: divss %xmm0, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movaps %xmm1, %xmm3
-; SSE2-NEXT: js .LBB19_2
-; SSE2-NEXT: # %bb.1:
-; SSE2-NEXT: movaps %xmm0, %xmm3
-; SSE2-NEXT: .LBB19_2:
-; SSE2-NEXT: movaps %xmm3, %xmm2
-; SSE2-NEXT: cmpordss %xmm3, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm4
-; SSE2-NEXT: andps %xmm3, %xmm4
-; SSE2-NEXT: js .LBB19_4
-; SSE2-NEXT: # %bb.3:
+; SSE2-NEXT: js .LBB19_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: movaps %xmm1, %xmm2
+; SSE2-NEXT: jmp .LBB19_3
+; SSE2-NEXT: .LBB19_1:
+; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: movaps %xmm1, %xmm0
-; SSE2-NEXT: .LBB19_4:
-; SSE2-NEXT: minss %xmm0, %xmm3
+; SSE2-NEXT: .LBB19_3:
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: minss %xmm2, %xmm3
+; SSE2-NEXT: movaps %xmm3, %xmm1
+; SSE2-NEXT: cmpunordss %xmm3, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: andnps %xmm3, %xmm2
-; SSE2-NEXT: orps %xmm4, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: andps %xmm0, %xmm1
+; SSE2-NEXT: orps %xmm2, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fminimumnum_combine_cmps:
@@ -940,7 +936,7 @@ define float @test_fminimumnum_combine_cmps(float %x, float %y) nounwind {
; AVX1-NEXT: vmovaps %xmm2, %xmm0
; AVX1-NEXT: .LBB19_3:
; AVX1-NEXT: vminss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
@@ -955,7 +951,7 @@ define float @test_fminimumnum_combine_cmps(float %x, float %y) nounwind {
; AVX512F-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1}
; AVX512F-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512F-NEXT: vminss %xmm2, %xmm0, %xmm1
-; AVX512F-NEXT: vcmpordss %xmm0, %xmm0, %k1
+; AVX512F-NEXT: vcmpunordss %xmm1, %xmm1, %k1
; AVX512F-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512F-NEXT: vmovaps %xmm1, %xmm0
; AVX512F-NEXT: retq
@@ -994,7 +990,7 @@ define float @test_fminimumnum_combine_cmps(float %x, float %y) nounwind {
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: .LBB19_3:
; X86-NEXT: vminss %xmm1, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
@@ -1022,9 +1018,9 @@ define <2 x double> @test_fminimumnum_vector(<2 x double> %x, <2 x double> %y) {
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: minpd %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: cmpordpd %xmm3, %xmm0
-; SSE2-NEXT: andpd %xmm0, %xmm3
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: cmpunordpd %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: andnpd %xmm1, %xmm0
; SSE2-NEXT: orpd %xmm3, %xmm0
; SSE2-NEXT: retq
@@ -1034,7 +1030,7 @@ define <2 x double> @test_fminimumnum_vector(<2 x double> %x, <2 x double> %y) {
; AVX-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminpd %xmm2, %xmm0, %xmm1
-; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vcmpunordpd %xmm1, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
@@ -1048,7 +1044,7 @@ define <2 x double> @test_fminimumnum_vector(<2 x double> %x, <2 x double> %y) {
; X86-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2
; X86-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
; X86-NEXT: vminpd %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordpd %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: retl
%r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> %y)
@@ -1084,19 +1080,17 @@ define <2 x double> @test_fminimumnum_vector_zero(<2 x double> %x) {
; SSE2: # %bb.0:
; SSE2-NEXT: xorpd %xmm1, %xmm1
; SSE2-NEXT: minpd %xmm0, %xmm1
-; SSE2-NEXT: movapd %xmm0, %xmm2
-; SSE2-NEXT: cmpordpd %xmm0, %xmm2
-; SSE2-NEXT: andpd %xmm2, %xmm0
-; SSE2-NEXT: andnpd %xmm1, %xmm2
-; SSE2-NEXT: orpd %xmm2, %xmm0
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: cmpunordpd %xmm1, %xmm0
+; SSE2-NEXT: andnpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fminimumnum_vector_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX10_2-LABEL: test_fminimumnum_vector_zero:
@@ -1108,9 +1102,9 @@ define <2 x double> @test_fminimumnum_vector_zero(<2 x double> %x) {
; X86-LABEL: test_fminimumnum_vector_zero:
; X86: # %bb.0:
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; X86-NEXT: vminpd %xmm0, %xmm1, %xmm1
-; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
-; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; X86-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm1
+; X86-NEXT: vandnpd %xmm0, %xmm1, %xmm0
; X86-NEXT: retl
%r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double 0.>)
ret <2 x double> %r
@@ -1120,20 +1114,21 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero(<4 x float> %x) {
; SSE2-LABEL: test_fmaximumnum_vector_signed_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; SSE2-NEXT: maxps %xmm0, %xmm1
-; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: cmpordps %xmm0, %xmm2
-; SSE2-NEXT: andps %xmm2, %xmm0
-; SSE2-NEXT: andnps %xmm1, %xmm2
-; SSE2-NEXT: orps %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm1, %xmm2
+; SSE2-NEXT: maxps %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordps %xmm2, %xmm0
+; SSE2-NEXT: andps %xmm0, %xmm1
+; SSE2-NEXT: andnps %xmm2, %xmm0
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fmaximumnum_vector_signed_zero:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX10_2-LABEL: test_fmaximumnum_vector_signed_zero:
@@ -1144,9 +1139,9 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero(<4 x float> %x) {
; X86-LABEL: test_fmaximumnum_vector_signed_zero:
; X86: # %bb.0:
; X86-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm1
-; X86-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
-; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; X86-NEXT: vcmpunordps %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; X86-NEXT: retl
%r = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %x, <4 x float> <float -0., float -0., float -0., float -0.>)
ret <4 x float> %r
@@ -1155,13 +1150,14 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero(<4 x float> %x) {
define <2 x double> @test_fminimumnum_vector_partially_zero(<2 x double> %x) {
; SSE2-LABEL: test_fminimumnum_vector_partially_zero:
; SSE2: # %bb.0:
-; SSE2-NEXT: movapd %xmm0, %xmm1
-; SSE2-NEXT: cmpordpd %xmm0, %xmm1
-; SSE2-NEXT: xorpd %xmm2, %xmm2
-; SSE2-NEXT: movhpd {{.*#+}} xmm2 = xmm2[0],mem[0]
+; SSE2-NEXT: xorpd %xmm1, %xmm1
+; SSE2-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; SSE2-NEXT: movapd %xmm1, %xmm2
; SSE2-NEXT: minpd %xmm0, %xmm2
-; SSE2-NEXT: andpd %xmm1, %xmm0
-; SSE2-NEXT: andnpd %xmm2, %xmm1
+; SSE2-NEXT: movapd %xmm2, %xmm0
+; SSE2-NEXT: cmpunordpd %xmm2, %xmm0
+; SSE2-NEXT: andpd %xmm0, %xmm1
+; SSE2-NEXT: andnpd %xmm2, %xmm0
; SSE2-NEXT: orpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -1169,9 +1165,9 @@ define <2 x double> @test_fminimumnum_vector_partially_zero(<2 x double> %x) {
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX10_2-LABEL: test_fminimumnum_vector_partially_zero:
@@ -1185,9 +1181,9 @@ define <2 x double> @test_fminimumnum_vector_partially_zero(<2 x double> %x) {
; X86: # %bb.0:
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X86-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; X86-NEXT: vminpd %xmm0, %xmm1, %xmm1
-; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
-; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; X86-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; X86-NEXT: retl
%r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double 5.>)
ret <2 x double> %r
@@ -1212,9 +1208,9 @@ define <2 x double> @test_fminimumnum_vector_different_zeros(<2 x double> %x) {
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: minpd %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: cmpordpd %xmm3, %xmm0
-; SSE2-NEXT: andpd %xmm0, %xmm3
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: cmpunordpd %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: andnpd %xmm1, %xmm0
; SSE2-NEXT: orpd %xmm3, %xmm0
; SSE2-NEXT: retq
@@ -1226,7 +1222,7 @@ define <2 x double> @test_fminimumnum_vector_different_zeros(<2 x double> %x) {
; AVX-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminpd %xmm2, %xmm0, %xmm1
-; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vcmpunordpd %xmm1, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
@@ -1244,7 +1240,7 @@ define <2 x double> @test_fminimumnum_vector_different_zeros(<2 x double> %x) {
; X86-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2
; X86-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
; X86-NEXT: vminpd %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordpd %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: retl
%r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double -0.>)
@@ -1278,20 +1274,24 @@ define <4 x float> @test_fmaximumnum_vector_non_zero(<4 x float> %x) {
define <2 x double> @test_fminimumnum_vector_nan(<2 x double> %x) {
; SSE2-LABEL: test_fminimumnum_vector_nan:
; SSE2: # %bb.0:
-; SSE2-NEXT: xorpd %xmm2, %xmm2
; SSE2-NEXT: xorpd %xmm1, %xmm1
; SSE2-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; SSE2-NEXT: minpd %xmm0, %xmm1
-; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
-; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: movapd %xmm1, %xmm2
+; SSE2-NEXT: minpd %xmm0, %xmm2
+; SSE2-NEXT: movapd %xmm2, %xmm0
+; SSE2-NEXT: cmpunordpd %xmm2, %xmm0
+; SSE2-NEXT: andpd %xmm0, %xmm1
+; SSE2-NEXT: andnpd %xmm2, %xmm0
+; SSE2-NEXT: orpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fminimumnum_vector_nan:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovhpd {{.*#+}} xmm2 = xmm1[0],mem[0]
-; AVX-NEXT: vminpd %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX10_2-LABEL: test_fminimumnum_vector_nan:
@@ -1306,7 +1306,7 @@ define <2 x double> @test_fminimumnum_vector_nan(<2 x double> %x) {
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X86-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0
-; X86-NEXT: vcmpordpd %xmm1, %xmm1, %xmm2
+; X86-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm2
; X86-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; X86-NEXT: retl
%r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double 0x7fff000000000000>)
@@ -1318,19 +1318,17 @@ define <2 x double> @test_fminimumnum_vector_zero_first(<2 x double> %x) {
; SSE2: # %bb.0:
; SSE2-NEXT: xorpd %xmm1, %xmm1
; SSE2-NEXT: minpd %xmm0, %xmm1
-; SSE2-NEXT: movapd %xmm0, %xmm2
-; SSE2-NEXT: cmpordpd %xmm0, %xmm2
-; SSE2-NEXT: andpd %xmm2, %xmm0
-; SSE2-NEXT: andnpd %xmm1, %xmm2
-; SSE2-NEXT: orpd %xmm2, %xmm0
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: cmpunordpd %xmm1, %xmm0
+; SSE2-NEXT: andnpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fminimumnum_vector_zero_first:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX10_2-LABEL: test_fminimumnum_vector_zero_first:
@@ -1342,9 +1340,9 @@ define <2 x double> @test_fminimumnum_vector_zero_first(<2 x double> %x) {
; X86-LABEL: test_fminimumnum_vector_zero_first:
; X86: # %bb.0:
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; X86-NEXT: vminpd %xmm0, %xmm1, %xmm1
-; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
-; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; X86-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm1
+; X86-NEXT: vandnpd %xmm0, %xmm1, %xmm0
; X86-NEXT: retl
%r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> <double 0., double 0.>, <2 x double> %x)
ret <2 x double> %r
@@ -1378,20 +1376,21 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero_first(<4 x float> %x) {
; SSE2-LABEL: test_fmaximumnum_vector_signed_zero_first:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; SSE2-NEXT: maxps %xmm0, %xmm1
-; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: cmpordps %xmm0, %xmm2
-; SSE2-NEXT: andps %xmm2, %xmm0
-; SSE2-NEXT: andnps %xmm1, %xmm2
-; SSE2-NEXT: orps %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm1, %xmm2
+; SSE2-NEXT: maxps %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordps %xmm2, %xmm0
+; SSE2-NEXT: andps %xmm0, %xmm1
+; SSE2-NEXT: andnps %xmm2, %xmm0
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fmaximumnum_vector_signed_zero_first:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX10_2-LABEL: test_fmaximumnum_vector_signed_zero_first:
@@ -1402,9 +1401,9 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero_first(<4 x float> %x) {
; X86-LABEL: test_fmaximumnum_vector_signed_zero_first:
; X86: # %bb.0:
; X86-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm1
-; X86-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
-; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; X86-NEXT: vcmpunordps %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; X86-NEXT: retl
%r = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> <float -0., float -0., float -0., float -0.>, <4 x float> %x)
ret <4 x float> %r
@@ -1455,11 +1454,11 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) {
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: maxps %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: cmpordps %xmm0, %xmm2
-; SSE2-NEXT: andps %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm1, %xmm2
+; SSE2-NEXT: cmpunordps %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: andnps %xmm1, %xmm2
-; SSE2-NEXT: orps %xmm2, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fmaximumnum_v4f32_splat:
@@ -1468,7 +1467,7 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) {
; AVX1-NEXT: vblendvps %xmm0, %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vblendvps %xmm0, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vmaxps %xmm2, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordps %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
@@ -1478,7 +1477,7 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) {
; AVX512-NEXT: vblendvps %xmm0, %xmm1, %xmm0, %xmm2
; AVX512-NEXT: vblendvps %xmm0, %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmaxps %xmm2, %xmm0, %xmm1
-; AVX512-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
+; AVX512-NEXT: vcmpunordps %xmm1, %xmm1, %xmm2
; AVX512-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
;
@@ -1494,7 +1493,7 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) {
; X86-NEXT: vblendvps %xmm0, %xmm1, %xmm0, %xmm2
; X86-NEXT: vblendvps %xmm0, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmaxps %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordps %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: retl
%splatinsert = insertelement <4 x float> poison, float %y, i64 0
@@ -1506,134 +1505,130 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) {
define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind {
; SSE2-LABEL: test_fmaximumnum_v4f16:
; SSE2: # %bb.0:
-; SSE2-NEXT: subq $104, %rsp
-; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: subq $136, %rsp
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
+; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm1, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
+; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE2-NEXT: movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 4-byte Folded Reload
-; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero
-; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: js .LBB33_2
-; SSE2-NEXT: # %bb.1:
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: .LBB33_2:
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: cmpordss %xmm2, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm3
-; SSE2-NEXT: andps %xmm2, %xmm3
-; SSE2-NEXT: js .LBB33_4
-; SSE2-NEXT: # %bb.3:
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: .LBB33_4:
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: maxss %xmm4, %xmm2
-; SSE2-NEXT: andnps %xmm2, %xmm0
-; SSE2-NEXT: orps %xmm3, %xmm0
+; SSE2-NEXT: js .LBB33_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE2-NEXT: jmp .LBB33_3
+; SSE2-NEXT: .LBB33_1:
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: .LBB33_3:
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: psrlq $48, %xmm0
+; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: psrlq $48, %xmm0
+; SSE2-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: maxss %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: andnps %xmm2, %xmm1
+; SSE2-NEXT: andps %xmm3, %xmm0
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
+; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 4-byte Folded Reload
-; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero
-; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: js .LBB33_6
+; SSE2-NEXT: js .LBB33_4
; SSE2-NEXT: # %bb.5:
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: .LBB33_6:
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: cmpordss %xmm2, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm3
-; SSE2-NEXT: andps %xmm2, %xmm3
-; SSE2-NEXT: js .LBB33_8
-; SSE2-NEXT: # %bb.7:
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: .LBB33_8:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE2-NEXT: jmp .LBB33_6
+; SSE2-NEXT: .LBB33_4:
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE2-NEXT: psrlq $48, %xmm1
-; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: psrlq $48, %xmm1
-; SSE2-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
-; SSE2-NEXT: maxss %xmm4, %xmm2
-; SSE2-NEXT: andnps %xmm2, %xmm0
-; SSE2-NEXT: orps %xmm3, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: .LBB33_6:
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: maxss %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: andnps %xmm2, %xmm1
+; SSE2-NEXT: andps %xmm3, %xmm0
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 4-byte Folded Reload
-; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: js .LBB33_10
-; SSE2-NEXT: # %bb.9:
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: .LBB33_10:
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: cmpordss %xmm2, %xmm1
-; SSE2-NEXT: movaps %xmm1, %xmm3
-; SSE2-NEXT: andps %xmm2, %xmm3
-; SSE2-NEXT: js .LBB33_12
-; SSE2-NEXT: # %bb.11:
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: .LBB33_12:
-; SSE2-NEXT: maxss %xmm4, %xmm2
+; SSE2-NEXT: js .LBB33_7
+; SSE2-NEXT: # %bb.8:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE2-NEXT: jmp .LBB33_9
+; SSE2-NEXT: .LBB33_7:
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: .LBB33_9:
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: maxss %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: andnps %xmm2, %xmm1
-; SSE2-NEXT: orps %xmm3, %xmm1
-; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: andps %xmm3, %xmm0
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movd (%rsp), %xmm4 # 4-byte Folded Reload
-; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero
-; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: js .LBB33_14
-; SSE2-NEXT: # %bb.13:
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: .LBB33_14:
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: cmpordss %xmm2, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm3
-; SSE2-NEXT: andps %xmm2, %xmm3
-; SSE2-NEXT: js .LBB33_16
-; SSE2-NEXT: # %bb.15:
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: .LBB33_16:
-; SSE2-NEXT: maxss %xmm4, %xmm2
-; SSE2-NEXT: andnps %xmm2, %xmm0
-; SSE2-NEXT: orps %xmm3, %xmm0
+; SSE2-NEXT: js .LBB33_10
+; SSE2-NEXT: # %bb.11:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: jmp .LBB33_12
+; SSE2-NEXT: .LBB33_10:
+; SSE2-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: .LBB33_12:
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: maxss %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: andnps %xmm2, %xmm1
+; SSE2-NEXT: andps %xmm3, %xmm0
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -1641,7 +1636,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; SSE2-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: addq $104, %rsp
+; SSE2-NEXT: addq $136, %rsp
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fmaximumnum_v4f16:
@@ -1679,7 +1674,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0
-; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1
+; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -1700,7 +1695,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX1-NEXT: vmovdqa %xmm0, %xmm2
; AVX1-NEXT: .LBB33_6:
; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0
-; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1
+; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -1721,7 +1716,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX1-NEXT: vmovdqa %xmm0, %xmm2
; AVX1-NEXT: .LBB33_9:
; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0
-; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1
+; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
@@ -1742,7 +1737,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX1-NEXT: vmovdqa %xmm0, %xmm2
; AVX1-NEXT: .LBB33_12:
; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0
-; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1
+; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
@@ -1768,7 +1763,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
; AVX512-NEXT: vmaxss %xmm4, %xmm3, %xmm2
-; AVX512-NEXT: vcmpordss %xmm3, %xmm3, %k1
+; AVX512-NEXT: vcmpunordss %xmm2, %xmm2, %k1
; AVX512-NEXT: vmovss %xmm3, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX512-NEXT: vshufps {{.*#+}} xmm3 = xmm0[3,3,3,3]
@@ -1783,7 +1778,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3
-; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1
+; AVX512-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
@@ -1799,7 +1794,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3
-; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1
+; AVX512-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX512-NEXT: vshufpd {{.*#+}} xmm4 = xmm0[1,0]
@@ -1814,7 +1809,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1}
; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4
-; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1
+; AVX512-NEXT: vcmpunordss %xmm4, %xmm4, %k1
; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
@@ -1831,7 +1826,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3
-; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1
+; AVX512-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
@@ -1846,7 +1841,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1}
; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4
-; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1
+; AVX512-NEXT: vcmpunordss %xmm4, %xmm4, %k1
; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
@@ -1860,7 +1855,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1}
; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4
-; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1
+; AVX512-NEXT: vcmpunordss %xmm4, %xmm4, %k1
; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
@@ -1875,7 +1870,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm1, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmaxss %xmm5, %xmm1, %xmm0
-; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
@@ -1933,7 +1928,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; X86-NEXT: vmovdqa %xmm1, %xmm0
; X86-NEXT: .LBB33_3:
; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __extendhfsf2
@@ -1955,7 +1950,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; X86-NEXT: vmovdqa %xmm1, %xmm0
; X86-NEXT: .LBB33_6:
; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __truncsfhf2
@@ -1993,7 +1988,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; X86-NEXT: vmovdqa %xmm1, %xmm0
; X86-NEXT: .LBB33_9:
; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __extendhfsf2
@@ -2015,7 +2010,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; X86-NEXT: vmovdqa %xmm1, %xmm0
; X86-NEXT: .LBB33_12:
; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __truncsfhf2
@@ -2041,120 +2036,114 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %r15
; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: pushq %r12
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: subq $56, %rsp
-; SSE2-NEXT: pextrw $0, %xmm1, %r14d
-; SSE2-NEXT: pextrw $0, %xmm0, %r15d
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrld $16, %xmm2
-; SSE2-NEXT: pextrw $0, %xmm2, %eax
; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psrld $16, %xmm2
-; SSE2-NEXT: pextrw $0, %xmm2, %ecx
+; SSE2-NEXT: psrlq $48, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: psrlq $48, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm0[1,1]
+; SSE2-NEXT: pextrw $0, %xmm4, %ebp
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
+; SSE2-NEXT: pextrw $0, %xmm4, %r15d
+; SSE2-NEXT: pextrw $0, %xmm0, %r12d
+; SSE2-NEXT: pextrw $0, %xmm1, %r13d
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pextrw $0, %xmm0, %eax
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pextrw $0, %xmm1, %ecx
; SSE2-NEXT: shll $16, %ecx
-; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: shll $16, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: testl %ecx, %ecx
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: js .LBB34_2
-; SSE2-NEXT: # %bb.1:
-; SSE2-NEXT: movdqa %xmm2, %xmm7
-; SSE2-NEXT: .LBB34_2:
-; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[1,1]
-; SSE2-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,1],xmm0[1,1]
-; SSE2-NEXT: movdqa %xmm7, %xmm0
-; SSE2-NEXT: cmpordss %xmm7, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm4
-; SSE2-NEXT: andps %xmm7, %xmm4
-; SSE2-NEXT: js .LBB34_4
-; SSE2-NEXT: # %bb.3:
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: .LBB34_4:
-; SSE2-NEXT: pextrw $0, %xmm5, %ebp
-; SSE2-NEXT: pextrw $0, %xmm6, %ebx
-; SSE2-NEXT: maxss %xmm2, %xmm7
-; SSE2-NEXT: andnps %xmm7, %xmm0
-; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: js .LBB34_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: jmp .LBB34_3
+; SSE2-NEXT: .LBB34_1:
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: .LBB34_3:
+; SSE2-NEXT: pextrw $0, %xmm2, %ebx
+; SSE2-NEXT: pextrw $0, %xmm3, %r14d
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: maxss %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: andnps %xmm2, %xmm3
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm3, %xmm0
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: shll $16, %r15d
-; SSE2-NEXT: movd %r15d, %xmm3
-; SSE2-NEXT: shll $16, %r14d
-; SSE2-NEXT: movd %r14d, %xmm2
-; SSE2-NEXT: testl %r15d, %r15d
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: js .LBB34_6
+; SSE2-NEXT: shll $16, %r13d
+; SSE2-NEXT: movd %r13d, %xmm1
+; SSE2-NEXT: shll $16, %r12d
+; SSE2-NEXT: movd %r12d, %xmm2
+; SSE2-NEXT: js .LBB34_4
; SSE2-NEXT: # %bb.5:
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: jmp .LBB34_6
+; SSE2-NEXT: .LBB34_4:
+; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: .LBB34_6:
-; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE2-NEXT: psrlq $48, %xmm5
-; SSE2-NEXT: movdqa (%rsp), %xmm6 # 16-byte Reload
-; SSE2-NEXT: psrlq $48, %xmm6
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: cmpordss %xmm1, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm4
-; SSE2-NEXT: andps %xmm1, %xmm4
-; SSE2-NEXT: js .LBB34_8
-; SSE2-NEXT: # %bb.7:
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: .LBB34_8:
-; SSE2-NEXT: pextrw $0, %xmm5, %r15d
-; SSE2-NEXT: pextrw $0, %xmm6, %r14d
-; SSE2-NEXT: maxss %xmm2, %xmm1
-; SSE2-NEXT: andnps %xmm1, %xmm0
-; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: maxss %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: andnps %xmm2, %xmm3
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm3, %xmm0
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: shll $16, %ebx
-; SSE2-NEXT: movd %ebx, %xmm1
+; SSE2-NEXT: shll $16, %r15d
+; SSE2-NEXT: movd %r15d, %xmm1
; SSE2-NEXT: shll $16, %ebp
-; SSE2-NEXT: movd %ebp, %xmm3
-; SSE2-NEXT: testl %ebx, %ebx
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: js .LBB34_10
-; SSE2-NEXT: # %bb.9:
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: .LBB34_10:
+; SSE2-NEXT: movd %ebp, %xmm2
+; SSE2-NEXT: js .LBB34_7
+; SSE2-NEXT: # %bb.8:
; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: cmpordss %xmm2, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm4
-; SSE2-NEXT: andps %xmm2, %xmm4
-; SSE2-NEXT: js .LBB34_12
-; SSE2-NEXT: # %bb.11:
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: .LBB34_12:
-; SSE2-NEXT: maxss %xmm3, %xmm2
-; SSE2-NEXT: andnps %xmm2, %xmm0
-; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: jmp .LBB34_9
+; SSE2-NEXT: .LBB34_7:
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: .LBB34_9:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: maxss %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: andnps %xmm2, %xmm3
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm3, %xmm0
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE2-NEXT: shll $16, %r14d
; SSE2-NEXT: movd %r14d, %xmm1
-; SSE2-NEXT: shll $16, %r15d
-; SSE2-NEXT: movd %r15d, %xmm3
-; SSE2-NEXT: testl %r14d, %r14d
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: js .LBB34_14
-; SSE2-NEXT: # %bb.13:
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: .LBB34_14:
+; SSE2-NEXT: shll $16, %ebx
+; SSE2-NEXT: movd %ebx, %xmm2
+; SSE2-NEXT: js .LBB34_10
+; SSE2-NEXT: # %bb.11:
; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: cmpordss %xmm2, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm4
-; SSE2-NEXT: andps %xmm2, %xmm4
-; SSE2-NEXT: js .LBB34_16
-; SSE2-NEXT: # %bb.15:
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: .LBB34_16:
-; SSE2-NEXT: maxss %xmm3, %xmm2
-; SSE2-NEXT: andnps %xmm2, %xmm0
-; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: jmp .LBB34_12
+; SSE2-NEXT: .LBB34_10:
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: .LBB34_12:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: maxss %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: andnps %xmm2, %xmm3
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm3, %xmm0
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -2164,6 +2153,8 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: addq $56, %rsp
; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: popq %r13
; SSE2-NEXT: popq %r14
; SSE2-NEXT: popq %r15
; SSE2-NEXT: popq %rbp
@@ -2205,7 +2196,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX1-NEXT: vpextrw $0, %xmm2, %ebp
; AVX1-NEXT: vpextrw $0, %xmm3, %r15d
; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: callq __truncsfbf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -2222,7 +2213,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX1-NEXT: vmovdqa %xmm2, %xmm0
; AVX1-NEXT: .LBB34_6:
; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: callq __truncsfbf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -2239,7 +2230,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX1-NEXT: vmovdqa %xmm2, %xmm0
; AVX1-NEXT: .LBB34_9:
; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: callq __truncsfbf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
@@ -2256,7 +2247,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX1-NEXT: vmovdqa %xmm2, %xmm0
; AVX1-NEXT: .LBB34_12:
; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: callq __truncsfbf2@PLT
; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
@@ -2305,7 +2296,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: callq __truncsfbf2@PLT
; AVX512-NEXT: vpextrw $0, %xmm0, {{[0-9]+}}(%rsp)
@@ -2319,7 +2310,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: callq __truncsfbf2@PLT
; AVX512-NEXT: vpextrw $0, %xmm0, (%rsp)
@@ -2333,7 +2324,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: callq __truncsfbf2@PLT
; AVX512-NEXT: vpextrw $0, %xmm0, {{[0-9]+}}(%rsp)
@@ -2347,7 +2338,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: callq __truncsfbf2@PLT
; AVX512-NEXT: vpextrw $0, %xmm0, {{[0-9]+}}(%rsp)
@@ -2400,7 +2391,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; X86-NEXT: vpextrw $0, %xmm2, %edi
; X86-NEXT: vpextrw $0, %xmm3, %ebp
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: shll $16, %ecx
@@ -2416,7 +2407,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; X86-NEXT: vmovdqa %xmm2, %xmm0
; X86-NEXT: .LBB34_6:
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __truncsfbf2
@@ -2436,7 +2427,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; X86-NEXT: vmovdqa %xmm2, %xmm0
; X86-NEXT: .LBB34_9:
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __truncsfbf2
@@ -2456,7 +2447,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; X86-NEXT: vmovdqa %xmm2, %xmm0
; X86-NEXT: .LBB34_12:
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __truncsfbf2
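Across all four prefixes in this file the change is the same: the NaN fix-up blend after maxss/vmaxss now tests the max result for being unordered (cmpunordss/vcmpunordss) instead of testing an input for being ordered, so the other operand is substituted only when the max actually produced a NaN. A minimal scalar form of the operation being lowered, as a sketch (assumed, not part of this commit; the test itself exercises the <4 x bfloat> version through the __truncsfbf2 calls):

; llvm.maximumnum returns the numeric operand when exactly one input is
; NaN; the maxss + cmpunordss + blend sequence above implements that.
define float @maximumnum_f32(float %x, float %y) nounwind {
  %r = call float @llvm.maximumnum.f32(float %x, float %y)
  ret float %r
}
declare float @llvm.maximumnum.f32(float, float)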
diff --git a/llvm/test/CodeGen/X86/fp-undef.ll b/llvm/test/CodeGen/X86/fp-undef.ll
index 227f007..c358085 100644
--- a/llvm/test/CodeGen/X86/fp-undef.ll
+++ b/llvm/test/CodeGen/X86/fp-undef.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ANY
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -enable-unsafe-fp-math | FileCheck %s --check-prefix=ANY
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ANY
; This is duplicated from tests for InstSimplify. If you're
; adding something here, you should probably add it there too.
diff --git a/llvm/test/CodeGen/X86/fp128-select.ll b/llvm/test/CodeGen/X86/fp128-select.ll
index 659e4dd..27a651e 100644
--- a/llvm/test/CodeGen/X86/fp128-select.ll
+++ b/llvm/test/CodeGen/X86/fp128-select.ll
@@ -13,8 +13,8 @@ define void @test_select(ptr %p, ptr %q, i1 zeroext %c) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: testl %edx, %edx
; SSE-NEXT: jne .LBB0_1
-; SSE-NEXT: # %bb.3:
-; SSE-NEXT: movaps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: # %bb.2:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [NaN]
; SSE-NEXT: movaps %xmm0, (%rsi)
; SSE-NEXT: retq
; SSE-NEXT: .LBB0_1:
@@ -58,7 +58,7 @@ define fp128 @test_select_cc(fp128, fp128) nounwind {
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: jmp .LBB1_3
; SSE-NEXT: .LBB1_1:
-; SSE-NEXT: movaps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.0E+0]
; SSE-NEXT: .LBB1_3: # %BB0
; SSE-NEXT: testl %ebx, %ebx
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
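The regenerated movaps checks in this file no longer match a raw .LCPI label: the {{.*#+}} pattern skips ahead to llc's verbose-assembly comment, which decodes the constant-pool value, so the constant being loaded ([NaN], [1.0E+0]) is visible in the check itself. Illustrative only (label number assumed), such a check matches output of the form:

; movaps .LCPI1_0(%rip), %xmm1  # xmm1 = [1.0E+0]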
diff --git a/llvm/test/CodeGen/X86/fsxor-alignment.ll b/llvm/test/CodeGen/X86/fsxor-alignment.ll
index 6fa4a31..32af5b9 100644
--- a/llvm/test/CodeGen/X86/fsxor-alignment.ll
+++ b/llvm/test/CodeGen/X86/fsxor-alignment.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s
; Don't fold the incoming stack arguments into the xorps instructions used
; to do floating-point negations, because the arguments aren't vectors
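A sketch of the situation the comment describes (function shape assumed, not copied from the test): on i686, float arguments live on the stack with only 4-byte alignment, while a memory operand folded into xorps would need 16-byte alignment, so each value must be loaded into a register before the sign-bit flip.

; fneg lowers to xorps against a 0x80000000 sign-bit constant; the
; argument loads below must not be folded into the xorps memory operand.
define void @neg_two_floats(float %a, float %b, ptr %pa, ptr %pb) nounwind {
  %na = fneg float %a
  %nb = fneg float %b
  store float %na, ptr %pa
  store float %nb, ptr %pb
  ret void
}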
diff --git a/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll b/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll
index f710a30..bd997d1 100644
--- a/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll
+++ b/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse -enable-unsafe-fp-math < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse < %s | FileCheck %s
; The debug info in this test case was causing a crash because machine trace metrics
; did not correctly ignore debug instructions. The check lines ensure that the
diff --git a/llvm/test/CodeGen/X86/neg_fp.ll b/llvm/test/CodeGen/X86/neg_fp.ll
index 8020982..18ded50 100644
--- a/llvm/test/CodeGen/X86/neg_fp.ll
+++ b/llvm/test/CodeGen/X86/neg_fp.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- -mattr=+sse4.1 | FileCheck %s
-; Test that when we don't -enable-unsafe-fp-math, we don't do the optimization
+; Test that we don't do the optimization
; -0 - (A - B) to (B - A) because A==B, -0 != 0
define float @negfp(float %a, float %b) nounwind {
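To see why the fold needs a fast-math guarantee, take %a == %b == 1.0 (values chosen for illustration): A - B is +0.0 and -0.0 - (+0.0) is -0.0, while B - A is +0.0, so the rewrite flips the sign of zero and is only sound under the nsz flag. In IR terms (illustrative, not the test body):

; Without 'nsz' these two functions are not equivalent when %a == %b:
; the first returns -0.0, the second returns +0.0.
define float @neg_of_sub(float %a, float %b) {
  %s = fsub float %a, %b
  %n = fsub float -0.000000e+00, %s
  ret float %n
}
define float @sub_swapped(float %a, float %b) {
  %s = fsub float %b, %a
  ret float %s
}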
diff --git a/llvm/test/CodeGen/X86/negate-add-zero.ll b/llvm/test/CodeGen/X86/negate-add-zero.ll
index eb4e2d3..4884832 100644
--- a/llvm/test/CodeGen/X86/negate-add-zero.ll
+++ b/llvm/test/CodeGen/X86/negate-add-zero.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s | FileCheck %s
; PR3374
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
diff --git a/llvm/test/CodeGen/X86/recip-pic.ll b/llvm/test/CodeGen/X86/recip-pic.ll
index d01ecc1..d2620e7 100644
--- a/llvm/test/CodeGen/X86/recip-pic.ll
+++ b/llvm/test/CodeGen/X86/recip-pic.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -enable-unsafe-fp-math -mcpu=slm -relocation-model=pic | FileCheck %s --check-prefix=CHECK
+; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mcpu=slm -relocation-model=pic | FileCheck %s --check-prefix=CHECK
define fastcc float @foo(float %x) unnamed_addr #0 {
; CHECK-LABEL: foo:
diff --git a/llvm/test/CodeGen/X86/sincos-opt.ll b/llvm/test/CodeGen/X86/sincos-opt.ll
index 6885456..51f3e52 100644
--- a/llvm/test/CodeGen/X86/sincos-opt.ll
+++ b/llvm/test/CodeGen/X86/sincos-opt.ll
@@ -1,10 +1,10 @@
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9.0 -mcpu=core2 | FileCheck %s --check-prefix=OSX_SINCOS
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=core2 | FileCheck %s --check-prefix=OSX_NOOPT
; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS
-; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=core2 -enable-unsafe-fp-math | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
-; RUN: llc < %s -mtriple=x86_64-pc-linux-gnux32 -mcpu=core2 -enable-unsafe-fp-math | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
+; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
+; RUN: llc < %s -mtriple=x86_64-pc-linux-gnux32 -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
; RUN: llc < %s -mtriple=x86_64-fuchsia -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS
-; RUN: llc < %s -mtriple=x86_64-fuchsia -mcpu=core2 -enable-unsafe-fp-math | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
+; RUN: llc < %s -mtriple=x86_64-fuchsia -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
; RUN: llc < %s -mtriple=x86_64-scei-ps4 -mcpu=btver2 | FileCheck %s --check-prefix=PS4_SINCOS
; RUN: llc < %s -mtriple=x86_64-sie-ps5 -mcpu=znver2 | FileCheck %s --check-prefix=PS4_SINCOS
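The prefixes above all check variants of the same transform; a minimal input of the shape this test uses (a sketch with assumed names, mirroring the declares in the file) is:

; On targets whose libm provides it, the paired calls on the same
; argument can be merged into a single sincosf call.
define float @sin_plus_cos(float %x) nounwind {
  %s = call float @sinf(float %x)
  %c = call float @cosf(float %x)
  %r = fadd float %s, %c
  ret float %r
}
declare float @sinf(float) readonly
declare float @cosf(float) readonly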
diff --git a/llvm/test/CodeGen/X86/sincos.ll b/llvm/test/CodeGen/X86/sincos.ll
index 7903407..9206c25 100644
--- a/llvm/test/CodeGen/X86/sincos.ll
+++ b/llvm/test/CodeGen/X86/sincos.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Make sure this testcase codegens to the sin and cos instructions, not calls
-; RUN: llc < %s -mtriple=i686-apple-macosx -mattr=-sse,-sse2,-sse3 -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=i686-apple-macosx -mattr=-sse,-sse2,-sse3 | FileCheck %s
; RUN: llc < %s -mtriple=i686-apple-macosx -mattr=-sse,-sse2,-sse3 | FileCheck %s
declare float @sinf(float) readonly
diff --git a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
index c0beb6f..2822d40 100644
--- a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
+++ b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math | FileCheck %s --check-prefix=CST --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+sse4.1 | FileCheck %s --check-prefix=CST --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx | FileCheck %s --check-prefix=CST --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64 | FileCheck %s --check-prefix=CST --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64 -mattr=+sse4.1 | FileCheck %s --check-prefix=CST --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64 -mattr=+avx | FileCheck %s --check-prefix=CST --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64 -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64 -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512VL
; Check that the constants used in the vectors are the right ones.
; SSE2: [[MASKCSTADDR:.LCPI[0-9_]+]]:
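The [[MASKCSTADDR:.LCPI[0-9_]+]] form binds a FileCheck variable to whatever constant-pool label the regex matches, so a later check can require that the load references that same label. A hedged sketch of how such a pair typically reads (the second line is illustrative, not quoted from this test):

; SSE2: [[MASKCSTADDR:.LCPI[0-9_]+]]:
; SSE2: movdqa [[MASKCSTADDR]](%rip), %xmm{{[0-9]}}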