Diffstat (limited to 'llvm/test/CodeGen/X86/ftrunc.ll')
-rw-r--r--  llvm/test/CodeGen/X86/ftrunc.ll  720
1 file changed, 345 insertions(+), 375 deletions(-)
diff --git a/llvm/test/CodeGen/X86/ftrunc.ll b/llvm/test/CodeGen/X86/ftrunc.ll
index d52d145..08705e9 100644
--- a/llvm/test/CodeGen/X86/ftrunc.ll
+++ b/llvm/test/CodeGen/X86/ftrunc.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=X64_AVX1
-; RUN: llc < %s -mtriple=i686-- -mattr=+avx | FileCheck %s --check-prefixes=X32_AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,X64-AVX1
+; RUN: llc < %s -mtriple=i686-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,X86-AVX1
declare i32 @llvm.fptoui.sat.i32.f32(float)
declare i64 @llvm.fptosi.sat.i64.f64(double)
@@ -21,20 +21,20 @@ define float @trunc_unsigned_f32(float %x) #0 {
; SSE41-NEXT: roundss $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_unsigned_f32:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_unsigned_f32:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %eax
-; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovss %xmm0, (%esp)
-; X32_AVX1-NEXT: flds (%esp)
-; X32_AVX1-NEXT: popl %eax
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_unsigned_f32:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_unsigned_f32:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %eax
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT: flds (%esp)
+; X86-AVX1-NEXT: popl %eax
+; X86-AVX1-NEXT: retl
%i = fptoui float %x to i32
%r = uitofp i32 %i to float
ret float %r
@@ -63,24 +63,24 @@ define double @trunc_unsigned_f64(double %x) #0 {
; SSE41-NEXT: roundsd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_unsigned_f64:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_unsigned_f64:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $8, %esp
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovsd %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_unsigned_f64:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_unsigned_f64:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $8, %esp
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: retl
%i = fptoui double %x to i64
%r = uitofp i64 %i to double
ret double %r
@@ -110,15 +110,10 @@ define <4 x float> @trunc_unsigned_v4f32(<4 x float> %x) #0 {
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_unsigned_v4f32:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundps $11, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_unsigned_v4f32:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: vroundps $11, %xmm0, %xmm0
-; X32_AVX1-NEXT: retl
+; AVX-LABEL: trunc_unsigned_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundps $11, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
%i = fptoui <4 x float> %x to <4 x i32>
%r = uitofp <4 x i32> %i to <4 x float>
ret <4 x float> %r
@@ -162,15 +157,10 @@ define <2 x double> @trunc_unsigned_v2f64(<2 x double> %x) #0 {
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_unsigned_v2f64:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundpd $11, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_unsigned_v2f64:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: vroundpd $11, %xmm0, %xmm0
-; X32_AVX1-NEXT: retl
+; AVX-LABEL: trunc_unsigned_v2f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $11, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
%i = fptoui <2 x double> %x to <2 x i64>
%r = uitofp <2 x i64> %i to <2 x double>
ret <2 x double> %r
@@ -244,15 +234,10 @@ define <4 x double> @trunc_unsigned_v4f64(<4 x double> %x) #0 {
; SSE41-NEXT: roundpd $11, %xmm1, %xmm1
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_unsigned_v4f64:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundpd $11, %ymm0, %ymm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_unsigned_v4f64:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: vroundpd $11, %ymm0, %ymm0
-; X32_AVX1-NEXT: retl
+; AVX-LABEL: trunc_unsigned_v4f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX-NEXT: ret{{[l|q]}}
%i = fptoui <4 x double> %x to <4 x i64>
%r = uitofp <4 x i64> %i to <4 x double>
ret <4 x double> %r
@@ -265,24 +250,24 @@ define float @trunc_signed_f32_no_fast_math(float %x) {
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_f32_no_fast_math:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
-; X64_AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_f32_no_fast_math:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %eax
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 8
-; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovss %xmm0, (%esp)
-; X32_AVX1-NEXT: flds (%esp)
-; X32_AVX1-NEXT: popl %eax
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 4
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed_f32_no_fast_math:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; X64-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed_f32_no_fast_math:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %eax
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT: flds (%esp)
+; X86-AVX1-NEXT: popl %eax
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 4
+; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to float
ret float %r
@@ -302,20 +287,20 @@ define float @trunc_signed_f32_nsz(float %x) #0 {
; SSE41-NEXT: roundss $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_f32_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_f32_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %eax
-; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovss %xmm0, (%esp)
-; X32_AVX1-NEXT: flds (%esp)
-; X32_AVX1-NEXT: popl %eax
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed_f32_nsz:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed_f32_nsz:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %eax
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT: flds (%esp)
+; X86-AVX1-NEXT: popl %eax
+; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to float
ret float %r
@@ -328,30 +313,30 @@ define double @trunc_signed32_f64_no_fast_math(double %x) {
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed32_f64_no_fast_math:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
-; X64_AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed32_f64_no_fast_math:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 8
-; X32_AVX1-NEXT: .cfi_offset %ebp, -8
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa_register %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $8, %esp
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovlps %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa %esp, 4
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed32_f64_no_fast_math:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
+; X64-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed32_f64_no_fast_math:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
+; X86-AVX1-NEXT: .cfi_offset %ebp, -8
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $8, %esp
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovlps %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4
+; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to double
ret double %r
@@ -369,24 +354,24 @@ define double @trunc_signed32_f64_nsz(double %x) #0 {
; SSE41-NEXT: roundsd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed32_f64_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed32_f64_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $8, %esp
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovsd %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed32_f64_nsz:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed32_f64_nsz:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $8, %esp
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to double
ret double %r
@@ -399,30 +384,30 @@ define double @trunc_f32_signed32_f64_no_fast_math(float %x) {
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
-; X64_AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 8
-; X32_AVX1-NEXT: .cfi_offset %ebp, -8
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa_register %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $8, %esp
-; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovlps %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa %esp, 4
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; X64-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
+; X86-AVX1-NEXT: .cfi_offset %ebp, -8
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $8, %esp
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovlps %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4
+; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to double
ret double %r
@@ -435,26 +420,26 @@ define double @trunc_f32_signed32_f64_nsz(float %x) #0 {
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_f32_signed32_f64_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
-; X64_AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_f32_signed32_f64_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $8, %esp
-; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovlps %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_f32_signed32_f64_nsz:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; X64-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_f32_signed32_f64_nsz:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $8, %esp
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovlps %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to double
ret double %r
@@ -467,24 +452,24 @@ define float @trunc_f64_signed32_f32_no_fast_math(double %x) {
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
-; X64_AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %eax
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 8
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovss %xmm0, (%esp)
-; X32_AVX1-NEXT: flds (%esp)
-; X32_AVX1-NEXT: popl %eax
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 4
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
+; X64-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %eax
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT: flds (%esp)
+; X86-AVX1-NEXT: popl %eax
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 4
+; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to float
ret float %r
@@ -497,22 +482,22 @@ define float @trunc_f64_signed32_f32_nsz(double %x) #0 {
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_f64_signed32_f32_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
-; X64_AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_f64_signed32_f32_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %eax
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovss %xmm0, (%esp)
-; X32_AVX1-NEXT: flds (%esp)
-; X32_AVX1-NEXT: popl %eax
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_f64_signed32_f32_nsz:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
+; X64-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_f64_signed32_f32_nsz:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %eax
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT: flds (%esp)
+; X86-AVX1-NEXT: popl %eax
+; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to float
ret float %r
@@ -526,34 +511,34 @@ define double @trunc_signed_f64_no_fast_math(double %x) {
; SSE-NEXT: cvtsi2sd %rax, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_f64_no_fast_math:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttsd2si %xmm0, %rax
-; X64_AVX1-NEXT: vcvtsi2sd %rax, %xmm1, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_f64_no_fast_math:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 8
-; X32_AVX1-NEXT: .cfi_offset %ebp, -8
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa_register %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $24, %esp
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vmovsd %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: fisttpll (%esp)
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: fildll {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: fstpl {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: fldl {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa %esp, 4
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed_f64_no_fast_math:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttsd2si %xmm0, %rax
+; X64-AVX1-NEXT: vcvtsi2sd %rax, %xmm1, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed_f64_no_fast_math:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
+; X86-AVX1-NEXT: .cfi_offset %ebp, -8
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $24, %esp
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: fisttpll (%esp)
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4
+; X86-AVX1-NEXT: retl
%i = fptosi double %x to i64
%r = sitofp i64 %i to double
ret double %r
@@ -572,24 +557,24 @@ define double @trunc_signed_f64_nsz(double %x) #0 {
; SSE41-NEXT: roundsd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_f64_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_f64_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $8, %esp
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovsd %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed_f64_nsz:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed_f64_nsz:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $8, %esp
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: retl
%i = fptosi double %x to i64
%r = sitofp i64 %i to double
ret double %r
@@ -607,15 +592,10 @@ define <4 x float> @trunc_signed_v4f32_nsz(<4 x float> %x) #0 {
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_v4f32_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundps $11, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_v4f32_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: vroundps $11, %xmm0, %xmm0
-; X32_AVX1-NEXT: retl
+; AVX-LABEL: trunc_signed_v4f32_nsz:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundps $11, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
%i = fptosi <4 x float> %x to <4 x i32>
%r = sitofp <4 x i32> %i to <4 x float>
ret <4 x float> %r
@@ -638,15 +618,10 @@ define <2 x double> @trunc_signed_v2f64_nsz(<2 x double> %x) #0 {
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_v2f64_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundpd $11, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_v2f64_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: vroundpd $11, %xmm0, %xmm0
-; X32_AVX1-NEXT: retl
+; AVX-LABEL: trunc_signed_v2f64_nsz:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $11, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
%i = fptosi <2 x double> %x to <2 x i64>
%r = sitofp <2 x i64> %i to <2 x double>
ret <2 x double> %r
@@ -678,15 +653,10 @@ define <4 x double> @trunc_signed_v4f64_nsz(<4 x double> %x) #0 {
; SSE41-NEXT: roundpd $11, %xmm1, %xmm1
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_v4f64_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundpd $11, %ymm0, %ymm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_v4f64_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: vroundpd $11, %ymm0, %ymm0
-; X32_AVX1-NEXT: retl
+; AVX-LABEL: trunc_signed_v4f64_nsz:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX-NEXT: ret{{[l|q]}}
%i = fptosi <4 x double> %x to <4 x i64>
%r = sitofp <4 x i64> %i to <4 x double>
ret <4 x double> %r
@@ -715,45 +685,45 @@ define float @trunc_unsigned_f32_disable_via_intrinsic(float %x) #0 {
; SSE-NEXT: cvtsi2ss %rax, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_unsigned_f32_disable_via_intrinsic:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttss2si %xmm0, %rax
-; X64_AVX1-NEXT: xorl %ecx, %ecx
-; X64_AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X64_AVX1-NEXT: vucomiss %xmm1, %xmm0
-; X64_AVX1-NEXT: cmovael %eax, %ecx
-; X64_AVX1-NEXT: vucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64_AVX1-NEXT: movl $-1, %eax
-; X64_AVX1-NEXT: cmovbel %ecx, %eax
-; X64_AVX1-NEXT: vcvtsi2ss %rax, %xmm2, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_unsigned_f32_disable_via_intrinsic:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %eax
-; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX1-NEXT: movl %eax, %ecx
-; X32_AVX1-NEXT: sarl $31, %ecx
-; X32_AVX1-NEXT: vsubss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
-; X32_AVX1-NEXT: vcvttss2si %xmm1, %edx
-; X32_AVX1-NEXT: andl %ecx, %edx
-; X32_AVX1-NEXT: orl %eax, %edx
-; X32_AVX1-NEXT: xorl %eax, %eax
-; X32_AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32_AVX1-NEXT: vucomiss %xmm1, %xmm0
-; X32_AVX1-NEXT: cmovael %edx, %eax
-; X32_AVX1-NEXT: vucomiss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32_AVX1-NEXT: movl $-1, %ecx
-; X32_AVX1-NEXT: cmovbel %eax, %ecx
-; X32_AVX1-NEXT: vmovd %ecx, %xmm0
-; X32_AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32_AVX1-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovss %xmm0, (%esp)
-; X32_AVX1-NEXT: flds (%esp)
-; X32_AVX1-NEXT: popl %eax
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_unsigned_f32_disable_via_intrinsic:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttss2si %xmm0, %rax
+; X64-AVX1-NEXT: xorl %ecx, %ecx
+; X64-AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X64-AVX1-NEXT: vucomiss %xmm1, %xmm0
+; X64-AVX1-NEXT: cmovael %eax, %ecx
+; X64-AVX1-NEXT: vucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-AVX1-NEXT: movl $-1, %eax
+; X64-AVX1-NEXT: cmovbel %ecx, %eax
+; X64-AVX1-NEXT: vcvtsi2ss %rax, %xmm2, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_unsigned_f32_disable_via_intrinsic:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %eax
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vcvttss2si %xmm0, %eax
+; X86-AVX1-NEXT: movl %eax, %ecx
+; X86-AVX1-NEXT: sarl $31, %ecx
+; X86-AVX1-NEXT: vsubss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
+; X86-AVX1-NEXT: vcvttss2si %xmm1, %edx
+; X86-AVX1-NEXT: andl %ecx, %edx
+; X86-AVX1-NEXT: orl %eax, %edx
+; X86-AVX1-NEXT: xorl %eax, %eax
+; X86-AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX1-NEXT: vucomiss %xmm1, %xmm0
+; X86-AVX1-NEXT: cmovael %edx, %eax
+; X86-AVX1-NEXT: vucomiss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-AVX1-NEXT: movl $-1, %ecx
+; X86-AVX1-NEXT: cmovbel %eax, %ecx
+; X86-AVX1-NEXT: vmovd %ecx, %xmm0
+; X86-AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT: flds (%esp)
+; X86-AVX1-NEXT: popl %eax
+; X86-AVX1-NEXT: retl
%i = call i32 @llvm.fptoui.sat.i32.f32(float %x)
%r = uitofp i32 %i to float
ret float %r
@@ -773,56 +743,56 @@ define double @trunc_signed_f64_disable_via_intrinsic(double %x) #0 {
; SSE-NEXT: cvtsi2sd %rax, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_f64_disable_via_intrinsic:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttsd2si %xmm0, %rax
-; X64_AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64_AVX1-NEXT: movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
-; X64_AVX1-NEXT: cmovbeq %rax, %rcx
-; X64_AVX1-NEXT: xorl %eax, %eax
-; X64_AVX1-NEXT: vucomisd %xmm0, %xmm0
-; X64_AVX1-NEXT: cmovnpq %rcx, %rax
-; X64_AVX1-NEXT: vcvtsi2sd %rax, %xmm1, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_f64_disable_via_intrinsic:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: pushl %esi
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $32, %esp
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vmovsd %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: fisttpll (%esp)
-; X32_AVX1-NEXT: xorl %eax, %eax
-; X32_AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32_AVX1-NEXT: movl $-2147483648, %ecx # imm = 0x80000000
-; X32_AVX1-NEXT: movl $0, %edx
-; X32_AVX1-NEXT: jb .LBB19_2
-; X32_AVX1-NEXT: # %bb.1:
-; X32_AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32_AVX1-NEXT: movl (%esp), %edx
-; X32_AVX1-NEXT: .LBB19_2:
-; X32_AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32_AVX1-NEXT: movl $-1, %esi
-; X32_AVX1-NEXT: cmovbel %edx, %esi
-; X32_AVX1-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
-; X32_AVX1-NEXT: cmovbel %ecx, %edx
-; X32_AVX1-NEXT: vucomisd %xmm0, %xmm0
-; X32_AVX1-NEXT: cmovpl %eax, %edx
-; X32_AVX1-NEXT: cmovpl %eax, %esi
-; X32_AVX1-NEXT: vmovd %esi, %xmm0
-; X32_AVX1-NEXT: vpinsrd $1, %edx, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: fildll {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: fstpl {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: fldl {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: leal -4(%ebp), %esp
-; X32_AVX1-NEXT: popl %esi
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed_f64_disable_via_intrinsic:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttsd2si %xmm0, %rax
+; X64-AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-AVX1-NEXT: movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
+; X64-AVX1-NEXT: cmovbeq %rax, %rcx
+; X64-AVX1-NEXT: xorl %eax, %eax
+; X64-AVX1-NEXT: vucomisd %xmm0, %xmm0
+; X64-AVX1-NEXT: cmovnpq %rcx, %rax
+; X64-AVX1-NEXT: vcvtsi2sd %rax, %xmm1, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed_f64_disable_via_intrinsic:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: pushl %esi
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $32, %esp
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: fisttpll (%esp)
+; X86-AVX1-NEXT: xorl %eax, %eax
+; X86-AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-AVX1-NEXT: movl $-2147483648, %ecx # imm = 0x80000000
+; X86-AVX1-NEXT: movl $0, %edx
+; X86-AVX1-NEXT: jb .LBB19_2
+; X86-AVX1-NEXT: # %bb.1:
+; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX1-NEXT: movl (%esp), %edx
+; X86-AVX1-NEXT: .LBB19_2:
+; X86-AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-AVX1-NEXT: movl $-1, %esi
+; X86-AVX1-NEXT: cmovbel %edx, %esi
+; X86-AVX1-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
+; X86-AVX1-NEXT: cmovbel %ecx, %edx
+; X86-AVX1-NEXT: vucomisd %xmm0, %xmm0
+; X86-AVX1-NEXT: cmovpl %eax, %edx
+; X86-AVX1-NEXT: cmovpl %eax, %esi
+; X86-AVX1-NEXT: vmovd %esi, %xmm0
+; X86-AVX1-NEXT: vpinsrd $1, %edx, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: leal -4(%ebp), %esp
+; X86-AVX1-NEXT: popl %esi
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: retl
%i = call i64 @llvm.fptosi.sat.i64.f64(double %x)
%r = sitofp i64 %i to double
ret double %r
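
A note on regenerating these assertions (illustrative, not part of the patch): as the NOTE header says, the CHECK lines are produced by utils/update_llc_test_checks.py from the RUN lines at the top of the test. When the 64-bit and 32-bit AVX outputs for a function are identical apart from the return instruction, the script emits a single block under the shared AVX prefix and matches either return with the ret{{[l|q]}} regex, which is why the vector tests above collapse from two blocks into one. A typical regeneration command, assuming an LLVM source checkout with a built llc (the build/bin path is an assumption):

  # Rewrites the CHECK lines in place, driven by the test's own RUN lines.
  llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
      llvm/test/CodeGen/X86/ftrunc.ll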