; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=x86_64-unknown-unknown -verify-machineinstrs < %s | FileCheck -check-prefixes=X64 %s
; RUN: llc -mtriple=x86_64-pc-win32 -verify-machineinstrs < %s | FileCheck -check-prefixes=WIN64 %s
; RUN: llc -mtriple=i386-pc-win32 -verify-machineinstrs < %s | FileCheck -check-prefix=WIN32 %s

define float @ldexp_f32(i8 zeroext %x) nounwind {
; X64-LABEL: ldexp_f32:
; X64:       # %bb.0:
; X64-NEXT:    movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT:    jmp ldexpf@PLT # TAILCALL
;
; WIN64-LABEL: ldexp_f32:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    movzbl %cl, %edx
; WIN64-NEXT:    movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    cvtsd2ss %xmm0, %xmm0
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
;
; WIN32-LABEL: ldexp_f32:
; WIN32:       # %bb.0:
; WIN32-NEXT:    subl $16, %esp
; WIN32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; WIN32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; WIN32-NEXT:    fld1
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    fstps {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    addl $16, %esp
; WIN32-NEXT:    retl
  %zext = zext i8 %x to i32
  %ldexp = call float @llvm.ldexp.f32.i32(float 1.000000e+00, i32 %zext)
  ret float %ldexp
}

define double @ldexp_f64(i8 zeroext %x) nounwind {
; X64-LABEL: ldexp_f64:
; X64:       # %bb.0:
; X64-NEXT:    movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; X64-NEXT:    jmp ldexp@PLT # TAILCALL
;
; WIN64-LABEL: ldexp_f64:
; WIN64:       # %bb.0:
; WIN64-NEXT:    movzbl %cl, %edx
; WIN64-NEXT:    movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; WIN64-NEXT:    jmp ldexp # TAILCALL
;
; WIN32-LABEL: ldexp_f64:
; WIN32:       # %bb.0:
; WIN32-NEXT:    subl $12, %esp
; WIN32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; WIN32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; WIN32-NEXT:    fld1
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    addl $12, %esp
; WIN32-NEXT:    retl
  %zext = zext i8 %x to i32
  %ldexp = call double @llvm.ldexp.f64.i32(double 1.000000e+00, i32 %zext)
  ret double %ldexp
}

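; There is no vector ldexp libcall, so <2 x float> is scalarized into one
; ldexpf call per lane on X64. The Windows runs promote each lane to double
; and call ldexp instead, converting back with cvtsd2ss (or x87 stores on
; WIN32), as the checks below show.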
define <2 x float> @ldexp_v2f32(<2 x float> %val, <2 x i32> %exp) nounwind {
; X64-LABEL: ldexp_v2f32:
; X64:       # %bb.0:
; X64-NEXT:    subq $56, %rsp
; X64-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT:    movd %xmm1, %edi
; X64-NEXT:    callq ldexpf@PLT
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X64-NEXT:    pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; X64-NEXT:    # xmm1 = mem[1,1,1,1]
; X64-NEXT:    movd %xmm1, %edi
; X64-NEXT:    callq ldexpf@PLT
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; X64-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-NEXT:    movaps %xmm1, %xmm0
; X64-NEXT:    addq $56, %rsp
; X64-NEXT:    retq
;
; WIN64-LABEL: ldexp_v2f32:
; WIN64:       # %bb.0:
; WIN64-NEXT:    pushq %rsi
; WIN64-NEXT:    subq $80, %rsp
; WIN64-NEXT:    movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; WIN64-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; WIN64-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; WIN64-NEXT:    movq %rdx, %rsi
; WIN64-NEXT:    movaps (%rcx), %xmm7
; WIN64-NEXT:    movl 12(%rdx), %edx
; WIN64-NEXT:    movaps %xmm7, %xmm0
; WIN64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm7[3,3]
; WIN64-NEXT:    cvtss2sd %xmm0, %xmm0
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    xorps %xmm6, %xmm6
; WIN64-NEXT:    cvtsd2ss %xmm0, %xmm6
; WIN64-NEXT:    movl 8(%rsi), %edx
; WIN64-NEXT:    movaps %xmm7, %xmm0
; WIN64-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1]
; WIN64-NEXT:    cvtss2sd %xmm0, %xmm0
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    xorps %xmm8, %xmm8
; WIN64-NEXT:    cvtsd2ss %xmm0, %xmm8
; WIN64-NEXT:    unpcklps {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
; WIN64-NEXT:    movl (%rsi), %edx
; WIN64-NEXT:    movl 4(%rsi), %esi
; WIN64-NEXT:    xorps %xmm0, %xmm0
; WIN64-NEXT:    cvtss2sd %xmm7, %xmm0
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    xorps %xmm6, %xmm6
; WIN64-NEXT:    cvtsd2ss %xmm0, %xmm6
; WIN64-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,1,1,1]
; WIN64-NEXT:    xorps %xmm0, %xmm0
; WIN64-NEXT:    cvtss2sd %xmm7, %xmm0
; WIN64-NEXT:    movl %esi, %edx
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    cvtsd2ss %xmm0, %xmm0
; WIN64-NEXT:    unpcklps {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
; WIN64-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm8[0]
; WIN64-NEXT:    movaps %xmm6, %xmm0
; WIN64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; WIN64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; WIN64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; WIN64-NEXT:    addq $80, %rsp
; WIN64-NEXT:    popq %rsi
; WIN64-NEXT:    retq
;
; WIN32-LABEL: ldexp_v2f32:
; WIN32:       # %bb.0:
; WIN32-NEXT:    pushl %esi
; WIN32-NEXT:    subl $20, %esp
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; WIN32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    movl %esi, {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    fstps {{[0-9]+}}(%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    fstps {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    addl $20, %esp
; WIN32-NEXT:    popl %esi
; WIN32-NEXT:    retl
  %1 = call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> %val, <2 x i32> %exp)
  ret <2 x float> %1
}

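; <4 x float> scalarizes to four libcalls. On WIN32 the result is returned
; indirectly: the four lanes are stored through the pointer held in %esi.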
define <4 x float> @ldexp_v4f32(<4 x float> %val, <4 x i32> %exp) nounwind {
; X64-LABEL: ldexp_v4f32:
; X64:       # %bb.0:
; X64-NEXT:    subq $72, %rsp
; X64-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
; X64-NEXT:    movd %xmm2, %edi
; X64-NEXT:    callq ldexpf@PLT
; X64-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X64-NEXT:    pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; X64-NEXT:    # xmm1 = mem[2,3,2,3]
; X64-NEXT:    movd %xmm1, %edi
; X64-NEXT:    callq ldexpf@PLT
; X64-NEXT:    unpcklps (%rsp), %xmm0 # 16-byte Folded Reload
; X64-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; X64-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT:    movd %xmm0, %edi
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT:    callq ldexpf@PLT
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X64-NEXT:    pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; X64-NEXT:    # xmm1 = mem[1,1,1,1]
; X64-NEXT:    movd %xmm1, %edi
; X64-NEXT:    callq ldexpf@PLT
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; X64-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-NEXT:    unpcklpd (%rsp), %xmm1 # 16-byte Folded Reload
; X64-NEXT:    # xmm1 = xmm1[0],mem[0]
; X64-NEXT:    movaps %xmm1, %xmm0
; X64-NEXT:    addq $72, %rsp
; X64-NEXT:    retq
;
; WIN64-LABEL: ldexp_v4f32:
; WIN64:       # %bb.0:
; WIN64-NEXT:    pushq %rsi
; WIN64-NEXT:    subq $80, %rsp
; WIN64-NEXT:    movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; WIN64-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; WIN64-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; WIN64-NEXT:    movq %rdx, %rsi
; WIN64-NEXT:    movaps (%rcx), %xmm7
; WIN64-NEXT:    movl 12(%rdx), %edx
; WIN64-NEXT:    movaps %xmm7, %xmm0
; WIN64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3],xmm7[3,3]
; WIN64-NEXT:    cvtss2sd %xmm0, %xmm0
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    xorps %xmm6, %xmm6
; WIN64-NEXT:    cvtsd2ss %xmm0, %xmm6
; WIN64-NEXT:    movl 8(%rsi), %edx
; WIN64-NEXT:    movaps %xmm7, %xmm0
; WIN64-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1]
; WIN64-NEXT:    cvtss2sd %xmm0, %xmm0
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    xorps %xmm8, %xmm8
; WIN64-NEXT:    cvtsd2ss %xmm0, %xmm8
; WIN64-NEXT:    unpcklps {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
; WIN64-NEXT:    movl (%rsi), %edx
; WIN64-NEXT:    movl 4(%rsi), %esi
; WIN64-NEXT:    xorps %xmm0, %xmm0
; WIN64-NEXT:    cvtss2sd %xmm7, %xmm0
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    xorps %xmm6, %xmm6
; WIN64-NEXT:    cvtsd2ss %xmm0, %xmm6
; WIN64-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,1,1,1]
; WIN64-NEXT:    xorps %xmm0, %xmm0
; WIN64-NEXT:    cvtss2sd %xmm7, %xmm0
; WIN64-NEXT:    movl %esi, %edx
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    cvtsd2ss %xmm0, %xmm0
; WIN64-NEXT:    unpcklps {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
; WIN64-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm8[0]
; WIN64-NEXT:    movaps %xmm6, %xmm0
; WIN64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; WIN64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; WIN64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; WIN64-NEXT:    addq $80, %rsp
; WIN64-NEXT:    popq %rsi
; WIN64-NEXT:    retq
;
; WIN32-LABEL: ldexp_v4f32:
; WIN32:       # %bb.0:
; WIN32-NEXT:    pushl %ebp
; WIN32-NEXT:    pushl %ebx
; WIN32-NEXT:    pushl %edi
; WIN32-NEXT:    pushl %esi
; WIN32-NEXT:    subl $44, %esp
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %edi
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; WIN32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    fstpl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; WIN32-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    fstpl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; WIN32-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    movl %edi, {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    fstps {{[0-9]+}}(%esp)
; WIN32-NEXT:    fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; WIN32-NEXT:    fstps {{[0-9]+}}(%esp)
; WIN32-NEXT:    fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; WIN32-NEXT:    fstps {{[0-9]+}}(%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    fstps {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstps 12(%esi)
; WIN32-NEXT:    fstps 8(%esi)
; WIN32-NEXT:    fstps 4(%esi)
; WIN32-NEXT:    fstps (%esi)
; WIN32-NEXT:    movl %esi, %eax
; WIN32-NEXT:    addl $44, %esp
; WIN32-NEXT:    popl %esi
; WIN32-NEXT:    popl %edi
; WIN32-NEXT:    popl %ebx
; WIN32-NEXT:    popl %ebp
; WIN32-NEXT:    retl
  %1 = call <4 x float> @llvm.ldexp.v4f32.v4i32(<4 x float> %val, <4 x i32> %exp)
  ret <4 x float> %1
}

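; For <2 x double> the lanes are already double, so no promotion is needed;
; each lane feeds ldexp directly.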
define <2 x double> @ldexp_v2f64(<2 x double> %val, <2 x i32> %exp) nounwind {
; X64-LABEL: ldexp_v2f64:
; X64:       # %bb.0:
; X64-NEXT:    subq $56, %rsp
; X64-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT:    movd %xmm1, %edi
; X64-NEXT:    callq ldexp@PLT
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; X64-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X64-NEXT:    pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; X64-NEXT:    # xmm1 = mem[1,1,1,1]
; X64-NEXT:    movd %xmm1, %edi
; X64-NEXT:    callq ldexp@PLT
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; X64-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-NEXT:    movaps %xmm1, %xmm0
; X64-NEXT:    addq $56, %rsp
; X64-NEXT:    retq
;
; WIN64-LABEL: ldexp_v2f64:
; WIN64:       # %bb.0:
; WIN64-NEXT:    pushq %rsi
; WIN64-NEXT:    subq $64, %rsp
; WIN64-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; WIN64-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; WIN64-NEXT:    movaps (%rcx), %xmm6
; WIN64-NEXT:    movl (%rdx), %eax
; WIN64-NEXT:    movl 4(%rdx), %esi
; WIN64-NEXT:    movaps %xmm6, %xmm0
; WIN64-NEXT:    movl %eax, %edx
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    movaps %xmm0, %xmm7
; WIN64-NEXT:    movhlps {{.*#+}} xmm6 = xmm6[1,1]
; WIN64-NEXT:    movaps %xmm6, %xmm0
; WIN64-NEXT:    movl %esi, %edx
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    movlhps {{.*#+}} xmm7 = xmm7[0],xmm0[0]
; WIN64-NEXT:    movaps %xmm7, %xmm0
; WIN64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; WIN64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; WIN64-NEXT:    addq $64, %rsp
; WIN64-NEXT:    popq %rsi
; WIN64-NEXT:    retq
;
; WIN32-LABEL: ldexp_v2f64:
; WIN32:       # %bb.0:
; WIN32-NEXT:    pushl %esi
; WIN32-NEXT:    subl $28, %esp
; WIN32-NEXT:    fldl {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstpl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; WIN32-NEXT:    fldl {{[0-9]+}}(%esp)
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; WIN32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    fstpl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; WIN32-NEXT:    movl %esi, {{[0-9]+}}(%esp)
; WIN32-NEXT:    fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; WIN32-NEXT:    fxch %st(1)
; WIN32-NEXT:    addl $28, %esp
; WIN32-NEXT:    popl %esi
; WIN32-NEXT:    retl
  %1 = call <2 x double> @llvm.ldexp.v2f64.v2i32(<2 x double> %val, <2 x i32> %exp)
  ret <2 x double> %1
}

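; <4 x double> is split into two <2 x double> halves. On WIN64 the halves are
; passed indirectly in %rcx and %rdx with the exponent vector behind %r8;
; WIN32 again returns the result through a pointer (%esi).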
define <4 x double> @ldexp_v4f64(<4 x double> %val, <4 x i32> %exp) nounwind {
; X64-LABEL: ldexp_v4f64:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbp
; X64-NEXT:    pushq %rbx
; X64-NEXT:    subq $72, %rsp
; X64-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movdqa %xmm2, (%rsp) # 16-byte Spill
; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; X64-NEXT:    movd %xmm1, %ebx
; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[3,3,3,3]
; X64-NEXT:    movd %xmm1, %ebp
; X64-NEXT:    movd %xmm2, %edi
; X64-NEXT:    callq ldexp@PLT
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X64-NEXT:    pshufd $85, (%rsp), %xmm1 # 16-byte Folded Reload
; X64-NEXT:    # xmm1 = mem[1,1,1,1]
; X64-NEXT:    movd %xmm1, %edi
; X64-NEXT:    callq ldexp@PLT
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; X64-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X64-NEXT:    movl %ebp, %edi
; X64-NEXT:    callq ldexp@PLT
; X64-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT:    movl %ebx, %edi
; X64-NEXT:    callq ldexp@PLT
; X64-NEXT:    movaps %xmm0, %xmm1
; X64-NEXT:    unpcklpd (%rsp), %xmm1 # 16-byte Folded Reload
; X64-NEXT:    # xmm1 = xmm1[0],mem[0]
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT:    addq $72, %rsp
; X64-NEXT:    popq %rbx
; X64-NEXT:    popq %rbp
; X64-NEXT:    retq
;
; WIN64-LABEL: ldexp_v4f64:
; WIN64:       # %bb.0:
; WIN64-NEXT:    pushq %rsi
; WIN64-NEXT:    pushq %rdi
; WIN64-NEXT:    pushq %rbx
; WIN64-NEXT:    subq $80, %rsp
; WIN64-NEXT:    movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; WIN64-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; WIN64-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; WIN64-NEXT:    movl 12(%r8), %esi
; WIN64-NEXT:    movl 8(%r8), %edi
; WIN64-NEXT:    movaps (%rdx), %xmm6
; WIN64-NEXT:    movaps (%rcx), %xmm7
; WIN64-NEXT:    movl (%r8), %edx
; WIN64-NEXT:    movl 4(%r8), %ebx
; WIN64-NEXT:    movaps %xmm7, %xmm0
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    movaps %xmm0, %xmm8
; WIN64-NEXT:    movhlps {{.*#+}} xmm7 = xmm7[1,1]
; WIN64-NEXT:    movaps %xmm7, %xmm0
; WIN64-NEXT:    movl %ebx, %edx
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    movlhps {{.*#+}} xmm8 = xmm8[0],xmm0[0]
; WIN64-NEXT:    movaps %xmm6, %xmm0
; WIN64-NEXT:    movl %edi, %edx
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    movaps %xmm0, %xmm7
; WIN64-NEXT:    movhlps {{.*#+}} xmm6 = xmm6[1,1]
; WIN64-NEXT:    movaps %xmm6, %xmm0
; WIN64-NEXT:    movl %esi, %edx
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    movlhps {{.*#+}} xmm7 = xmm7[0],xmm0[0]
; WIN64-NEXT:    movaps %xmm8, %xmm0
; WIN64-NEXT:    movaps %xmm7, %xmm1
; WIN64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; WIN64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; WIN64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; WIN64-NEXT:    addq $80, %rsp
; WIN64-NEXT:    popq %rbx
; WIN64-NEXT:    popq %rdi
; WIN64-NEXT:    popq %rsi
; WIN64-NEXT:    retq
;
; WIN32-LABEL: ldexp_v4f64:
; WIN32:       # %bb.0:
; WIN32-NEXT:    pushl %ebp
; WIN32-NEXT:    pushl %ebx
; WIN32-NEXT:    pushl %edi
; WIN32-NEXT:    pushl %esi
; WIN32-NEXT:    subl $44, %esp
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; WIN32-NEXT:    fldl {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstpl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %edi
; WIN32-NEXT:    fldl {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstpl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; WIN32-NEXT:    fldl {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstpl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; WIN32-NEXT:    fldl {{[0-9]+}}(%esp)
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; WIN32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    fstpl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; WIN32-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
; WIN32-NEXT:    fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    fstpl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; WIN32-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
; WIN32-NEXT:    fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    fstpl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; WIN32-NEXT:    movl %edi, {{[0-9]+}}(%esp)
; WIN32-NEXT:    fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    fstpl 24(%esi)
; WIN32-NEXT:    fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; WIN32-NEXT:    fstpl 16(%esi)
; WIN32-NEXT:    fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; WIN32-NEXT:    fstpl 8(%esi)
; WIN32-NEXT:    fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; WIN32-NEXT:    fstpl (%esi)
; WIN32-NEXT:    movl %esi, %eax
; WIN32-NEXT:    addl $44, %esp
; WIN32-NEXT:    popl %esi
; WIN32-NEXT:    popl %edi
; WIN32-NEXT:    popl %ebx
; WIN32-NEXT:    popl %ebp
; WIN32-NEXT:    retl
  %1 = call <4 x double> @llvm.ldexp.v4f64.v4i32(<4 x double> %val, <4 x i32> %exp)
  ret <4 x double> %1
}

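; half has no ldexp libcall of its own: the argument is extended with
; __extendhfsf2, scaled by ldexpf (or by ldexp after a further promotion to
; double on the Windows targets), and truncated back via __truncsfhf2 or
; __truncdfhf2.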
define half @ldexp_f16(half %arg0, i32 %arg1) nounwind {
; X64-LABEL: ldexp_f16:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movl %edi, %ebx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    movl %ebx, %edi
; X64-NEXT:    callq ldexpf@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; WIN64-LABEL: ldexp_f16:
; WIN64:       # %bb.0:
; WIN64-NEXT:    pushq %rsi
; WIN64-NEXT:    subq $32, %rsp
; WIN64-NEXT:    movl %edx, %esi
; WIN64-NEXT:    callq __extendhfsf2
; WIN64-NEXT:    cvtss2sd %xmm0, %xmm0
; WIN64-NEXT:    movl %esi, %edx
; WIN64-NEXT:    callq ldexp
; WIN64-NEXT:    callq __truncdfhf2
; WIN64-NEXT:    addq $32, %rsp
; WIN64-NEXT:    popq %rsi
; WIN64-NEXT:    retq
;
; WIN32-LABEL: ldexp_f16:
; WIN32:       # %bb.0:
; WIN32-NEXT:    pushl %esi
; WIN32-NEXT:    subl $16, %esp
; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; WIN32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; WIN32-NEXT:    movl %eax, (%esp)
; WIN32-NEXT:    calll ___extendhfsf2
; WIN32-NEXT:    movl %esi, {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstpl (%esp)
; WIN32-NEXT:    calll _ldexp
; WIN32-NEXT:    fstps {{[0-9]+}}(%esp)
; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
; WIN32-NEXT:    fstps (%esp)
; WIN32-NEXT:    calll ___truncsfhf2
; WIN32-NEXT:    addl $16, %esp
; WIN32-NEXT:    popl %esi
; WIN32-NEXT:    retl
  %ldexp = call half @llvm.ldexp.f16.i32(half %arg0, i32 %arg1)
  ret half %ldexp
}

declare double @llvm.ldexp.f64.i32(double, i32) #0
declare float @llvm.ldexp.f32.i32(float, i32) #0
declare <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float>, <2 x i32>) #0
declare <4 x float> @llvm.ldexp.v4f32.v4i32(<4 x float>, <4 x i32>) #0
declare <2 x double> @llvm.ldexp.v2f64.v2i32(<2 x double>, <2 x i32>) #0
declare <4 x double> @llvm.ldexp.v4f64.v4i32(<4 x double>, <4 x i32>) #0
declare half @llvm.ldexp.f16.i32(half, i32) #0

attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
attributes #1 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }