; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64

define { half, half } @test_sincos_f16(half %a) #0 {
; LA32-LABEL: test_sincos_f16:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -32
; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 24 # 4-byte Folded Spill
; LA32-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT: bl __extendhfsf2
; LA32-NEXT: fmov.s $fs0, $fa0
; LA32-NEXT: bl cosf
; LA32-NEXT: bl __truncsfhf2
; LA32-NEXT: fmov.s $fs1, $fa0
; LA32-NEXT: fmov.s $fa0, $fs0
; LA32-NEXT: bl sinf
; LA32-NEXT: movfr2gr.s $fp, $fs1
; LA32-NEXT: bl __truncsfhf2
; LA32-NEXT: movfr2gr.s $a0, $fa0
; LA32-NEXT: lu12i.w $a1, -16
; LA32-NEXT: or $a0, $a0, $a1
; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: or $a0, $fp, $a1
; LA32-NEXT: movgr2fr.w $fa1, $a0
; LA32-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 24 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 32
; LA32-NEXT: ret
;
; LA64-LABEL: test_sincos_f16:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -32
; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs0, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs1, $sp, 0 # 8-byte Folded Spill
; LA64-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fmov.s $fs0, $fa0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: pcaddu18i $ra, %call36(__truncsfhf2)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fmov.s $fs1, $fa0
; LA64-NEXT: fmov.s $fa0, $fs0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: movfr2gr.s $fp, $fs1
; LA64-NEXT: pcaddu18i $ra, %call36(__truncsfhf2)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: movfr2gr.s $a0, $fa0
; LA64-NEXT: lu12i.w $a1, -16
; LA64-NEXT: or $a0, $a0, $a1
; LA64-NEXT: movgr2fr.w $fa0, $a0
; LA64-NEXT: or $a0, $fp, $a1
; LA64-NEXT: movgr2fr.w $fa1, $a0
; LA64-NEXT: fld.d $fs1, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT: fld.d $fs0, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 32
; LA64-NEXT: ret
  %result = call { half, half } @llvm.sincos.f16(half %a)
  ret { half, half } %result
}

define half @test_sincos_f16_only_use_sin(half %a) #0 {
; LA32-LABEL: test_sincos_f16_only_use_sin:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: bl __extendhfsf2
; LA32-NEXT: bl sinf
; LA32-NEXT: bl __truncsfhf2
; LA32-NEXT: movfr2gr.s $a0, $fa0
; LA32-NEXT: lu12i.w $a1, -16
; LA32-NEXT: or $a0, $a0, $a1
; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: test_sincos_f16_only_use_sin:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -16
; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: pcaddu18i $ra, %call36(__truncsfhf2)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: movfr2gr.s $a0, $fa0
; LA64-NEXT: lu12i.w $a1, -16
; LA64-NEXT: or $a0, $a0, $a1
; LA64-NEXT: movgr2fr.w $fa0, $a0
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 16
; LA64-NEXT: ret
  %result = call { half, half } @llvm.sincos.f16(half %a)
  %result.0 = extractvalue { half, half } %result, 0
  ret half %result.0
}

define half @test_sincos_f16_only_use_cos(half %a) #0 {
; LA32-LABEL: test_sincos_f16_only_use_cos:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: bl __extendhfsf2
; LA32-NEXT: bl cosf
; LA32-NEXT: bl __truncsfhf2
; LA32-NEXT: movfr2gr.s $a0, $fa0
; LA32-NEXT: lu12i.w $a1, -16
; LA32-NEXT: or $a0, $a0, $a1
; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: test_sincos_f16_only_use_cos:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -16
; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: pcaddu18i $ra, %call36(__truncsfhf2)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: movfr2gr.s $a0, $fa0
; LA64-NEXT: lu12i.w $a1, -16
; LA64-NEXT: or $a0, $a0, $a1
; LA64-NEXT: movgr2fr.w $fa0, $a0
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 16
; LA64-NEXT: ret
  %result = call { half, half } @llvm.sincos.f16(half %a)
  %result.1 = extractvalue { half, half } %result, 1
  ret half %result.1
}

define { <2 x half>, <2 x half> } @test_sincos_v2f16(<2 x half> %a) #0 {
; LA32-LABEL: test_sincos_v2f16:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -64
; LA32-NEXT: st.w $ra, $sp, 60 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 56 # 4-byte Folded Spill
; LA32-NEXT: st.w $s0, $sp, 52 # 4-byte Folded Spill
; LA32-NEXT: st.w $s1, $sp, 48 # 4-byte Folded Spill
; LA32-NEXT: st.w $s2, $sp, 44 # 4-byte Folded Spill
; LA32-NEXT: fst.d $fs0, $sp, 32 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs1, $sp, 24 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs2, $sp, 16 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs3, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: movgr2fr.w $fs0, $a2
; LA32-NEXT: movgr2fr.w $fa0, $a1
; LA32-NEXT: bl __extendhfsf2
; LA32-NEXT: fmov.s $fs1, $fa0
; LA32-NEXT: bl cosf
; LA32-NEXT: bl __truncsfhf2
; LA32-NEXT: fmov.s $fs2, $fa0
; LA32-NEXT: fmov.s $fa0, $fs0
; LA32-NEXT: bl __extendhfsf2
; LA32-NEXT: fmov.s $fs0, $fa0
; LA32-NEXT: bl sinf
; LA32-NEXT: bl __truncsfhf2
; LA32-NEXT: fmov.s $fs3, $fa0
; LA32-NEXT: fmov.s $fa0, $fs1
; LA32-NEXT: bl sinf
; LA32-NEXT: bl __truncsfhf2
; LA32-NEXT: fmov.s $fs1, $fa0
; LA32-NEXT: fmov.s $fa0, $fs0
; LA32-NEXT: bl cosf
; LA32-NEXT: movfr2gr.s $s0, $fs1
; LA32-NEXT: movfr2gr.s $s1, $fs3
; LA32-NEXT: movfr2gr.s $s2, $fs2
; LA32-NEXT: bl __truncsfhf2
; LA32-NEXT: movfr2gr.s $a0, $fa0
; LA32-NEXT: st.h $a0, $fp, 6
; LA32-NEXT: st.h $s2, $fp, 4
; LA32-NEXT: st.h $s1, $fp, 2
; LA32-NEXT: st.h $s0, $fp, 0
; LA32-NEXT: fld.d $fs3, $sp, 8 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs2, $sp, 16 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs1, $sp, 24 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs0, $sp, 32 # 8-byte Folded Reload
; LA32-NEXT: ld.w $s2, $sp, 44 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s1, $sp, 48 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s0, $sp, 52 # 4-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 56 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 60 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 64
; LA32-NEXT: ret
;
; LA64-LABEL: test_sincos_v2f16:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -80
; LA64-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 64 # 8-byte Folded Spill
; LA64-NEXT: st.d $s0, $sp, 56 # 8-byte Folded Spill
; LA64-NEXT: st.d $s1, $sp, 48 # 8-byte Folded Spill
; LA64-NEXT: st.d $s2, $sp, 40 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs0, $sp, 32 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs1, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs2, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs3, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: move $s0, $a2
; LA64-NEXT: move $fp, $a0
; LA64-NEXT: movgr2fr.w $fa0, $a1
; LA64-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fmov.s $fs0, $fa0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: pcaddu18i $ra, %call36(__truncsfhf2)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fmov.s $fs1, $fa0
; LA64-NEXT: movgr2fr.w $fa0, $s0
; LA64-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fmov.s $fs2, $fa0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: pcaddu18i $ra, %call36(__truncsfhf2)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fmov.s $fs3, $fa0
; LA64-NEXT: fmov.s $fa0, $fs0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: pcaddu18i $ra, %call36(__truncsfhf2)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fmov.s $fs0, $fa0
; LA64-NEXT: fmov.s $fa0, $fs2
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: movfr2gr.s $s0, $fs0
; LA64-NEXT: movfr2gr.s $s1, $fs3
; LA64-NEXT: movfr2gr.s $s2, $fs1
; LA64-NEXT: pcaddu18i $ra, %call36(__truncsfhf2)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: movfr2gr.s $a0, $fa0
; LA64-NEXT: st.h $a0, $fp, 6
; LA64-NEXT: st.h $s2, $fp, 4
; LA64-NEXT: st.h $s1, $fp, 2
; LA64-NEXT: st.h $s0, $fp, 0
; LA64-NEXT: fld.d $fs3, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: fld.d $fs2, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: fld.d $fs1, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: fld.d $fs0, $sp, 32 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s2, $sp, 40 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s1, $sp, 48 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s0, $sp, 56 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 64 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 80
; LA64-NEXT: ret
  %result = call { <2 x half>, <2 x half> } @llvm.sincos.v2f16(<2 x half> %a)
  ret { <2 x half>, <2 x half> } %result
}

define { float, float } @test_sincos_f32(float %a) #0 {
; LA32-LABEL: test_sincos_f32:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -32
; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT: fmov.s $fs0, $fa0
; LA32-NEXT: bl sinf
; LA32-NEXT: fmov.s $fs1, $fa0
; LA32-NEXT: fmov.s $fa0, $fs0
; LA32-NEXT: bl cosf
; LA32-NEXT: fmov.s $fa1, $fa0
; LA32-NEXT: fmov.s $fa0, $fs1
; LA32-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 32
; LA32-NEXT: ret
;
; LA64-LABEL: test_sincos_f32:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -32
; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: fmov.s $fs0, $fa0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fmov.s $fs1, $fa0
; LA64-NEXT: fmov.s $fa0, $fs0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fmov.s $fa1, $fa0
; LA64-NEXT: fmov.s $fa0, $fs1
; LA64-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 32
; LA64-NEXT: ret
  %result = call { float, float } @llvm.sincos.f32(float %a)
  ret { float, float } %result
}

define { <2 x float>, <2 x float> } @test_sincos_v2f32(<2 x float> %a) #0 {
; LA32-LABEL: test_sincos_v2f32:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -48
; LA32-NEXT: st.w $ra, $sp, 44 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 40 # 4-byte Folded Spill
; LA32-NEXT: fst.d $fs0, $sp, 32 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs1, $sp, 24 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs2, $sp, 16 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs3, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT: fmov.s $fs0, $fa1
; LA32-NEXT: fmov.s $fs1, $fa0
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: bl sinf
; LA32-NEXT: fmov.s $fs2, $fa0
; LA32-NEXT: fmov.s $fa0, $fs0
; LA32-NEXT: bl sinf
; LA32-NEXT: fmov.s $fs3, $fa0
; LA32-NEXT: fmov.s $fa0, $fs1
; LA32-NEXT: bl cosf
; LA32-NEXT: fmov.s $fs1, $fa0
; LA32-NEXT: fmov.s $fa0, $fs0
; LA32-NEXT: bl cosf
; LA32-NEXT: fst.s $fa0, $fp, 12
; LA32-NEXT: fst.s $fs1, $fp, 8
; LA32-NEXT: fst.s $fs3, $fp, 4
; LA32-NEXT: fst.s $fs2, $fp, 0
; LA32-NEXT: fld.d $fs3, $sp, 8 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs2, $sp, 16 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs1, $sp, 24 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs0, $sp, 32 # 8-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 40 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 44 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 48
; LA32-NEXT: ret
;
; LA64-LABEL: test_sincos_v2f32:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -80
; LA64-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT: vreplvei.w $vr0, $vr0, 1
; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT: vreplvei.w $vr0, $vr0, 0
; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vld $vr1, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: vextrins.w $vr0, $vr1, 16
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fmov.s $fa1, $fa0
; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: vextrins.w $vr1, $vr0, 16
; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 80
; LA64-NEXT: ret
  %result = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> %a)
  ret { <2 x float>, <2 x float> } %result
}

define { <3 x float>, <3 x float> } @test_sincos_v3f32(<3 x float> %a) #0 {
; LA32-LABEL: test_sincos_v3f32:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -64
; LA32-NEXT: st.w $ra, $sp, 60 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 56 # 4-byte Folded Spill
; LA32-NEXT: fst.d $fs0, $sp, 48 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs1, $sp, 40 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs2, $sp, 32 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs3, $sp, 24 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs4, $sp, 16 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs5, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT: fmov.s $fs0, $fa2
; LA32-NEXT: fmov.s $fs1, $fa1
; LA32-NEXT: fmov.s $fs2, $fa0
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: bl sinf
; LA32-NEXT: fmov.s $fs3, $fa0
; LA32-NEXT: fmov.s $fa0, $fs1
; LA32-NEXT: bl sinf
; LA32-NEXT: fmov.s $fs4, $fa0
; LA32-NEXT: fmov.s $fa0, $fs0
; LA32-NEXT: bl sinf
; LA32-NEXT: fmov.s $fs5, $fa0
; LA32-NEXT: fmov.s $fa0, $fs2
; LA32-NEXT: bl cosf
; LA32-NEXT: fmov.s $fs2, $fa0
; LA32-NEXT: fmov.s $fa0, $fs1
; LA32-NEXT: bl cosf
; LA32-NEXT: fmov.s $fs1, $fa0
; LA32-NEXT: fmov.s $fa0, $fs0
; LA32-NEXT: bl cosf
; LA32-NEXT: fst.s $fa0, $fp, 24
; LA32-NEXT: fst.s $fs1, $fp, 20
; LA32-NEXT: fst.s $fs2, $fp, 16
; LA32-NEXT: fst.s $fs5, $fp, 8
; LA32-NEXT: fst.s $fs4, $fp, 4
; LA32-NEXT: fst.s $fs3, $fp, 0
; LA32-NEXT: fld.d $fs5, $sp, 8 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs4, $sp, 16 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs3, $sp, 24 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs2, $sp, 32 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs1, $sp, 40 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs0, $sp, 48 # 8-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 56 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 60 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 64
; LA32-NEXT: ret
;
; LA64-LABEL: test_sincos_v3f32:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -96
; LA64-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill
; LA64-NEXT: vreplvei.w $vr0, $vr0, 1
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vst $vr0, $sp, 64 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: vreplvei.w $vr0, $vr0, 0
; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vld $vr1, $sp, 64 # 16-byte Folded Reload
; LA64-NEXT: vextrins.w $vr0, $vr1, 16
; LA64-NEXT: vst $vr0, $sp, 64 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: vreplvei.w $vr0, $vr0, 2
; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vld $vr1, $sp, 64 # 16-byte Folded Reload
; LA64-NEXT: vextrins.w $vr1, $vr0, 32
; LA64-NEXT: vst $vr1, $sp, 64 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vld $vr1, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: vextrins.w $vr0, $vr1, 16
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vld $vr1, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: vextrins.w $vr1, $vr0, 32
; LA64-NEXT: vld $vr0, $sp, 64 # 16-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 96
; LA64-NEXT: ret
  %result = call { <3 x float>, <3 x float> } @llvm.sincos.v3f32(<3 x float> %a)
  ret { <3 x float>, <3 x float> } %result
}

define { double, double } @test_sincos_f64(double %a) #0 {
; LA32-LABEL: test_sincos_f64:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -32
; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT: fmov.d $fs0, $fa0
; LA32-NEXT: bl sin
; LA32-NEXT: fmov.d $fs1, $fa0
; LA32-NEXT: fmov.d $fa0, $fs0
; LA32-NEXT: bl cos
; LA32-NEXT: fmov.d $fa1, $fa0
; LA32-NEXT: fmov.d $fa0, $fs1
; LA32-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 32
; LA32-NEXT: ret
;
; LA64-LABEL: test_sincos_f64:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -32
; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: fmov.d $fs0, $fa0
; LA64-NEXT: pcaddu18i $ra, %call36(sin)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fmov.d $fs1, $fa0
; LA64-NEXT: fmov.d $fa0, $fs0
; LA64-NEXT: pcaddu18i $ra, %call36(cos)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fmov.d $fa1, $fa0
; LA64-NEXT: fmov.d $fa0, $fs1
; LA64-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 32
; LA64-NEXT: ret
  %result = call { double, double } @llvm.sincos.f64(double %a)
  ret { double, double } %result
}

define { <2 x double>, <2 x double> } @test_sincos_v2f64(<2 x double> %a) #0 {
; LA32-LABEL: test_sincos_v2f64:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -48
; LA32-NEXT: st.w $ra, $sp, 44 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 40 # 4-byte Folded Spill
; LA32-NEXT: fst.d $fs0, $sp, 32 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs1, $sp, 24 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs2, $sp, 16 # 8-byte Folded Spill
; LA32-NEXT: fst.d $fs3, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT: fmov.d $fs0, $fa1
; LA32-NEXT: fmov.d $fs1, $fa0
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: bl sin
; LA32-NEXT: fmov.d $fs2, $fa0
; LA32-NEXT: fmov.d $fa0, $fs0
; LA32-NEXT: bl sin
; LA32-NEXT: fmov.d $fs3, $fa0
; LA32-NEXT: fmov.d $fa0, $fs1
; LA32-NEXT: bl cos
; LA32-NEXT: fmov.d $fs1, $fa0
; LA32-NEXT: fmov.d $fa0, $fs0
; LA32-NEXT: bl cos
; LA32-NEXT: fst.d $fa0, $fp, 24
; LA32-NEXT: fst.d $fs1, $fp, 16
; LA32-NEXT: fst.d $fs3, $fp, 8
; LA32-NEXT: fst.d $fs2, $fp, 0
; LA32-NEXT: fld.d $fs3, $sp, 8 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs2, $sp, 16 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs1, $sp, 24 # 8-byte Folded Reload
; LA32-NEXT: fld.d $fs0, $sp, 32 # 8-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 40 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 44 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 48
; LA32-NEXT: ret
;
; LA64-LABEL: test_sincos_v2f64:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -80
; LA64-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT: vreplvei.d $vr0, $vr0, 1
; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sin)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT: vreplvei.d $vr0, $vr0, 0
; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sin)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
; LA64-NEXT: vld $vr1, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: vextrins.d $vr0, $vr1, 16
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cos)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cos)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fmov.d $fa1, $fa0
; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: vextrins.d $vr1, $vr0, 16
; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 80
; LA64-NEXT: ret
  %result = call { <2 x double>, <2 x double> } @llvm.sincos.v2f64(<2 x double> %a)
  ret { <2 x double>, <2 x double> } %result
}

define { fp128, fp128 } @test_sincos_f128(fp128 %a) #0 {
; LA32-LABEL: test_sincos_f128:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -96
; LA32-NEXT: st.w $ra, $sp, 92 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 88 # 4-byte Folded Spill
; LA32-NEXT: st.w $s0, $sp, 84 # 4-byte Folded Spill
; LA32-NEXT: st.w $s1, $sp, 80 # 4-byte Folded Spill
; LA32-NEXT: st.w $s2, $sp, 76 # 4-byte Folded Spill
; LA32-NEXT: st.w $s3, $sp, 72 # 4-byte Folded Spill
; LA32-NEXT: ld.w $s0, $a1, 0
; LA32-NEXT: ld.w $s1, $a1, 4
; LA32-NEXT: ld.w $s2, $a1, 8
; LA32-NEXT: ld.w $s3, $a1, 12
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: st.w $s3, $sp, 52
; LA32-NEXT: st.w $s2, $sp, 48
; LA32-NEXT: st.w $s1, $sp, 44
; LA32-NEXT: addi.w $a0, $sp, 56
; LA32-NEXT: addi.w $a1, $sp, 40
; LA32-NEXT: st.w $s0, $sp, 40
; LA32-NEXT: bl sinl
; LA32-NEXT: st.w $s3, $sp, 20
; LA32-NEXT: st.w $s2, $sp, 16
; LA32-NEXT: st.w $s1, $sp, 12
; LA32-NEXT: addi.w $a0, $sp, 24
; LA32-NEXT: addi.w $a1, $sp, 8
; LA32-NEXT: st.w $s0, $sp, 8
; LA32-NEXT: bl cosl
; LA32-NEXT: ld.w $a0, $sp, 56
; LA32-NEXT: ld.w $a1, $sp, 60
; LA32-NEXT: ld.w $a2, $sp, 64
; LA32-NEXT: ld.w $a3, $sp, 68
; LA32-NEXT: ld.w $a4, $sp, 36
; LA32-NEXT: ld.w $a5, $sp, 32
; LA32-NEXT: ld.w $a6, $sp, 28
; LA32-NEXT: ld.w $a7, $sp, 24
; LA32-NEXT: st.w $a4, $fp, 28
; LA32-NEXT: st.w $a5, $fp, 24
; LA32-NEXT: st.w $a6, $fp, 20
; LA32-NEXT: st.w $a7, $fp, 16
; LA32-NEXT: st.w $a3, $fp, 12
; LA32-NEXT: st.w $a2, $fp, 8
; LA32-NEXT: st.w $a1, $fp, 4
; LA32-NEXT: st.w $a0, $fp, 0
; LA32-NEXT: ld.w $s3, $sp, 72 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s2, $sp, 76 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s1, $sp, 80 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s0, $sp, 84 # 4-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 88 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 92 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 96
; LA32-NEXT: ret
;
; LA64-LABEL: test_sincos_f128:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -48
; LA64-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: st.d $s2, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: st.d $s3, $sp, 0 # 8-byte Folded Spill
; LA64-NEXT: move $fp, $a2
; LA64-NEXT: move $s0, $a1
; LA64-NEXT: move $s1, $a0
; LA64-NEXT: move $a0, $a1
; LA64-NEXT: move $a1, $a2
; LA64-NEXT: pcaddu18i $ra, %call36(sinl)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: move $s2, $a0
; LA64-NEXT: move $s3, $a1
; LA64-NEXT: move $a0, $s0
; LA64-NEXT: move $a1, $fp
; LA64-NEXT: pcaddu18i $ra, %call36(cosl)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: st.d $a1, $s1, 24
; LA64-NEXT: st.d $a0, $s1, 16
; LA64-NEXT: st.d $s3, $s1, 8
; LA64-NEXT: st.d $s2, $s1, 0
; LA64-NEXT: ld.d $s3, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s2, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 48
; LA64-NEXT: ret
  %result = call { fp128, fp128 } @llvm.sincos.f128(fp128 %a)
  ret { fp128, fp128 } %result
}

define { <2 x fp128>, <2 x fp128> } @test_sincos_v2f128(<2 x fp128> %a) #0 {
; LA32-LABEL: test_sincos_v2f128:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -176
; LA32-NEXT: st.w $ra, $sp, 172 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 168 # 4-byte Folded Spill
; LA32-NEXT: st.w $s0, $sp, 164 # 4-byte Folded Spill
; LA32-NEXT: st.w $s1, $sp, 160 # 4-byte Folded Spill
; LA32-NEXT: st.w $s2, $sp, 156 # 4-byte Folded Spill
; LA32-NEXT: st.w $s3, $sp, 152 # 4-byte Folded Spill
; LA32-NEXT: st.w $s4, $sp, 148 # 4-byte Folded Spill
; LA32-NEXT: st.w $s5, $sp, 144 # 4-byte Folded Spill
; LA32-NEXT: st.w $s6, $sp, 140 # 4-byte Folded Spill
; LA32-NEXT: st.w $s7, $sp, 136 # 4-byte Folded Spill
; LA32-NEXT: ld.w $s0, $a1, 16
; LA32-NEXT: ld.w $s1, $a1, 20
; LA32-NEXT: ld.w $s2, $a1, 24
; LA32-NEXT: ld.w $s3, $a1, 28
; LA32-NEXT: ld.w $s4, $a1, 0
; LA32-NEXT: ld.w $s5, $a1, 4
; LA32-NEXT: ld.w $s6, $a1, 8
; LA32-NEXT: ld.w $s7, $a1, 12
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: st.w $s7, $sp, 52
; LA32-NEXT: st.w $s6, $sp, 48
; LA32-NEXT: st.w $s5, $sp, 44
; LA32-NEXT: addi.w $a0, $sp, 56
; LA32-NEXT: addi.w $a1, $sp, 40
; LA32-NEXT: st.w $s4, $sp, 40
; LA32-NEXT: bl sinl
; LA32-NEXT: st.w $s3, $sp, 116
; LA32-NEXT: st.w $s2, $sp, 112
; LA32-NEXT: st.w $s1, $sp, 108
; LA32-NEXT: addi.w $a0, $sp, 120
; LA32-NEXT: addi.w $a1, $sp, 104
; LA32-NEXT: st.w $s0, $sp, 104
; LA32-NEXT: bl sinl
; LA32-NEXT: st.w $s7, $sp, 20
; LA32-NEXT: st.w $s6, $sp, 16
; LA32-NEXT: st.w $s5, $sp, 12
; LA32-NEXT: addi.w $a0, $sp, 24
; LA32-NEXT: addi.w $a1, $sp, 8
; LA32-NEXT: st.w $s4, $sp, 8
; LA32-NEXT: bl cosl
; LA32-NEXT: st.w $s3, $sp, 84
; LA32-NEXT: st.w $s2, $sp, 80
; LA32-NEXT: st.w $s1, $sp, 76
; LA32-NEXT: addi.w $a0, $sp, 88
; LA32-NEXT: addi.w $a1, $sp, 72
; LA32-NEXT: st.w $s0, $sp, 72
; LA32-NEXT: bl cosl
; LA32-NEXT: ld.w $a0, $sp, 56
; LA32-NEXT: ld.w $a1, $sp, 60
; LA32-NEXT: ld.w $a2, $sp, 64
; LA32-NEXT: ld.w $a3, $sp, 68
; LA32-NEXT: ld.w $a4, $sp, 120
; LA32-NEXT: ld.w $a5, $sp, 124
; LA32-NEXT: ld.w $a6, $sp, 128
; LA32-NEXT: ld.w $a7, $sp, 132
; LA32-NEXT: ld.w $t0, $sp, 24
; LA32-NEXT: ld.w $t1, $sp, 28
; LA32-NEXT: ld.w $t2, $sp, 32
; LA32-NEXT: ld.w $t3, $sp, 36
; LA32-NEXT: ld.w $t4, $sp, 100
; LA32-NEXT: ld.w $t5, $sp, 96
; LA32-NEXT: ld.w $t6, $sp, 92
; LA32-NEXT: ld.w $t7, $sp, 88
; LA32-NEXT: st.w $t4, $fp, 60
; LA32-NEXT: st.w $t5, $fp, 56
; LA32-NEXT: st.w $t6, $fp, 52
; LA32-NEXT: st.w $t7, $fp, 48
; LA32-NEXT: st.w $t3, $fp, 44
; LA32-NEXT: st.w $t2, $fp, 40
; LA32-NEXT: st.w $t1, $fp, 36
; LA32-NEXT: st.w $t0, $fp, 32
; LA32-NEXT: st.w $a7, $fp, 28
; LA32-NEXT: st.w $a6, $fp, 24
; LA32-NEXT: st.w $a5, $fp, 20
; LA32-NEXT: st.w $a4, $fp, 16
; LA32-NEXT: st.w $a3, $fp, 12
; LA32-NEXT: st.w $a2, $fp, 8
; LA32-NEXT: st.w $a1, $fp, 4
; LA32-NEXT: st.w $a0, $fp, 0
; LA32-NEXT: ld.w $s7, $sp, 136 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s6, $sp, 140 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s5, $sp, 144 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s4, $sp, 148 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s3, $sp, 152 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s2, $sp, 156 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s1, $sp, 160 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s0, $sp, 164 # 4-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 168 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 172 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 176
; LA32-NEXT: ret
;
; LA64-LABEL: test_sincos_v2f128:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -96
; LA64-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; LA64-NEXT: st.d $s0, $sp, 72 # 8-byte Folded Spill
; LA64-NEXT: st.d $s1, $sp, 64 # 8-byte Folded Spill
; LA64-NEXT: st.d $s2, $sp, 56 # 8-byte Folded Spill
; LA64-NEXT: st.d $s3, $sp, 48 # 8-byte Folded Spill
; LA64-NEXT: st.d $s4, $sp, 40 # 8-byte Folded Spill
; LA64-NEXT: st.d $s5, $sp, 32 # 8-byte Folded Spill
; LA64-NEXT: st.d $s6, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: st.d $s7, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: ld.d $fp, $a1, 16
; LA64-NEXT: ld.d $s0, $a1, 24
; LA64-NEXT: ld.d $s1, $a1, 0
; LA64-NEXT: ld.d $s2, $a1, 8
; LA64-NEXT: move $s3, $a0
; LA64-NEXT: move $a0, $s1
; LA64-NEXT: move $a1, $s2
; LA64-NEXT: pcaddu18i $ra, %call36(sinl)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: move $s4, $a0
; LA64-NEXT: move $s5, $a1
; LA64-NEXT: move $a0, $fp
; LA64-NEXT: move $a1, $s0
; LA64-NEXT: pcaddu18i $ra, %call36(sinl)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: move $s6, $a0
; LA64-NEXT: move $s7, $a1
; LA64-NEXT: move $a0, $s1
; LA64-NEXT: move $a1, $s2
; LA64-NEXT: pcaddu18i $ra, %call36(cosl)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: move $s1, $a0
; LA64-NEXT: move $s2, $a1
; LA64-NEXT: move $a0, $fp
; LA64-NEXT: move $a1, $s0
; LA64-NEXT: pcaddu18i $ra, %call36(cosl)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: st.d $a1, $s3, 56
; LA64-NEXT: st.d $a0, $s3, 48
; LA64-NEXT: st.d $s2, $s3, 40
; LA64-NEXT: st.d $s1, $s3, 32
; LA64-NEXT: st.d $s7, $s3, 24
; LA64-NEXT: st.d $s6, $s3, 16
; LA64-NEXT: st.d $s5, $s3, 8
; LA64-NEXT: st.d $s4, $s3, 0
; LA64-NEXT: ld.d $s7, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s6, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s5, $sp, 32 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s4, $sp, 40 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s3, $sp, 48 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s2, $sp, 56 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s1, $sp, 64 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s0, $sp, 72 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 96
; LA64-NEXT: ret
  %result = call { <2 x fp128>, <2 x fp128> } @llvm.sincos.v2f128(<2 x fp128> %a)
  ret { <2 x fp128>, <2 x fp128> } %result
}

attributes #0 = { nounwind }