Diffstat (limited to 'llvm/test/CodeGen/LoongArch/fp16-promote.ll')
-rw-r--r--  llvm/test/CodeGen/LoongArch/fp16-promote.ll  326
1 file changed, 326 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/LoongArch/fp16-promote.ll b/llvm/test/CodeGen/LoongArch/fp16-promote.ll
new file mode 100644
index 0000000..75f920b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/fp16-promote.ll
@@ -0,0 +1,326 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
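+; LoongArch has no native fp16 instructions, so half is promoted: conversions
+; go through the __gnu_h2f_ieee/__gnu_f2h_ieee libcalls and the arithmetic
+; itself is performed on float.
+
+; A plain load/store of half needs no conversion at all; it lowers to 16-bit
+; integer ld.h/st.h.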
+define void @test_load_store(ptr %p, ptr %q) nounwind {
+; LA32-LABEL: test_load_store:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.h $a0, $a0, 0
+; LA32-NEXT: st.h $a0, $a1, 0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: test_load_store:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.h $a0, $a0, 0
+; LA64-NEXT: st.h $a0, $a1, 0
+; LA64-NEXT: ret
+ %a = load half, ptr %p
+ store half %a, ptr %q
+ ret void
+}
+
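+; fpext half -> float is a tail call to __gnu_h2f_ieee on the zero-extended
+; 16-bit value.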
+define float @test_fpextend_float(ptr %p) nounwind {
+; LA32-LABEL: test_fpextend_float:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.hu $a0, $a0, 0
+; LA32-NEXT: b %plt(__gnu_h2f_ieee)
+;
+; LA64-LABEL: test_fpextend_float:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.hu $a0, $a0, 0
+; LA64-NEXT: b %plt(__gnu_h2f_ieee)
+ %a = load half, ptr %p
+ %r = fpext half %a to float
+ ret float %r
+}
+
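+; fpext half -> double widens through float: __gnu_h2f_ieee followed by
+; fcvt.d.s.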
+define double @test_fpextend_double(ptr %p) nounwind {
+; LA32-LABEL: test_fpextend_double:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: ld.hu $a0, $a0, 0
+; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: fcvt.d.s $fa0, $fa0
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: test_fpextend_double:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -16
+; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: ld.hu $a0, $a0, 0
+; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: fcvt.d.s $fa0, $fa0
+; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 16
+; LA64-NEXT: ret
+ %a = load half, ptr %p
+ %r = fpext half %a to double
+ ret double %r
+}
+
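+; fptrunc float -> half calls __gnu_f2h_ieee and stores the result as a
+; 16-bit integer.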
+define void @test_fptrunc_float(float %f, ptr %p) nounwind {
+; LA32-LABEL: test_fptrunc_float:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT: move $fp, $a0
+; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT: st.h $a0, $fp, 0
+; LA32-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: test_fptrunc_float:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -16
+; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT: move $fp, $a0
+; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT: st.h $a0, $fp, 0
+; LA64-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 16
+; LA64-NEXT: ret
+ %a = fptrunc float %f to half
+ store half %a, ptr %p
+ ret void
+}
+
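+; fptrunc double -> half uses the dedicated __truncdfhf2 libcall rather than
+; truncating through float.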
+define void @test_fptrunc_double(double %d, ptr %p) nounwind {
+; LA32-LABEL: test_fptrunc_double:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT: move $fp, $a0
+; LA32-NEXT: bl %plt(__truncdfhf2)
+; LA32-NEXT: st.h $a0, $fp, 0
+; LA32-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: test_fptrunc_double:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -16
+; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT: move $fp, $a0
+; LA64-NEXT: bl %plt(__truncdfhf2)
+; LA64-NEXT: st.h $a0, $fp, 0
+; LA64-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 16
+; LA64-NEXT: ret
+ %a = fptrunc double %d to half
+ store half %a, ptr %p
+ ret void
+}
+
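+; For half arguments arriving in FP registers, each operand is first
+; round-tripped through __gnu_f2h_ieee/__gnu_h2f_ieee to narrow it to half
+; precision, and the addition itself is a single-precision fadd.s.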
+define half @test_fadd_reg(half %a, half %b) nounwind {
+; LA32-LABEL: test_fadd_reg:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -32
+; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT: fmov.s $fs0, $fa0
+; LA32-NEXT: fmov.s $fa0, $fa1
+; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: fmov.s $fs1, $fa0
+; LA32-NEXT: fmov.s $fa0, $fs0
+; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: fadd.s $fa0, $fa0, $fs1
+; LA32-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 32
+; LA32-NEXT: ret
+;
+; LA64-LABEL: test_fadd_reg:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -32
+; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: fmov.s $fs0, $fa0
+; LA64-NEXT: fmov.s $fa0, $fa1
+; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: fmov.s $fs1, $fa0
+; LA64-NEXT: fmov.s $fa0, $fs0
+; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: fadd.s $fa0, $fa0, $fs1
+; LA64-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 32
+; LA64-NEXT: ret
+ %r = fadd half %a, %b
+ ret half %r
+}
+
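+; With memory operands only one __gnu_h2f_ieee call per operand is needed
+; before the fadd.s, plus a final __gnu_f2h_ieee to store the result.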
+define void @test_fadd_mem(ptr %p, ptr %q) nounwind {
+; LA32-LABEL: test_fadd_mem:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -32
+; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT: st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT: fst.d $fs0, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT: move $fp, $a1
+; LA32-NEXT: move $s0, $a0
+; LA32-NEXT: ld.hu $a0, $a0, 0
+; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: fmov.s $fs0, $fa0
+; LA32-NEXT: ld.hu $a0, $fp, 0
+; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: fadd.s $fa0, $fs0, $fa0
+; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT: st.h $a0, $s0, 0
+; LA32-NEXT: fld.d $fs0, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT: ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 32
+; LA32-NEXT: ret
+;
+; LA64-LABEL: test_fadd_mem:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -32
+; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: fst.d $fs0, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT: move $fp, $a1
+; LA64-NEXT: move $s0, $a0
+; LA64-NEXT: ld.hu $a0, $a0, 0
+; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: fmov.s $fs0, $fa0
+; LA64-NEXT: ld.hu $a0, $fp, 0
+; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: fadd.s $fa0, $fs0, $fa0
+; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT: st.h $a0, $s0, 0
+; LA64-NEXT: fld.d $fs0, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 32
+; LA64-NEXT: ret
+ %a = load half, ptr %p
+ %b = load half, ptr %q
+ %r = fadd half %a, %b
+ store half %r, ptr %p
+ ret void
+}
+
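+; Same promotion pattern as test_fadd_reg, with fmul.s as the promoted
+; operation.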
+define half @test_fmul_reg(half %a, half %b) nounwind {
+; LA32-LABEL: test_fmul_reg:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -32
+; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT: fmov.s $fs0, $fa0
+; LA32-NEXT: fmov.s $fa0, $fa1
+; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: fmov.s $fs1, $fa0
+; LA32-NEXT: fmov.s $fa0, $fs0
+; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: fmul.s $fa0, $fa0, $fs1
+; LA32-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 32
+; LA32-NEXT: ret
+;
+; LA64-LABEL: test_fmul_reg:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -32
+; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: fmov.s $fs0, $fa0
+; LA64-NEXT: fmov.s $fa0, $fa1
+; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: fmov.s $fs1, $fa0
+; LA64-NEXT: fmov.s $fa0, $fs0
+; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: fmul.s $fa0, $fa0, $fs1
+; LA64-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 32
+; LA64-NEXT: ret
+ %r = fmul half %a, %b
+ ret half %r
+}
+
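+; Same promotion pattern as test_fadd_mem, with fmul.s as the promoted
+; operation.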
+define void @test_fmul_mem(ptr %p, ptr %q) nounwind {
+; LA32-LABEL: test_fmul_mem:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -32
+; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT: st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT: fst.d $fs0, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT: move $fp, $a1
+; LA32-NEXT: move $s0, $a0
+; LA32-NEXT: ld.hu $a0, $a0, 0
+; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: fmov.s $fs0, $fa0
+; LA32-NEXT: ld.hu $a0, $fp, 0
+; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: fmul.s $fa0, $fs0, $fa0
+; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT: st.h $a0, $s0, 0
+; LA32-NEXT: fld.d $fs0, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT: ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 32
+; LA32-NEXT: ret
+;
+; LA64-LABEL: test_fmul_mem:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -32
+; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: fst.d $fs0, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT: move $fp, $a1
+; LA64-NEXT: move $s0, $a0
+; LA64-NEXT: ld.hu $a0, $a0, 0
+; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: fmov.s $fs0, $fa0
+; LA64-NEXT: ld.hu $a0, $fp, 0
+; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: fmul.s $fa0, $fs0, $fa0
+; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT: st.h $a0, $s0, 0
+; LA64-NEXT: fld.d $fs0, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 32
+; LA64-NEXT: ret
+ %a = load half, ptr %p
+ %b = load half, ptr %q
+ %r = fmul half %a, %b
+ store half %r, ptr %p
+ ret void
+}