Diffstat (limited to 'llvm/test/CodeGen/LoongArch/lasx/fp-rounding.ll')
-rw-r--r-- | llvm/test/CodeGen/LoongArch/lasx/fp-rounding.ll | 308
1 file changed, 308 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/fp-rounding.ll b/llvm/test/CodeGen/LoongArch/lasx/fp-rounding.ll
new file mode 100644
index 0000000..79407c3
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/fp-rounding.ll
@@ -0,0 +1,308 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+;; ceilf
+define void @ceil_v8f32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ceil_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 5
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr2, $xr0, 4
+; CHECK-NEXT:    vreplvei.w $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrp.s $vr2, $vr2
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 6
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 7
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 48
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr3, $xr0, 0
+; CHECK-NEXT:    vreplvei.w $vr3, $vr3, 0
+; CHECK-NEXT:    vfrintrp.s $vr3, $vr3
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 2
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrp.s $vr0, $vr0
+; CHECK-NEXT:    vextrins.w $vr3, $vr0, 48
+; CHECK-NEXT:    xvpermi.q $xr3, $xr2, 2
+; CHECK-NEXT:    xvst $xr3, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x float>, ptr %a0
+  %r = call <8 x float> @llvm.ceil.v8f32(<8 x float> %v0)
+  store <8 x float> %r, ptr %res
+  ret void
+}
+
+;; ceil
+define void @ceil_v4f64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ceil_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 3
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr2, $xr0, 2
+; CHECK-NEXT:    vreplvei.d $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrp.d $vr2, $vr2
+; CHECK-NEXT:    vextrins.d $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrp.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrp.d $vr0, $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT:    xvpermi.q $xr0, $xr2, 2
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x double>, ptr %a0
+  %r = call <4 x double> @llvm.ceil.v4f64(<4 x double> %v0)
+  store <4 x double> %r, ptr %res
+  ret void
+}
+
+;; floorf
+define void @floor_v8f32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: floor_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 5
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr2, $xr0, 4
+; CHECK-NEXT:    vreplvei.w $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrm.s $vr2, $vr2
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 6
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 7
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 48
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr3, $xr0, 0
+; CHECK-NEXT:    vreplvei.w $vr3, $vr3, 0
+; CHECK-NEXT:    vfrintrm.s $vr3, $vr3
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 2
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrm.s $vr0, $vr0
+; CHECK-NEXT:    vextrins.w $vr3, $vr0, 48
+; CHECK-NEXT:    xvpermi.q $xr3, $xr2, 2
+; CHECK-NEXT:    xvst $xr3, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x float>, ptr %a0
+  %r = call <8 x float> @llvm.floor.v8f32(<8 x float> %v0)
+  store <8 x float> %r, ptr %res
+  ret void
+}
+
+;; floor
+define void @floor_v4f64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: floor_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 3
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr2, $xr0, 2
+; CHECK-NEXT:    vreplvei.d $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrm.d $vr2, $vr2
+; CHECK-NEXT:    vextrins.d $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrm.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrm.d $vr0, $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT:    xvpermi.q $xr0, $xr2, 2
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x double>, ptr %a0
+  %r = call <4 x double> @llvm.floor.v4f64(<4 x double> %v0)
+  store <4 x double> %r, ptr %res
+  ret void
+}
+
+;; truncf
+define void @trunc_v8f32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: trunc_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 5
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr2, $xr0, 4
+; CHECK-NEXT:    vreplvei.w $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrz.s $vr2, $vr2
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 6
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 7
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 48
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr3, $xr0, 0
+; CHECK-NEXT:    vreplvei.w $vr3, $vr3, 0
+; CHECK-NEXT:    vfrintrz.s $vr3, $vr3
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 2
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrz.s $vr0, $vr0
+; CHECK-NEXT:    vextrins.w $vr3, $vr0, 48
+; CHECK-NEXT:    xvpermi.q $xr3, $xr2, 2
+; CHECK-NEXT:    xvst $xr3, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x float>, ptr %a0
+  %r = call <8 x float> @llvm.trunc.v8f32(<8 x float> %v0)
+  store <8 x float> %r, ptr %res
+  ret void
+}
+
+;; trunc
+define void @trunc_v4f64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: trunc_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 3
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr2, $xr0, 2
+; CHECK-NEXT:    vreplvei.d $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrz.d $vr2, $vr2
+; CHECK-NEXT:    vextrins.d $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrz.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrz.d $vr0, $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT:    xvpermi.q $xr0, $xr2, 2
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x double>, ptr %a0
+  %r = call <4 x double> @llvm.trunc.v4f64(<4 x double> %v0)
+  store <4 x double> %r, ptr %res
+  ret void
+}
+
+;; roundevenf
+define void @roundeven_v8f32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: roundeven_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 5
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr2, $xr0, 4
+; CHECK-NEXT:    vreplvei.w $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrne.s $vr2, $vr2
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 6
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 7
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr2, $vr1, 48
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.s $vr1, $vr1
+; CHECK-NEXT:    xvpickve.w $xr3, $xr0, 0
+; CHECK-NEXT:    vreplvei.w $vr3, $vr3, 0
+; CHECK-NEXT:    vfrintrne.s $vr3, $vr3
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 16
+; CHECK-NEXT:    xvpickve.w $xr1, $xr0, 2
+; CHECK-NEXT:    vreplvei.w $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.s $vr1, $vr1
+; CHECK-NEXT:    vextrins.w $vr3, $vr1, 32
+; CHECK-NEXT:    xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrne.s $vr0, $vr0
+; CHECK-NEXT:    vextrins.w $vr3, $vr0, 48
+; CHECK-NEXT:    xvpermi.q $xr3, $xr2, 2
+; CHECK-NEXT:    xvst $xr3, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x float>, ptr %a0
+  %r = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %v0)
+  store <8 x float> %r, ptr %res
+  ret void
+}
+
+;; roundeven
+define void @roundeven_v4f64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: roundeven_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 3
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr2, $xr0, 2
+; CHECK-NEXT:    vreplvei.d $vr2, $vr2, 0
+; CHECK-NEXT:    vfrintrne.d $vr2, $vr2
+; CHECK-NEXT:    vextrins.d $vr2, $vr1, 16
+; CHECK-NEXT:    xvpickve.d $xr1, $xr0, 1
+; CHECK-NEXT:    vreplvei.d $vr1, $vr1, 0
+; CHECK-NEXT:    vfrintrne.d $vr1, $vr1
+; CHECK-NEXT:    xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vfrintrne.d $vr0, $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT:    xvpermi.q $xr0, $xr2, 2
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x double>, ptr %a0
+  %r = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %v0)
+  store <4 x double> %r, ptr %res
+  ret void
+}
+
+declare <8 x float> @llvm.ceil.v8f32(<8 x float>)
+declare <4 x double> @llvm.ceil.v4f64(<4 x double>)
+declare <8 x float> @llvm.floor.v8f32(<8 x float>)
+declare <4 x double> @llvm.floor.v4f64(<4 x double>)
+declare <8 x float> @llvm.trunc.v8f32(<8 x float>)
+declare <4 x double> @llvm.trunc.v4f64(<4 x double>)
+declare <8 x float> @llvm.roundeven.v8f32(<8 x float>)
+declare <4 x double> @llvm.roundeven.v4f64(<4 x double>)
