; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d -target-abi=ilp32d -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d -target-abi=lp64d -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV64

; Check that llvm.powi.* on fixed-length vectors is scalarized into per-element
; libcalls (__powisf2 for f32, __powidf2 for f64). On RV64 the i32 exponent is
; sign-extended to satisfy the libcall ABI.

define <1 x float> @powi_v1f32(<1 x float> %x, i32 %y) nounwind {
; RV32-LABEL: powi_v1f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call __powisf2
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vfmv.s.f v8, fa0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: powi_v1f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: call __powisf2
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vfmv.s.f v8, fa0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
  %a = call <1 x float> @llvm.powi.v1f32.i32(<1 x float> %x, i32 %y)
  ret <1 x float> %a
}

declare <1 x float> @llvm.powi.v1f32.i32(<1 x float>, i32)

define <2 x float> @powi_v2f32(<2 x float> %x, i32 %y) nounwind {
; RV32-LABEL: powi_v2f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: mv s0, a0
; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vs1r.v v8, (a1) # vscale x 8-byte Folded Spill
; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v9
; RV32-NEXT: call __powisf2
; RV32-NEXT: fmv.s fs0, fa0
; RV32-NEXT: flw fa0, 16(sp) # 8-byte Folded Reload
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powisf2
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vfmv.v.f v8, fa0
; RV32-NEXT: vfslide1down.vf v8, v8, fs0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
;
; RV64-LABEL: powi_v2f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: sub sp, sp, a1
; RV64-NEXT: addi a1, sp, 32
; RV64-NEXT: vs1r.v v8, (a1) # vscale x 8-byte Folded Spill
; RV64-NEXT: sext.w s0, a0
; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV64-NEXT: vslidedown.vi v9, v8, 1
; RV64-NEXT: vfmv.f.s fa0, v9
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: fmv.s fs0, fa0
; RV64-NEXT: flw fa0, 32(sp) # 8-byte Folded Reload
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vfmv.v.f v8, fa0
; RV64-NEXT: vfslide1down.vf v8, v8, fs0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
  %a = call <2 x float> @llvm.powi.v2f32.i32(<2 x float> %x, i32 %y)
  ret <2 x float> %a
}

declare <2 x float> @llvm.powi.v2f32.i32(<2 x float>, i32)

define <3 x float> @powi_v3f32(<3 x float> %x, i32 %y) nounwind {
; RV32-LABEL: powi_v3f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 1
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: mv s0, a0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs1r.v v8, (a1) # vscale x 8-byte Folded Spill
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v9
; RV32-NEXT: call __powisf2
; RV32-NEXT: fmv.s fs0, fa0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: flw fa0, 16(a0) # 8-byte Folded Reload
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powisf2
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vfmv.v.f v8, fa0
; RV32-NEXT: vfslide1down.vf v8, v8, fs0
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powisf2
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vfslide1down.vf v8, v8, fa0
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
;
; RV64-LABEL: powi_v3f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: sub sp, sp, a1
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 32
; RV64-NEXT: vs1r.v v8, (a1) # vscale x 8-byte Folded Spill
; RV64-NEXT: sext.w s0, a0
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v9, v8, 1
; RV64-NEXT: vfmv.f.s fa0, v9
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: fmv.s fs0, fa0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: flw fa0, 32(a0) # 8-byte Folded Reload
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vfmv.v.f v8, fa0
; RV64-NEXT: vfslide1down.vf v8, v8, fs0
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 32
; RV64-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; RV64-NEXT: vslidedown.vi v8, v8, 2
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vfslide1down.vf v8, v8, fa0
; RV64-NEXT: vslidedown.vi v8, v8, 1
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
  %a = call <3 x float> @llvm.powi.v3f32.i32(<3 x float> %x, i32 %y)
  ret <3 x float> %a
}

declare <3 x float> @llvm.powi.v3f32.i32(<3 x float>, i32)

define <4 x float> @powi_v4f32(<4 x float> %x, i32 %y) nounwind {
; RV32-LABEL: powi_v4f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 1
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: mv s0, a0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs1r.v v8, (a1) # vscale x 8-byte Folded Spill
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v9
; RV32-NEXT: call __powisf2
; RV32-NEXT: fmv.s fs0, fa0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: flw fa0, 16(a0) # 8-byte Folded Reload
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powisf2
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vfmv.v.f v8, fa0
; RV32-NEXT: vfslide1down.vf v8, v8, fs0
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powisf2
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vfslide1down.vf v8, v8, fa0
; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powisf2
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vfslide1down.vf v8, v8, fa0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
;
; RV64-LABEL: powi_v4f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: sub sp, sp, a1
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 32
; RV64-NEXT: vs1r.v v8, (a1) # vscale x 8-byte Folded Spill
; RV64-NEXT: sext.w s0, a0
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v9, v8, 1
; RV64-NEXT: vfmv.f.s fa0, v9
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: fmv.s fs0, fa0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: flw fa0, 32(a0) # 8-byte Folded Reload
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vfmv.v.f v8, fa0
; RV64-NEXT: vfslide1down.vf v8, v8, fs0
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 32
; RV64-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; RV64-NEXT: vslidedown.vi v8, v8, 2
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vfslide1down.vf v8, v8, fa0
; RV64-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 32
; RV64-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vfslide1down.vf v8, v8, fa0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
  %a = call <4 x float> @llvm.powi.v4f32.i32(<4 x float> %x, i32 %y)
  ret <4 x float> %a
}

declare <4 x float> @llvm.powi.v4f32.i32(<4 x float>, i32)

define <8 x float> @powi_v8f32(<8 x float> %x, i32 %y) nounwind {
; RV32-LABEL: powi_v8f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: mv s0, a0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs2r.v v8, (a1) # vscale x 16-byte Folded Spill
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v10
; RV32-NEXT: call __powisf2
; RV32-NEXT: fmv.s fs0, fa0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powisf2
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vfmv.v.f v8, fa0
; RV32-NEXT: vfslide1down.vf v8, v8, fs0
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powisf2
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vfslide1down.vf v8, v8, fa0
; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powisf2
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vfslide1down.vf v8, v8, fa0
; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 4
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powisf2
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vfslide1down.vf v8, v8, fa0
; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 5
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powisf2
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vfslide1down.vf v8, v8, fa0
; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 6
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powisf2
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vfslide1down.vf v8, v8, fa0
; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 7
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powisf2
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vfslide1down.vf v8, v8, fa0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 2
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
;
; RV64-LABEL: powi_v8f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 2
; RV64-NEXT: sub sp, sp, a1
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 32
; RV64-NEXT: vs2r.v v8, (a1) # vscale x 16-byte Folded Spill
; RV64-NEXT: sext.w s0, a0
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 1
; RV64-NEXT: vfmv.f.s fa0, v10
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: fmv.s fs0, fa0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-NEXT: vfmv.v.f v8, fa0
; RV64-NEXT: vfslide1down.vf v8, v8, fs0
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 2
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-NEXT: vfslide1down.vf v8, v8, fa0
; RV64-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-NEXT: vfslide1down.vf v8, v8, fa0
; RV64-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vslidedown.vi v8, v8, 4
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-NEXT: vfslide1down.vf v8, v8, fa0
; RV64-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vslidedown.vi v8, v8, 5
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-NEXT: vfslide1down.vf v8, v8, fa0
; RV64-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vslidedown.vi v8, v8, 6
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-NEXT: vfslide1down.vf v8, v8, fa0
; RV64-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vslidedown.vi v8, v8, 7
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powisf2
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-NEXT: vfslide1down.vf v8, v8, fa0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 2
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
  %a = call <8 x float> @llvm.powi.v8f32.i32(<8 x float> %x, i32 %y)
  ret <8 x float> %a
}

declare <8 x float> @llvm.powi.v8f32.i32(<8 x float>, i32)

define <16 x float> @powi_v16f32(<16 x float> %x, i32 %y) nounwind {
; RV32-LABEL: powi_v16f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -272
; RV32-NEXT: sw ra, 268(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 264(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s2, 260(sp) # 4-byte Folded Spill
; RV32-NEXT: addi s0, sp, 272
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: andi sp, sp, -64
; RV32-NEXT: mv s2, a0
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
; RV32-NEXT: addi a0, sp, 64
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vse32.v v8, (a0)
; RV32-NEXT: flw fa0, 124(sp)
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 188(sp)
; RV32-NEXT: flw fa0, 120(sp)
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 184(sp)
; RV32-NEXT: flw fa0, 116(sp)
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 180(sp)
; RV32-NEXT: flw fa0, 112(sp)
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 176(sp)
; RV32-NEXT: flw fa0, 108(sp)
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 172(sp)
; RV32-NEXT: flw fa0, 104(sp)
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 168(sp)
; RV32-NEXT: flw fa0, 100(sp)
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 164(sp)
; RV32-NEXT: flw fa0, 96(sp)
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 160(sp)
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 128(sp)
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 140(sp)
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 136(sp)
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 132(sp)
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 7
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 156(sp)
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 6
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 152(sp)
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 5
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 148(sp)
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 4
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powisf2
; RV32-NEXT: fsw fa0, 144(sp)
; RV32-NEXT: addi a0, sp, 128
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: addi sp, s0, -272
; RV32-NEXT: lw ra, 268(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 264(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s2, 260(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 272
; RV32-NEXT: ret
;
; RV64-LABEL: powi_v16f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -272
; RV64-NEXT: sd ra, 264(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 256(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s2, 248(sp) # 8-byte Folded Spill
; RV64-NEXT: addi s0, sp, 272
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 2
; RV64-NEXT: sub sp, sp, a1
; RV64-NEXT: andi sp, sp, -64
; RV64-NEXT: addi a1, sp, 240
; RV64-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
; RV64-NEXT: addi a1, sp, 64
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vse32.v v8, (a1)
; RV64-NEXT: flw fa0, 124(sp)
; RV64-NEXT: sext.w s2, a0
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 188(sp)
; RV64-NEXT: flw fa0, 120(sp)
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 184(sp)
; RV64-NEXT: flw fa0, 116(sp)
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 180(sp)
; RV64-NEXT: flw fa0, 112(sp)
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 176(sp)
; RV64-NEXT: flw fa0, 108(sp)
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 172(sp)
; RV64-NEXT: flw fa0, 104(sp)
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 168(sp)
; RV64-NEXT: flw fa0, 100(sp)
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 164(sp)
; RV64-NEXT: flw fa0, 96(sp)
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 160(sp)
; RV64-NEXT: addi a0, sp, 240
; RV64-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 128(sp)
; RV64-NEXT: addi a0, sp, 240
; RV64-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 140(sp)
; RV64-NEXT: addi a0, sp, 240
; RV64-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 2
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 136(sp)
; RV64-NEXT: addi a0, sp, 240
; RV64-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 1
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 132(sp)
; RV64-NEXT: addi a0, sp, 240
; RV64-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 7
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 156(sp)
; RV64-NEXT: addi a0, sp, 240
; RV64-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 6
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 152(sp)
; RV64-NEXT: addi a0, sp, 240
; RV64-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 5
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 148(sp)
; RV64-NEXT: addi a0, sp, 240
; RV64-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 4
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powisf2
; RV64-NEXT: fsw fa0, 144(sp)
; RV64-NEXT: addi a0, sp, 128
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: addi sp, s0, -272
; RV64-NEXT: ld ra, 264(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 256(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s2, 248(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 272
; RV64-NEXT: ret
  %a = call <16 x float> @llvm.powi.v16f32.i32(<16 x float> %x, i32 %y)
  ret <16 x float> %a
}

declare <16 x float> @llvm.powi.v16f32.i32(<16 x float>, i32)

define <1 x double> @powi_v1f64(<1 x double> %x, i32 %y) nounwind {
; RV32-LABEL: powi_v1f64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call __powidf2
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vfmv.s.f v8, fa0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: powi_v1f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: call __powidf2
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vfmv.s.f v8, fa0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
  %a = call <1 x double> @llvm.powi.v1f64.i32(<1 x double> %x, i32 %y)
  ret <1 x double> %a
}

declare <1 x double> @llvm.powi.v1f64.i32(<1 x double>, i32)

define <2 x double> @powi_v2f64(<2 x double> %x, i32 %y) nounwind {
; RV32-LABEL: powi_v2f64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: mv s0, a0
; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vs1r.v v8, (a1) # vscale x 8-byte Folded Spill
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v9
; RV32-NEXT: call __powidf2
; RV32-NEXT: fmv.d fs0, fa0
; RV32-NEXT: fld fa0, 16(sp) # 8-byte Folded Reload
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powidf2
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vfmv.v.f v8, fa0
; RV32-NEXT: vfslide1down.vf v8, v8, fs0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
;
; RV64-LABEL: powi_v2f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: sub sp, sp, a1
; RV64-NEXT: addi a1, sp, 32
; RV64-NEXT: vs1r.v v8, (a1) # vscale x 8-byte Folded Spill
; RV64-NEXT: sext.w s0, a0
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vslidedown.vi v9, v8, 1
; RV64-NEXT: vfmv.f.s fa0, v9
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powidf2
; RV64-NEXT: fmv.d fs0, fa0
; RV64-NEXT: fld fa0, 32(sp) # 8-byte Folded Reload
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powidf2
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vfmv.v.f v8, fa0
; RV64-NEXT: vfslide1down.vf v8, v8, fs0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
  %a = call <2 x double> @llvm.powi.v2f64.i32(<2 x double> %x, i32 %y)
  ret <2 x double> %a
}

declare <2 x double> @llvm.powi.v2f64.i32(<2 x double>, i32)

define <4 x double> @powi_v4f64(<4 x double> %x, i32 %y) nounwind {
; RV32-LABEL: powi_v4f64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: mv s0, a0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs2r.v v8, (a1) # vscale x 16-byte Folded Spill
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v10
; RV32-NEXT: call __powidf2
; RV32-NEXT: fmv.d fs0, fa0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powidf2
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vfmv.v.f v8, fa0
; RV32-NEXT: vfslide1down.vf v8, v8, fs0
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powidf2
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vfslide1down.vf v8, v8, fa0
; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s0
; RV32-NEXT: call __powidf2
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vfslide1down.vf v8, v8, fa0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 2
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
;
; RV64-LABEL: powi_v4f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 2
; RV64-NEXT: sub sp, sp, a1
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 32
; RV64-NEXT: vs2r.v v8, (a1) # vscale x 16-byte Folded Spill
; RV64-NEXT: sext.w s0, a0
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 1
; RV64-NEXT: vfmv.f.s fa0, v10
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powidf2
; RV64-NEXT: fmv.d fs0, fa0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powidf2
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vfmv.v.f v8, fa0
; RV64-NEXT: vfslide1down.vf v8, v8, fs0
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vslidedown.vi v8, v8, 2
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powidf2
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vfslide1down.vf v8, v8, fa0
; RV64-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s0
; RV64-NEXT: call __powidf2
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vfslide1down.vf v8, v8, fa0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 2
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
  %a = call <4 x double> @llvm.powi.v4f64.i32(<4 x double> %x, i32 %y)
  ret <4 x double> %a
}

declare <4 x double> @llvm.powi.v4f64.i32(<4 x double>, i32)

define <8 x double> @powi_v8f64(<8 x double> %x, i32 %y) nounwind {
; RV32-LABEL: powi_v8f64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -272
; RV32-NEXT: sw ra, 268(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 264(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s2, 260(sp) # 4-byte Folded Spill
; RV32-NEXT: addi s0, sp, 272
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: andi sp, sp, -64
; RV32-NEXT: mv s2, a0
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
; RV32-NEXT: addi a0, sp, 64
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vse64.v v8, (a0)
; RV32-NEXT: fld fa0, 120(sp)
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powidf2
; RV32-NEXT: fsd fa0, 184(sp)
; RV32-NEXT: fld fa0, 112(sp)
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powidf2
; RV32-NEXT: fsd fa0, 176(sp)
; RV32-NEXT: fld fa0, 104(sp)
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powidf2
; RV32-NEXT: fsd fa0, 168(sp)
; RV32-NEXT: fld fa0, 96(sp)
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powidf2
; RV32-NEXT: fsd fa0, 160(sp)
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powidf2
; RV32-NEXT: fsd fa0, 128(sp)
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powidf2
; RV32-NEXT: fsd fa0, 136(sp)
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powidf2
; RV32-NEXT: fsd fa0, 152(sp)
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: mv a0, s2
; RV32-NEXT: call __powidf2
; RV32-NEXT: fsd fa0, 144(sp)
; RV32-NEXT: addi a0, sp, 128
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: addi sp, s0, -272
; RV32-NEXT: lw ra, 268(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 264(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s2, 260(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 272
; RV32-NEXT: ret
;
; RV64-LABEL: powi_v8f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -272
; RV64-NEXT: sd ra, 264(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 256(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s2, 248(sp) # 8-byte Folded Spill
; RV64-NEXT: addi s0, sp, 272
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 2
; RV64-NEXT: sub sp, sp, a1
; RV64-NEXT: andi sp, sp, -64
; RV64-NEXT: addi a1, sp, 240
; RV64-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
; RV64-NEXT: addi a1, sp, 64
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vse64.v v8, (a1)
; RV64-NEXT: fld fa0, 120(sp)
; RV64-NEXT: sext.w s2, a0
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powidf2
; RV64-NEXT: fsd fa0, 184(sp)
; RV64-NEXT: fld fa0, 112(sp)
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powidf2
; RV64-NEXT: fsd fa0, 176(sp)
; RV64-NEXT: fld fa0, 104(sp)
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powidf2
; RV64-NEXT: fsd fa0, 168(sp)
; RV64-NEXT: fld fa0, 96(sp)
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powidf2
; RV64-NEXT: fsd fa0, 160(sp)
; RV64-NEXT: addi a0, sp, 240
; RV64-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powidf2
; RV64-NEXT: fsd fa0, 128(sp)
; RV64-NEXT: addi a0, sp, 240
; RV64-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 1
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powidf2
; RV64-NEXT: fsd fa0, 136(sp)
; RV64-NEXT: addi a0, sp, 240
; RV64-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powidf2
; RV64-NEXT: fsd fa0, 152(sp)
; RV64-NEXT: addi a0, sp, 240
; RV64-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 2
; RV64-NEXT: vfmv.f.s fa0, v8
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __powidf2
; RV64-NEXT: fsd fa0, 144(sp)
; RV64-NEXT: addi a0, sp, 128
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: addi sp, s0, -272
; RV64-NEXT: ld ra, 264(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 256(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s2, 248(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 272
; RV64-NEXT: ret
  %a = call <8 x double> @llvm.powi.v8f64.i32(<8 x double> %x, i32 %y)
  ret <8 x double> %a
}

declare <8 x double> @llvm.powi.v8f64.i32(<8 x double>, i32)