; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

define <vscale x 1 x i8> @vsub_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sub <vscale x 1 x i8> %va, %vb
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vsub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %vc = sub <vscale x 1 x i8> %va, %splat
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vsub_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 17
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = sub <vscale x 1 x i8> %va, splat (i8 17)
  ret <vscale x 1 x i8> %vc
}

; Test constant subs to see if we can optimize them away for scalable vectors.
define <vscale x 1 x i8> @vsub_ii_nxv1i8_1() {
; CHECK-LABEL: vsub_ii_nxv1i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, -1
; CHECK-NEXT:    ret
  %vc = sub <vscale x 1 x i8> splat (i8 2), splat (i8 3)
  ret <vscale x 1 x i8> %vc
}

define <vscale x 2 x i8> @vsub_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sub <vscale x 2 x i8> %va, %vb
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vsub_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %vc = sub <vscale x 2 x i8> %va, %splat
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vsub_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 17
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = sub <vscale x 2 x i8> %va, splat (i8 17)
  ret <vscale x 2 x i8> %vc
}

define <vscale x 4 x i8> @vsub_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sub <vscale x 4 x i8> %va, %vb
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vsub_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %vc = sub <vscale x 4 x i8> %va, %splat
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vsub_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 17
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = sub <vscale x 4 x i8> %va, splat (i8 17)
  ret <vscale x 4 x i8> %vc
}

define <vscale x 8 x i8> @vsub_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sub <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vsub_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = sub <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vsub_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 17
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = sub <vscale x 8 x i8> %va, splat (i8 17)
  ret <vscale x 8 x i8> %vc
}

define <vscale x 16 x i8> @vsub_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = sub <vscale x 16 x i8> %va, %vb
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vsub_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %vc = sub <vscale x 16 x i8> %va, %splat
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vsub_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 17
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = sub <vscale x 16 x i8> %va, splat (i8 17)
  ret <vscale x 16 x i8> %vc
}

define <vscale x 32 x i8> @vsub_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = sub <vscale x 32 x i8> %va, %vb
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vsub_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %vc = sub <vscale x 32 x i8> %va, %splat
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vsub_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 17
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = sub <vscale x 32 x i8> %va, splat (i8 17)
  ret <vscale x 32 x i8> %vc
}

define <vscale x 64 x i8> @vsub_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = sub <vscale x 64 x i8> %va, %vb
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vsub_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %vc = sub <vscale x 64 x i8> %va, %splat
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vsub_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 17
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = sub <vscale x 64 x i8> %va, splat (i8 17)
  ret <vscale x 64 x i8> %vc
}

define <vscale x 1 x i16> @vsub_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vsub_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sub <vscale x 1 x i16> %va, %vb
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vsub_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsub_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %vc = sub <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vsub_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vsub_vx_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 17
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = sub <vscale x 1 x i16> %va, splat (i16 17)
  ret <vscale x 1 x i16> %vc
}

define <vscale x 2 x i16> @vsub_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vsub_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sub <vscale x 2 x i16> %va, %vb
  ret <vscale x 2 x i16> %vc
}

define
@vsub_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vsub_vx_nxv2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv2i16_0( %va) { ; CHECK-LABEL: vsub_vx_nxv2i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i16 17) ret %vc } define @vsub_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc } define @vsub_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vsub_vx_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv4i16_0( %va) { ; CHECK-LABEL: vsub_vx_nxv4i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i16 17) ret %vc } define @vsub_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc } define @vsub_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vsub_vx_nxv8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv8i16_0( %va) { ; CHECK-LABEL: vsub_vx_nxv8i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i16 17) ret %vc } define @vsub_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv16i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc } define @vsub_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vsub_vx_nxv16i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv16i16_0( %va) { ; CHECK-LABEL: vsub_vx_nxv16i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i16 17) ret %vc } define @vsub_vv_nxv32i16( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc } define @vsub_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vsub_vx_nxv32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv32i16_0( %va) { ; CHECK-LABEL: 
vsub_vx_nxv32i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i16 17) ret %vc } define @vsub_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc } define @vsub_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vsub_vx_nxv1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv1i32_0( %va) { ; CHECK-LABEL: vsub_vx_nxv1i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i32 17) ret %vc } define @vsub_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc } define @vsub_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vsub_vx_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv2i32_0( %va) { ; CHECK-LABEL: vsub_vx_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i32 17) ret %vc } define @vsub_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc } define @vsub_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vsub_vx_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv4i32_0( %va) { ; CHECK-LABEL: vsub_vx_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i32 17) ret %vc } define @vsub_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc } define @vsub_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vsub_vx_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv8i32_0( %va) { ; CHECK-LABEL: vsub_vx_nxv8i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i32 17) ret %vc } define @vsub_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv16i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: 
ret %vc = sub %va, %vb ret %vc } define @vsub_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vsub_vx_nxv16i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv16i32_0( %va) { ; CHECK-LABEL: vsub_vx_nxv16i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i32 17) ret %vc } define @vsub_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv1i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc } define @vsub_vx_nxv1i64( %va, i64 %b) { ; RV32-LABEL: vsub_vx_nxv1i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv1i64_0( %va) { ; CHECK-LABEL: vsub_vx_nxv1i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i64 17) ret %vc } define @vsub_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc } define @vsub_vx_nxv2i64( %va, i64 %b) { ; RV32-LABEL: vsub_vx_nxv2i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv2i64_0( %va) { ; CHECK-LABEL: vsub_vx_nxv2i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i64 17) ret %vc } define @vsub_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv4i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc } define @vsub_vx_nxv4i64( %va, i64 %b) { ; RV32-LABEL: vsub_vx_nxv4i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 
16 ; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv4i64_0( %va) { ; CHECK-LABEL: vsub_vx_nxv4i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i64 17) ret %vc } define @vsub_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv8i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc } define @vsub_vx_nxv8i64( %va, i64 %b) { ; RV32-LABEL: vsub_vx_nxv8i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sub %va, %splat ret %vc } define @vsub_vx_nxv8i64_0( %va) { ; CHECK-LABEL: vsub_vx_nxv8i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 17 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i64 17) ret %vc } define @vsub_xx_nxv8i64(i64 %a, i64 %b) nounwind { ; RV32-LABEL: vsub_xx_nxv8i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sub a4, a0, a2 ; RV32-NEXT: sltu a0, a0, a2 ; RV32-NEXT: sub a1, a1, a3 ; RV32-NEXT: sub a1, a1, a0 ; RV32-NEXT: sw a4, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_xx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: sub a0, a0, a1 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmv.v.x v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i64 %a, i32 0 %splat1 = shufflevector %head1, poison, zeroinitializer %head2 = insertelement poison, i64 %b, i32 0 %splat2 = shufflevector %head2, poison, zeroinitializer %v = sub %splat1, %splat2 ret %v } define @vsub_vv_mask_nxv8i32( %va, %vb, %mask) { ; CHECK-LABEL: vsub_vv_mask_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t ; CHECK-NEXT: ret %vs = select %mask, %vb, zeroinitializer %vc = sub %va, %vs ret %vc } define @vsub_vx_mask_nxv8i32( %va, i32 signext %b, %mask) { ; CHECK-LABEL: vsub_vx_mask_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vs = select %mask, %splat, zeroinitializer %vc = sub %va, %vs ret %vc } define @vsub_vi_nxv8i32_one( %va) { ; CHECK-LABEL: vsub_vi_nxv8i32_one: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %vc = sub %va, splat (i32 1) ret %vc } define @vsub_vi_nxv8i32_minusone( %va) { 
; CHECK-LABEL: vsub_vi_nxv8i32_minusone: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 1 ; CHECK-NEXT: ret %vc = sub %va, splat (i32 -1) ret %vc } define @vsub_vi_nxv8i32_15( %va) { ; CHECK-LABEL: vsub_vi_nxv8i32_15: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -15 ; CHECK-NEXT: ret %vc = sub %va, splat (i32 15) ret %vc } define @vsub_vi_nxv8i32_16( %va) { ; CHECK-LABEL: vsub_vi_nxv8i32_16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -16 ; CHECK-NEXT: ret %vc = sub %va, splat (i32 16) ret %vc } define @vsub_vi_nxv8i32_minus15( %va) { ; CHECK-LABEL: vsub_vi_nxv8i32_minus15: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 15 ; CHECK-NEXT: ret %vc = sub %va, splat (i32 -15) ret %vc } define @vsub_vi_nxv8i32_minus16( %va) { ; CHECK-LABEL: vsub_vi_nxv8i32_minus16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %vc = sub %va, splat (i32 -16) ret %vc } define @vsub_vi_mask_nxv8i32( %va, %mask) { ; CHECK-LABEL: vsub_vi_mask_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -7, v0.t ; CHECK-NEXT: ret %vs = select %mask, splat (i32 7), zeroinitializer %vc = sub %va, %vs ret %vc } ; Make sure we are able to split a type that isn't an MVT even if the scalar ; element type isn't legal on RV32. This used to crash. define @vsub_vv_nxv64i64( %va, %vb) { ; RV32-LABEL: vsub_vv_nxv64i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: .cfi_def_cfa_offset 80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s1, 68(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s2, 64(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s3, 60(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s4, 56(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s5, 52(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s6, 48(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s7, 44(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s8, 40(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s9, 36(sp) # 4-byte Folded Spill ; RV32-NEXT: .cfi_offset ra, -4 ; RV32-NEXT: .cfi_offset s0, -8 ; RV32-NEXT: .cfi_offset s1, -12 ; RV32-NEXT: .cfi_offset s2, -16 ; RV32-NEXT: .cfi_offset s3, -20 ; RV32-NEXT: .cfi_offset s4, -24 ; RV32-NEXT: .cfi_offset s5, -28 ; RV32-NEXT: .cfi_offset s6, -32 ; RV32-NEXT: .cfi_offset s7, -36 ; RV32-NEXT: .cfi_offset s8, -40 ; RV32-NEXT: .cfi_offset s9, -44 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: mv a3, a2 ; RV32-NEXT: slli a2, a2, 1 ; RV32-NEXT: add a3, a3, a2 ; RV32-NEXT: slli a2, a2, 2 ; RV32-NEXT: add a2, a2, a3 ; RV32-NEXT: sub sp, sp, a2 ; RV32-NEXT: .cfi_escape 0x0f, 0x0f, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0xd8, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 88 * vlenb ; RV32-NEXT: mv s2, a7 ; RV32-NEXT: mv s3, a1 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 6 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 2 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: csrr s6, vlenb ; RV32-NEXT: slli s7, s6, 4 ; RV32-NEXT: slli s8, s6, 3 ; 
RV32-NEXT: add a1, a7, s7 ; RV32-NEXT: vl8re64.v v8, (a1) ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: add a1, a7, s8 ; RV32-NEXT: vl8re64.v v8, (a1) ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 1 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: mv s0, a0 ; RV32-NEXT: li a1, 56 ; RV32-NEXT: mv a0, s6 ; RV32-NEXT: call __mulsi3 ; RV32-NEXT: mv s1, a0 ; RV32-NEXT: add a0, s2, a0 ; RV32-NEXT: vl8re64.v v8, (a0) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 32 ; RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; RV32-NEXT: li a1, 40 ; RV32-NEXT: mv a0, s6 ; RV32-NEXT: call __mulsi3 ; RV32-NEXT: mv s4, a0 ; RV32-NEXT: add a0, s3, a0 ; RV32-NEXT: vl8re64.v v8, (a0) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: mv a1, a0 ; RV32-NEXT: slli a0, a0, 1 ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 32 ; RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; RV32-NEXT: li a1, 48 ; RV32-NEXT: mv a0, s6 ; RV32-NEXT: call __mulsi3 ; RV32-NEXT: mv s5, a0 ; RV32-NEXT: add a0, s2, a0 ; RV32-NEXT: slli s9, s6, 5 ; RV32-NEXT: vl8re64.v v8, (a0) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: mv a1, a0 ; RV32-NEXT: slli a0, a0, 2 ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 32 ; RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; RV32-NEXT: add a0, s3, s9 ; RV32-NEXT: vl8re64.v v8, (a0) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 32 ; RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; RV32-NEXT: add a0, s2, s4 ; RV32-NEXT: vl8re64.v v8, (a0) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: mv a1, a0 ; RV32-NEXT: slli a0, a0, 1 ; RV32-NEXT: add a1, a1, a0 ; RV32-NEXT: slli a0, a0, 1 ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 32 ; RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; RV32-NEXT: li a1, 24 ; RV32-NEXT: mv a0, s6 ; RV32-NEXT: call __mulsi3 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 2 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 2 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 5 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 1 ; 
RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 5 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 6 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 1 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 6 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: vl8re64.v v8, (s3) ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: add a1, s3, a0 ; RV32-NEXT: add a2, s2, s9 ; RV32-NEXT: add a3, s3, s7 ; RV32-NEXT: add a4, s2, a0 ; RV32-NEXT: add s3, s3, s8 ; RV32-NEXT: vl8re64.v v8, (s2) ; RV32-NEXT: csrr a5, vlenb ; RV32-NEXT: slli a5, a5, 3 ; RV32-NEXT: mv a6, a5 ; RV32-NEXT: slli a5, a5, 1 ; RV32-NEXT: add a5, a5, a6 ; RV32-NEXT: add a5, sp, a5 ; RV32-NEXT: addi a5, a5, 32 ; RV32-NEXT: vs8r.v v8, (a5) # vscale x 64-byte Folded Spill ; RV32-NEXT: vl8re64.v v8, (a2) ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: slli a2, a2, 4 ; RV32-NEXT: mv a5, a2 ; RV32-NEXT: slli a2, a2, 1 ; RV32-NEXT: add a2, a2, a5 ; RV32-NEXT: add a2, sp, a2 ; RV32-NEXT: addi a2, a2, 32 ; RV32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill ; RV32-NEXT: vl8re64.v v8, (a4) ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: slli a2, a2, 4 ; RV32-NEXT: add a2, sp, a2 ; RV32-NEXT: addi a2, a2, 32 ; RV32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill ; RV32-NEXT: vl8re64.v v8, (s3) ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, sp, a2 ; RV32-NEXT: addi a2, a2, 32 ; RV32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill ; RV32-NEXT: vl8re64.v v8, (a3) ; RV32-NEXT: addi a2, sp, 32 ; RV32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill ; RV32-NEXT: vl8re64.v v24, (a1) ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 2 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v0, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 1 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsub.vv v0, v0, v8 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload 
; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 2 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 1 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: addi a1, sp, 32 ; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 1 ; RV32-NEXT: add a2, a2, a1 ; RV32-NEXT: slli a1, a1, 1 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsub.vv v16, v24, v16 ; RV32-NEXT: vs8r.v v0, (s0) ; RV32-NEXT: add s1, s0, s1 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 5 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vs8r.v v24, (s1) ; RV32-NEXT: add s5, s0, s5 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 2 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vs8r.v v24, (s5) ; RV32-NEXT: add s4, s0, s4 ; RV32-NEXT: vs8r.v v16, (s4) ; RV32-NEXT: add s9, s0, s9 ; RV32-NEXT: add a0, s0, a0 ; RV32-NEXT: add s7, s0, s7 ; RV32-NEXT: add s0, s0, s8 ; RV32-NEXT: vs8r.v v8, (s9) ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: slli a1, a1, 2 ; RV32-NEXT: add a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: mv a1, a0 ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 32 ; RV32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; RV32-NEXT: vs8r.v v8, (s7) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 6 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 32 ; RV32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; RV32-NEXT: vs8r.v v8, (s0) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: mv a1, a0 ; RV32-NEXT: slli a0, a0, 1 ; RV32-NEXT: add a1, a1, a0 ; RV32-NEXT: slli a0, a0, 2 ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 ; RV32-NEXT: .cfi_def_cfa sp, 80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s1, 68(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s2, 64(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s3, 60(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s4, 56(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s5, 52(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s6, 48(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s7, 44(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s8, 40(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s9, 36(sp) # 4-byte Folded Reload ; RV32-NEXT: 
.cfi_restore ra ; RV32-NEXT: .cfi_restore s0 ; RV32-NEXT: .cfi_restore s1 ; RV32-NEXT: .cfi_restore s2 ; RV32-NEXT: .cfi_restore s3 ; RV32-NEXT: .cfi_restore s4 ; RV32-NEXT: .cfi_restore s5 ; RV32-NEXT: .cfi_restore s6 ; RV32-NEXT: .cfi_restore s7 ; RV32-NEXT: .cfi_restore s8 ; RV32-NEXT: .cfi_restore s9 ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vv_nxv64i64: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -128 ; RV64-NEXT: .cfi_def_cfa_offset 128 ; RV64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 112(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s1, 104(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s2, 96(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s3, 88(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s4, 80(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s5, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s6, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s7, 56(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s8, 48(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s9, 40(sp) # 8-byte Folded Spill ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: .cfi_offset s0, -16 ; RV64-NEXT: .cfi_offset s1, -24 ; RV64-NEXT: .cfi_offset s2, -32 ; RV64-NEXT: .cfi_offset s3, -40 ; RV64-NEXT: .cfi_offset s4, -48 ; RV64-NEXT: .cfi_offset s5, -56 ; RV64-NEXT: .cfi_offset s6, -64 ; RV64-NEXT: .cfi_offset s7, -72 ; RV64-NEXT: .cfi_offset s8, -80 ; RV64-NEXT: .cfi_offset s9, -88 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: mv a3, a2 ; RV64-NEXT: slli a2, a2, 1 ; RV64-NEXT: add a3, a3, a2 ; RV64-NEXT: slli a2, a2, 2 ; RV64-NEXT: add a2, a2, a3 ; RV64-NEXT: sub sp, sp, a2 ; RV64-NEXT: .cfi_escape 0x0f, 0x0f, 0x72, 0x00, 0x11, 0x80, 0x01, 0x22, 0x11, 0xd8, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 128 + 88 * vlenb ; RV64-NEXT: mv s2, a7 ; RV64-NEXT: mv s3, a1 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 6 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 2 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV64-NEXT: csrr s6, vlenb ; RV64-NEXT: slli s7, s6, 4 ; RV64-NEXT: slli s8, s6, 3 ; RV64-NEXT: add a1, a7, s7 ; RV64-NEXT: vl8re64.v v8, (a1) ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV64-NEXT: add a1, a7, s8 ; RV64-NEXT: vl8re64.v v8, (a1) ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 1 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV64-NEXT: mv s0, a0 ; RV64-NEXT: li a1, 56 ; RV64-NEXT: mv a0, s6 ; RV64-NEXT: call __muldi3 ; RV64-NEXT: mv s1, a0 ; RV64-NEXT: add a0, s2, a0 ; RV64-NEXT: vl8re64.v v8, (a0) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 5 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 32 ; RV64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; RV64-NEXT: li a1, 40 ; RV64-NEXT: mv a0, s6 ; RV64-NEXT: call __muldi3 ; RV64-NEXT: mv s4, a0 ; RV64-NEXT: add a0, s3, a0 ; RV64-NEXT: vl8re64.v v8, (a0) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: mv a1, a0 ; 
RV64-NEXT: slli a0, a0, 1 ; RV64-NEXT: add a0, a0, a1 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 32 ; RV64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; RV64-NEXT: li a1, 48 ; RV64-NEXT: mv a0, s6 ; RV64-NEXT: call __muldi3 ; RV64-NEXT: mv s5, a0 ; RV64-NEXT: add a0, s2, a0 ; RV64-NEXT: slli s9, s6, 5 ; RV64-NEXT: vl8re64.v v8, (a0) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: mv a1, a0 ; RV64-NEXT: slli a0, a0, 2 ; RV64-NEXT: add a0, a0, a1 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 32 ; RV64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; RV64-NEXT: add a0, s3, s9 ; RV64-NEXT: vl8re64.v v8, (a0) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 32 ; RV64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; RV64-NEXT: add a0, s2, s4 ; RV64-NEXT: vl8re64.v v8, (a0) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: mv a1, a0 ; RV64-NEXT: slli a0, a0, 1 ; RV64-NEXT: add a1, a1, a0 ; RV64-NEXT: slli a0, a0, 1 ; RV64-NEXT: add a0, a0, a1 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 32 ; RV64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; RV64-NEXT: li a1, 24 ; RV64-NEXT: mv a0, s6 ; RV64-NEXT: call __muldi3 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 2 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsub.vv v8, v16, v8 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 2 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 5 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 1 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: vsub.vv v8, v16, v8 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 5 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 6 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 1 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: vsub.vv v8, v8, v16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 6 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV64-NEXT: vl8re64.v v8, (s3) ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; 
RV64-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: vsub.vv v8, v8, v16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV64-NEXT: add a1, s3, a0 ; RV64-NEXT: add a2, s2, s9 ; RV64-NEXT: add a3, s3, s7 ; RV64-NEXT: add a4, s2, a0 ; RV64-NEXT: add s3, s3, s8 ; RV64-NEXT: vl8re64.v v8, (s2) ; RV64-NEXT: csrr a5, vlenb ; RV64-NEXT: slli a5, a5, 3 ; RV64-NEXT: mv a6, a5 ; RV64-NEXT: slli a5, a5, 1 ; RV64-NEXT: add a5, a5, a6 ; RV64-NEXT: add a5, sp, a5 ; RV64-NEXT: addi a5, a5, 32 ; RV64-NEXT: vs8r.v v8, (a5) # vscale x 64-byte Folded Spill ; RV64-NEXT: vl8re64.v v8, (a2) ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: slli a2, a2, 4 ; RV64-NEXT: mv a5, a2 ; RV64-NEXT: slli a2, a2, 1 ; RV64-NEXT: add a2, a2, a5 ; RV64-NEXT: add a2, sp, a2 ; RV64-NEXT: addi a2, a2, 32 ; RV64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill ; RV64-NEXT: vl8re64.v v8, (a4) ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: slli a2, a2, 4 ; RV64-NEXT: add a2, sp, a2 ; RV64-NEXT: addi a2, a2, 32 ; RV64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill ; RV64-NEXT: vl8re64.v v8, (s3) ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, sp, a2 ; RV64-NEXT: addi a2, a2, 32 ; RV64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill ; RV64-NEXT: vl8re64.v v8, (a3) ; RV64-NEXT: addi a2, sp, 32 ; RV64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill ; RV64-NEXT: vl8re64.v v24, (a1) ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 2 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v0, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 1 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: vsub.vv v0, v0, v8 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: vsub.vv v8, v16, v8 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 2 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 1 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: addi a1, sp, 32 ; RV64-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: vsub.vv v8, v16, v8 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 1 ; RV64-NEXT: add a2, a2, a1 ; RV64-NEXT: slli a1, a1, 1 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: vsub.vv v16, v24, v16 ; RV64-NEXT: vs8r.v v0, (s0) 
; RV64-NEXT: add s1, s0, s1 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 5 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: vs8r.v v24, (s1) ; RV64-NEXT: add s5, s0, s5 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 2 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: vs8r.v v24, (s5) ; RV64-NEXT: add s4, s0, s4 ; RV64-NEXT: vs8r.v v16, (s4) ; RV64-NEXT: add s9, s0, s9 ; RV64-NEXT: add a0, s0, a0 ; RV64-NEXT: add s7, s0, s7 ; RV64-NEXT: add s0, s0, s8 ; RV64-NEXT: vs8r.v v8, (s9) ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: slli a1, a1, 2 ; RV64-NEXT: add a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 32 ; RV64-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: mv a1, a0 ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add a0, a0, a1 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 32 ; RV64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; RV64-NEXT: vs8r.v v8, (s7) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 6 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 32 ; RV64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; RV64-NEXT: vs8r.v v8, (s0) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: mv a1, a0 ; RV64-NEXT: slli a0, a0, 1 ; RV64-NEXT: add a1, a1, a0 ; RV64-NEXT: slli a0, a0, 2 ; RV64-NEXT: add a0, a0, a1 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: .cfi_def_cfa sp, 128 ; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s1, 104(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s2, 96(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s3, 88(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s4, 80(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s5, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s6, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s7, 56(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s8, 48(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s9, 40(sp) # 8-byte Folded Reload ; RV64-NEXT: .cfi_restore ra ; RV64-NEXT: .cfi_restore s0 ; RV64-NEXT: .cfi_restore s1 ; RV64-NEXT: .cfi_restore s2 ; RV64-NEXT: .cfi_restore s3 ; RV64-NEXT: .cfi_restore s4 ; RV64-NEXT: .cfi_restore s5 ; RV64-NEXT: .cfi_restore s6 ; RV64-NEXT: .cfi_restore s7 ; RV64-NEXT: .cfi_restore s8 ; RV64-NEXT: .cfi_restore s9 ; RV64-NEXT: addi sp, sp, 128 ; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %vc = sub %va, %vb ret %vc }