author    Juzhe-Zhong <juzhe.zhong@rivai.ai>  2023-11-30 10:36:30 +0800
committer Lehua Ding <lehua.ding@rivai.ai>   2023-11-30 10:39:31 +0800
commit    8614cbb253484e28c3eb20cde4d1067aad56de58 (patch)
tree      af46cf2d668ba2f6582761487c58dbbd65b6d30b /gcc
parent    e65aaf8efe1900f7bbf76235a078000bf2ec8b45 (diff)
RISC-V: Support highpart overlap for floating-point widen instructions
This patch applies the same approach as the already-approved vwcvt/vext.vf2
patches; the handling here is identical.  Tested with no regressions and
committed.

	PR target/112431

gcc/ChangeLog:

	* config/riscv/vector.md: Add widening overlap.

gcc/testsuite/ChangeLog:

	* gcc.target/riscv/rvv/base/pr112431-10.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-11.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-12.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-13.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-14.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-15.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-7.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-8.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-9.c: New test.
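Background for the change: an RVV widening conversion writes a destination
register group twice the size of its source, and the ISA permits the
destination to overlap the source operand as long as the source lies in the
highest-numbered part of the destination group.  The patterns changed below
previously used unconditional earlyclobber destinations ("=&vr"), which rules
out any overlap and, under register pressure, forces whole-register moves or
spills.  A minimal sketch of code that benefits, modeled on the new tests
(the function name is illustrative):

    #include "riscv_vector.h"

    /* With highpart overlap permitted, the allocator may place the i32m1
       source in the high half of the f64m2 destination group (e.g. source
       in v3, destination in v2-v3), so no vmv1r copy is needed to free
       registers around the widening conversion.  */
    vfloat64m2_t
    widen (vint32m1_t v, size_t vl)
    {
      return __riscv_vfwcvt_f_x_v_f64m2 (v, vl);
    }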
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/riscv/vector.md                             |  78
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c  | 104
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c  |  68
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c  |  51
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c  | 188
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c  | 119
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c  |  86
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c   | 106
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c   |  68
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c   |  51
10 files changed, 882 insertions, 37 deletions
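A reading guide for the vector.md hunk that follows: each affected pattern's
two earlyclobber alternatives ("=&vr, &vr") are split into eight.  The first
six use plain "vr" destinations with W21/W42/W84 source constraints and the
matching group_overlap attribute values; judging from the tests in this
commit, these appear to encode the destination:source LMUL pairings m2/m1,
m4/m2 and m8/m4 (an assumption, not stated in the patch text).  The last two
"?&vr" alternatives keep the old fully disjoint allocation as a slightly
disparaged fallback.  Each test then checks, via scan-assembler-not, that no
whole-register moves (vmv1r/vmv2r/vmv4r/vmv8r) and no csrr reads survive once
overlap is honored.  A compact sketch of the three ratios under that
assumption:

    #include "riscv_vector.h"

    /* One function per assumed constraint pairing:
       m1 -> m2 (W21), m2 -> m4 (W42), m4 -> m8 (W84).  */
    vfloat64m2_t w21 (vint32m1_t v, size_t vl)
    { return __riscv_vfwcvt_f_x_v_f64m2 (v, vl); }
    vfloat64m4_t w42 (vint32m2_t v, size_t vl)
    { return __riscv_vfwcvt_f_x_v_f64m4 (v, vl); }
    vfloat64m8_t w84 (vint32m4_t v, size_t vl)
    { return __riscv_vfwcvt_f_x_v_f64m8 (v, vl); }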
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index 74716c7..6b891c1 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -7622,84 +7622,88 @@
;; -------------------------------------------------------------------------------
(define_insn "@pred_widen_fcvt_x<v_su>_f<mode>"
- [(set (match_operand:VWCONVERTI 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VWCONVERTI 0 "register_operand" "=vr, vr, vr, vr, vr, vr, ?&vr, ?&vr")
(if_then_else:VWCONVERTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 4 "vector_length_operand" " rK, rK")
- (match_operand 5 "const_int_operand" " i, i")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 5 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)
(reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VWCONVERTI
- [(match_operand:<VNCONVERT> 3 "register_operand" " vr, vr")] VFCVTS)
- (match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, 0")))]
+ [(match_operand:<VNCONVERT> 3 "register_operand" " W21, W21, W42, W42, W84, W84, vr, vr")] VFCVTS)
+ (match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"vfwcvt.x<v_su>.f.v\t%0,%3%p1"
[(set_attr "type" "vfwcvtftoi")
(set_attr "mode" "<VNCONVERT>")
(set (attr "frm_mode")
- (symbol_ref "riscv_vector::get_frm_mode (operands[8])"))])
+ (symbol_ref "riscv_vector::get_frm_mode (operands[8])"))
+ (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none,none")])
(define_insn "@pred_widen_<fix_cvt><mode>"
- [(set (match_operand:VWCONVERTI 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VWCONVERTI 0 "register_operand" "=vr, vr, vr, vr, vr, vr, ?&vr, ?&vr")
(if_then_else:VWCONVERTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 4 "vector_length_operand" " rK, rK")
- (match_operand 5 "const_int_operand" " i, i")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 5 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(any_fix:VWCONVERTI
- (match_operand:<VNCONVERT> 3 "register_operand" " vr, vr"))
- (match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VNCONVERT> 3 "register_operand" " W21, W21, W42, W42, W84, W84, vr, vr"))
+ (match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"vfwcvt.rtz.x<u>.f.v\t%0,%3%p1"
[(set_attr "type" "vfwcvtftoi")
- (set_attr "mode" "<VNCONVERT>")])
+ (set_attr "mode" "<VNCONVERT>")
+ (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none,none")])
(define_insn "@pred_widen_<float_cvt><mode>"
- [(set (match_operand:V_VLSF 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:V_VLSF 0 "register_operand" "=vr, vr, vr, vr, vr, vr, ?&vr, ?&vr")
(if_then_else:V_VLSF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 4 "vector_length_operand" " rK, rK")
- (match_operand 5 "const_int_operand" " i, i")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 5 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(any_float:V_VLSF
- (match_operand:<VNCONVERT> 3 "register_operand" " vr, vr"))
- (match_operand:V_VLSF 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VNCONVERT> 3 "register_operand" " W21, W21, W42, W42, W84, W84, vr, vr"))
+ (match_operand:V_VLSF 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"vfwcvt.f.x<u>.v\t%0,%3%p1"
[(set_attr "type" "vfwcvtitof")
- (set_attr "mode" "<VNCONVERT>")])
+ (set_attr "mode" "<VNCONVERT>")
+ (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none,none")])
(define_insn "@pred_extend<mode>"
- [(set (match_operand:VWEXTF_ZVFHMIN 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VWEXTF_ZVFHMIN 0 "register_operand" "=vr, vr, vr, vr, vr, vr, ?&vr, ?&vr")
(if_then_else:VWEXTF_ZVFHMIN
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 4 "vector_length_operand" " rK, rK")
- (match_operand 5 "const_int_operand" " i, i")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 5 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(float_extend:VWEXTF_ZVFHMIN
- (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, vr"))
- (match_operand:VWEXTF_ZVFHMIN 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " W21, W21, W42, W42, W84, W84, vr, vr"))
+ (match_operand:VWEXTF_ZVFHMIN 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"vfwcvt.f.f.v\t%0,%3%p1"
[(set_attr "type" "vfwcvtftof")
- (set_attr "mode" "<V_DOUBLE_TRUNC>")])
+ (set_attr "mode" "<V_DOUBLE_TRUNC>")
+ (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none,none")])
;; -------------------------------------------------------------------------------
;; ---- Predicated floating-point narrow conversions
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c
new file mode 100644
index 0000000..5f161b3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c
@@ -0,0 +1,104 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+ double sum5, double sum6, double sum7, double sum8, double sum9,
+ double sum10, double sum11, double sum12, double sum13, double sum14,
+ double sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint32m1_t v0 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v1 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v2 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v3 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v4 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v5 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v6 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v7 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v8 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v9 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v10 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v11 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v12 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v13 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v14 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v15 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m2_t vw0 = __riscv_vfwcvt_f_x_v_f64m2 (v0, vl);
+ vfloat64m2_t vw1 = __riscv_vfwcvt_f_x_v_f64m2 (v1, vl);
+ vfloat64m2_t vw2 = __riscv_vfwcvt_f_x_v_f64m2 (v2, vl);
+ vfloat64m2_t vw3 = __riscv_vfwcvt_f_x_v_f64m2 (v3, vl);
+ vfloat64m2_t vw4 = __riscv_vfwcvt_f_x_v_f64m2 (v4, vl);
+ vfloat64m2_t vw5 = __riscv_vfwcvt_f_x_v_f64m2 (v5, vl);
+ vfloat64m2_t vw6 = __riscv_vfwcvt_f_x_v_f64m2 (v6, vl);
+ vfloat64m2_t vw7 = __riscv_vfwcvt_f_x_v_f64m2 (v7, vl);
+ vfloat64m2_t vw8 = __riscv_vfwcvt_f_x_v_f64m2 (v8, vl);
+ vfloat64m2_t vw9 = __riscv_vfwcvt_f_x_v_f64m2 (v9, vl);
+ vfloat64m2_t vw10 = __riscv_vfwcvt_f_x_v_f64m2 (v10, vl);
+ vfloat64m2_t vw11 = __riscv_vfwcvt_f_x_v_f64m2 (v11, vl);
+ vfloat64m2_t vw12 = __riscv_vfwcvt_f_x_v_f64m2 (v12, vl);
+ vfloat64m2_t vw13 = __riscv_vfwcvt_f_x_v_f64m2 (v13, vl);
+ vfloat64m2_t vw14 = __riscv_vfwcvt_f_x_v_f64m2 (v14, vl);
+ vfloat64m2_t vw15 = __riscv_vfwcvt_f_x_v_f64m2 (v15, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vfmv_f_s_f64m2_f64 (vw0);
+ double sum1 = __riscv_vfmv_f_s_f64m2_f64 (vw1);
+ double sum2 = __riscv_vfmv_f_s_f64m2_f64 (vw2);
+ double sum3 = __riscv_vfmv_f_s_f64m2_f64 (vw3);
+ double sum4 = __riscv_vfmv_f_s_f64m2_f64 (vw4);
+ double sum5 = __riscv_vfmv_f_s_f64m2_f64 (vw5);
+ double sum6 = __riscv_vfmv_f_s_f64m2_f64 (vw6);
+ double sum7 = __riscv_vfmv_f_s_f64m2_f64 (vw7);
+ double sum8 = __riscv_vfmv_f_s_f64m2_f64 (vw8);
+ double sum9 = __riscv_vfmv_f_s_f64m2_f64 (vw9);
+ double sum10 = __riscv_vfmv_f_s_f64m2_f64 (vw10);
+ double sum11 = __riscv_vfmv_f_s_f64m2_f64 (vw11);
+ double sum12 = __riscv_vfmv_f_s_f64m2_f64 (vw12);
+ double sum13 = __riscv_vfmv_f_s_f64m2_f64 (vw13);
+ double sum14 = __riscv_vfmv_f_s_f64m2_f64 (vw14);
+ double sum15 = __riscv_vfmv_f_s_f64m2_f64 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c
new file mode 100644
index 0000000..82827d1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+ double sum5, double sum6, double sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint32m2_t v0 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v1 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v2 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v3 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v4 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v5 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v6 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v7 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m4_t vw0 = __riscv_vfwcvt_f_x_v_f64m4 (v0, vl);
+ vfloat64m4_t vw1 = __riscv_vfwcvt_f_x_v_f64m4 (v1, vl);
+ vfloat64m4_t vw2 = __riscv_vfwcvt_f_x_v_f64m4 (v2, vl);
+ vfloat64m4_t vw3 = __riscv_vfwcvt_f_x_v_f64m4 (v3, vl);
+ vfloat64m4_t vw4 = __riscv_vfwcvt_f_x_v_f64m4 (v4, vl);
+ vfloat64m4_t vw5 = __riscv_vfwcvt_f_x_v_f64m4 (v5, vl);
+ vfloat64m4_t vw6 = __riscv_vfwcvt_f_x_v_f64m4 (v6, vl);
+ vfloat64m4_t vw7 = __riscv_vfwcvt_f_x_v_f64m4 (v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vfmv_f_s_f64m4_f64 (vw0);
+ double sum1 = __riscv_vfmv_f_s_f64m4_f64 (vw1);
+ double sum2 = __riscv_vfmv_f_s_f64m4_f64 (vw2);
+ double sum3 = __riscv_vfmv_f_s_f64m4_f64 (vw3);
+ double sum4 = __riscv_vfmv_f_s_f64m4_f64 (vw4);
+ double sum5 = __riscv_vfmv_f_s_f64m4_f64 (vw5);
+ double sum6 = __riscv_vfmv_f_s_f64m4_f64 (vw6);
+ double sum7 = __riscv_vfmv_f_s_f64m4_f64 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c
new file mode 100644
index 0000000..c4ae607
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint32m4_t v0 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+ it += vl;
+ vint32m4_t v1 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+ it += vl;
+ vint32m4_t v2 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+ it += vl;
+ vint32m4_t v3 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m8_t vw0 = __riscv_vfwcvt_f_x_v_f64m8 (v0, vl);
+ vfloat64m8_t vw1 = __riscv_vfwcvt_f_x_v_f64m8 (v1, vl);
+ vfloat64m8_t vw2 = __riscv_vfwcvt_f_x_v_f64m8 (v2, vl);
+ vfloat64m8_t vw3 = __riscv_vfwcvt_f_x_v_f64m8 (v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vfmv_f_s_f64m8_f64 (vw0);
+ double sum1 = __riscv_vfmv_f_s_f64m8_f64 (vw1);
+ double sum2 = __riscv_vfmv_f_s_f64m8_f64 (vw2);
+ double sum3 = __riscv_vfmv_f_s_f64m8_f64 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c
new file mode 100644
index 0000000..fde7076
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c
@@ -0,0 +1,188 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+ double sum5, double sum6, double sum7, double sum8, double sum9,
+ double sum10, double sum11, double sum12, double sum13, double sum14,
+ double sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl);
+ vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl);
+ vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl);
+ vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl);
+ vint64m2_t vw4 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v4, vl);
+ vint64m2_t vw5 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v5, vl);
+ vint64m2_t vw6 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v6, vl);
+ vint64m2_t vw7 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v7, vl);
+ vint64m2_t vw8 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v8, vl);
+ vint64m2_t vw9 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v9, vl);
+ vint64m2_t vw10 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v10, vl);
+ vint64m2_t vw11 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v11, vl);
+ vint64m2_t vw12 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v12, vl);
+ vint64m2_t vw13 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v13, vl);
+ vint64m2_t vw14 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v14, vl);
+ vint64m2_t vw15 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v15, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+ double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+ double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+ double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+ double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+ double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+ double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+ double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+ double sum8 = __riscv_vmv_x_s_i64m2_i64 (vw8);
+ double sum9 = __riscv_vmv_x_s_i64m2_i64 (vw9);
+ double sum10 = __riscv_vmv_x_s_i64m2_i64 (vw10);
+ double sum11 = __riscv_vmv_x_s_i64m2_i64 (vw11);
+ double sum12 = __riscv_vmv_x_s_i64m2_i64 (vw12);
+ double sum13 = __riscv_vmv_x_s_i64m2_i64 (vw13);
+ double sum14 = __riscv_vmv_x_s_i64m2_i64 (vw14);
+ double sum15 = __riscv_vmv_x_s_i64m2_i64 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+double
+foo2 (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl);
+ vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl);
+ vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl);
+ vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl);
+ vint64m2_t vw4 = __riscv_vfwcvt_x_f_v_i64m2 (v4, vl);
+ vint64m2_t vw5 = __riscv_vfwcvt_x_f_v_i64m2 (v5, vl);
+ vint64m2_t vw6 = __riscv_vfwcvt_x_f_v_i64m2 (v6, vl);
+ vint64m2_t vw7 = __riscv_vfwcvt_x_f_v_i64m2 (v7, vl);
+ vint64m2_t vw8 = __riscv_vfwcvt_x_f_v_i64m2 (v8, vl);
+ vint64m2_t vw9 = __riscv_vfwcvt_x_f_v_i64m2 (v9, vl);
+ vint64m2_t vw10 = __riscv_vfwcvt_x_f_v_i64m2 (v10, vl);
+ vint64m2_t vw11 = __riscv_vfwcvt_x_f_v_i64m2 (v11, vl);
+ vint64m2_t vw12 = __riscv_vfwcvt_x_f_v_i64m2 (v12, vl);
+ vint64m2_t vw13 = __riscv_vfwcvt_x_f_v_i64m2 (v13, vl);
+ vint64m2_t vw14 = __riscv_vfwcvt_x_f_v_i64m2 (v14, vl);
+ vint64m2_t vw15 = __riscv_vfwcvt_x_f_v_i64m2 (v15, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+ double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+ double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+ double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+ double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+ double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+ double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+ double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+ double sum8 = __riscv_vmv_x_s_i64m2_i64 (vw8);
+ double sum9 = __riscv_vmv_x_s_i64m2_i64 (vw9);
+ double sum10 = __riscv_vmv_x_s_i64m2_i64 (vw10);
+ double sum11 = __riscv_vmv_x_s_i64m2_i64 (vw11);
+ double sum12 = __riscv_vmv_x_s_i64m2_i64 (vw12);
+ double sum13 = __riscv_vmv_x_s_i64m2_i64 (vw13);
+ double sum14 = __riscv_vmv_x_s_i64m2_i64 (vw14);
+ double sum15 = __riscv_vmv_x_s_i64m2_i64 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c
new file mode 100644
index 0000000..535ea7c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c
@@ -0,0 +1,119 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+ double sum5, double sum6, double sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl);
+ vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl);
+ vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl);
+ vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl);
+ vint64m2_t vw4 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v4, vl);
+ vint64m2_t vw5 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v5, vl);
+ vint64m2_t vw6 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v6, vl);
+ vint64m2_t vw7 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+ double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+ double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+ double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+ double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+ double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+ double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+ double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+double
+foo2 (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl);
+ vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl);
+ vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl);
+ vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl);
+ vint64m2_t vw4 = __riscv_vfwcvt_x_f_v_i64m2 (v4, vl);
+ vint64m2_t vw5 = __riscv_vfwcvt_x_f_v_i64m2 (v5, vl);
+ vint64m2_t vw6 = __riscv_vfwcvt_x_f_v_i64m2 (v6, vl);
+ vint64m2_t vw7 = __riscv_vfwcvt_x_f_v_i64m2 (v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+ double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+ double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+ double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+ double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+ double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+ double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+ double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c
new file mode 100644
index 0000000..3d46e4a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c
@@ -0,0 +1,86 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl);
+ vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl);
+ vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl);
+ vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+ double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+ double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+ double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+double
+foo2 (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl);
+ vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl);
+ vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl);
+ vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+ double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+ double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+ double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c
new file mode 100644
index 0000000..7064471
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c
@@ -0,0 +1,106 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+ double sum5, double sum6, double sum7, double sum8, double sum9,
+ double sum10, double sum11, double sum12, double sum13, double sum14,
+ double sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m2_t vw0 = __riscv_vfwcvt_f_f_v_f64m2 (v0, vl);
+ vfloat64m2_t vw1 = __riscv_vfwcvt_f_f_v_f64m2 (v1, vl);
+ vfloat64m2_t vw2 = __riscv_vfwcvt_f_f_v_f64m2 (v2, vl);
+ vfloat64m2_t vw3 = __riscv_vfwcvt_f_f_v_f64m2 (v3, vl);
+ vfloat64m2_t vw4 = __riscv_vfwcvt_f_f_v_f64m2 (v4, vl);
+ vfloat64m2_t vw5 = __riscv_vfwcvt_f_f_v_f64m2 (v5, vl);
+ vfloat64m2_t vw6 = __riscv_vfwcvt_f_f_v_f64m2 (v6, vl);
+ vfloat64m2_t vw7 = __riscv_vfwcvt_f_f_v_f64m2 (v7, vl);
+ vfloat64m2_t vw8 = __riscv_vfwcvt_f_f_v_f64m2 (v8, vl);
+ vfloat64m2_t vw9 = __riscv_vfwcvt_f_f_v_f64m2 (v9, vl);
+ vfloat64m2_t vw10 = __riscv_vfwcvt_f_f_v_f64m2 (v10, vl);
+ vfloat64m2_t vw11 = __riscv_vfwcvt_f_f_v_f64m2 (v11, vl);
+ vfloat64m2_t vw12 = __riscv_vfwcvt_f_f_v_f64m2 (v12, vl);
+ vfloat64m2_t vw13 = __riscv_vfwcvt_f_f_v_f64m2 (v13, vl);
+ vfloat64m2_t vw14 = __riscv_vfwcvt_f_f_v_f64m2 (v14, vl);
+ vfloat64m2_t vw15 = __riscv_vfwcvt_f_f_v_f64m2 (v15, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vfmv_f_s_f64m2_f64 (vw0);
+ double sum1 = __riscv_vfmv_f_s_f64m2_f64 (vw1);
+ double sum2 = __riscv_vfmv_f_s_f64m2_f64 (vw2);
+ double sum3 = __riscv_vfmv_f_s_f64m2_f64 (vw3);
+ double sum4 = __riscv_vfmv_f_s_f64m2_f64 (vw4);
+ double sum5 = __riscv_vfmv_f_s_f64m2_f64 (vw5);
+ double sum6 = __riscv_vfmv_f_s_f64m2_f64 (vw6);
+ double sum7 = __riscv_vfmv_f_s_f64m2_f64 (vw7);
+ double sum8 = __riscv_vfmv_f_s_f64m2_f64 (vw8);
+ double sum9 = __riscv_vfmv_f_s_f64m2_f64 (vw9);
+ double sum10 = __riscv_vfmv_f_s_f64m2_f64 (vw10);
+ double sum11 = __riscv_vfmv_f_s_f64m2_f64 (vw11);
+ double sum12 = __riscv_vfmv_f_s_f64m2_f64 (vw12);
+ double sum13 = __riscv_vfmv_f_s_f64m2_f64 (vw13);
+ double sum14 = __riscv_vfmv_f_s_f64m2_f64 (vw14);
+ double sum15 = __riscv_vfmv_f_s_f64m2_f64 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
+
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c
new file mode 100644
index 0000000..ab56d0d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+ double sum5, double sum6, double sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m2_t v0 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v1 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v2 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v3 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v4 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v5 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v6 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v7 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m4_t vw0 = __riscv_vfwcvt_f_f_v_f64m4 (v0, vl);
+ vfloat64m4_t vw1 = __riscv_vfwcvt_f_f_v_f64m4 (v1, vl);
+ vfloat64m4_t vw2 = __riscv_vfwcvt_f_f_v_f64m4 (v2, vl);
+ vfloat64m4_t vw3 = __riscv_vfwcvt_f_f_v_f64m4 (v3, vl);
+ vfloat64m4_t vw4 = __riscv_vfwcvt_f_f_v_f64m4 (v4, vl);
+ vfloat64m4_t vw5 = __riscv_vfwcvt_f_f_v_f64m4 (v5, vl);
+ vfloat64m4_t vw6 = __riscv_vfwcvt_f_f_v_f64m4 (v6, vl);
+ vfloat64m4_t vw7 = __riscv_vfwcvt_f_f_v_f64m4 (v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vfmv_f_s_f64m4_f64 (vw0);
+ double sum1 = __riscv_vfmv_f_s_f64m4_f64 (vw1);
+ double sum2 = __riscv_vfmv_f_s_f64m4_f64 (vw2);
+ double sum3 = __riscv_vfmv_f_s_f64m4_f64 (vw3);
+ double sum4 = __riscv_vfmv_f_s_f64m4_f64 (vw4);
+ double sum5 = __riscv_vfmv_f_s_f64m4_f64 (vw5);
+ double sum6 = __riscv_vfmv_f_s_f64m4_f64 (vw6);
+ double sum7 = __riscv_vfmv_f_s_f64m4_f64 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c
new file mode 100644
index 0000000..82f369c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m4_t v0 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+ vfloat32m4_t v1 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+ vfloat32m4_t v2 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+ vfloat32m4_t v3 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m8_t vw0 = __riscv_vfwcvt_f_f_v_f64m8 (v0, vl);
+ vfloat64m8_t vw1 = __riscv_vfwcvt_f_f_v_f64m8 (v1, vl);
+ vfloat64m8_t vw2 = __riscv_vfwcvt_f_f_v_f64m8 (v2, vl);
+ vfloat64m8_t vw3 = __riscv_vfwcvt_f_f_v_f64m8 (v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vfmv_f_s_f64m8_f64 (vw0);
+ double sum1 = __riscv_vfmv_f_s_f64m8_f64 (vw1);
+ double sum2 = __riscv_vfmv_f_s_f64m8_f64 (vw2);
+ double sum3 = __riscv_vfmv_f_s_f64m8_f64 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */