Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ada/Makefile.rtl | 1
-rw-r--r--  gcc/ada/init.c | 2
-rw-r--r--  gcc/ada/libgnat/s-dorepr__freebsd.adb | 172
-rw-r--r--  gcc/common/config/riscv/riscv-common.cc | 1
-rw-r--r--  gcc/config/aarch64/aarch64.cc | 29
-rw-r--r--  gcc/config/avr/avr.md | 4
-rw-r--r--  gcc/config/riscv/constraints.md | 10
-rw-r--r--  gcc/config/riscv/generic-vector-ooo.md | 4
-rw-r--r--  gcc/config/riscv/genrvv-type-indexer.cc | 9
-rw-r--r--  gcc/config/riscv/riscv-c.cc | 3
-rw-r--r--  gcc/config/riscv/riscv-vector-builtins-shapes.cc | 48
-rw-r--r--  gcc/config/riscv/riscv-vector-builtins-shapes.h | 2
-rw-r--r--  gcc/config/riscv/riscv-vector-builtins-types.def | 40
-rw-r--r--  gcc/config/riscv/riscv-vector-builtins.cc | 362
-rw-r--r--  gcc/config/riscv/riscv-vector-builtins.def | 30
-rw-r--r--  gcc/config/riscv/riscv-vector-builtins.h | 8
-rw-r--r--  gcc/config/riscv/riscv.cc | 37
-rw-r--r--  gcc/config/riscv/riscv.md | 5
-rw-r--r--  gcc/config/riscv/sifive-vector-builtins-bases.cc | 78
-rw-r--r--  gcc/config/riscv/sifive-vector-builtins-bases.h | 3
-rw-r--r--  gcc/config/riscv/sifive-vector-builtins-functions.def | 45
-rw-r--r--  gcc/config/riscv/sifive-vector.md | 871
-rw-r--r--  gcc/config/riscv/vector-iterators.md | 49
-rw-r--r--  gcc/config/riscv/vector.md | 3
-rw-r--r--  gcc/cp/pt.cc | 20
-rw-r--r--  gcc/testsuite/g++.dg/cpp2a/lambda-targ15.C | 17
-rw-r--r--  gcc/testsuite/g++.target/riscv/pr119832.C | 27
-rw-r--r--  gcc/testsuite/gcc.dg/torture/pr120006.c | 31
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-thread-7.c | 4
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/ssa-thread-23.c | 19
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/pr115258.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/cond_arith_6.c | 3
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/ldst_ptrue_128_to_neon.c | 48
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/pcs/return_4_128.c | 39
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/pcs/return_5_128.c | 39
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/pcs/struct_3_128.c | 56
-rw-r--r--  gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-1.c | 7
-rw-r--r--  gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-2.c | 7
-rw-r--r--  gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-3.c | 7
-rw-r--r--  gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-4.c | 7
-rw-r--r--  gcc/testsuite/gcc.target/avr/torture/pr119989-memx-1.c | 7
-rw-r--r--  gcc/testsuite/gcc.target/avr/torture/pr119989-memx-2.c | 7
-rw-r--r--  gcc/testsuite/gcc.target/avr/torture/pr119989-memx-3.c | 7
-rw-r--r--  gcc/testsuite/gcc.target/avr/torture/pr119989-memx-4.c | 7
-rw-r--r--  gcc/testsuite/gcc.target/avr/torture/pr119989.h | 37
-rw-r--r--  gcc/testsuite/gcc.target/riscv/predef-19.c | 34
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_f.c | 88
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_i.c | 132
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_v.c | 107
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_x.c | 138
-rw-r--r--  gcc/tree-ssa-structalias.cc | 36
-rw-r--r--  gcc/tree-ssa-threadbackward.cc | 8
-rw-r--r--  gcc/tree-vect-slp.cc | 36
-rw-r--r--  gcc/tree-vectorizer.h | 20
54 files changed, 2655 insertions, 158 deletions
diff --git a/gcc/ada/Makefile.rtl b/gcc/ada/Makefile.rtl
index 61600ad..cb41e68 100644
--- a/gcc/ada/Makefile.rtl
+++ b/gcc/ada/Makefile.rtl
@@ -1900,6 +1900,7 @@ ifeq ($(strip $(filter-out %86 freebsd%,$(target_cpu) $(target_os))),)
$(TRASYM_DWARF_UNIX_PAIRS) \
$(ATOMICS_TARGET_PAIRS) \
$(X86_TARGET_PAIRS) \
+ s-dorepr.adb<libgnat/s-dorepr__freebsd.adb \
system.ads<libgnat/system-freebsd.ads
GNATLIB_SHARED = gnatlib-shared-dual
diff --git a/gcc/ada/init.c b/gcc/ada/init.c
index c0fb749..1be90ec 100644
--- a/gcc/ada/init.c
+++ b/gcc/ada/init.c
@@ -1686,8 +1686,8 @@ __gnat_is_vms_v7 (void)
#include <sys/ucontext.h>
#include <unistd.h>
-#ifdef __CHERI__
static void
+#ifdef __CHERI__
__gnat_error_handler (int sig,
siginfo_t *si,
void *ucontext ATTRIBUTE_UNUSED)
diff --git a/gcc/ada/libgnat/s-dorepr__freebsd.adb b/gcc/ada/libgnat/s-dorepr__freebsd.adb
new file mode 100644
index 0000000..bf8388b
--- /dev/null
+++ b/gcc/ada/libgnat/s-dorepr__freebsd.adb
@@ -0,0 +1,172 @@
+------------------------------------------------------------------------------
+-- --
+-- GNAT COMPILER COMPONENTS --
+-- --
+-- S Y S T E M . D O U B L E _ R E A L . P R O D U C T --
+-- --
+-- B o d y --
+-- --
+-- Copyright (C) 2021-2025, Free Software Foundation, Inc. --
+-- --
+-- GNAT is free software; you can redistribute it and/or modify it under --
+-- terms of the GNU General Public License as published by the Free Soft- --
+-- ware Foundation; either version 3, or (at your option) any later ver- --
+-- sion. GNAT is distributed in the hope that it will be useful, but WITH- --
+-- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --
+-- or FITNESS FOR A PARTICULAR PURPOSE. --
+-- --
+-- As a special exception under Section 7 of GPL version 3, you are granted --
+-- additional permissions described in the GCC Runtime Library Exception, --
+-- version 3.1, as published by the Free Software Foundation. --
+-- --
+-- You should have received a copy of the GNU General Public License and --
+-- a copy of the GCC Runtime Library Exception along with this program; --
+-- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see --
+-- <http://www.gnu.org/licenses/>. --
+-- --
+-- GNAT was originally developed by the GNAT team at New York University. --
+-- Extensive contributions were provided by Ada Core Technologies Inc. --
+-- --
+------------------------------------------------------------------------------
+
+-- This is the x86/FreeBSD version of the separate package body
+
+with Interfaces; use Interfaces;
+
+separate (System.Double_Real)
+
+package body Product is
+
+ procedure Split (N : Num; Hi : out Num; Lo : out Num);
+ -- Compute high part and low part of N
+
+ -----------
+ -- Split --
+ -----------
+
+ -- We use a bit manipulation algorithm instead of Veltkamp's splitting
+ -- because it is faster and has the property that the magnitude of the
+ -- high part is never larger than that of the input number, which will
+ -- avoid spurious overflows in the Two_Prod algorithm.
+
+ -- See the recent paper by Claude-Pierre Jeannerod, Jean-Michel Muller
+ -- and Paul Zimmermann: On various ways to split a floating-point number
+ -- ARITH 2018 - 25th IEEE Symposium on Computer Arithmetic, Jun 2018,
+ -- Amherst (MA), United States, pages 53-60.
+
+ procedure Split (N : Num; Hi : out Num; Lo : out Num) is
+ X : Num;
+
+ begin
+ -- Spill the input into the appropriate (maybe larger) bit container,
+ -- mask out the low bits and reload the modified value.
+
+ case Num'Machine_Mantissa is
+ when 24 =>
+ declare
+ Rep32 : aliased Interfaces.Unsigned_32;
+ Temp : Num := N with Address => Rep32'Address;
+ pragma Annotate (CodePeer, Modified, Rep32);
+
+ begin
+ -- Mask out the low 12 bits
+
+ Rep32 := Rep32 and 16#FFFFF000#;
+
+ X := Temp;
+ end;
+
+ when 53 =>
+ declare
+ Rep64 : aliased array (1 .. 2) of Interfaces.Unsigned_64;
+ Temp : Num := N with Address => Rep64'Address;
+ pragma Annotate (CodePeer, Modified, Rep64);
+
+ begin
+ -- Mask out the low 27 bits
+
+ Rep64 (1) := Rep64 (1) and 16#FFFFFFFFF8000000#;
+
+ X := Temp;
+ end;
+
+ when 64 =>
+ declare
+ Rep80 : aliased array (1 .. 2) of Interfaces.Unsigned_64;
+ Temp : Num := N with Address => Rep80'Address;
+ pragma Annotate (CodePeer, Modified, Rep80);
+
+ begin
+ -- Mask out the low 32 bits
+
+ if System.Default_Bit_Order = High_Order_First then
+ Rep80 (1) := Rep80 (1) and 16#FFFFFFFFFFFF0000#;
+ Rep80 (2) := Rep80 (2) and 16#0000FFFFFFFFFFFF#;
+ else
+ Rep80 (1) := Rep80 (1) and 16#FFFFFFFF00000000#;
+ end if;
+
+ X := Temp;
+ end;
+
+ when others =>
+ raise Program_Error;
+ end case;
+
+ -- Deal with denormalized numbers
+
+ if X = 0.0 then
+ Hi := N;
+ Lo := 0.0;
+ else
+ Hi := X;
+ Lo := N - X;
+ end if;
+ end Split;
+
+ --------------
+ -- Two_Prod --
+ --------------
+
+ function Two_Prod (A, B : Num) return Double_T is
+ P : constant Num := A * B;
+
+ Ahi, Alo, Bhi, Blo, E : Num;
+
+ begin
+ if Is_Infinity (P) or else Is_Zero (P) then
+ return (P, 0.0);
+
+ else
+ Split (A, Ahi, Alo);
+ Split (B, Bhi, Blo);
+
+ E := ((Ahi * Bhi - P) + Ahi * Blo + Alo * Bhi) + Alo * Blo;
+
+ return (P, E);
+ end if;
+ end Two_Prod;
+
+ -------------
+ -- Two_Sqr --
+ -------------
+
+ function Two_Sqr (A : Num) return Double_T is
+ Q : constant Num := A * A;
+
+ Hi, Lo, E : Num;
+
+ begin
+ if Is_Infinity (Q) or else Is_Zero (Q) then
+ return (Q, 0.0);
+
+ else
+ Split (A, Hi, Lo);
+
+ E := ((Hi * Hi - Q) + 2.0 * Hi * Lo) + Lo * Lo;
+
+ return (Q, E);
+ end if;
+ end Two_Sqr;
+
+end Product;
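
(Not part of the patch: a minimal C++ sketch of what the new Ada body above does for the 53-bit "double" case. The mask value mirrors the 16#FFFFFFFFF8000000# constant in Split, and the error term mirrors Two_Prod; the function names and the small driver are illustrative assumptions only.)

```cpp
// Hypothetical sketch of the bit-masking split and Two_Prod error-free
// product for IEEE double (53-bit mantissa).  Assumes nothing beyond
// standard C++; not taken from the patch.
#include <cstdint>
#include <cstdio>
#include <cstring>

static void split (double n, double &hi, double &lo)
{
  std::uint64_t rep;
  std::memcpy (&rep, &n, sizeof rep);   // spill into a 64-bit container
  rep &= 0xFFFFFFFFF8000000ULL;         // mask out the low 27 bits
  double x;
  std::memcpy (&x, &rep, sizeof x);     // reload the truncated value
  if (x == 0.0)                         // denormalized (or zero) input
    { hi = n; lo = 0.0; }
  else                                  // n == hi + lo holds exactly
    { hi = x; lo = n - x; }
}

int main ()
{
  double a = 1.0 / 3.0, b = 3.0;
  double p = a * b;                     // rounded product
  double ahi, alo, bhi, blo;
  split (a, ahi, alo);
  split (b, bhi, blo);
  // Same error term as Two_Prod: a * b == p + e exactly, barring overflow.
  double e = ((ahi * bhi - p) + ahi * blo + alo * bhi) + alo * blo;
  std::printf ("p = %.17g, e = %.17g\n", p, e);
  return 0;
}
```

As the comment in the new file notes, masking never increases the magnitude of the high part relative to the input, which is why this split avoids the spurious overflows that Veltkamp splitting can trigger in Two_Prod.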
diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc
index 15df22d..145a0f2 100644
--- a/gcc/common/config/riscv/riscv-common.cc
+++ b/gcc/common/config/riscv/riscv-common.cc
@@ -137,6 +137,7 @@ static const riscv_implied_info_t riscv_implied_info[] =
{"zve64f", "f"},
{"zve64d", "d"},
+ {"zve32x", "zicsr"},
{"zve32x", "zvl32b"},
{"zve32f", "zve32x"},
{"zve32f", "zvl32b"},
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index f7bccf5..fff8d9d 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -6416,13 +6416,30 @@ aarch64_stack_protect_canary_mem (machine_mode mode, rtx decl_rtl,
void
aarch64_emit_sve_pred_move (rtx dest, rtx pred, rtx src)
{
- expand_operand ops[3];
machine_mode mode = GET_MODE (dest);
- create_output_operand (&ops[0], dest, mode);
- create_input_operand (&ops[1], pred, GET_MODE(pred));
- create_input_operand (&ops[2], src, mode);
- temporary_volatile_ok v (true);
- expand_insn (code_for_aarch64_pred_mov (mode), 3, ops);
+ if ((MEM_P (dest) || MEM_P (src))
+ && known_eq (GET_MODE_SIZE (mode), 16)
+ && aarch64_classify_vector_mode (mode) == VEC_SVE_DATA
+ && !BYTES_BIG_ENDIAN)
+ {
+ if (MEM_P (src))
+ {
+ rtx tmp = force_reg (V16QImode, adjust_address (src, V16QImode, 0));
+ emit_move_insn (dest, lowpart_subreg (mode, tmp, V16QImode));
+ }
+ else
+ emit_move_insn (adjust_address (dest, V16QImode, 0),
+ force_lowpart_subreg (V16QImode, src, mode));
+ }
+ else
+ {
+ expand_operand ops[3];
+ create_output_operand (&ops[0], dest, mode);
+ create_input_operand (&ops[1], pred, GET_MODE(pred));
+ create_input_operand (&ops[2], src, mode);
+ temporary_volatile_ok v (true);
+ expand_insn (code_for_aarch64_pred_mov (mode), 3, ops);
+ }
}
/* Expand a pre-RA SVE data move from SRC to DEST in which at least one
diff --git a/gcc/config/avr/avr.md b/gcc/config/avr/avr.md
index 1c4e44d..01b8e4b 100644
--- a/gcc/config/avr/avr.md
+++ b/gcc/config/avr/avr.md
@@ -718,6 +718,8 @@
"&& reload_completed"
[(parallel [(set (reg:MOVMODE REG_22)
(match_dup 0))
+ (clobber (reg:QI REG_21))
+ (clobber (reg:HI REG_Z))
(clobber (reg:CC REG_CC))])]
{
operands[0] = SET_SRC (single_set (curr_insn));
@@ -727,6 +729,8 @@
[(set (reg:MOVMODE REG_22)
(mem:MOVMODE (lo_sum:PSI (reg:QI REG_21)
(reg:HI REG_Z))))
+ (clobber (reg:QI REG_21))
+ (clobber (reg:HI REG_Z))
(clobber (reg:CC REG_CC))]
"reload_completed
&& (avr_load_libgcc_insn_p (insn, ADDR_SPACE_MEMX, true)
diff --git a/gcc/config/riscv/constraints.md b/gcc/config/riscv/constraints.md
index ba3c6e6..18556a5 100644
--- a/gcc/config/riscv/constraints.md
+++ b/gcc/config/riscv/constraints.md
@@ -311,3 +311,13 @@
"Shifting immediate for SIMD shufflei3."
(and (match_code "const_int")
(match_test "IN_RANGE (ival, -64, -1)")))
+
+(define_constraint "Ou01"
+ "A 1-bit unsigned immediate."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 0, 1)")))
+
+(define_constraint "Ou02"
+ "A 2-bit unsigned immediate."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 0, 3)")))
diff --git a/gcc/config/riscv/generic-vector-ooo.md b/gcc/config/riscv/generic-vector-ooo.md
index cb71941..ab9e57f 100644
--- a/gcc/config/riscv/generic-vector-ooo.md
+++ b/gcc/config/riscv/generic-vector-ooo.md
@@ -141,3 +141,7 @@
(eq_attr "type" "rdvlenb,rdvl")
"vxu_ooo_issue,vxu_ooo_issue")
+;; Vector sf_vcp.
+(define_insn_reservation "vec_sf_vcp" 2
+ (eq_attr "type" "sf_vc,sf_vc_se")
+ "vxu_ooo_issue")
diff --git a/gcc/config/riscv/genrvv-type-indexer.cc b/gcc/config/riscv/genrvv-type-indexer.cc
index 6de23cb6..2fd429a 100644
--- a/gcc/config/riscv/genrvv-type-indexer.cc
+++ b/gcc/config/riscv/genrvv-type-indexer.cc
@@ -303,6 +303,8 @@ main (int argc, const char **argv)
fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ %s,\n", eew,
inttype (eew, LMUL1_LOG2, /* unsigned_p */true).c_str ());
+ fprintf (fp, " /*X2*/ INVALID,\n");
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
@@ -426,6 +428,9 @@ main (int argc, const char **argv)
fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n",
eew);
+ fprintf (fp, " /*X2*/ %s,\n",
+ inttype (sew * 2, lmul_log2 + 1, /*unsigned_p*/ true).c_str ());
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
@@ -501,6 +506,8 @@ main (int argc, const char **argv)
for (unsigned eew : EEW_SIZE_LIST)
fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n", eew);
+ fprintf (fp, " /*X2*/ INVALID,\n");
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
@@ -588,6 +595,8 @@ main (int argc, const char **argv)
fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n",
eew);
+ fprintf (fp, " /*X2*/ INVALID,\n");
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
diff --git a/gcc/config/riscv/riscv-c.cc b/gcc/config/riscv/riscv-c.cc
index 7912b10..ab6dc83 100644
--- a/gcc/config/riscv/riscv-c.cc
+++ b/gcc/config/riscv/riscv-c.cc
@@ -279,7 +279,8 @@ riscv_pragma_intrinsic (cpp_reader *)
const char *name = TREE_STRING_POINTER (x);
if (strcmp (name, "vector") == 0
- || strcmp (name, "xtheadvector") == 0)
+ || strcmp (name, "xtheadvector") == 0
+ || strcmp (name, "xsfvcp") == 0)
{
struct pragma_intrinsic_flags backup_flags;
diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.cc b/gcc/config/riscv/riscv-vector-builtins-shapes.cc
index fc21b20..b855d4c 100644
--- a/gcc/config/riscv/riscv-vector-builtins-shapes.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-shapes.cc
@@ -1343,6 +1343,52 @@ struct sf_vfnrclip_def : public build_base
}
};
+/* sf_vcix_se_def class. */
+struct sf_vcix_se_def : public build_base
+{
+ char *get_name (function_builder &b, const function_instance &instance,
+ bool overloaded_p) const override
+ {
+ /* Return nullptr if it is overloaded. */
+ if (overloaded_p)
+ return nullptr;
+
+ b.append_base_name (instance.base_name);
+
+ /* vop --> vop<op>_se_<type>. */
+ if (!overloaded_p)
+ {
+ b.append_name (operand_suffixes[instance.op_info->op]);
+ b.append_name ("_se");
+ b.append_name (type_suffixes[instance.type.index].vector);
+ }
+ return b.finish_name ();
+ }
+};
+
+/* sf_vcix_def class. */
+struct sf_vcix_def : public build_base
+{
+ char *get_name (function_builder &b, const function_instance &instance,
+ bool overloaded_p) const override
+ {
+ /* Return nullptr if it is overloaded. */
+ if (overloaded_p)
+ return nullptr;
+
+ b.append_base_name (instance.base_name);
+
+ /* vop --> vop_<type>. */
+ if (!overloaded_p)
+ {
+ b.append_name (operand_suffixes[instance.op_info->op]);
+ b.append_name (type_suffixes[instance.type.index].vector);
+ }
+ return b.finish_name ();
+ }
+};
+
+
SHAPE(vsetvl, vsetvl)
SHAPE(vsetvl, vsetvlmax)
SHAPE(loadstore, loadstore)
@@ -1379,4 +1425,6 @@ SHAPE(crypto_vi, crypto_vi)
SHAPE(crypto_vv_no_op_type, crypto_vv_no_op_type)
SHAPE (sf_vqmacc, sf_vqmacc)
SHAPE (sf_vfnrclip, sf_vfnrclip)
+SHAPE(sf_vcix_se, sf_vcix_se)
+SHAPE(sf_vcix, sf_vcix)
} // end namespace riscv_vector
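
(Hypothetical usage sketch, not taken from the new testsuite files in this patch: it shows the intrinsic names the two shapes above would produce, with signatures inferred from the sf_vc_xv/sf_vc_v_xv argument lists added later in the patch. It assumes a riscv_vector.h exposing the Xsfvcp intrinsics and a -march string that enables v and xsfvcp.)

```cpp
// Names built by the shapes above: sf_vcix_se appends "_se" before the type
// suffix (side-effecting form), sf_vcix does not (value-returning form).
// Argument order is assumed from the sf_vc_*_args lists: opcode fields first,
// then the vector/scalar operands, then vl.
#include <riscv_vector.h>

vuint32m1_t demo (vuint32m1_t vs2, uint32_t xs1, size_t vl)
{
  // sf_vcix_se shape: sf_vc -> sf_vc_xv_se_u32m1 (side effects, no result).
  __riscv_sf_vc_xv_se_u32m1 (1, 2, vs2, xs1, vl);

  // sf_vcix shape: sf_vc -> sf_vc_v_xv_u32m1 (returns the coprocessor result).
  return __riscv_sf_vc_v_xv_u32m1 (1, vs2, xs1, vl);
}
```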
diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.h b/gcc/config/riscv/riscv-vector-builtins-shapes.h
index 858799b..2f2636e 100644
--- a/gcc/config/riscv/riscv-vector-builtins-shapes.h
+++ b/gcc/config/riscv/riscv-vector-builtins-shapes.h
@@ -62,6 +62,8 @@ extern const function_shape *const crypto_vv_no_op_type;
/* Sifive vendor extension. */
extern const function_shape *const sf_vqmacc;
extern const function_shape *const sf_vfnrclip;
+extern const function_shape *const sf_vcix_se;
+extern const function_shape *const sf_vcix;
}
} // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins-types.def b/gcc/config/riscv/riscv-vector-builtins-types.def
index 857b637..ade6644 100644
--- a/gcc/config/riscv/riscv-vector-builtins-types.def
+++ b/gcc/config/riscv/riscv-vector-builtins-types.def
@@ -369,6 +369,18 @@ along with GCC; see the file COPYING3. If not see
#define DEF_RVV_XFQF_OPS(TYPE, REQUIRE)
#endif
+/* Use "DEF_RVV_X2_U_OPS" macro include unsigned integer which will
+ be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_X2_U_OPS
+#define DEF_RVV_X2_U_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_X2_WU_OPS" macro include widen unsigned integer which will
+ be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_X2_WU_OPS
+#define DEF_RVV_X2_WU_OPS(TYPE, REQUIRE)
+#endif
+
DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_I_OPS (vint8mf4_t, 0)
DEF_RVV_I_OPS (vint8mf2_t, 0)
@@ -1463,6 +1475,32 @@ DEF_RVV_XFQF_OPS (vint8mf2_t, 0)
DEF_RVV_XFQF_OPS (vint8m1_t, 0)
DEF_RVV_XFQF_OPS (vint8m2_t, 0)
+DEF_RVV_X2_U_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X2_U_OPS (vuint8mf4_t, 0)
+DEF_RVV_X2_U_OPS (vuint8mf2_t, 0)
+DEF_RVV_X2_U_OPS (vuint8m1_t, 0)
+DEF_RVV_X2_U_OPS (vuint8m2_t, 0)
+DEF_RVV_X2_U_OPS (vuint8m4_t, 0)
+DEF_RVV_X2_U_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X2_U_OPS (vuint16mf2_t, 0)
+DEF_RVV_X2_U_OPS (vuint16m1_t, 0)
+DEF_RVV_X2_U_OPS (vuint16m2_t, 0)
+DEF_RVV_X2_U_OPS (vuint16m4_t, 0)
+DEF_RVV_X2_U_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X2_U_OPS (vuint32m1_t, 0)
+DEF_RVV_X2_U_OPS (vuint32m2_t, 0)
+DEF_RVV_X2_U_OPS (vuint32m4_t, 0)
+
+DEF_RVV_X2_WU_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X2_WU_OPS (vuint16mf2_t, 0)
+DEF_RVV_X2_WU_OPS (vuint16m1_t, 0)
+DEF_RVV_X2_WU_OPS (vuint16m2_t, 0)
+DEF_RVV_X2_WU_OPS (vuint16m4_t, 0)
+DEF_RVV_X2_WU_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X2_WU_OPS (vuint32m1_t, 0)
+DEF_RVV_X2_WU_OPS (vuint32m2_t, 0)
+DEF_RVV_X2_WU_OPS (vuint32m4_t, 0)
+
#undef DEF_RVV_I_OPS
#undef DEF_RVV_U_OPS
#undef DEF_RVV_F_OPS
@@ -1519,3 +1557,5 @@ DEF_RVV_XFQF_OPS (vint8m2_t, 0)
#undef DEF_RVV_F32_OPS
#undef DEF_RVV_QMACC_OPS
#undef DEF_RVV_XFQF_OPS
+#undef DEF_RVV_X2_U_OPS
+#undef DEF_RVV_X2_WU_OPS
diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc
index 61dcdab..f3c706bf 100644
--- a/gcc/config/riscv/riscv-vector-builtins.cc
+++ b/gcc/config/riscv/riscv-vector-builtins.cc
@@ -544,6 +544,20 @@ static const rvv_type_info crypto_sew64_ops[] = {
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
+/* A list of unsigned integer types that will be registered for the SiFive
+   Xsfvcp intrinsic functions.  */
+static const rvv_type_info x2_u_ops[] = {
+#define DEF_RVV_X2_U_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of widened unsigned integer types that will be registered for the
+   SiFive Xsfvcp intrinsic functions.  */
+static const rvv_type_info x2_wu_ops[] = {
+#define DEF_RVV_X2_WU_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
/* A list of signed integer will be registered for intrinsic
* functions. */
static const rvv_type_info qmacc_ops[] = {
@@ -805,7 +819,7 @@ static CONSTEXPR const rvv_arg_type_info bf_wwxv_args[]
static CONSTEXPR const rvv_arg_type_info m_args[]
= {rvv_arg_type_info (RVV_BASE_mask), rvv_arg_type_info_end};
-/* A list of args for vector_type func (scalar_type) function. */
+/* A list of args for vector_type func (scalar_type/sf.vc) function. */
static CONSTEXPR const rvv_arg_type_info x_args[]
= {rvv_arg_type_info (RVV_BASE_scalar), rvv_arg_type_info_end};
@@ -1055,6 +1069,161 @@ static CONSTEXPR const rvv_arg_type_info scalar_ptr_size_args[]
rvv_arg_type_info (RVV_BASE_size), rvv_arg_type_info (RVV_BASE_vector),
rvv_arg_type_info_end};
+/* A list of args for vector_type func (sf.vc.x) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_x_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.v.x) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_v_x_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.i) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_i_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+
+/* A list of args for vector_type func (sf.vc.i) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_v_i_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.vv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_vv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.v.vv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_v_vv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.xv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_xv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.v.xv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_v_xv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.iv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_iv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.v.iv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_v_iv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.fv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_fv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar_float),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.v.fv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_v_fv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar_float),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.vvv/sf.vc.v.vvv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_vvv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.xvv/sf.vc.v.xvv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_xvv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.ivv/sf.vc.v.ivv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_ivv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_unsigned_vector),
+ rvv_arg_type_info (RVV_BASE_unsigned_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.fvv/sf.vc.v.fvv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_fvv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar_float),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.vvw/sf.vc.v.vvw) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_vvw_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_x2_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.xvw/sf.vc.v.xvw) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_xvw_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_x2_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.ivw/sf.vc.v.ivw) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_ivw_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_x2_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.fvw/sf.vc.v.fvw) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_fvw_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_x2_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar_float),
+ rvv_arg_type_info_end};
+
/* A list of none preds that will be registered for intrinsic functions. */
static CONSTEXPR const predication_type_index none_preds[]
= {PRED_TYPE_none, NUM_PRED_TYPES};
@@ -3006,6 +3175,174 @@ static CONSTEXPR const rvv_op_info u_vvvv_crypto_sew64_ops
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
vvv_args /* Args */};
+static CONSTEXPR const rvv_op_info sf_vc_x_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_x, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_x_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_x_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_x, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_v_x_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_i_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_i, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_i_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_i_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_i, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_v_i_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_vv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_vv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_vv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_vv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_vv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_v_vv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_xv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_xv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_xv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_xv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_xv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_v_xv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_iv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_iv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_iv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_iv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_iv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_v_iv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_fv_ops
+ = {wextu_ops, /* Types */
+ OP_TYPE_fv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_fv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_fv_ops
+ = {wextu_ops, /* Types */
+ OP_TYPE_v_fv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_v_fv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_vvv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_vvv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_vvv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_vvv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_vvv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_vvv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_xvv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_xvv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_xvv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_xvv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_xvv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_xvv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_ivv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_ivv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_ivv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_ivv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_ivv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_ivv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_fvv_ops
+ = {wextu_ops, /* Types */
+ OP_TYPE_fvv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_fvv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_fvv_ops
+ = {wextu_ops, /* Types */
+ OP_TYPE_v_fvv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_fvv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_vvw_ops
+ = {x2_u_ops, /* Types */
+ OP_TYPE_vvw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_vvw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_vvw_ops
+ = {x2_u_ops, /* Types */
+ OP_TYPE_v_vvw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_x2_vector), /* Return type */
+ sf_vc_vvw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_xvw_ops
+ = {x2_u_ops, /* Types */
+ OP_TYPE_xvw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_xvw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_xvw_ops
+ = {x2_u_ops, /* Types */
+ OP_TYPE_v_xvw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_x2_vector), /* Return type */
+ sf_vc_xvw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_ivw_ops
+ = {x2_u_ops, /* Types */
+ OP_TYPE_ivw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_ivw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_ivw_ops
+ = {x2_u_ops, /* Types */
+ OP_TYPE_v_ivw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_x2_vector), /* Return type */
+ sf_vc_ivw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_fvw_ops
+ = {x2_wu_ops, /* Types */
+ OP_TYPE_fvw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_fvw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_fvw_ops
+ = {x2_wu_ops, /* Types */
+ OP_TYPE_v_fvw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_x2_vector), /* Return type */
+ sf_vc_fvw_args /* Args */};
+
/* A list of all RVV base function types. */
static CONSTEXPR const function_type_info function_types[] = {
#define DEF_RVV_TYPE_INDEX( \
@@ -3022,7 +3359,7 @@ static CONSTEXPR const function_type_info function_types[] = {
SIGNED_EEW16_LMUL1_INTERPRET, SIGNED_EEW32_LMUL1_INTERPRET, \
SIGNED_EEW64_LMUL1_INTERPRET, UNSIGNED_EEW8_LMUL1_INTERPRET, \
UNSIGNED_EEW16_LMUL1_INTERPRET, UNSIGNED_EEW32_LMUL1_INTERPRET, \
- UNSIGNED_EEW64_LMUL1_INTERPRET, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, \
+ UNSIGNED_EEW64_LMUL1_INTERPRET, X2, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT,\
X16_VLMUL_EXT, X32_VLMUL_EXT, X64_VLMUL_EXT, TUPLE_SUBPART) \
{ \
VECTOR_TYPE_##VECTOR, \
@@ -3087,6 +3424,7 @@ static CONSTEXPR const function_type_info function_types[] = {
VECTOR_TYPE_##UNSIGNED_EEW16_LMUL1_INTERPRET, \
VECTOR_TYPE_##UNSIGNED_EEW32_LMUL1_INTERPRET, \
VECTOR_TYPE_##UNSIGNED_EEW64_LMUL1_INTERPRET, \
+ VECTOR_TYPE_##X2, \
VECTOR_TYPE_##X2_VLMUL_EXT, \
VECTOR_TYPE_##X4_VLMUL_EXT, \
VECTOR_TYPE_##X8_VLMUL_EXT, \
@@ -3600,6 +3938,24 @@ rvv_arg_type_info::get_xfqf_float_type (vector_type_index type_idx) const
return NULL_TREE;
}
+tree
+rvv_arg_type_info::get_scalar_float_type (vector_type_index type_idx) const
+{
+ /* Convert vint types to their corresponding scalar float types.
+ Note:
+ - According to riscv-vector-builtins-types.def, the index of an unsigned
+ type is always one greater than its corresponding signed type.
+ - Conversion for vint8 types is not required. */
+ if (type_idx >= VECTOR_TYPE_vint16mf4_t && type_idx <= VECTOR_TYPE_vuint16m8_t)
+ return builtin_types[VECTOR_TYPE_vfloat16m1_t].scalar;
+ else if (type_idx >= VECTOR_TYPE_vint32mf2_t && type_idx <= VECTOR_TYPE_vuint32m8_t)
+ return builtin_types[VECTOR_TYPE_vfloat32m1_t].scalar;
+ else if (type_idx >= VECTOR_TYPE_vint64m1_t && type_idx <= VECTOR_TYPE_vuint64m8_t)
+ return builtin_types[VECTOR_TYPE_vfloat64m1_t].scalar;
+ else
+ return NULL_TREE;
+}
+
vector_type_index
rvv_arg_type_info::get_function_type_index (vector_type_index type_idx) const
{
@@ -3758,7 +4114,7 @@ function_instance::modifies_global_state_p () const
return true;
/* Handle direct modifications of global state. */
- return flags & (CP_WRITE_MEMORY | CP_WRITE_CSR);
+ return flags & (CP_WRITE_MEMORY | CP_WRITE_CSR | CP_USE_COPROCESSORS);
}
/* Return true if calls to the function could raise a signal. */
diff --git a/gcc/config/riscv/riscv-vector-builtins.def b/gcc/config/riscv/riscv-vector-builtins.def
index 3a62869..be3fb1a 100644
--- a/gcc/config/riscv/riscv-vector-builtins.def
+++ b/gcc/config/riscv/riscv-vector-builtins.def
@@ -82,7 +82,7 @@ along with GCC; see the file COPYING3. If not see
SIGNED_EEW16_LMUL1_INTERPRET, SIGNED_EEW32_LMUL1_INTERPRET, \
SIGNED_EEW64_LMUL1_INTERPRET, UNSIGNED_EEW8_LMUL1_INTERPRET, \
UNSIGNED_EEW16_LMUL1_INTERPRET, UNSIGNED_EEW32_LMUL1_INTERPRET, \
- UNSIGNED_EEW64_LMUL1_INTERPRET, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, \
+ UNSIGNED_EEW64_LMUL1_INTERPRET, X2, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT,\
X16_VLMUL_EXT, X32_VLMUL_EXT, X64_VLMUL_EXT, TUPLE_SUBPART)
#endif
@@ -637,6 +637,32 @@ DEF_RVV_OP_TYPE (xu_w)
DEF_RVV_OP_TYPE (s)
DEF_RVV_OP_TYPE (4x8x4)
DEF_RVV_OP_TYPE (2x8x2)
+DEF_RVV_OP_TYPE (v_x)
+DEF_RVV_OP_TYPE (i)
+DEF_RVV_OP_TYPE (v_i)
+DEF_RVV_OP_TYPE (xv)
+DEF_RVV_OP_TYPE (iv)
+DEF_RVV_OP_TYPE (fv)
+DEF_RVV_OP_TYPE (vvv)
+DEF_RVV_OP_TYPE (xvv)
+DEF_RVV_OP_TYPE (ivv)
+DEF_RVV_OP_TYPE (fvv)
+DEF_RVV_OP_TYPE (vvw)
+DEF_RVV_OP_TYPE (xvw)
+DEF_RVV_OP_TYPE (ivw)
+DEF_RVV_OP_TYPE (fvw)
+DEF_RVV_OP_TYPE (v_vv)
+DEF_RVV_OP_TYPE (v_xv)
+DEF_RVV_OP_TYPE (v_iv)
+DEF_RVV_OP_TYPE (v_fv)
+DEF_RVV_OP_TYPE (v_vvv)
+DEF_RVV_OP_TYPE (v_xvv)
+DEF_RVV_OP_TYPE (v_ivv)
+DEF_RVV_OP_TYPE (v_fvv)
+DEF_RVV_OP_TYPE (v_vvw)
+DEF_RVV_OP_TYPE (v_xvw)
+DEF_RVV_OP_TYPE (v_ivw)
+DEF_RVV_OP_TYPE (v_fvw)
DEF_RVV_PRED_TYPE (ta)
DEF_RVV_PRED_TYPE (tu)
@@ -720,6 +746,7 @@ DEF_RVV_BASE_TYPE (unsigned_eew8_lmul1_interpret, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (unsigned_eew16_lmul1_interpret, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (unsigned_eew32_lmul1_interpret, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (unsigned_eew64_lmul1_interpret, get_vector_type (type_idx))
+DEF_RVV_BASE_TYPE (x2_vector, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (vlmul_ext_x2, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (vlmul_ext_x4, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (vlmul_ext_x8, get_vector_type (type_idx))
@@ -729,6 +756,7 @@ DEF_RVV_BASE_TYPE (vlmul_ext_x64, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (size_ptr, build_pointer_type (size_type_node))
DEF_RVV_BASE_TYPE (tuple_subpart, get_tuple_subpart_type (type_idx))
DEF_RVV_BASE_TYPE (xfqf_float, get_xfqf_float_type (type_idx))
+DEF_RVV_BASE_TYPE (scalar_float, get_scalar_float_type (type_idx))
DEF_RVV_VXRM_ENUM (RNU, VXRM_RNU)
DEF_RVV_VXRM_ENUM (RNE, VXRM_RNE)
diff --git a/gcc/config/riscv/riscv-vector-builtins.h b/gcc/config/riscv/riscv-vector-builtins.h
index 42ba905..ffc2893 100644
--- a/gcc/config/riscv/riscv-vector-builtins.h
+++ b/gcc/config/riscv/riscv-vector-builtins.h
@@ -130,6 +130,7 @@ enum required_ext
XSFVQMACCQOQ_EXT, /* XSFVQMACCQOQ extension */
XSFVQMACCDOD_EXT, /* XSFVQMACCDOD extension */
XSFVFNRCLIPXFQF_EXT, /* XSFVFNRCLIPXFQF extension */
+ XSFVCP_EXT, /* XSFVCP extension*/
/* Please update below to isa_name func when add or remove enum type(s). */
};
@@ -169,6 +170,8 @@ static inline const char * required_ext_to_isa_name (enum required_ext required)
return "xsfvqmaccdod";
case XSFVFNRCLIPXFQF_EXT:
return "xsfvfnrclipxfqf";
+ case XSFVCP_EXT:
+ return "xsfvcp";
default:
gcc_unreachable ();
}
@@ -212,6 +215,8 @@ static inline bool required_extensions_specified (enum required_ext required)
return TARGET_XSFVQMACCDOD;
case XSFVFNRCLIPXFQF_EXT:
return TARGET_XSFVFNRCLIPXFQF;
+ case XSFVCP_EXT:
+ return TARGET_XSFVCP;
default:
gcc_unreachable ();
}
@@ -297,6 +302,7 @@ struct rvv_arg_type_info
tree get_tree_type (vector_type_index) const;
tree get_tuple_subpart_type (vector_type_index) const;
tree get_xfqf_float_type (vector_type_index) const;
+ tree get_scalar_float_type (vector_type_index) const;
};
/* Static information for each operand. */
@@ -359,6 +365,8 @@ struct function_group_info
return TARGET_XSFVQMACCDOD;
case XSFVFNRCLIPXFQF_EXT:
return TARGET_XSFVFNRCLIPXFQF;
+ case XSFVCP_EXT:
+ return TARGET_XSFVCP;
default:
gcc_unreachable ();
}
diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index c53e0dd..ed635ab 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -12273,6 +12273,41 @@ riscv_mode_needed (int entity, rtx_insn *insn, HARD_REG_SET)
}
}
+/* Return TRUE if the rounding mode is dynamic. */
+
+static bool
+riscv_dynamic_frm_mode_p (int mode)
+{
+ return mode == riscv_vector::FRM_DYN
+ || mode == riscv_vector::FRM_DYN_CALL
+ || mode == riscv_vector::FRM_DYN_EXIT;
+}
+
+/* Implement TARGET_MODE_CONFLUENCE. */
+
+static int
+riscv_mode_confluence (int entity, int mode1, int mode2)
+{
+ switch (entity)
+ {
+ case RISCV_VXRM:
+ return VXRM_MODE_NONE;
+ case RISCV_FRM:
+ {
+ /* FRM_DYN, FRM_DYN_CALL and FRM_DYN_EXIT are all compatible.
+ Although we already try to set the mode needed to FRM_DYN after a
+ function call, there are still some corner cases where both FRM_DYN
+ and FRM_DYN_CALL may appear on incoming edges. */
+ if (riscv_dynamic_frm_mode_p (mode1)
+ && riscv_dynamic_frm_mode_p (mode2))
+ return riscv_vector::FRM_DYN;
+ return riscv_vector::FRM_NONE;
+ }
+ default:
+ gcc_unreachable ();
+ }
+}
+
/* Return TRUE if an insn is an asm. */
static bool
@@ -14356,6 +14391,8 @@ bool need_shadow_stack_push_pop_p ()
#define TARGET_MODE_EMIT riscv_emit_mode_set
#undef TARGET_MODE_NEEDED
#define TARGET_MODE_NEEDED riscv_mode_needed
+#undef TARGET_MODE_CONFLUENCE
+#define TARGET_MODE_CONFLUENCE riscv_mode_confluence
#undef TARGET_MODE_AFTER
#define TARGET_MODE_AFTER riscv_mode_after
#undef TARGET_MODE_ENTRY
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index eec9687..c34eadb 100644
--- a/gcc/config/riscv/riscv.md
+++ b/gcc/config/riscv/riscv.md
@@ -495,6 +495,8 @@
;; SiFive custom extension instructions
;; sf_vqmacc vector matrix integer multiply-add instructions
;; sf_vfnrclip vector fp32 to int8 ranged clip instructions
+;; sf_vc vector coprocessor interface without side effect
+;; sf_vc_se vector coprocessor interface with side effect
(define_attr "type"
"unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore,
mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
@@ -516,7 +518,8 @@
vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,
vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vcpop,vrol,vror,vwsll,
vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,vaeskf1,vaeskf2,vaesz,
- vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,vfncvtbf16,vfwcvtbf16,vfwmaccbf16"
+ vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,vfncvtbf16,vfwcvtbf16,vfwmaccbf16,
+ sf_vc,sf_vc_se"
(cond [(eq_attr "got" "load") (const_string "load")
;; If a doubleword move uses these expensive instructions,
diff --git a/gcc/config/riscv/sifive-vector-builtins-bases.cc b/gcc/config/riscv/sifive-vector-builtins-bases.cc
index 85e1b6f..be530ca 100644
--- a/gcc/config/riscv/sifive-vector-builtins-bases.cc
+++ b/gcc/config/riscv/sifive-vector-builtins-bases.cc
@@ -195,12 +195,89 @@ public:
}
};
+/* Implements SiFive sf.vc. */
+class sf_vc : public function_base
+{
+public:
+
+ unsigned int call_properties (const function_instance &) const override
+ {
+ return CP_USE_COPROCESSORS;
+ }
+
+ rtx expand (function_expander &e) const override
+ {
+ switch (e.op_info->op)
+ {
+ case OP_TYPE_x:
+ return e.use_exact_insn (code_for_sf_vc_x_se (e.vector_mode ()));
+ case OP_TYPE_i:
+ return e.use_exact_insn (code_for_sf_vc_i_se (e.vector_mode ()));
+ case OP_TYPE_vv:
+ return e.use_exact_insn (code_for_sf_vc_vv_se (e.vector_mode ()));
+ case OP_TYPE_xv:
+ return e.use_exact_insn (code_for_sf_vc_xv_se (e.vector_mode ()));
+ case OP_TYPE_iv:
+ return e.use_exact_insn (code_for_sf_vc_iv_se (e.vector_mode ()));
+ case OP_TYPE_fv:
+ return e.use_exact_insn (code_for_sf_vc_fv_se (e.vector_mode ()));
+ case OP_TYPE_v_x:
+ return e.use_exact_insn (code_for_sf_vc_v_x_se (e.vector_mode ()));
+ case OP_TYPE_v_i:
+ return e.use_exact_insn (code_for_sf_vc_v_i_se (e.vector_mode ()));
+ case OP_TYPE_v_vv:
+ return e.use_exact_insn (code_for_sf_vc_v_vv_se (e.vector_mode ()));
+ case OP_TYPE_v_xv:
+ return e.use_exact_insn (code_for_sf_vc_v_xv_se (e.vector_mode ()));
+ case OP_TYPE_v_iv:
+ return e.use_exact_insn (code_for_sf_vc_v_iv_se (e.vector_mode ()));
+ case OP_TYPE_v_fv:
+ return e.use_exact_insn (code_for_sf_vc_v_fv_se (e.vector_mode ()));
+ case OP_TYPE_vvv:
+ return e.use_exact_insn (code_for_sf_vc_vvv_se (e.vector_mode ()));
+ case OP_TYPE_xvv:
+ return e.use_exact_insn (code_for_sf_vc_xvv_se (e.vector_mode ()));
+ case OP_TYPE_ivv:
+ return e.use_exact_insn (code_for_sf_vc_ivv_se (e.vector_mode ()));
+ case OP_TYPE_fvv:
+ return e.use_exact_insn (code_for_sf_vc_fvv_se (e.vector_mode ()));
+ case OP_TYPE_vvw:
+ return e.use_exact_insn (code_for_sf_vc_vvw_se (e.vector_mode ()));
+ case OP_TYPE_xvw:
+ return e.use_exact_insn (code_for_sf_vc_xvw_se (e.vector_mode ()));
+ case OP_TYPE_ivw:
+ return e.use_exact_insn (code_for_sf_vc_ivw_se (e.vector_mode ()));
+ case OP_TYPE_fvw:
+ return e.use_exact_insn (code_for_sf_vc_fvw_se (e.vector_mode ()));
+ case OP_TYPE_v_vvv:
+ return e.use_exact_insn (code_for_sf_vc_v_vvv_se (e.vector_mode ()));
+ case OP_TYPE_v_xvv:
+ return e.use_exact_insn (code_for_sf_vc_v_xvv_se (e.vector_mode ()));
+ case OP_TYPE_v_ivv:
+ return e.use_exact_insn (code_for_sf_vc_v_ivv_se (e.vector_mode ()));
+ case OP_TYPE_v_fvv:
+ return e.use_exact_insn (code_for_sf_vc_v_fvv_se (e.vector_mode ()));
+ case OP_TYPE_v_vvw:
+ return e.use_exact_insn (code_for_sf_vc_v_vvw_se (e.vector_mode ()));
+ case OP_TYPE_v_xvw:
+ return e.use_exact_insn (code_for_sf_vc_v_xvw_se (e.vector_mode ()));
+ case OP_TYPE_v_ivw:
+ return e.use_exact_insn (code_for_sf_vc_v_ivw_se (e.vector_mode ()));
+ case OP_TYPE_v_fvw:
+ return e.use_exact_insn (code_for_sf_vc_v_fvw_se (e.vector_mode ()));
+ default:
+ gcc_unreachable ();
+ }
+ }
+};
+
static CONSTEXPR const sf_vqmacc sf_vqmacc_obj;
static CONSTEXPR const sf_vqmaccu sf_vqmaccu_obj;
static CONSTEXPR const sf_vqmaccsu sf_vqmaccsu_obj;
static CONSTEXPR const sf_vqmaccus sf_vqmaccus_obj;
static CONSTEXPR const sf_vfnrclip_x_f_qf<UNSPEC_SF_VFNRCLIP> sf_vfnrclip_x_f_qf_obj;
static CONSTEXPR const sf_vfnrclip_xu_f_qf<UNSPEC_SF_VFNRCLIPU> sf_vfnrclip_xu_f_qf_obj;
+static CONSTEXPR const sf_vc sf_vc_obj;
/* Declare the function base NAME, pointing it to an instance
of class <NAME>_obj. */
@@ -213,4 +290,5 @@ BASE (sf_vqmaccsu)
BASE (sf_vqmaccus)
BASE (sf_vfnrclip_x_f_qf)
BASE (sf_vfnrclip_xu_f_qf)
+BASE (sf_vc)
} // end namespace riscv_vector
diff --git a/gcc/config/riscv/sifive-vector-builtins-bases.h b/gcc/config/riscv/sifive-vector-builtins-bases.h
index 69e5540..4ec1e30 100644
--- a/gcc/config/riscv/sifive-vector-builtins-bases.h
+++ b/gcc/config/riscv/sifive-vector-builtins-bases.h
@@ -23,6 +23,8 @@
namespace riscv_vector {
+static const unsigned int CP_USE_COPROCESSORS = 1U << 6;
+
namespace bases {
extern const function_base *const sf_vqmacc;
extern const function_base *const sf_vqmaccu;
@@ -30,6 +32,7 @@ extern const function_base *const sf_vqmaccsu;
extern const function_base *const sf_vqmaccus;
extern const function_base *const sf_vfnrclip_x_f_qf;
extern const function_base *const sf_vfnrclip_xu_f_qf;
+extern const function_base *const sf_vc;
}
} // end namespace riscv_vector
diff --git a/gcc/config/riscv/sifive-vector-builtins-functions.def b/gcc/config/riscv/sifive-vector-builtins-functions.def
index e6621c7..f6703ae 100644
--- a/gcc/config/riscv/sifive-vector-builtins-functions.def
+++ b/gcc/config/riscv/sifive-vector-builtins-functions.def
@@ -55,4 +55,49 @@ DEF_RVV_FUNCTION (sf_vfnrclip_x_f_qf, sf_vfnrclip, full_preds, i_clip_qf_ops)
DEF_RVV_FUNCTION (sf_vfnrclip_xu_f_qf, sf_vfnrclip, full_preds, u_clip_qf_ops)
#undef REQUIRED_EXTENSIONS
+#define REQUIRED_EXTENSIONS XSFVCP_EXT
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_x_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_i_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_vv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_xv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_iv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_fv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_vvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_xvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_ivv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_fvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_vvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_xvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_ivw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_fvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_x_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_i_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_vv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_xv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_iv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_fv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_vvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_xvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_ivv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_fvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_vvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_xvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_ivw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_fvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_x_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_i_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_vv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_xv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_iv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_fv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_vvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_xvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_ivv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_fvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_vvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_xvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_ivw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_fvw_ops)
+#undef REQUIRED_EXTENSIONS
+
#undef DEF_RVV_FUNCTION
diff --git a/gcc/config/riscv/sifive-vector.md b/gcc/config/riscv/sifive-vector.md
index 2975b1e..a416634 100644
--- a/gcc/config/riscv/sifive-vector.md
+++ b/gcc/config/riscv/sifive-vector.md
@@ -182,3 +182,874 @@
"sf.vfnrclip.x<v_su>.f.qf\t%0,%3,%4%p1"
[(set_attr "type" "sf_vfnrclip")
(set_attr "mode" "<MODE>")])
+
+;; SF_VCP
+(define_insn "@sf_vc_x_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:SI 2 "const_int_operand" "K")
+ (match_operand:SI 3 "const_int_operand" "K")
+ (match_operand:<VEL> 4 "register_operand" "r")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.x\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_x_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:SI 4 "const_int_operand" "K,K")
+ (match_operand:<VEL> 5 "register_operand" "r,r")] UNSPECV_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.x\t%3,%4,%0,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_x<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:SI 4 "const_int_operand" "K,K")
+ (match_operand:<VEL> 5 "register_operand" "r,r")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.x\t%3,%4,%0,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_i_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:SI 2 "const_int_operand" "K")
+ (match_operand:SI 3 "const_int_operand" "K")
+ (match_operand:SI 4 "const_int_operand" "P")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.i\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_i_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:SI 4 "const_int_operand" "K,K")
+ (match_operand:SI 5 "const_int_operand" "P,P")] UNSPECV_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.i\t%3,%4,%0,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_i<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:SI 4 "const_int_operand" "K,K")
+ (match_operand:SI 5 "const_int_operand" "P,P")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.i\t%3,%4,%0,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_vv_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:SI 2 "const_int_operand" "K")
+ (match_operand:VFULLI 3 "register_operand" "vr")
+ (match_operand:VFULLI 4 "register_operand" "vr")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.vv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_vv_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vr,vr")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")] UNSPECV_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.vv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_vv<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vr,vr")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.vv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_xv_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:SI 2 "const_int_operand" "K")
+ (match_operand:VFULLI 3 "register_operand" "vr")
+ (match_operand:<VEL> 4 "register_operand" "r")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.xv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_xv_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vd,vd")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vr,vr")
+ (match_operand:<VEL> 5 "register_operand" "r,r")] UNSPECV_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.xv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_xv<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vd,vd")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vr,vr")
+ (match_operand:<VEL> 5 "register_operand" "r,r")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.xv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_iv_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:SI 2 "const_int_operand" "K")
+ (match_operand:VFULLI 3 "register_operand" "vr")
+ (match_operand:SI 4 "const_int_operand" "P")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.iv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_iv_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vd,vd")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vr,vr")
+ (match_operand:SI 5 "const_int_operand" "P,P")] UNSPECV_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.iv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_iv<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vd,vd")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vr,vr")
+ (match_operand:SI 5 "const_int_operand" "P,P")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.iv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_fv_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:SF_FV
+ [(match_operand:SI 1 "const_int_operand" "Ou01")
+ (match_operand:SI 2 "const_int_operand" "K")
+ (match_operand:SF_FV 3 "register_operand" "vr")
+ (match_operand:<SF_XF> 4 "register_operand" "f")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.fv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_fv_se<mode>"
+ [(set (match_operand:SF_FV 0 "register_operand" "=&vd,vd")
+ (if_then_else:SF_FV
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:SF_FV
+ [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01")
+ (match_operand:SF_FV 4 "register_operand" "vr,vr")
+ (match_operand:<SF_XF> 5 "register_operand" "f,f")] UNSPECV_SF_CV)
+ (match_operand:SF_FV 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.fv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_fv<mode>"
+ [(set (match_operand:SF_FV 0 "register_operand" "=&vd,vd")
+ (if_then_else:SF_FV
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:SF_FV
+ [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01")
+ (match_operand:SF_FV 4 "register_operand" "vr,vr")
+ (match_operand:<SF_XF> 5 "register_operand" "f,f")] UNSPEC_SF_CV)
+ (match_operand:SF_FV 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.fv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_vvv_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:VFULLI 2 "register_operand" "vd")
+ (match_operand:VFULLI 3 "register_operand" "vr")
+ (match_operand:VFULLI 4 "register_operand" "vr")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.vvv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_vvv_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vd,vd")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")
+ (match_operand:VFULLI 6 "register_operand" "vr,vr")] UNSPECV_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.vvv\t%3,%4,%6,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_vvv<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vd,vd")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")
+ (match_operand:VFULLI 6 "register_operand" "vr,vr")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.vvv\t%3,%4,%6,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_xvv_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:VFULLI 2 "register_operand" "vd")
+ (match_operand:VFULLI 3 "register_operand" "vr")
+ (match_operand:<VEL> 4 "register_operand" "r")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.xvv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_xvv_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vd,vd")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")
+ (match_operand:<VEL> 6 "register_operand" "r,r")] UNSPECV_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.xvv\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_xvv<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vd,vd")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")
+ (match_operand:<VEL> 6 "register_operand" "r,r")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.xvv\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_ivv_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:VFULLI 2 "register_operand" "vd")
+ (match_operand:VFULLI 3 "register_operand" "vr")
+ (match_operand:SI 4 "const_int_operand" "P")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.ivv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_ivv_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vd,vd")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")
+ (match_operand:SI 6 "const_int_operand" "P,P")] UNSPECV_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.ivv\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_ivv<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vd,vd")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")
+ (match_operand:SI 6 "const_int_operand" "P,P")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.ivv\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_fvv_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:SF_FV
+ [(match_operand:SI 1 "const_int_operand" "Ou01")
+ (match_operand:SF_FV 2 "register_operand" "vd")
+ (match_operand:SF_FV 3 "register_operand" "vr")
+ (match_operand:<SF_XF> 4 "register_operand" "f")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.fvv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_fvv_se<mode>"
+ [(set (match_operand:SF_FV 0 "register_operand" "=&vr,vr")
+ (if_then_else:SF_FV
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:SF_FV
+ [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01")
+ (match_operand:SF_FV 4 "register_operand" "vd,vd")
+ (match_operand:SF_FV 5 "register_operand" "vr,vr")
+ (match_operand:<SF_XF> 6 "register_operand" "f,f")] UNSPECV_SF_CV)
+ (match_operand:SF_FV 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.fvv\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_fvv<mode>"
+ [(set (match_operand:SF_FV 0 "register_operand" "=&vr,vr")
+ (if_then_else:SF_FV
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:SF_FV
+ [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01")
+ (match_operand:SF_FV 4 "register_operand" "vd,vd")
+ (match_operand:SF_FV 5 "register_operand" "vr,vr")
+ (match_operand:<SF_XF> 6 "register_operand" "f,f")] UNSPEC_SF_CV)
+ (match_operand:SF_FV 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.fvv\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_vvw_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:<SF_VW> 2 "register_operand" "vd")
+ (match_operand:SF_VC_W 3 "register_operand" "vr")
+ (match_operand:SF_VC_W 4 "register_operand" "vr")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.vvw\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_vvw_se<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_W 5 "register_operand" "vr,vr")
+ (match_operand:SF_VC_W 6 "register_operand" "vr,vr")] UNSPECV_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.vvw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_vvw<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_W 5 "register_operand" "vr,vr")
+ (match_operand:SF_VC_W 6 "register_operand" "vr,vr")] UNSPEC_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.vvw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_xvw_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:<SF_VW> 2 "register_operand" "vd")
+ (match_operand:SF_VC_W 3 "register_operand" "vr")
+ (match_operand:<VEL> 4 "register_operand" "r")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.xvw\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_xvw_se<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_W 5 "register_operand" "vr,vr")
+ (match_operand:<VEL> 6 "register_operand" "r,r")] UNSPECV_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.xvw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_xvw<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_W 5 "register_operand" "vr,vr")
+ (match_operand:<VEL> 6 "register_operand" "r,r")] UNSPEC_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.xvw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_ivw_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:<SF_VW> 2 "register_operand" "vd")
+ (match_operand:SF_VC_W 3 "register_operand" "vr")
+ (match_operand:SI 4 "immediate_operand" "P")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.ivw\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_ivw_se<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+      (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_W 5 "register_operand" "vr,vr")
+           (match_operand:SI 6 "immediate_operand" "P,P")] UNSPECV_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.ivw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_ivw<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_W 5 "register_operand" "vr,vr")
+ (match_operand:SI 6 "immediate_operand" "P,P")] UNSPEC_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.ivw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_fvw_se<mode>"
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 1 "const_int_operand" "Ou01")
+ (match_operand:<SF_VW> 2 "register_operand" "vd")
+ (match_operand:SF_VC_FW 3 "register_operand" "vr")
+ (match_operand:<SF_XFW> 4 "register_operand" "f")] UNSPECV_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.fvw\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_fvw_se<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_FW 5 "register_operand" "vr,vr")
+ (match_operand:<SF_XFW> 6 "register_operand" "f,f")] UNSPECV_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.fvw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_fvw<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_FW 5 "register_operand" "vr,vr")
+ (match_operand:<SF_XFW> 6 "register_operand" "f,f")] UNSPEC_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.fvw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
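The patterns above come in paired forms: the plain "sf_vc" variants wrap the custom
operation in an ordinary unspec, while the "_se" (side-effect) variants use
unspec_volatile, so only the latter are guaranteed to survive when nothing reads the
result. A minimal user-level sketch of that contract follows, assuming only the
sifive_vector.h header and the __riscv_sf_vc_* intrinsic signatures that appear in the
xsfvector tests further down; the wrapper function itself is hypothetical.

#include "sifive_vector.h"

/* Illustrative sketch, not part of the patch: contrast a side-effecting
   "_se" intrinsic with the plain value form.  Signatures are taken from
   the xsfvector tests below; the wrapper is hypothetical.  */
void
sf_vc_sketch (vuint16m1_t ws2, vuint16m1_t wr1,
              vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl)
{
  /* Maps to an unspec_volatile pattern: kept even though no result
     is read back.  */
  __riscv_sf_vc_vv_se_u16m1 (1, 3, ws2, wr1, vl);

  /* Maps to a plain unspec pattern: a dead result like this one may
     be removed by the optimizer.  */
  (void) __riscv_sf_vc_v_vv_u8mf8 (1, vs2, rs1, vl);
}
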
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index f8da71b..5687e8a 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -120,10 +120,12 @@
UNSPEC_SF_VFNRCLIP
UNSPEC_SF_VFNRCLIPU
+ UNSPEC_SF_CV
])
(define_c_enum "unspecv" [
UNSPECV_FRM_RESTORE_EXIT
+ UNSPECV_SF_CV
])
;; Subset of VI with fractional LMUL types
@@ -4873,3 +4875,50 @@
(RVVM1QI "rvvm4sf")
(RVVM2QI "rvvm8sf")
])
+
+
+(define_mode_iterator SF_VC_W [
+ RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_attr SF_VW [
+ (RVVM4QI "RVVM8HI") (RVVM2QI "RVVM4HI") (RVVM1QI "RVVM2HI") (RVVMF2QI "RVVM1HI")
+ (RVVMF4QI "RVVMF2HI") (RVVMF8QI "RVVMF4HI")
+ (RVVM4HI "RVVM8SI") (RVVM2HI "RVVM4SI") (RVVM1HI "RVVM2SI") (RVVMF2HI "RVVM1SI")
+ (RVVMF4HI "RVVMF2SI")
+ (RVVM4SI "RVVM8DI") (RVVM2SI "RVVM4DI") (RVVM1SI "RVVM2DI") (RVVMF2SI "RVVM1DI")
+])
+
+(define_mode_attr sf_vw [
+ (RVVM4QI "rvvm8hi") (RVVM2QI "rvvm4hi") (RVVM1QI "rvvm2hi") (RVVMF2QI "rvvm1hi")
+ (RVVMF4QI "rvvmf2hi") (RVVMF8QI "rvvmf4hi")
+ (RVVM4HI "rvvm8si") (RVVM2HI "rvvm4si") (RVVM1HI "rvvm2si") (RVVMF2HI "rvvm1si")
+ (RVVMF4HI "rvvmf2si")
+ (RVVM4SI "rvvm8di") (RVVM2SI "rvvm4di") (RVVM1SI "rvvm2di") (RVVMF2SI "rvvm1di")
+])
+
+(define_mode_iterator SF_FV [
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
+ (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
+])
+
+
+(define_mode_iterator SF_VC_FW [
+ RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_attr SF_XF [
+ (RVVM8HI "HF") (RVVM4HI "HF") (RVVM2HI "HF") (RVVM1HI "HF") (RVVMF2HI "HF") (RVVMF4HI "HF")
+ (RVVM8SI "SF") (RVVM4SI "SF") (RVVM2SI "SF") (RVVM1SI "SF") (RVVMF2SI "SF")
+ (RVVM8DI "DF") (RVVM4DI "DF") (RVVM2DI "DF") (RVVM1DI "DF")
+])
+
+(define_mode_attr SF_XFW [
+ (RVVM4HI "HF") (RVVM2HI "HF") (RVVM1HI "HF") (RVVMF2HI "HF") (RVVMF4HI "HF")
+ (RVVM4SI "SF") (RVVM2SI "SF") (RVVM1SI "SF") (RVVMF2SI "SF")
+])
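The SF_VC_W iterator together with the SF_VW/sf_vw attributes pairs each narrow source
mode with its double-width destination (QI element modes widen to HI, HI to SI, SI to
DI, at twice the register-group size); the .vvw/.xvw/.ivw/.fvw patterns above use that
mapping for their result mode. At the intrinsic level an e8, LMUL=1 source therefore
produces an e16, LMUL=2 result. A minimal sketch, reusing a signature from the
xsfvector tests below (the wrapper name is illustrative only):

#include "sifive_vector.h"

/* Illustrative sketch, not part of the patch: the widening .vvw form.
   RVVM1QI sources (vuint8m1_t) produce an RVVM2HI result (vuint16m2_t),
   matching the SF_VW entry (RVVM1QI "RVVM2HI") above.  */
vuint16m2_t
sf_vc_widen_sketch (vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t rs1, size_t vl)
{
  return __riscv_sf_vc_v_vvw_u8m1 (1, vd, vs2, rs1, vl);
}
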
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index 3ab4d76..5191ae4 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -56,8 +56,7 @@
vssegtux,vssegtox,vlsegdff,vandn,vbrev,vbrev8,vrev8,vcpop,vclz,vctz,vrol,\
vror,vwsll,vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,\
vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,\
- vfncvtbf16,vfwcvtbf16,vfwmaccbf16,\
- sf_vqmacc,sf_vfnrclip")
+ vfncvtbf16,vfwcvtbf16,vfwmaccbf16,sf_vqmacc,sf_vfnrclip,sf_vc,sf_vc_se")
(const_string "true")]
(const_string "false")))
diff --git a/gcc/cp/pt.cc b/gcc/cp/pt.cc
index e8d342f..26ed9de 100644
--- a/gcc/cp/pt.cc
+++ b/gcc/cp/pt.cc
@@ -17181,18 +17181,24 @@ tsubst (tree t, tree args, tsubst_flags_t complain, tree in_decl)
case UNBOUND_CLASS_TEMPLATE:
{
- ++processing_template_decl;
- tree ctx = tsubst_entering_scope (TYPE_CONTEXT (t), args,
- complain, in_decl);
- --processing_template_decl;
tree name = TYPE_IDENTIFIER (t);
+ if (name == error_mark_node)
+ return error_mark_node;
+
tree parm_list = DECL_TEMPLATE_PARMS (TYPE_NAME (t));
+ parm_list = tsubst_template_parms (parm_list, args, complain);
+ if (parm_list == error_mark_node)
+ return error_mark_node;
- if (ctx == error_mark_node || name == error_mark_node)
+ if (parm_list && TMPL_PARMS_DEPTH (parm_list) > 1)
+ ++processing_template_decl;
+ tree ctx = tsubst_entering_scope (TYPE_CONTEXT (t), args,
+ complain, in_decl);
+ if (parm_list && TMPL_PARMS_DEPTH (parm_list) > 1)
+ --processing_template_decl;
+ if (ctx == error_mark_node)
return error_mark_node;
- if (parm_list)
- parm_list = tsubst_template_parms (parm_list, args, complain);
return make_unbound_class_template (ctx, name, parm_list, complain);
}
diff --git a/gcc/testsuite/g++.dg/cpp2a/lambda-targ15.C b/gcc/testsuite/g++.dg/cpp2a/lambda-targ15.C
new file mode 100644
index 0000000..90160a5
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/lambda-targ15.C
@@ -0,0 +1,17 @@
+// PR c++/119981
+// { dg-do compile { target c++20 } }
+
+template<template<class> class P>
+struct mp_copy_if{};
+
+template<auto Fn>
+struct g {
+ template<class> struct fn{};
+};
+
+template<typename>
+void test3() {
+ mp_copy_if<g<[]{}>::template fn> b;
+}
+
+template void test3<int>();
diff --git a/gcc/testsuite/g++.target/riscv/pr119832.C b/gcc/testsuite/g++.target/riscv/pr119832.C
new file mode 100644
index 0000000..f4dc480
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/pr119832.C
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=rv64gcv -mabi=lp64 -ffast-math" } */
+
+struct ac {
+ ~ac();
+ void u();
+};
+struct ae {
+ int s;
+ float *ag;
+};
+
+float c;
+
+void ak(ae *al, int n) {
+ ac d;
+ for (int i;i<n;++i) {
+ float a = 0;
+ for (long j; j < al[i].s; j++)
+ a += al[i].ag[j];
+ c = a;
+ d.u();
+ }
+}
+
+/* { dg-final { scan-assembler-not "frrm\t" } } */
+/* { dg-final { scan-assembler-not "fsrm\t" } } */
diff --git a/gcc/testsuite/gcc.dg/torture/pr120006.c b/gcc/testsuite/gcc.dg/torture/pr120006.c
new file mode 100644
index 0000000..c067f0e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr120006.c
@@ -0,0 +1,31 @@
+/* { dg-do run } */
+/* { dg-additional-options "-fipa-pta" } */
+
+char *b;
+int f = 1;
+
+char *xstrdup(char *i) {
+ char *c = __builtin_strdup(i);
+ if (!c)
+ __builtin_exit(1);
+ return c;
+}
+
+int main() {
+ char g;
+ char h[8];
+
+ for (int i = 0; i < 2; i++) {
+ char c = *__builtin_strdup("");
+ b = &g;
+
+ if (f) {
+ h[0] = '-';
+ h[1] = 'a';
+ h[2] = '\0';
+ b = xstrdup(h);
+ }
+ }
+ if (__builtin_strcmp(b, "-a") != 0)
+ __builtin_abort();
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-thread-7.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-thread-7.c
index d84acee..8be9878 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-thread-7.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-thread-7.c
@@ -11,8 +11,8 @@
to change decisions in switch expansion which in turn can expose new
jump threading opportunities. Skip the later tests on aarch64. */
/* { dg-final { scan-tree-dump-not "Jumps threaded" "dom3" { target { ! aarch64*-*-* } } } } */
-/* { dg-final { scan-tree-dump "Jumps threaded: 9" "thread2" { target { ! aarch64*-*-* } } } } */
-/* { dg-final { scan-tree-dump "Jumps threaded: 17" "thread2" { target { aarch64*-*-* } } } } */
+/* { dg-final { scan-tree-dump "Jumps threaded: 10" "thread2" { target { ! aarch64*-*-* } } } } */
+/* { dg-final { scan-tree-dump "Jumps threaded: 14" "thread2" { target { aarch64*-*-* } } } } */
enum STATE {
S0=0,
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-thread-23.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-thread-23.c
new file mode 100644
index 0000000..930360a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-thread-23.c
@@ -0,0 +1,19 @@
+/* PR120003 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-cddce3-details" } */
+
+extern _Bool g(int);
+
+_Bool f()
+{
+ _Bool retval = 0;
+ for(int i=0; i<1000000; ++i)
+ retval = retval || g(i);
+ return retval;
+}
+
+/* Jump threading after loop optimization should separate the counting
+   loop from the loop that runs until retval becomes true, and CD-DCE
+   should then elide the latter.  It is difficult to check directly that
+   a true retval terminates the loop, so check instead that CD-DCE
+   eliminates one loop.  */
+/* { dg-final { scan-tree-dump "fix_loop_structure: removing loop" "cddce3" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/pr115258.c b/gcc/testsuite/gcc.target/aarch64/pr115258.c
index 9a489d4..f60b50a 100644
--- a/gcc/testsuite/gcc.target/aarch64/pr115258.c
+++ b/gcc/testsuite/gcc.target/aarch64/pr115258.c
@@ -1,4 +1,4 @@
-/* { dg-options "-O2" } */
+/* { dg-options "-O2 -mcmodel=small" } */
/* { dg-final { check-function-bodies "**" "" "" } } */
/*
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/cond_arith_6.c b/gcc/testsuite/gcc.target/aarch64/sve/cond_arith_6.c
index 4085ab1..d5a12f1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/cond_arith_6.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_arith_6.c
@@ -8,7 +8,8 @@ f (float *x)
x[i] -= 1.0f;
}
-/* { dg-final { scan-assembler {\tld1w\tz} } } */
+/* { dg-final { scan-assembler {\tld1w\tz} { target aarch64_big_endian } } } */
+/* { dg-final { scan-assembler {\tldr\tq} { target aarch64_little_endian } } } */
/* { dg-final { scan-assembler {\tfcmgt\tp} } } */
/* { dg-final { scan-assembler {\tfsub\tz} } } */
/* { dg-final { scan-assembler {\tst1w\tz} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/ldst_ptrue_128_to_neon.c b/gcc/testsuite/gcc.target/aarch64/sve/ldst_ptrue_128_to_neon.c
new file mode 100644
index 0000000..43d36e8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/ldst_ptrue_128_to_neon.c
@@ -0,0 +1,48 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -msve-vector-bits=128" } */
+/* { dg-require-effective-target aarch64_little_endian } */
+
+#include <arm_sve.h>
+
+#define TEST(TYPE, TY, B) \
+ sv##TYPE \
+ ld1_##TY##B (TYPE *x) \
+ { \
+ svbool_t pg = svptrue_b##B (); \
+ return svld1_##TY##B (pg, x); \
+ } \
+ \
+ void \
+ st1_##TY##B (TYPE *x, sv##TYPE data) \
+ { \
+ svbool_t pg = svptrue_b##B (); \
+ svst1_##TY##B (pg, x, data); \
+ } \
+ \
+ sv##TYPE \
+ ld1_vol_##TY##B (volatile sv##TYPE *ptr) \
+ { \
+ return *ptr; \
+ } \
+ \
+ void \
+ st1_vol_##TY##B (volatile sv##TYPE *ptr, sv##TYPE x) \
+ { \
+ *ptr = x; \
+ }
+
+TEST (bfloat16_t, bf, 16)
+TEST (float16_t, f, 16)
+TEST (float32_t, f, 32)
+TEST (float64_t, f, 64)
+TEST (int8_t, s, 8)
+TEST (int16_t, s, 16)
+TEST (int32_t, s, 32)
+TEST (int64_t, s, 64)
+TEST (uint8_t, u, 8)
+TEST (uint16_t, u, 16)
+TEST (uint32_t, u, 32)
+TEST (uint64_t, u, 64)
+
+/* { dg-final { scan-assembler-times {\tldr\tq0, \[x0\]} 24 } } */
+/* { dg-final { scan-assembler-times {\tstr\tq0, \[x0\]} 24 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/return_4_128.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/return_4_128.c
index 87d528c..ac5f981 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/return_4_128.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/return_4_128.c
@@ -11,104 +11,91 @@
/*
** callee_s8:
-** ptrue (p[0-7])\.b, vl16
-** ld1b z0\.b, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (s8, __SVInt8_t)
/*
** callee_u8:
-** ptrue (p[0-7])\.b, vl16
-** ld1b z0\.b, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (u8, __SVUint8_t)
/*
** callee_mf8:
-** ptrue (p[0-7])\.b, vl16
-** ld1b z0\.b, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (mf8, __SVMfloat8_t)
/*
** callee_s16:
-** ptrue (p[0-7])\.b, vl16
-** ld1h z0\.h, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (s16, __SVInt16_t)
/*
** callee_u16:
-** ptrue (p[0-7])\.b, vl16
-** ld1h z0\.h, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (u16, __SVUint16_t)
/*
** callee_f16:
-** ptrue (p[0-7])\.b, vl16
-** ld1h z0\.h, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (f16, __SVFloat16_t)
/*
** callee_bf16:
-** ptrue (p[0-7])\.b, vl16
-** ld1h z0\.h, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (bf16, __SVBfloat16_t)
/*
** callee_s32:
-** ptrue (p[0-7])\.b, vl16
-** ld1w z0\.s, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (s32, __SVInt32_t)
/*
** callee_u32:
-** ptrue (p[0-7])\.b, vl16
-** ld1w z0\.s, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (u32, __SVUint32_t)
/*
** callee_f32:
-** ptrue (p[0-7])\.b, vl16
-** ld1w z0\.s, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (f32, __SVFloat32_t)
/*
** callee_s64:
-** ptrue (p[0-7])\.b, vl16
-** ld1d z0\.d, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (s64, __SVInt64_t)
/*
** callee_u64:
-** ptrue (p[0-7])\.b, vl16
-** ld1d z0\.d, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (u64, __SVUint64_t)
/*
** callee_f64:
-** ptrue (p[0-7])\.b, vl16
-** ld1d z0\.d, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (f64, __SVFloat64_t)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/return_5_128.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/return_5_128.c
index 347a16c..2fab6fe 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/return_5_128.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/return_5_128.c
@@ -13,104 +13,91 @@
/*
** callee_s8:
-** ptrue (p[0-7])\.b, vl16
-** ld1b z0\.b, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (s8, svint8_t)
/*
** callee_u8:
-** ptrue (p[0-7])\.b, vl16
-** ld1b z0\.b, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (u8, svuint8_t)
/*
** callee_mf8:
-** ptrue (p[0-7])\.b, vl16
-** ld1b z0\.b, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (mf8, svmfloat8_t)
/*
** callee_s16:
-** ptrue (p[0-7])\.b, vl16
-** ld1h z0\.h, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (s16, svint16_t)
/*
** callee_u16:
-** ptrue (p[0-7])\.b, vl16
-** ld1h z0\.h, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (u16, svuint16_t)
/*
** callee_f16:
-** ptrue (p[0-7])\.b, vl16
-** ld1h z0\.h, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (f16, svfloat16_t)
/*
** callee_bf16:
-** ptrue (p[0-7])\.b, vl16
-** ld1h z0\.h, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (bf16, svbfloat16_t)
/*
** callee_s32:
-** ptrue (p[0-7])\.b, vl16
-** ld1w z0\.s, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (s32, svint32_t)
/*
** callee_u32:
-** ptrue (p[0-7])\.b, vl16
-** ld1w z0\.s, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (u32, svuint32_t)
/*
** callee_f32:
-** ptrue (p[0-7])\.b, vl16
-** ld1w z0\.s, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (f32, svfloat32_t)
/*
** callee_s64:
-** ptrue (p[0-7])\.b, vl16
-** ld1d z0\.d, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (s64, svint64_t)
/*
** callee_u64:
-** ptrue (p[0-7])\.b, vl16
-** ld1d z0\.d, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (u64, svuint64_t)
/*
** callee_f64:
-** ptrue (p[0-7])\.b, vl16
-** ld1d z0\.d, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
CALLEE (f64, svfloat64_t)
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/struct_3_128.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/struct_3_128.c
index d99ce12..29bdaf3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/struct_3_128.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/struct_3_128.c
@@ -473,17 +473,16 @@ SEL2 (struct, pst_uniform4)
** sub sp, sp, #144
** add (x[0-9]+), sp, #?31
** and x7, \1, #?(?:-32|4294967264)
-** ptrue (p[0-7])\.b, vl16
-** st1w z0\.s, \2, \[x7\]
-** add (x[0-9]+), x7, #?32
+** mov (x[0-9]+), x7
+** str q0, \[\2\], 32
** (
-** str z1, \[\3\]
-** str z2, \[\3, #1, mul vl\]
+** str z1, \[\2\]
+** str z2, \[\2, #1, mul vl\]
** |
-** stp q1, q2, \[\3\]
+** stp q1, q2, \[\2\]
** )
-** str z3, \[\3, #2, mul vl\]
-** st1w z4\.s, \2, \[x7, #6, mul vl\]
+** str z3, \[\2, #2, mul vl\]
+** str q4, \[x7, 96\]
** add sp, sp, #?144
** ret
*/
@@ -516,20 +515,12 @@ SEL2 (struct, pst_mixed1)
** test_pst_mixed1:
** sub sp, sp, #176
** str p0, \[sp\]
-** ptrue p0\.b, vl16
-** st1h z0\.h, p0, \[sp, #1, mul vl\]
-** st1h z1\.h, p0, \[sp, #2, mul vl\]
-** st1w z2\.s, p0, \[sp, #3, mul vl\]
-** st1d z3\.d, p0, \[sp, #4, mul vl\]
+** stp q0, q1, \[sp, 16\]
+** stp q2, q3, \[sp, 48\]
** str p1, \[sp, #40, mul vl\]
** str p2, \[sp, #41, mul vl\]
-** st1b z4\.b, p0, \[sp, #6, mul vl\]
-** st1h z5\.h, p0, \[sp, #7, mul vl\]
-** ...
-** st1w z6\.s, p0, [^\n]*
-** ...
-** st1d z7\.d, p0, [^\n]*
-** ...
+** stp q4, q5, \[sp, 96\]
+** stp q6, q7, \[sp, 128\]
** str p3, \[sp, #80, mul vl\]
** mov (x7, sp|w7, wsp)
** add sp, sp, #?176
@@ -557,15 +548,11 @@ SEL2 (struct, pst_mixed2)
** test_pst_mixed2:
** sub sp, sp, #128
** str p0, \[sp\]
-** ptrue (p[03])\.b, vl16
-** add (x[0-9]+), sp, #?2
-** st1b z0\.b, \1, \[\2\]
+** str q0, \[sp, 2\]
** str p1, \[sp, #9, mul vl\]
-** add (x[0-9]+), sp, #?20
-** st1b z1\.b, \1, \[\3\]
+** str q1, \[sp, 20\]
** str p2, \[sp, #18, mul vl\]
-** add (x[0-9]+), sp, #?38
-** st1b z2\.b, \1, \[\4\]
+** str q2, \[sp, 38\]
** (
** str z3, \[sp, #4, mul vl\]
** str z4, \[sp, #5, mul vl\]
@@ -595,8 +582,7 @@ SEL2 (struct, pst_big1)
/*
** test_pst_big1_a: { target lp64 }
-** ptrue (p[0-7])\.b, vl16
-** ld1b z0\.b, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
/*
@@ -760,8 +746,7 @@ test_pst_big3_d (struct pst_big3 x)
/*
** test_pst_big3_e: { target lp64 }
-** ptrue (p[0-7])\.b, vl16
-** ld1b z0\.b, \1/z, \[x0, #1, mul vl\]
+** ldr q0, \[x0, 16\]
** ret
*/
/*
@@ -780,8 +765,7 @@ test_pst_big3_e (struct pst_big3 x)
/*
** test_pst_big3_f: { target lp64 }
-** ptrue (p[0-7])\.b, vl16
-** ld1b z0\.b, \1/z, \[x0, #5, mul vl\]
+** ldr q0, \[x0, 80\]
** ret
*/
/*
@@ -1035,8 +1019,7 @@ SEL2 (struct, nonpst6)
/*
** test_nonpst6: { target lp64 }
-** ptrue (p[0-3])\.b, vl16
-** ld1d z0\.d, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
/*
@@ -1063,8 +1046,7 @@ SEL2 (struct, nonpst7)
/*
** test_nonpst7: { target lp64 }
-** ptrue (p[0-3])\.b, vl16
-** ld1d z0\.d, \1/z, \[x0\]
+** ldr q0, \[x0\]
** ret
*/
/*
diff --git a/gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-1.c b/gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-1.c
new file mode 100644
index 0000000..086d1ea
--- /dev/null
+++ b/gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-1.c
@@ -0,0 +1,7 @@
+/* { dg-do run { target { ! avr_tiny } } } */
+/* { dg-additional-options "-std=gnu99" } */
+
+typedef __UINT8_TYPE__ TYP;
+#define AS __flashx
+
+#include "pr119989.h"
diff --git a/gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-2.c b/gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-2.c
new file mode 100644
index 0000000..d053ab9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-2.c
@@ -0,0 +1,7 @@
+/* { dg-do run { target { ! avr_tiny } } } */
+/* { dg-additional-options "-std=gnu99" } */
+
+typedef __UINT16_TYPE__ TYP;
+#define AS __flashx
+
+#include "pr119989.h"
diff --git a/gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-3.c b/gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-3.c
new file mode 100644
index 0000000..1a5e8f0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-3.c
@@ -0,0 +1,7 @@
+/* { dg-do run { target { ! avr_tiny } } } */
+/* { dg-additional-options "-std=gnu99" } */
+
+__extension__ typedef __uint24 TYP;
+#define AS __flashx
+
+#include "pr119989.h"
diff --git a/gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-4.c b/gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-4.c
new file mode 100644
index 0000000..63fb52c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/avr/torture/pr119989-flashx-4.c
@@ -0,0 +1,7 @@
+/* { dg-do run { target { ! avr_tiny } } } */
+/* { dg-additional-options "-std=gnu99" } */
+
+typedef __UINT32_TYPE__ TYP;
+#define AS __flashx
+
+#include "pr119989.h"
diff --git a/gcc/testsuite/gcc.target/avr/torture/pr119989-memx-1.c b/gcc/testsuite/gcc.target/avr/torture/pr119989-memx-1.c
new file mode 100644
index 0000000..4553517
--- /dev/null
+++ b/gcc/testsuite/gcc.target/avr/torture/pr119989-memx-1.c
@@ -0,0 +1,7 @@
+/* { dg-do run { target { ! avr_tiny } } } */
+/* { dg-additional-options "-std=gnu99" } */
+
+typedef __UINT8_TYPE__ TYP;
+#define AS __memx
+
+#include "pr119989.h"
diff --git a/gcc/testsuite/gcc.target/avr/torture/pr119989-memx-2.c b/gcc/testsuite/gcc.target/avr/torture/pr119989-memx-2.c
new file mode 100644
index 0000000..b28c497
--- /dev/null
+++ b/gcc/testsuite/gcc.target/avr/torture/pr119989-memx-2.c
@@ -0,0 +1,7 @@
+/* { dg-do run { target { ! avr_tiny } } } */
+/* { dg-additional-options "-std=gnu99" } */
+
+typedef __UINT16_TYPE__ TYP;
+#define AS __memx
+
+#include "pr119989.h"
diff --git a/gcc/testsuite/gcc.target/avr/torture/pr119989-memx-3.c b/gcc/testsuite/gcc.target/avr/torture/pr119989-memx-3.c
new file mode 100644
index 0000000..bb20053
--- /dev/null
+++ b/gcc/testsuite/gcc.target/avr/torture/pr119989-memx-3.c
@@ -0,0 +1,7 @@
+/* { dg-do run { target { ! avr_tiny } } } */
+/* { dg-additional-options "-std=gnu99" } */
+
+__extension__ typedef __uint24 TYP;
+#define AS __memx
+
+#include "pr119989.h"
diff --git a/gcc/testsuite/gcc.target/avr/torture/pr119989-memx-4.c b/gcc/testsuite/gcc.target/avr/torture/pr119989-memx-4.c
new file mode 100644
index 0000000..05a3ee7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/avr/torture/pr119989-memx-4.c
@@ -0,0 +1,7 @@
+/* { dg-do run { target { ! avr_tiny } } } */
+/* { dg-additional-options "-std=gnu99" } */
+
+typedef __UINT32_TYPE__ TYP;
+#define AS __memx
+
+#include "pr119989.h"
diff --git a/gcc/testsuite/gcc.target/avr/torture/pr119989.h b/gcc/testsuite/gcc.target/avr/torture/pr119989.h
new file mode 100644
index 0000000..12b5449
--- /dev/null
+++ b/gcc/testsuite/gcc.target/avr/torture/pr119989.h
@@ -0,0 +1,37 @@
+const AS TYP some_data[] = { 1, 2, 3, 4, 5 };
+const AS TYP *IP;
+
+TYP DT, a, b;
+
+__attribute__((noipa))
+void do_test1 (void)
+{
+ DT = *IP;
+ DT = *IP--;
+}
+
+__attribute__((noipa))
+void do_test2 (void)
+{
+ DT = *IP;
+ __asm volatile ("" ::: "memory"); // Prevents unwanted optimization
+ DT = *IP--;
+}
+
+TYP difference(void)
+{
+ IP = &some_data[3];
+ do_test1();
+ a = DT;
+ IP = &some_data[3];
+ do_test2();
+ b = DT;
+ return a - b; // Expected: 0
+}
+
+int main (void)
+{
+ if (difference () != 0)
+ __builtin_exit (__LINE__);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/predef-19.c b/gcc/testsuite/gcc.target/riscv/predef-19.c
index 2b90702..ca3d57a 100644
--- a/gcc/testsuite/gcc.target/riscv/predef-19.c
+++ b/gcc/testsuite/gcc.target/riscv/predef-19.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -march=rv64gc_zve32x -mabi=lp64d -mcmodel=medlow -misa-spec=2.2" } */
+/* { dg-options "-O2 -march=rv64im_zve32x -mabi=lp64 -mcmodel=medlow -misa-spec=2.2" } */
int main () {
@@ -15,50 +15,30 @@ int main () {
#error "__riscv_i"
#endif
-#if !defined(__riscv_c)
-#error "__riscv_c"
-#endif
-
#if defined(__riscv_e)
#error "__riscv_e"
#endif
-#if !defined(__riscv_a)
-#error "__riscv_a"
-#endif
-
#if !defined(__riscv_m)
#error "__riscv_m"
#endif
-#if !defined(__riscv_f)
-#error "__riscv_f"
-#endif
-
-#if !defined(__riscv_d)
-#error "__riscv_d"
-#endif
-
-#if defined(__riscv_v)
-#error "__riscv_v"
+#if !defined(__riscv_zicsr)
+#error "__riscv_zicsr"
#endif
-#if defined(__riscv_zvl128b)
-#error "__riscv_zvl128b"
+#if !defined(__riscv_zmmul)
+#error "__riscv_zmmul"
#endif
-#if defined(__riscv_zvl64b)
-#error "__riscv_zvl64b"
+#if !defined(__riscv_zve32x)
+#error "__riscv_zve32x"
#endif
#if !defined(__riscv_zvl32b)
#error "__riscv_zvl32b"
#endif
-#if !defined(__riscv_zve32x)
-#error "__riscv_zve32x"
-#endif
-
#if !defined(__riscv_vector)
#error "__riscv_vector"
#endif
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_f.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_f.c
new file mode 100644
index 0000000..7667e56
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_f.c
@@ -0,0 +1,88 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_xsfvcp -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "sifive_vector.h"
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+
+/*
+** test_sf_vc_v_fv_u16mf4:
+** ...
+** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+
+** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+vuint16mf4_t test_sf_vc_v_fv_u16mf4(vuint16mf4_t vs2, float16_t fs1, size_t vl) {
+ return __riscv_sf_vc_v_fv_u16mf4(1, vs2, fs1, vl);
+}
+
+/*
+** test_sf_vc_v_fv_se_u16mf4:
+** ...
+** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+
+** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+vuint16mf4_t test_sf_vc_v_fv_se_u16mf4(vuint16mf4_t vs2, float16_t fs1, size_t vl) {
+ return __riscv_sf_vc_v_fv_se_u16mf4(1, vs2, fs1, vl);
+}
+
+/*
+** test_sf_vc_fv_se_u16mf2:
+** ...
+** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+
+** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+void test_sf_vc_fv_se_u16mf2(vuint16mf2_t vs2, float16_t fs1, size_t vl) {
+ __riscv_sf_vc_fv_se_u16mf2(1, 3, vs2, fs1, vl);
+}
+
+/*
+** test_sf_vc_v_fvv_u16m1:
+** ...
+** vsetivli\s+zero+,0+,e16+,m1,ta,ma+
+** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+vuint16m1_t test_sf_vc_v_fvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, float16_t fs1, size_t vl) {
+ return __riscv_sf_vc_v_fvv_u16m1(1, vd, vs2, fs1, vl);
+}
+
+/*
+** test_sf_vc_v_fvv_se_u16m1:
+** ...
+** vsetivli\s+zero+,0+,e16+,m1,ta,ma+
+** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+vuint16m1_t test_sf_vc_v_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, float16_t fs1, size_t vl) {
+ return __riscv_sf_vc_v_fvv_se_u16m1(1, vd, vs2, fs1, vl);
+}
+
+/*
+** test_sf_vc_fvv_se_u32m8:
+** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
+** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+void test_sf_vc_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float32_t fs1, size_t vl) {
+ __riscv_sf_vc_fvv_se_u32m8(1, vd, vs2, fs1, vl);
+}
+
+
+/*
+** test_sf_vc_fvw_se_u32m2:
+** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
+** sf\.vc\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+void test_sf_vc_fvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float32_t fs1, size_t vl) {
+ __riscv_sf_vc_fvw_se_u32m2(1, vd, vs2, fs1, vl);
+}
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_i.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_i.c
new file mode 100644
index 0000000..5528cc5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_i.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_xsfvcp -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "sifive_vector.h"
+
+
+/*
+** test_sf_vc_v_i_u16m4:
+** ...
+** vsetivli\s+zero+,0+,e16+,m4,ta,ma+
+** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint16m4_t test_sf_vc_v_i_u16m4(size_t vl) {
+ return __riscv_sf_vc_v_i_u16m4(1, 2, 4, vl);
+}
+
+/*
+** test_sf_vc_v_i_se_u16m4:
+** ...
+** vsetivli\s+zero+,0+,e16+,m4,ta,ma+
+** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint16m4_t test_sf_vc_v_i_se_u16m4(size_t vl) {
+ return __riscv_sf_vc_v_i_se_u16m4(1, 2, 4, vl);
+}
+
+/*
+** test_sf_vc_i_se_u16mf4:
+** ...
+** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+
+** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+
+** ...
+*/
+void test_sf_vc_i_se_u16mf4(size_t vl) {
+ __riscv_sf_vc_i_se_u16mf4(1, 2, 3, 4, vl);
+}
+
+/*
+** test_sf_vc_v_iv_u32m2:
+** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
+** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint32m2_t test_sf_vc_v_iv_u32m2(vuint32m2_t vs2, size_t vl) {
+ return __riscv_sf_vc_v_iv_u32m2(1, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_v_iv_se_u32m2:
+** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
+** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint32m2_t test_sf_vc_v_iv_se_u32m2(vuint32m2_t vs2, size_t vl) {
+ return __riscv_sf_vc_v_iv_se_u32m2(1, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_iv_se_u16m2:
+** ...
+** vsetivli\s+zero+,0+,e16+,m2,ta,ma+
+** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+void test_sf_vc_iv_se_u16m2(vuint16m2_t vs2, size_t vl) {
+ __riscv_sf_vc_iv_se_u16m2(1, 3, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_v_ivv_u8m8:
+** ...
+** vsetivli\s+zero+,0+,e8+,m8,ta,ma+
+** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint8m8_t test_sf_vc_v_ivv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+ return __riscv_sf_vc_v_ivv_u8m8(1, vd, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_v_ivv_se_u8m8:
+** ...
+** vsetivli\s+zero+,0+,e8+,m8,ta,ma+
+** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint8m8_t test_sf_vc_v_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+ return __riscv_sf_vc_v_ivv_se_u8m8(1, vd, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_ivv_se_u64m1:
+** ...
+** vsetivli\s+zero+,0+,e64+,m1,ta,ma+
+** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+void test_sf_vc_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
+ __riscv_sf_vc_ivv_se_u64m1(1, vd, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_v_ivw_u8mf4:
+** ...
+** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+
+** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint16mf2_t test_sf_vc_v_ivw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
+ return __riscv_sf_vc_v_ivw_u8mf4(1, vd, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_v_ivw_se_u8mf4:
+** ...
+** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+
+** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint16mf2_t test_sf_vc_v_ivw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
+ return __riscv_sf_vc_v_ivw_se_u8mf4(1, vd, vs2, 4, vl);
+}
+
+void test_sf_vc_ivw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
+ __riscv_sf_vc_ivw_se_u32m4(1, vd, vs2, 4, vl);
+}
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_v.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_v.c
new file mode 100644
index 0000000..e3022c1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_v.c
@@ -0,0 +1,107 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_xsfvcp -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "sifive_vector.h"
+
+
+/*
+** test_sf_vc_v_vv_u8mf8:
+** ...
+** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+
+** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vuint8mf8_t test_sf_vc_v_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl) {
+ return __riscv_sf_vc_v_vv_u8mf8(1, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_v_vv_se_u8mf8:
+** ...
+** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+
+** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vuint8mf8_t test_sf_vc_v_vv_se_u8mf8(vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl) {
+ return __riscv_sf_vc_v_vv_se_u8mf8(1, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_vv_se_u16m1:
+** ...
+** vsetivli\s+zero+,0+,e16+,m1,ta,ma+
+** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+void test_sf_vc_vv_se_u16m1(vuint16m1_t vs2, vuint16m1_t rs1, size_t vl) {
+ __riscv_sf_vc_vv_se_u16m1(1, 3, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_v_vvv_u32mf2:
+** ...
+** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+
+** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vuint32mf2_t test_sf_vc_v_vvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t rs1, size_t vl) {
+ return __riscv_sf_vc_v_vvv_u32mf2(1, vd, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_v_vvv_se_u32mf2:
+** ...
+** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+
+** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vuint32mf2_t test_sf_vc_v_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t rs1, size_t vl) {
+ return __riscv_sf_vc_v_vvv_se_u32mf2(1, vd, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_vvv_se_u64m1:
+** ...
+** vsetivli\s+zero+,0+,e64+,m1,ta,ma+
+** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+void test_sf_vc_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t rs1, size_t vl) {
+ __riscv_sf_vc_vvv_se_u64m1(1, vd, vs2, rs1, vl);
+}
+
+
+/*
+** test_sf_vc_v_vvw_u8m1:
+** ...
+** vsetivli\s+zero+,0+,e8+,m1,ta,ma+
+** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vuint16m2_t test_sf_vc_v_vvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t rs1, size_t vl) {
+ return __riscv_sf_vc_v_vvw_u8m1(1, vd, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_v_vvw_se_u8m1:
+** ...
+** vsetivli\s+zero+,0+,e8+,m1,ta,ma+
+** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vuint16m2_t test_sf_vc_v_vvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t rs1, size_t vl) {
+ return __riscv_sf_vc_v_vvw_se_u8m1(1, vd, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_vvw_se_u16mf2:
+** ...
+** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+
+** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+void test_sf_vc_vvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t rs1, size_t vl) {
+ __riscv_sf_vc_vvw_se_u16mf2(1, vd, vs2, rs1, vl);
+}
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_x.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_x.c
new file mode 100644
index 0000000..10c92c8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_x.c
@@ -0,0 +1,138 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_xsfvcp -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "sifive_vector.h"
+
+/*
+** test_sf_vc_v_x_u32m1:
+** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
+** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint32m1_t test_sf_vc_v_x_u32m1(uint32_t xs1, size_t vl) {
+ return __riscv_sf_vc_v_x_u32m1(1, 2, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_x_se_u32m1:
+** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
+** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint32m1_t test_sf_vc_v_x_se_u32m1(uint32_t xs1, size_t vl) {
+ return __riscv_sf_vc_v_x_se_u32m1(1, 2, xs1, vl);
+}
+
+/*
+** test_sf_vc_x_se_u16m8:
+** ...
+** vsetivli\s+zero+,0+,e16+,m8,ta,ma+
+** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+
+** ...
+*/
+void test_sf_vc_x_se_u16m8(uint16_t xs1, size_t vl) {
+ __riscv_sf_vc_x_se_u16m8(1, 2, 3, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_xv_u32m2:
+** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
+** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint32m2_t test_sf_vc_v_xv_u32m2(vuint32m2_t vs2, uint32_t xs1, size_t vl) {
+ return __riscv_sf_vc_v_xv_u32m2(1, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_xv_se_u32m2:
+** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
+** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint32m2_t test_sf_vc_v_xv_se_u32m2(vuint32m2_t vs2, uint32_t xs1, size_t vl) {
+ return __riscv_sf_vc_v_xv_se_u32m2(1, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_xv_se_u16m4:
+** ...
+** vsetivli\s+zero+,0+,e16+,m4,ta,ma+
+** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+void test_sf_vc_xv_se_u16m4(vuint16m4_t vs2, uint16_t xs1, size_t vl) {
+ __riscv_sf_vc_xv_se_u16m4(1, 3, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_xvv_u16m1:
+** ...
+** vsetivli\s+zero+,0+,e16+,m1,ta,ma+
+** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint16m1_t test_sf_vc_v_xvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t xs1, size_t vl) {
+ return __riscv_sf_vc_v_xvv_u16m1(1, vd, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_xvv_se_u16m1:
+** ...
+** vsetivli\s+zero+,0+,e16+,m1,ta,ma+
+** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint16m1_t test_sf_vc_v_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t xs1, size_t vl) {
+ return __riscv_sf_vc_v_xvv_se_u16m1(1, vd, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_xvv_se_u32m2:
+** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
+** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+void test_sf_vc_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t xs1, size_t vl) {
+ __riscv_sf_vc_xvv_se_u32m2(1, vd, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_xvw_u32m1:
+** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
+** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint64m2_t test_sf_vc_v_xvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t xs1, size_t vl) {
+ return __riscv_sf_vc_v_xvw_u32m1(1, vd, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_xvw_se_u32m1:
+** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
+** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint64m2_t test_sf_vc_v_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t xs1, size_t vl) {
+ return __riscv_sf_vc_v_xvw_se_u32m1(1, vd, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_xvw_se_u32m1:
+** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
+** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+void test_sf_vc_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t xs1, size_t vl) {
+ __riscv_sf_vc_xvw_se_u32m1(1, vd, vs2, xs1, vl);
+}
+
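The four new xsfvector tests above all follow the same shape: a check-function-bodies comment lists regexes for the expected vsetivli/sf.vc.* instruction sequence, followed by the intrinsic call that should produce it. Below is a hedged, compile-only sketch of using the same intrinsics outside the test harness; it reuses only names and signatures that appear in the tests and, like them, assumes a RISC-V toolchain that accepts -march=rv64gcv_xsfvcp -mabi=lp64d and provides "sifive_vector.h".

#include "sifive_vector.h"

/* Value-returning form: yields a vector result
   (same call shape as test_sf_vc_v_xv_u32m2 above).  */
vuint32m2_t demo_value_form (vuint32m2_t vs2, uint32_t xs1, size_t vl)
{
  return __riscv_sf_vc_v_xv_u32m2 (1, vs2, xs1, vl);
}

/* void "_se" form: returns no vector value
   (same call shape as test_sf_vc_xv_se_u16m4 above).  */
void demo_void_form (vuint16m4_t vs2, uint16_t xs1, size_t vl)
{
  __riscv_sf_vc_xv_se_u16m4 (1, 3, vs2, xs1, vl);
}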
diff --git a/gcc/tree-ssa-structalias.cc b/gcc/tree-ssa-structalias.cc
index f79b542..3ad0c69 100644
--- a/gcc/tree-ssa-structalias.cc
+++ b/gcc/tree-ssa-structalias.cc
@@ -5583,6 +5583,42 @@ find_func_clobbers (struct function *fn, gimple *origt)
process_ipa_clobber (fi, gimple_call_arg (t, 2));
return;
}
+ /* The following functions use what their first argument
+ points to. */
+ case BUILT_IN_STRDUP:
+ case BUILT_IN_STRNDUP:
+ case BUILT_IN_REALLOC:
+ case BUILT_IN_INDEX:
+ case BUILT_IN_STRCHR:
+ case BUILT_IN_STRRCHR:
+ case BUILT_IN_MEMCHR:
+ {
+ tree src = gimple_call_arg (t, 0);
+ get_constraint_for_ptr_offset (src, NULL_TREE, &rhsc);
+ lhs = get_function_part_constraint (fi, fi_uses);
+ struct constraint_expr *rhsp;
+ FOR_EACH_VEC_ELT (rhsc, i, rhsp)
+ process_constraint (new_constraint (lhs, *rhsp));
+ return;
+ }
+      /* The following functions use what their first and second arguments
+	 point to.  */
+ case BUILT_IN_STRSTR:
+ case BUILT_IN_STRPBRK:
+ {
+ tree src = gimple_call_arg (t, 0);
+ get_constraint_for_ptr_offset (src, NULL_TREE, &rhsc);
+ lhs = get_function_part_constraint (fi, fi_uses);
+ struct constraint_expr *rhsp;
+ FOR_EACH_VEC_ELT (rhsc, i, rhsp)
+ process_constraint (new_constraint (lhs, *rhsp));
+ rhsc.truncate (0);
+ src = gimple_call_arg (t, 1);
+ get_constraint_for_ptr_offset (src, NULL_TREE, &rhsc);
+ FOR_EACH_VEC_ELT (rhsc, i, rhsp)
+ process_constraint (new_constraint (lhs, *rhsp));
+ return;
+ }
/* The following functions neither read nor clobber memory. */
case BUILT_IN_ASSUME_ALIGNED:
case BUILT_IN_FREE:
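A hedged illustration (not part of the patch) of what the new fi_uses constraints record: builtins such as strchr read the memory their first pointer argument points to, and strstr/strpbrk read through both arguments, so an IPA points-to client has to treat those bytes as used across the call.

#include <cstring>
#include <cstdio>

static char buf[8];

static const char *find_a ()
{
  buf[0] = 'a';   /* This store is observable through the callee's ...  */
  buf[1] = '\0';  /* ... first argument, so it must not be treated as dead.  */
  return std::strchr (buf, 'a');
}

int main ()
{
  std::printf ("%s\n", find_a () ? "found" : "not found");
  return 0;
}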
diff --git a/gcc/tree-ssa-threadbackward.cc b/gcc/tree-ssa-threadbackward.cc
index d0b74b2..3adb83e 100644
--- a/gcc/tree-ssa-threadbackward.cc
+++ b/gcc/tree-ssa-threadbackward.cc
@@ -349,9 +349,6 @@ back_threader::find_paths_to_names (basic_block bb, bitmap interesting,
unsigned overall_paths,
back_threader_profitability &profit)
{
- if (m_visited_bbs.add (bb))
- return;
-
m_path.safe_push (bb);
// Try to resolve the path without looking back. Avoid resolving paths
@@ -377,7 +374,8 @@ back_threader::find_paths_to_names (basic_block bb, bitmap interesting,
// Continue looking for ways to extend the path but limit the
// search space along a branch
else if ((overall_paths = overall_paths * EDGE_COUNT (bb->preds))
- <= (unsigned)param_max_jump_thread_paths)
+ <= (unsigned)param_max_jump_thread_paths
+ && !m_visited_bbs.add (bb))
{
// For further greedy searching we want to remove interesting
// names defined in BB but add ones on the PHI edges for the
@@ -489,6 +487,7 @@ back_threader::find_paths_to_names (basic_block bb, bitmap interesting,
backtracking we have to restore it. */
for (int j : new_imports)
bitmap_clear_bit (m_imports, j);
+ m_visited_bbs.remove (bb);
}
else if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " FAIL: Search space limit %d reached.\n",
@@ -496,7 +495,6 @@ back_threader::find_paths_to_names (basic_block bb, bitmap interesting,
// Reset things to their original state.
m_path.pop ();
- m_visited_bbs.remove (bb);
}
// Search backwards from BB looking for paths where the final
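A minimal sketch of the reordered bookkeeping above, using plain standard-library containers instead of GCC's types: the block is pushed on the path unconditionally, the search-space limit is now checked before the visited-set test, and the visited mark is dropped again while backtracking so a later path may pass through the same block.

#include <set>
#include <vector>
#include <cstdio>

struct graph { std::vector<std::vector<int>> preds; };

static void
find_paths (const graph &g, int bb, unsigned overall_paths, unsigned limit,
            std::set<int> &visited, std::vector<int> &path)
{
  path.push_back (bb);

  /* ... try to resolve and register the current path here ...  */

  overall_paths *= g.preds[bb].size ();
  if (overall_paths <= limit && visited.insert (bb).second)
    {
      for (int pred : g.preds[bb])
        find_paths (g, pred, overall_paths, limit, visited, path);
      visited.erase (bb);   /* Undo the mark while backtracking.  */
    }
  else
    std::printf ("  FAIL: limit reached or block %d already on this path.\n", bb);

  path.pop_back ();
}

int main ()
{
  /* Diamond CFG: block 3 has preds 1 and 2, which both have pred 0.  */
  graph g {{{}, {0}, {0}, {1, 2}}};
  std::set<int> visited;
  std::vector<int> path;
  find_paths (g, 3, 1, 8, visited, path);
  return 0;
}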
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index 19beeed..b5a9604 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -11161,9 +11161,14 @@ vect_schedule_slp_node (vec_info *vinfo,
== cycle_phi_info_type);
gphi *phi = as_a <gphi *>
(vect_find_last_scalar_stmt_in_slp (child)->stmt);
- if (!last_stmt
- || vect_stmt_dominates_stmt_p (last_stmt, phi))
+ if (!last_stmt)
last_stmt = phi;
+ else if (vect_stmt_dominates_stmt_p (last_stmt, phi))
+ last_stmt = phi;
+ else if (vect_stmt_dominates_stmt_p (phi, last_stmt))
+ ;
+ else
+ gcc_unreachable ();
}
/* We are emitting all vectorized stmts in the same place and
the last one is the last.
@@ -11174,9 +11179,14 @@ vect_schedule_slp_node (vec_info *vinfo,
FOR_EACH_VEC_ELT (SLP_TREE_VEC_DEFS (child), j, vdef)
{
gimple *vstmt = SSA_NAME_DEF_STMT (vdef);
- if (!last_stmt
- || vect_stmt_dominates_stmt_p (last_stmt, vstmt))
+ if (!last_stmt)
+ last_stmt = vstmt;
+ else if (vect_stmt_dominates_stmt_p (last_stmt, vstmt))
last_stmt = vstmt;
+ else if (vect_stmt_dominates_stmt_p (vstmt, last_stmt))
+ ;
+ else
+ gcc_unreachable ();
}
}
else if (!SLP_TREE_VECTYPE (child))
@@ -11189,9 +11199,14 @@ vect_schedule_slp_node (vec_info *vinfo,
&& !SSA_NAME_IS_DEFAULT_DEF (def))
{
gimple *stmt = SSA_NAME_DEF_STMT (def);
- if (!last_stmt
- || vect_stmt_dominates_stmt_p (last_stmt, stmt))
+ if (!last_stmt)
+ last_stmt = stmt;
+ else if (vect_stmt_dominates_stmt_p (last_stmt, stmt))
last_stmt = stmt;
+ else if (vect_stmt_dominates_stmt_p (stmt, last_stmt))
+ ;
+ else
+ gcc_unreachable ();
}
}
else
@@ -11212,9 +11227,14 @@ vect_schedule_slp_node (vec_info *vinfo,
&& !SSA_NAME_IS_DEFAULT_DEF (vdef))
{
gimple *vstmt = SSA_NAME_DEF_STMT (vdef);
- if (!last_stmt
- || vect_stmt_dominates_stmt_p (last_stmt, vstmt))
+ if (!last_stmt)
+ last_stmt = vstmt;
+ else if (vect_stmt_dominates_stmt_p (last_stmt, vstmt))
last_stmt = vstmt;
+ else if (vect_stmt_dominates_stmt_p (vstmt, last_stmt))
+ ;
+ else
+ gcc_unreachable ();
}
}
}
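The same three-way pattern is repeated for each operand kind above. A hedged, self-contained sketch of the selection rule, with a stand-in dominates () predicate in place of vect_stmt_dominates_stmt_p: the insertion point only ever moves to a statement it currently dominates, stays put when the new statement already dominates it, and any other relation is treated as a bug.

#include <cassert>

struct stmt { int uid; };

/* Stand-in for vect_stmt_dominates_stmt_p: here simply a total order by uid.  */
static bool
dominates (const stmt *a, const stmt *b)
{
  return a->uid < b->uid;
}

static const stmt *
pick_later (const stmt *last, const stmt *next)
{
  if (!last)
    return next;
  if (dominates (last, next))
    return next;   /* NEXT comes later: move the insertion point.  */
  if (dominates (next, last))
    return last;   /* Already past NEXT: keep the current point.  */
  assert (false && "operands must be ordered by dominance");
  return nullptr;
}

int main ()
{
  stmt a {1}, b {2};
  const stmt *last = nullptr;
  last = pick_later (last, &b);
  last = pick_later (last, &a);   /* A dominates B, so the point stays at B.  */
  assert (last == &b);
  return 0;
}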
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index 01d19c7..94cbfde6 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -1870,11 +1870,25 @@ vect_orig_stmt (stmt_vec_info stmt_info)
inline stmt_vec_info
get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
{
- if (gimple_uid (vect_orig_stmt (stmt1_info)->stmt)
- > gimple_uid (vect_orig_stmt (stmt2_info)->stmt))
+ gimple *stmt1 = vect_orig_stmt (stmt1_info)->stmt;
+ gimple *stmt2 = vect_orig_stmt (stmt2_info)->stmt;
+ if (gimple_bb (stmt1) == gimple_bb (stmt2))
+ {
+ if (gimple_uid (stmt1) > gimple_uid (stmt2))
+ return stmt1_info;
+ else
+ return stmt2_info;
+ }
+  /* ??? We should really be calling this function only with stmts
+     in the same BB, but we can recover if there's a domination
+     relationship between them.  */
+ else if (dominated_by_p (CDI_DOMINATORS,
+ gimple_bb (stmt1), gimple_bb (stmt2)))
return stmt1_info;
- else
+ else if (dominated_by_p (CDI_DOMINATORS,
+ gimple_bb (stmt2), gimple_bb (stmt1)))
return stmt2_info;
+ gcc_unreachable ();
}
/* If STMT_INFO has been replaced by a pattern statement, return the