aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorKuan-Lin Chen <rufus@andestech.com>2025-09-07 09:42:19 -0600
committerJeff Law <jlaw@ventanamicro.com>2025-09-07 09:42:42 -0600
commitd21713b3bc8bdf043106dfc8e8bf9ae304cf4b88 (patch)
tree6dcc657cd86e877675ff294e572d232d05bcc407 /gcc
parenta0344144dbccccf039f431a368f486f9dc6813ab (diff)
downloadgcc-d21713b3bc8bdf043106dfc8e8bf9ae304cf4b88.zip
gcc-d21713b3bc8bdf043106dfc8e8bf9ae304cf4b88.tar.gz
gcc-d21713b3bc8bdf043106dfc8e8bf9ae304cf4b88.tar.bz2
RISC-V: Add support for the XAndesvpackfph ISA extension.
This extension defines vector instructions to extract a pair of FP16 data from
a floating-point register: multiply the top FP16 datum with the FP16 vector
elements and add the result to the bottom FP16 datum.

gcc/ChangeLog:

	* common/config/riscv/riscv-common.cc: Turn on VECTOR_ELEN_FP_16
	for XAndesvpackfph.
	* config/riscv/andes-vector-builtins-bases.cc (nds_vfpmad): New class.
	* config/riscv/andes-vector-builtins-bases.h: New def.
	* config/riscv/andes-vector-builtins-functions.def (nds_vfpmadt): Ditto.
	(nds_vfpmadb): Ditto.
	(nds_vfpmadt_frm): Ditto.
	(nds_vfpmadb_frm): Ditto.
	* config/riscv/andes-vector.md (@pred_nds_vfpmad<nds_tb><mode>):
	New pattern.
	* config/riscv/riscv-vector-builtins-types.def (DEF_RVV_F16_OPS):
	New def.
	* config/riscv/riscv-vector-builtins.cc (f16_ops): Ditto.
	* config/riscv/riscv-vector-builtins.def (float32_type_node): Ditto.
	* config/riscv/riscv-vector-builtins.h (XANDESVPACKFPH_EXT): Ditto.
	(required_ext_to_isa_name): Add case XANDESVPACKFPH_EXT.
	(required_extensions_specified): Ditto.
	* config/riscv/vector-iterators.md (VHF): New iterator.

gcc/testsuite/ChangeLog:

	* gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadb.c: New test.
	* gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadt.c: New test.
	* gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadb.c: New test.
	* gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadt.c: New test.
	* gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadb.c: New test.
	* gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadt.c: New test.
	* gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadb.c: New test.
	* gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadt.c: New test.
Diffstat (limited to 'gcc')
-rw-r--r--gcc/common/config/riscv/riscv-common.cc3
-rw-r--r--gcc/config/riscv/andes-vector-builtins-bases.cc25
-rw-r--r--gcc/config/riscv/andes-vector-builtins-bases.h4
-rw-r--r--gcc/config/riscv/andes-vector-builtins-functions.def8
-rw-r--r--gcc/config/riscv/andes-vector.md32
-rw-r--r--gcc/config/riscv/riscv-vector-builtins-types.def14
-rw-r--r--gcc/config/riscv/riscv-vector-builtins.cc19
-rw-r--r--gcc/config/riscv/riscv-vector-builtins.def1
-rw-r--r--gcc/config/riscv/riscv-vector-builtins.h5
-rw-r--r--gcc/config/riscv/vector-iterators.md5
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/J58
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadb.c103
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadt.c103
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadb.c103
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadt.c103
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadb.c199
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadt.c199
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadb.c199
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadt.c199
19 files changed, 1381 insertions, 1 deletion
diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc
index e736545..a165506 100644
--- a/gcc/common/config/riscv/riscv-common.cc
+++ b/gcc/common/config/riscv/riscv-common.cc
@@ -1545,7 +1545,8 @@ static const riscv_extra_ext_flag_table_t riscv_extra_ext_flag_table[] =
RISCV_EXT_FLAG_ENTRY ("xtheadvector", x_riscv_isa_flags, MASK_FULL_V),
RISCV_EXT_FLAG_ENTRY ("xtheadvector", x_riscv_isa_flags, MASK_VECTOR),
- RISCV_EXT_FLAG_ENTRY ("xandesvbfhcvt", x_riscv_vector_elen_flags, MASK_VECTOR_ELEN_BF_16),
+ RISCV_EXT_FLAG_ENTRY ("xandesvbfhcvt", x_riscv_vector_elen_flags, MASK_VECTOR_ELEN_BF_16),
+ RISCV_EXT_FLAG_ENTRY ("xandesvpackfph", x_riscv_vector_elen_flags, MASK_VECTOR_ELEN_FP_16),
{NULL, NULL, NULL, 0}
};
diff --git a/gcc/config/riscv/andes-vector-builtins-bases.cc b/gcc/config/riscv/andes-vector-builtins-bases.cc
index 7fef63f..8220111 100644
--- a/gcc/config/riscv/andes-vector-builtins-bases.cc
+++ b/gcc/config/riscv/andes-vector-builtins-bases.cc
@@ -106,11 +106,32 @@ public:
}
};
+template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
+class nds_vfpmad : public function_base
+{
+public:
+ bool has_rounding_mode_operand_p () const override
+ {
+ return FRM_OP == HAS_FRM;
+ }
+ bool may_require_frm_p () const override { return true; }
+
+ rtx expand (function_expander &e) const override
+ {
+ return e.use_exact_insn (code_for_pred_nds_vfpmad (UNSPEC,
+ e.vector_mode ()));
+ }
+};
+
static CONSTEXPR const nds_vfwcvtbf16_f nds_vfwcvt_s_obj;
static CONSTEXPR const nds_vfncvtbf16_f<NO_FRM> nds_vfncvt_bf16_obj;
static CONSTEXPR const nds_vfncvtbf16_f<HAS_FRM> nds_vfncvt_bf16_frm_obj;
static CONSTEXPR const nds_nibbleload<true> nds_vln8_obj;
static CONSTEXPR const nds_nibbleload<false> nds_vlnu8_obj;
+static CONSTEXPR const nds_vfpmad <UNSPEC_NDS_VFPMADT, NO_FRM> nds_vfpmadt_obj;
+static CONSTEXPR const nds_vfpmad <UNSPEC_NDS_VFPMADB, NO_FRM> nds_vfpmadb_obj;
+static CONSTEXPR const nds_vfpmad <UNSPEC_NDS_VFPMADT, HAS_FRM> nds_vfpmadt_frm_obj;
+static CONSTEXPR const nds_vfpmad <UNSPEC_NDS_VFPMADB, HAS_FRM> nds_vfpmadb_frm_obj;
/* Declare the function base NAME, pointing it to an instance
of class <NAME>_obj. */
@@ -122,4 +143,8 @@ BASE (nds_vfncvt_bf16)
BASE (nds_vfncvt_bf16_frm)
BASE (nds_vln8)
BASE (nds_vlnu8)
+BASE (nds_vfpmadt)
+BASE (nds_vfpmadb)
+BASE (nds_vfpmadt_frm)
+BASE (nds_vfpmadb_frm)
} // end namespace riscv_vector
diff --git a/gcc/config/riscv/andes-vector-builtins-bases.h b/gcc/config/riscv/andes-vector-builtins-bases.h
index b57480f..4b93f79 100644
--- a/gcc/config/riscv/andes-vector-builtins-bases.h
+++ b/gcc/config/riscv/andes-vector-builtins-bases.h
@@ -29,6 +29,10 @@ extern const function_base *const nds_vfncvt_bf16;
extern const function_base *const nds_vfncvt_bf16_frm;
extern const function_base *const nds_vln8;
extern const function_base *const nds_vlnu8;
+extern const function_base *const nds_vfpmadt;
+extern const function_base *const nds_vfpmadb;
+extern const function_base *const nds_vfpmadt_frm;
+extern const function_base *const nds_vfpmadb_frm;
}
} // end namespace riscv_vector
diff --git a/gcc/config/riscv/andes-vector-builtins-functions.def b/gcc/config/riscv/andes-vector-builtins-functions.def
index 8e5926b..5d5762a 100644
--- a/gcc/config/riscv/andes-vector-builtins-functions.def
+++ b/gcc/config/riscv/andes-vector-builtins-functions.def
@@ -48,4 +48,12 @@ DEF_RVV_FUNCTION (nds_vln8, alu, full_preds, q_v_void_const_ptr_ops)
DEF_RVV_FUNCTION (nds_vlnu8, alu, full_preds, qu_v_void_const_ptr_ops)
#undef REQUIRED_EXTENSIONS
+/* Prefix name for `__riscv_nds_`. */
+#define REQUIRED_EXTENSIONS XANDESVPACKFPH_EXT
+DEF_RVV_FUNCTION (nds_vfpmadt, alu, full_preds, f16_vvw_ops)
+DEF_RVV_FUNCTION (nds_vfpmadb, alu, full_preds, f16_vvw_ops)
+DEF_RVV_FUNCTION (nds_vfpmadt_frm, alu_frm, full_preds, f16_vvw_ops)
+DEF_RVV_FUNCTION (nds_vfpmadb_frm, alu_frm, full_preds, f16_vvw_ops)
+#undef REQUIRED_EXTENSIONS
+
#undef DEF_RVV_FUNCTION
diff --git a/gcc/config/riscv/andes-vector.md b/gcc/config/riscv/andes-vector.md
index 28bc553..b2c886d 100644
--- a/gcc/config/riscv/andes-vector.md
+++ b/gcc/config/riscv/andes-vector.md
@@ -21,8 +21,13 @@
UNSPEC_NDS_VFWCVTBF16
UNSPEC_NDS_VFNCVTBF16
UNSPEC_NDS_INTLOAD
+ UNSPEC_NDS_VFPMADT
+ UNSPEC_NDS_VFPMADB
])
+(define_int_iterator NDS_VFPMAD [UNSPEC_NDS_VFPMADT UNSPEC_NDS_VFPMADB])
+(define_int_attr nds_tb [(UNSPEC_NDS_VFPMADT "t") (UNSPEC_NDS_VFPMADB "b")])
+
;; ....................
;;
;; VECTOR BFLOAT16 CONVERSION
@@ -103,3 +108,30 @@
nds.vln<u>8.v\t%0,%3,%1.t"
[(set_attr "type" "vlde,vlde,vlde")
(set_attr "mode" "<MODE>")])
+
+;; Vector Packed FP16.
+
+(define_insn "@pred_nds_vfpmad<nds_tb><mode>"
+ [(set (match_operand:VHF 0 "register_operand" "=&vr, &vr")
+ (if_then_else:VHF
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1, vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)
+ (reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VHF
+ [(match_operand:VHF 3 "register_operand" "vr, vr")
+ (match_operand:SF 4 "register_operand" " f, f")] NDS_VFPMAD)
+ (match_operand:VHF 2 "vector_merge_operand" "vu, 0")))]
+ "TARGET_VECTOR && TARGET_XANDESVPACKFPH"
+ "nds.vfpmad<nds_tb>.vf\t%0,%4,%3%p1"
+ [(set_attr "type" "vfmuladd")
+ (set_attr "mode" "<MODE>")
+ (set_attr "enabled" "yes")
+ (set (attr "frm_mode")
+ (symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
diff --git a/gcc/config/riscv/riscv-vector-builtins-types.def b/gcc/config/riscv/riscv-vector-builtins-types.def
index d07a0ba..73fe1fa 100644
--- a/gcc/config/riscv/riscv-vector-builtins-types.def
+++ b/gcc/config/riscv/riscv-vector-builtins-types.def
@@ -393,6 +393,12 @@ along with GCC; see the file COPYING3. If not see
#define DEF_RVV_QU_OPS(TYPE, REQUIRE)
#endif
+/* Use "DEF_RVV_F16_OPS" macro include all types for vfloat16
+ which will be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_F16_OPS
+#define DEF_RVV_F16_OPS(TYPE, REQUIRE)
+#endif
+
DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_I_OPS (vint8mf4_t, 0)
DEF_RVV_I_OPS (vint8mf2_t, 0)
@@ -1529,6 +1535,13 @@ DEF_RVV_QU_OPS (vuint8m2_t, 0)
DEF_RVV_QU_OPS (vuint8m4_t, 0)
DEF_RVV_QU_OPS (vuint8m8_t, 0)
+DEF_RVV_F16_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_F16_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
+DEF_RVV_F16_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
+DEF_RVV_F16_OPS (vfloat16m2_t, RVV_REQUIRE_ELEN_FP_16)
+DEF_RVV_F16_OPS (vfloat16m4_t, RVV_REQUIRE_ELEN_FP_16)
+DEF_RVV_F16_OPS (vfloat16m8_t, RVV_REQUIRE_ELEN_FP_16)
+
#undef DEF_RVV_I_OPS
#undef DEF_RVV_U_OPS
#undef DEF_RVV_F_OPS
@@ -1589,3 +1602,4 @@ DEF_RVV_QU_OPS (vuint8m8_t, 0)
#undef DEF_RVV_X2_WU_OPS
#undef DEF_RVV_Q_OPS
#undef DEF_RVV_QU_OPS
+#undef DEF_RVV_F16_OPS
diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc
index 6c2d4b9..3ab09f4 100644
--- a/gcc/config/riscv/riscv-vector-builtins.cc
+++ b/gcc/config/riscv/riscv-vector-builtins.cc
@@ -584,6 +584,12 @@ static const rvv_type_info qu_ops[] = {
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
+/* A list of FP16 will be registered for intrinsic functions. */
+static const rvv_type_info f16_ops[] = {
+#define DEF_RVV_F16_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
static CONSTEXPR const rvv_arg_type_info rvv_arg_type_info_end
= rvv_arg_type_info (NUM_BASE_TYPES);
@@ -1241,6 +1247,12 @@ static CONSTEXPR const rvv_arg_type_info sf_vc_fvw_args[]
static CONSTEXPR const rvv_arg_type_info void_const_ptr_args[]
= {rvv_arg_type_info (RVV_BASE_void_const_ptr), rvv_arg_type_info_end};
+/* A list of args for vector_type func (vector_type, widen_lmul1_scalar)
+ function. */
+static CONSTEXPR const rvv_arg_type_info vw_args[]
+ = {rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_float32), rvv_arg_type_info_end};
+
/* A list of none preds that will be registered for intrinsic functions. */
static CONSTEXPR const predication_type_index none_preds[]
= {PRED_TYPE_none, NUM_PRED_TYPES};
@@ -3129,6 +3141,12 @@ static CONSTEXPR const rvv_op_info qu_v_void_const_ptr_ops
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
void_const_ptr_args /* Args */};
+static CONSTEXPR const rvv_op_info f16_vvw_ops
+ = {f16_ops, /* Types */
+ OP_TYPE_vf, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ vw_args /* Args */};
+
/* A static operand information for vector_type func (vector_type).
Some insns just supports SEW=32, such as the crypto vector Zvkg extension.
* function registration. */
@@ -3425,6 +3443,7 @@ static CONSTEXPR const function_type_info function_types[] = {
VECTOR_TYPE_INVALID, \
VECTOR_TYPE_INVALID, \
VECTOR_TYPE_INVALID, \
+ VECTOR_TYPE_INVALID, \
VECTOR_TYPE_##SIGNED_EEW8_INDEX, \
VECTOR_TYPE_##EEW8_INDEX, \
VECTOR_TYPE_##EEW16_INDEX, \
diff --git a/gcc/config/riscv/riscv-vector-builtins.def b/gcc/config/riscv/riscv-vector-builtins.def
index 7000e81..0ca98a1 100644
--- a/gcc/config/riscv/riscv-vector-builtins.def
+++ b/gcc/config/riscv/riscv-vector-builtins.def
@@ -699,6 +699,7 @@ DEF_RVV_BASE_TYPE (size, size_type_node)
DEF_RVV_BASE_TYPE (ptrdiff, ptrdiff_type_node)
DEF_RVV_BASE_TYPE (unsigned_long, long_unsigned_type_node)
DEF_RVV_BASE_TYPE (long, long_integer_type_node)
+DEF_RVV_BASE_TYPE (float32, float32_type_node)
DEF_RVV_BASE_TYPE (signed_eew8_index, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (eew8_index, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (eew16_index, get_vector_type (type_idx))
diff --git a/gcc/config/riscv/riscv-vector-builtins.h b/gcc/config/riscv/riscv-vector-builtins.h
index 440bc3b..6302307 100644
--- a/gcc/config/riscv/riscv-vector-builtins.h
+++ b/gcc/config/riscv/riscv-vector-builtins.h
@@ -133,6 +133,7 @@ enum required_ext
XSFVCP_EXT, /* XSFVCP extension*/
XANDESVBFHCVT_EXT, /* XANDESVBFHCVT extension */
XANDESVSINTLOAD_EXT, /* XANDESVSINTLOAD extension */
+ XANDESVPACKFPH_EXT, /* XANDESVPACKFPH extension */
/* Please update below to isa_name func when add or remove enum type(s). */
};
@@ -178,6 +179,8 @@ static inline const char * required_ext_to_isa_name (enum required_ext required)
return "xandesvbfhcvt";
case XANDESVSINTLOAD_EXT:
return "xandesvsintload";
+ case XANDESVPACKFPH_EXT:
+ return "xandesvpackfph";
default:
gcc_unreachable ();
}
@@ -227,6 +230,8 @@ static inline bool required_extensions_specified (enum required_ext required)
return TARGET_XANDESVBFHCVT;
case XANDESVSINTLOAD_EXT:
return TARGET_XANDESVSINTLOAD;
+ case XANDESVPACKFPH_EXT:
+ return TARGET_XANDESVPACKFPH;
default:
gcc_unreachable ();
}
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index 802fa3b..8a3815c 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -5005,3 +5005,8 @@
RVVM8QI RVVM4QI RVVM2QI RVVM1QI
RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
])
+
+(define_mode_iterator VHF [
+ RVVM8HF RVVM4HF RVVM2HF RVVM1HF RVVMF2HF
+ (RVVMF4HF "TARGET_MIN_VLEN > 32")
+])
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/J b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/J
new file mode 100644
index 0000000..55ef940
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/J
@@ -0,0 +1,58 @@
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 1) /* { dg-do compile } */
+cd1ce3b326d4 (demin.han 2024-03-13 18:43:26 +0800 2) /* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -mrvv-max-lmul=m8 -ffast-math -fdump-tree-optimized" } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 3)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 4) #include "def.h"
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 5)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 6) DEF_OP_V (nearbyintf16, 1, _Float16, __builtin_nearbyintf16)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 7) DEF_OP_V (nearbyintf16, 2, _Float16, __builtin_nearbyintf16)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 8) DEF_OP_V (nearbyintf16, 4, _Float16, __builtin_nearbyintf16)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 9) DEF_OP_V (nearbyintf16, 8, _Float16, __builtin_nearbyintf16)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 10) DEF_OP_V (nearbyintf16, 16, _Float16, __builtin_nearbyintf16)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 11) DEF_OP_V (nearbyintf16, 32, _Float16, __builtin_nearbyintf16)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 12) DEF_OP_V (nearbyintf16, 64, _Float16, __builtin_nearbyintf16)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 13) DEF_OP_V (nearbyintf16, 128, _Float16, __builtin_nearbyintf16)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 14) DEF_OP_V (nearbyintf16, 256, _Float16, __builtin_nearbyintf16)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 15) DEF_OP_V (nearbyintf16, 512, _Float16, __builtin_nearbyintf16)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 16) DEF_OP_V (nearbyintf16, 1024, _Float16, __builtin_nearbyintf16)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 17) DEF_OP_V (nearbyintf16, 2048, _Float16, __builtin_nearbyintf16)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 18)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 19) DEF_OP_V (nearbyintf, 1, float, __builtin_nearbyintf)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 20) DEF_OP_V (nearbyintf, 2, float, __builtin_nearbyintf)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 21) DEF_OP_V (nearbyintf, 4, float, __builtin_nearbyintf)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 22) DEF_OP_V (nearbyintf, 8, float, __builtin_nearbyintf)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 23) DEF_OP_V (nearbyintf, 16, float, __builtin_nearbyintf)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 24) DEF_OP_V (nearbyintf, 32, float, __builtin_nearbyintf)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 25) DEF_OP_V (nearbyintf, 64, float, __builtin_nearbyintf)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 26) DEF_OP_V (nearbyintf, 128, float, __builtin_nearbyintf)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 27) DEF_OP_V (nearbyintf, 256, float, __builtin_nearbyintf)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 28) DEF_OP_V (nearbyintf, 512, float, __builtin_nearbyintf)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 29) DEF_OP_V (nearbyintf, 1024, float, __builtin_nearbyintf)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 30)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 31) DEF_OP_V (nearbyint, 1, double, __builtin_nearbyint)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 32) DEF_OP_V (nearbyint, 2, double, __builtin_nearbyint)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 33) DEF_OP_V (nearbyint, 4, double, __builtin_nearbyint)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 34) DEF_OP_V (nearbyint, 8, double, __builtin_nearbyint)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 35) DEF_OP_V (nearbyint, 16, double, __builtin_nearbyint)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 36) DEF_OP_V (nearbyint, 32, double, __builtin_nearbyint)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 37) DEF_OP_V (nearbyint, 64, double, __builtin_nearbyint)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 38) DEF_OP_V (nearbyint, 128, double, __builtin_nearbyint)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 39) DEF_OP_V (nearbyint, 256, double, __builtin_nearbyint)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 40) DEF_OP_V (nearbyint, 512, double, __builtin_nearbyint)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 41)
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 42) /* { dg-final { scan-assembler-not {csrr} } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 43) /* { dg-final { scan-tree-dump-not "1,1" "optimized" } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 44) /* { dg-final { scan-tree-dump-not "2,2" "optimized" } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 45) /* { dg-final { scan-tree-dump-not "4,4" "optimized" } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 46) /* { dg-final { scan-tree-dump-not "16,16" "optimized" } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 47) /* { dg-final { scan-tree-dump-not "32,32" "optimized" } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 48) /* { dg-final { scan-tree-dump-not "64,64" "optimized" } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 49) /* { dg-final { scan-tree-dump-not "128,128" "optimized" } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 50) /* { dg-final { scan-tree-dump-not "256,256" "optimized" } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 51) /* { dg-final { scan-tree-dump-not "512,512" "optimized" } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 52) /* { dg-final { scan-tree-dump-not "1024,1024" "optimized" } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 53) /* { dg-final { scan-tree-dump-not "2048,2048" "optimized" } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 54) /* { dg-final { scan-tree-dump-not "4096,4096" "optimized" } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 55) /* { dg-final { scan-assembler-times {vfcvt\.x\.f\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0\.t} 30 } } */
+e2023d2d5ff2 (Pan Li 2023-09-25 10:04:36 +0800 56) /* { dg-final { scan-assembler-times {vfcvt\.f\.x\.v\s+v[0-9]+,\s*v[0-9]+,\s*v0\.t} 30 } } */
+f5696e9b1113 (Vineet Gupta 2025-08-13 20:20:34 -0700 57) /* { dg-final { scan-assembler-times {frflags\s+[atx][0-9]+} 32 } } */
+f5696e9b1113 (Vineet Gupta 2025-08-13 20:20:34 -0700 58) /* { dg-final { scan-assembler-times {fsflags\s+[atx][0-9]+} 32 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadb.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadb.c
new file mode 100644
index 0000000..decd594
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadb.c
@@ -0,0 +1,103 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4(op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2(op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1(op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2(op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4(op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8(op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_m(mask, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_m(mask, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_m(mask, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_m(mask, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_m(mask, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_m(mask, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm(vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm(vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {nds\.vfpmadb\.vf\s+} 24 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadt.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadt.c
new file mode 100644
index 0000000..bc104ff
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadt.c
@@ -0,0 +1,103 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4(op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2(op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1(op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2(op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4(op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8(op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_m(mask, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_m(mask, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_m(mask, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_m(mask, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_m(mask, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_m(mask, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm(vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm(vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {nds\.vfpmadt\.vf\s+} 24 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadb.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadb.c
new file mode 100644
index 0000000..bbb084d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadb.c
@@ -0,0 +1,103 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm(vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm(vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {nds\.vfpmadb\.vf\s+} 24 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadt.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadt.c
new file mode 100644
index 0000000..1e1347e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadt.c
@@ -0,0 +1,103 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm(vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm(vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {nds\.vfpmadt\.vf\s+} 24 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadb.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadb.c
new file mode 100644
index 0000000..31de0dd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadb.c
@@ -0,0 +1,199 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vfpmadb\.vf\s+} 48 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadt.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadt.c
new file mode 100644
index 0000000..0b41e9a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadt.c
@@ -0,0 +1,199 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vfpmadt\.vf\s+} 48 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadb.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadb.c
new file mode 100644
index 0000000..ff79fe7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadb.c
@@ -0,0 +1,199 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vfpmadb\.vf\s+} 48 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadt.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadt.c
new file mode 100644
index 0000000..f40b54f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadt.c
@@ -0,0 +1,199 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vfpmadt\.vf\s+} 48 } } */