aboutsummaryrefslogtreecommitdiff
path: root/gcc/config/aarch64/aarch64-sve-builtins-base.cc
diff options
context:
space:
mode:
Diffstat (limited to 'gcc/config/aarch64/aarch64-sve-builtins-base.cc')
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-base.cc268
1 file changed, 255 insertions(+), 13 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
index 13e020b..927c5bb 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
@@ -201,6 +201,15 @@ class svac_impl : public function_base
public:
CONSTEXPR svac_impl (int unspec) : m_unspec (unspec) {}
+ /* Fold to PG when the governing predicate is all-false: with no active
+    lanes the compare can produce no true results, so the result is an
+    all-false predicate, i.e. PG itself.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ tree pg = gimple_call_arg (f.call, 0);
+ if (is_pfalse (pg))
+ return f.fold_call_to (pg);
+ return NULL;
+ }
+
rtx
expand (function_expander &e) const override
{
@@ -216,6 +225,14 @@ public:
class svadda_impl : public function_base
{
public:
+ /* With an all-false predicate no elements are accumulated, so svadda
+    returns its initial value (argument 1) unchanged.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (is_pfalse (gimple_call_arg (f.call, 0)))
+ return f.fold_call_to (gimple_call_arg (f.call, 1));
+ return NULL;
+ }
+
rtx
expand (function_expander &e) const override
{
@@ -227,6 +244,21 @@ public:
}
};
+/* Implements svaddv.  */
+class svaddv_impl : public reduction
+{
+public:
+ CONSTEXPR svaddv_impl ()
+ : reduction (UNSPEC_SADDV, UNSPEC_UADDV, UNSPEC_FADDV) {}
+
+ /* Summing zero active elements yields the additive identity, zero.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (is_pfalse (gimple_call_arg (f.call, 0)))
+ return f.fold_call_to (build_zero_cst (TREE_TYPE (f.lhs)))
+ return NULL;
+ }
+};
+
/* Implements svadr[bhwd]. */
class svadr_bhwd_impl : public function_base
{
@@ -245,11 +277,25 @@ public:
e.args.quick_push (expand_vector_broadcast (mode, shift));
return e.use_exact_insn (code_for_aarch64_adr_shift (mode));
}
-
/* How many bits left to shift the vector displacement. */
unsigned int m_shift;
};
+
+/* Implements svandv.  */
+class svandv_impl : public reduction
+{
+public:
+ CONSTEXPR svandv_impl () : reduction (UNSPEC_ANDV) {}
+
+ /* ANDing zero active elements yields the identity for AND: all-ones.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (is_pfalse (gimple_call_arg (f.call, 0)))
+ return f.fold_call_to (build_all_ones_cst (TREE_TYPE (f.lhs)));
+ return NULL;
+ }
+};
+
class svbic_impl : public function_base
{
public:
@@ -333,6 +379,14 @@ class svclast_impl : public quiet<function_base>
public:
CONSTEXPR svclast_impl (int unspec) : m_unspec (unspec) {}
+ /* With an all-false predicate there is no last active element, so the
+    call reduces to its fallback operand (argument 1).  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (is_pfalse (gimple_call_arg (f.call, 0)))
+ return f.fold_call_to (gimple_call_arg (f.call, 1));
+ return NULL;
+ }
+
rtx
expand (function_expander &e) const override
{
@@ -425,6 +479,8 @@ public:
return gimple_build_assign (f.lhs, m_code, rhs1, rhs2);
}
+ if (is_pfalse (pg))
+ return f.fold_call_to (pg);
return NULL;
}
@@ -464,6 +520,15 @@ public:
: m_code (code), m_unspec_for_sint (unspec_for_sint),
m_unspec_for_uint (unspec_for_uint) {}
+ /* An all-false governing predicate makes every lane of the comparison
+    result inactive/false, so fold the call to PG itself.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ tree pg = gimple_call_arg (f.call, 0);
+ if (is_pfalse (pg))
+ return f.fold_call_to (pg);
+ return NULL;
+ }
+
rtx
expand (function_expander &e) const override
{
@@ -502,6 +567,16 @@ public:
class svcmpuo_impl : public quiet<function_base>
{
public:
+
+ /* An unordered compare governed by an all-false predicate produces an
+    all-false result, so fold the call to PG.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ tree pg = gimple_call_arg (f.call, 0);
+ if (is_pfalse (pg))
+ return f.fold_call_to (pg);
+ return NULL;
+ }
+
rtx
expand (function_expander &e) const override
{
@@ -598,6 +673,16 @@ public:
class svcntp_impl : public function_base
{
public:
+
+ /* Counting the active elements of an all-false predicate gives zero.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ tree pg = gimple_call_arg (f.call, 0);
+ if (is_pfalse (pg))
+ return f.fold_call_to (build_zero_cst (TREE_TYPE (f.lhs)));
+ return NULL;
+ }
+
rtx
expand (function_expander &e) const override
{
@@ -613,6 +698,19 @@ public:
}
};
+/* Implements svcompact.  */
+class svcompact_impl
+ : public QUIET_CODE_FOR_MODE0 (aarch64_sve_compact)
+{
+public:
+ /* With an all-false predicate there are no active elements to compact,
+    so the result is an all-zeros vector.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (is_pfalse (gimple_call_arg (f.call, 0)))
+ return f.fold_call_to (build_zero_cst (TREE_TYPE (f.lhs)));
+ return NULL;
+ }
+};
+
/* Implements svcreate2, svcreate3 and svcreate4. */
class svcreate_impl : public quiet<multi_vector_function>
{
@@ -749,6 +847,18 @@ public:
}
};
+/* Implements svcvtnt.  */
+class svcvtnt_impl : public CODE_FOR_MODE0 (aarch64_sve_cvtnt)
+{
+public:
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ /* For the _x form, lanes governed by an all-false predicate are
+    undefined (NOTE(review): per ACLE _x semantics — confirm), so the
+    whole result may be folded to an all-zeros vector.  The predicate
+    is argument 1 here because svcvtnt takes the even-lane input
+    first.  The fold must be returned, not merely computed.  */
+ if (f.pred == PRED_x && is_pfalse (gimple_call_arg (f.call, 1)))
+ return f.fold_call_to (build_zero_cst (TREE_TYPE (f.lhs)));
+ return NULL;
+ }
+};
+
class svdiv_impl : public rtx_code_function
{
public:
@@ -1155,6 +1265,20 @@ public:
}
};
+/* Implements sveorv.  */
+class sveorv_impl : public reduction
+{
+public:
+ CONSTEXPR sveorv_impl () : reduction (UNSPEC_XORV) {}
+
+ /* XORing zero active elements yields the identity for XOR, zero.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (is_pfalse (gimple_call_arg (f.call, 0)))
+ return f.fold_call_to (build_zero_cst (TREE_TYPE (f.lhs)));
+ return NULL;
+ }
+};
+
/* Implements svextb, svexth and svextw. */
class svext_bhw_impl : public function_base
{
@@ -1397,7 +1521,8 @@ public:
BIT_FIELD_REF lowers to Advanced SIMD element extract, so we have to
ensure the index of the element being accessed is in the range of a
Advanced SIMD vector width. */
- gimple *fold (gimple_folder & f) const override
+ gimple *
+ fold (gimple_folder & f) const override
{
tree pred = gimple_call_arg (f.call, 0);
tree val = gimple_call_arg (f.call, 1);
@@ -1973,6 +2098,80 @@ public:
}
};
+/* Implements svminv.  */
+class svminv_impl : public reduction
+{
+public:
+ CONSTEXPR svminv_impl ()
+ : reduction (UNSPEC_SMINV, UNSPEC_UMINV, UNSPEC_FMINV) {}
+
+ /* The minimum of zero active elements is the identity for MIN: the
+    type's maximum value for integers, +infinity for floats.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (is_pfalse (gimple_call_arg (f.call, 0)))
+ {
+ tree rhs = f.type_suffix (0).integer_p
+ ? TYPE_MAX_VALUE (TREE_TYPE (f.lhs))
+ : build_real (TREE_TYPE (f.lhs), dconstinf);
+ return f.fold_call_to (rhs);
+ }
+ return NULL;
+ }
+};
+
+/* Implements svmaxnmv.  */
+class svmaxnmv_impl : public reduction
+{
+public:
+ CONSTEXPR svmaxnmv_impl () : reduction (UNSPEC_FMAXNMV) {}
+ /* With zero active elements the call folds to NaN, built by retagging
+    a zero REAL_VALUE_TYPE as rvc_nan (NOTE(review): assumes this
+    matches the default NaN FMAXNMV produces — confirm).  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (is_pfalse (gimple_call_arg (f.call, 0)))
+ {
+ REAL_VALUE_TYPE rnan = dconst0;
+ rnan.cl = rvc_nan;
+ return f.fold_call_to (build_real (TREE_TYPE (f.lhs), rnan));
+ }
+ return NULL;
+ }
+};
+
+/* Implements svmaxv.  */
+class svmaxv_impl : public reduction
+{
+public:
+ CONSTEXPR svmaxv_impl ()
+ : reduction (UNSPEC_SMAXV, UNSPEC_UMAXV, UNSPEC_FMAXV) {}
+
+ /* The maximum of zero active elements is the identity for MAX: the
+    type's minimum value for integers, -infinity for floats.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (is_pfalse (gimple_call_arg (f.call, 0)))
+ {
+ tree rhs = f.type_suffix (0).integer_p
+ ? TYPE_MIN_VALUE (TREE_TYPE (f.lhs))
+ : build_real (TREE_TYPE (f.lhs), dconstninf);
+ return f.fold_call_to (rhs);
+ }
+ return NULL;
+ }
+};
+
+/* Implements svminnmv.  */
+class svminnmv_impl : public reduction
+{
+public:
+ CONSTEXPR svminnmv_impl () : reduction (UNSPEC_FMINNMV) {}
+ /* With zero active elements the call folds to NaN, built by retagging
+    a zero REAL_VALUE_TYPE as rvc_nan (NOTE(review): assumes this
+    matches the default NaN FMINNMV produces — confirm).  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (is_pfalse (gimple_call_arg (f.call, 0)))
+ {
+ REAL_VALUE_TYPE rnan = dconst0;
+ rnan.cl = rvc_nan;
+ return f.fold_call_to (build_real (TREE_TYPE (f.lhs), rnan));
+ }
+ return NULL;
+ }
+};
+
class svmla_impl : public function_base
{
public:
@@ -2222,6 +2421,20 @@ public:
}
};
+/* Implements svorv.  */
+class svorv_impl : public reduction
+{
+public:
+ CONSTEXPR svorv_impl () : reduction (UNSPEC_IORV) {}
+
+ /* ORing zero active elements yields the identity for OR, zero.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (is_pfalse (gimple_call_arg (f.call, 0)))
+ return f.fold_call_to (build_zero_cst (TREE_TYPE (f.lhs)));
+ return NULL;
+ }
+};
+
class svpfalse_impl : public function_base
{
public:
@@ -2246,6 +2459,16 @@ class svpfirst_svpnext_impl : public function_base
{
public:
CONSTEXPR svpfirst_svpnext_impl (int unspec) : m_unspec (unspec) {}
+ /* With an all-false PG: svpfirst leaves its operand unchanged, so fold
+    to argument 1; svpnext finds no next active element and produces an
+    all-false result, so fold to PG itself.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ tree pg = gimple_call_arg (f.call, 0);
+ if (is_pfalse (pg))
+ return f.fold_call_to (m_unspec == UNSPEC_PFIRST
+ ? gimple_call_arg (f.call, 1)
+ : pg);
+ return NULL;
+ }
rtx
expand (function_expander &e) const override
@@ -2326,6 +2549,13 @@ class svptest_impl : public function_base
{
public:
CONSTEXPR svptest_impl (rtx_code compare) : m_compare (compare) {}
+ /* No lane of an all-false governing predicate can satisfy any ptest
+    condition, so the result is constant false.  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (is_pfalse (gimple_call_arg (f.call, 0)))
+ return f.fold_call_to (boolean_false_node);
+ return NULL;
+ }
rtx
expand (function_expander &e) const override
@@ -2741,6 +2971,18 @@ public:
}
};
+/* Implements svsplice.  */
+class svsplice_impl : public QUIET_CODE_FOR_MODE0 (aarch64_sve_splice)
+{
+public:
+ /* With an all-false predicate no elements of the first vector are
+    selected, so the result is just the second vector (argument 2).  */
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (is_pfalse (gimple_call_arg (f.call, 0)))
+ return f.fold_call_to (gimple_call_arg (f.call, 2));
+ return NULL;
+ }
+};
+
class svst1_impl : public full_width_access
{
public:
@@ -3194,13 +3436,13 @@ FUNCTION (svacle, svac_impl, (UNSPEC_COND_FCMLE))
FUNCTION (svaclt, svac_impl, (UNSPEC_COND_FCMLT))
FUNCTION (svadd, rtx_code_function, (PLUS, PLUS, UNSPEC_COND_FADD))
FUNCTION (svadda, svadda_impl,)
-FUNCTION (svaddv, reduction, (UNSPEC_SADDV, UNSPEC_UADDV, UNSPEC_FADDV))
+FUNCTION (svaddv, svaddv_impl,)
FUNCTION (svadrb, svadr_bhwd_impl, (0))
FUNCTION (svadrd, svadr_bhwd_impl, (3))
FUNCTION (svadrh, svadr_bhwd_impl, (1))
FUNCTION (svadrw, svadr_bhwd_impl, (2))
FUNCTION (svand, rtx_code_function, (AND, AND))
-FUNCTION (svandv, reduction, (UNSPEC_ANDV))
+FUNCTION (svandv, svandv_impl,)
FUNCTION (svasr, rtx_code_function, (ASHIFTRT, ASHIFTRT))
FUNCTION (svasr_wide, shift_wide, (ASHIFTRT, UNSPEC_ASHIFTRT_WIDE))
FUNCTION (svasrd, unspec_based_function, (UNSPEC_ASRD, -1, -1))
@@ -3257,12 +3499,12 @@ FUNCTION (svcnth_pat, svcnt_bhwd_pat_impl, (VNx8HImode))
FUNCTION (svcntp, svcntp_impl,)
FUNCTION (svcntw, svcnt_bhwd_impl, (VNx4SImode))
FUNCTION (svcntw_pat, svcnt_bhwd_pat_impl, (VNx4SImode))
-FUNCTION (svcompact, QUIET_CODE_FOR_MODE0 (aarch64_sve_compact),)
+FUNCTION (svcompact, svcompact_impl,)
FUNCTION (svcreate2, svcreate_impl, (2))
FUNCTION (svcreate3, svcreate_impl, (3))
FUNCTION (svcreate4, svcreate_impl, (4))
FUNCTION (svcvt, svcvt_impl,)
-FUNCTION (svcvtnt, CODE_FOR_MODE0 (aarch64_sve_cvtnt),)
+FUNCTION (svcvtnt, svcvtnt_impl,)
FUNCTION (svdiv, svdiv_impl,)
FUNCTION (svdivr, rtx_code_function_rotated, (DIV, UDIV, UNSPEC_COND_FDIV))
FUNCTION (svdot, svdot_impl,)
@@ -3273,7 +3515,7 @@ FUNCTION (svdup_lane, svdup_lane_impl,)
FUNCTION (svdupq, svdupq_impl,)
FUNCTION (svdupq_lane, svdupq_lane_impl,)
FUNCTION (sveor, rtx_code_function, (XOR, XOR, -1))
-FUNCTION (sveorv, reduction, (UNSPEC_XORV))
+FUNCTION (sveorv, sveorv_impl,)
FUNCTION (svexpa, unspec_based_function, (-1, -1, UNSPEC_FEXPA))
FUNCTION (svext, QUIET_CODE_FOR_MODE0 (aarch64_sve_ext),)
FUNCTION (svextb, svext_bhw_impl, (QImode))
@@ -3337,14 +3579,14 @@ FUNCTION (svmax, rtx_code_function, (SMAX, UMAX, UNSPEC_COND_FMAX,
UNSPEC_FMAX))
FUNCTION (svmaxnm, cond_or_uncond_unspec_function, (UNSPEC_COND_FMAXNM,
UNSPEC_FMAXNM))
-FUNCTION (svmaxnmv, reduction, (UNSPEC_FMAXNMV))
-FUNCTION (svmaxv, reduction, (UNSPEC_SMAXV, UNSPEC_UMAXV, UNSPEC_FMAXV))
+FUNCTION (svmaxnmv, svmaxnmv_impl,)
+FUNCTION (svmaxv, svmaxv_impl,)
FUNCTION (svmin, rtx_code_function, (SMIN, UMIN, UNSPEC_COND_FMIN,
UNSPEC_FMIN))
FUNCTION (svminnm, cond_or_uncond_unspec_function, (UNSPEC_COND_FMINNM,
UNSPEC_FMINNM))
-FUNCTION (svminnmv, reduction, (UNSPEC_FMINNMV))
-FUNCTION (svminv, reduction, (UNSPEC_SMINV, UNSPEC_UMINV, UNSPEC_FMINV))
+FUNCTION (svminnmv, svminnmv_impl,)
+FUNCTION (svminv, svminv_impl,)
FUNCTION (svmla, svmla_impl,)
FUNCTION (svmla_lane, svmla_lane_impl,)
FUNCTION (svmls, svmls_impl,)
@@ -3367,7 +3609,7 @@ FUNCTION (svnor, svnor_impl,)
FUNCTION (svnot, svnot_impl,)
FUNCTION (svorn, svorn_impl,)
FUNCTION (svorr, rtx_code_function, (IOR, IOR))
-FUNCTION (svorv, reduction, (UNSPEC_IORV))
+FUNCTION (svorv, svorv_impl,)
FUNCTION (svpfalse, svpfalse_impl,)
FUNCTION (svpfirst, svpfirst_svpnext_impl, (UNSPEC_PFIRST))
FUNCTION (svpnext, svpfirst_svpnext_impl, (UNSPEC_PNEXT))
@@ -3429,7 +3671,7 @@ FUNCTION (svset2, svset_impl, (2))
FUNCTION (svset3, svset_impl, (3))
FUNCTION (svset4, svset_impl, (4))
FUNCTION (svsetffr, svsetffr_impl,)
-FUNCTION (svsplice, QUIET_CODE_FOR_MODE0 (aarch64_sve_splice),)
+FUNCTION (svsplice, svsplice_impl,)
FUNCTION (svsqrt, rtx_code_function, (SQRT, SQRT, UNSPEC_COND_FSQRT))
FUNCTION (svst1, svst1_impl,)
FUNCTION (svst1_scatter, svst1_scatter_impl,)