diff options
author | Pan Li <pan2.li@intel.com> | 2024-06-28 11:33:41 +0800 |
---|---|---|
committer | Pan Li <pan2.li@intel.com> | 2024-06-29 18:37:32 +0800 |
commit | 21e3565927eda5ce9907d91100623052fa8182cd (patch) | |
tree | bd6d3a40fc045391d937f3c1c292e2165ed116c6 /gcc/match.pd | |
parent | 45e74d5dfa4c5f372df0d3545bc342b6a2505e71 (diff) | |
download | gcc-21e3565927eda5ce9907d91100623052fa8182cd.zip gcc-21e3565927eda5ce9907d91100623052fa8182cd.tar.gz gcc-21e3565927eda5ce9907d91100623052fa8182cd.tar.bz2 |
Match: Support imm form for unsigned scalar .SAT_ADD
This patch would like to support the form of unsigned scalar .SAT_ADD
when one of the operands is an IMM. For example, as below:
Form IMM:
#define DEF_SAT_U_ADD_IMM_FMT_1(T) \
T __attribute__((noinline)) \
sat_u_add_imm_##T##_fmt_1 (T x) \
{ \
return (T)(x + 9) >= x ? (x + 9) : -1; \
}
DEF_SAT_U_ADD_IMM_FMT_1(uint64_t)
Before this patch:
__attribute__((noinline))
uint64_t sat_u_add_imm_uint64_t_fmt_1 (uint64_t x)
{
long unsigned int _1;
uint64_t _3;
;; basic block 2, loop depth 0
;; pred: ENTRY
_1 = MIN_EXPR <x_2(D), 18446744073709551606>;
_3 = _1 + 9;
return _3;
;; succ: EXIT
}
After this patch:
__attribute__((noinline))
uint64_t sat_u_add_imm_uint64_t_fmt_1 (uint64_t x)
{
uint64_t _3;
;; basic block 2, loop depth 0
;; pred: ENTRY
_3 = .SAT_ADD (x_2(D), 9); [tail call]
return _3;
;; succ: EXIT
}
The following test suites pass for this patch:
1. The rv64gcv fully regression test with newlib.
2. The x86 bootstrap test.
3. The x86 fully regression test.
gcc/ChangeLog:
* match.pd: Add imm form for .SAT_ADD matching.
* tree-ssa-math-opts.cc (math_opts_dom_walker::after_dom_children):
Add .SAT_ADD matching under PLUS_EXPR.
Signed-off-by: Pan Li <pan2.li@intel.com>
Diffstat (limited to 'gcc/match.pd')
-rw-r--r-- | gcc/match.pd | 24 |
1 files changed, 24 insertions, 0 deletions
diff --git a/gcc/match.pd b/gcc/match.pd index 3fa3f2e..7fff7b5 100644 --- a/gcc/match.pd +++ b/gcc/match.pd @@ -3154,6 +3154,30 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) (match (unsigned_integer_sat_add @0 @1) (cond^ (gt @0 (usadd_left_part_1@2 @0 @1)) integer_minus_onep @2)) +/* Unsigned saturation add, case 9 (one op is imm): + SAT_U_ADD = (X + 3) >= x ? (X + 3) : -1. */ +(match (unsigned_integer_sat_add @0 @1) + (plus (min @0 INTEGER_CST@2) INTEGER_CST@1) + (if (INTEGRAL_TYPE_P (type) && TYPE_UNSIGNED (type) + && types_match (type, @0, @1)) + (with + { + unsigned precision = TYPE_PRECISION (type); + wide_int cst_1 = wi::to_wide (@1); + wide_int cst_2 = wi::to_wide (@2); + wide_int max = wi::mask (precision, false, precision); + wide_int sum = wi::add (cst_1, cst_2); + } + (if (wi::eq_p (max, sum)))))) + +/* Unsigned saturation add, case 10 (one op is imm): + SAT_U_ADD = __builtin_add_overflow (X, 3, &ret) == 0 ? ret : -1. */ +(match (unsigned_integer_sat_add @0 @1) + (cond^ (ne (imagpart (IFN_ADD_OVERFLOW@2 @0 INTEGER_CST@1)) integer_zerop) + integer_minus_onep (realpart @2)) + (if (INTEGRAL_TYPE_P (type) && TYPE_UNSIGNED (type) + && types_match (type, @0)))) + /* Unsigned saturation sub, case 1 (branch with gt): SAT_U_SUB = X > Y ? X - Y : 0 */ (match (unsigned_integer_sat_sub @0 @1) |