aboutsummaryrefslogtreecommitdiff
path: root/gcc/match.pd
diff options
context:
space:
mode:
authorPan Li <pan2.li@intel.com>2024-05-29 16:18:31 +0800
committerPan Li <pan2.li@intel.com>2024-06-06 19:14:55 +0800
commit2d11de35d378a0763a8956638766182a49272e0b (patch)
tree97938e5bddf60b42b9da1337a7765ff6aa914751 /gcc/match.pd
parent346f33e27809ae012696c4731c8ebcec2414dbfb (diff)
downloadgcc-2d11de35d378a0763a8956638766182a49272e0b.zip
gcc-2d11de35d378a0763a8956638766182a49272e0b.tar.gz
gcc-2d11de35d378a0763a8956638766182a49272e0b.tar.bz2
Vect: Support IFN SAT_SUB for unsigned vector int
This patch would like to support the .SAT_SUB for the unsigned vector int. Given we have below example code: void vec_sat_sub_u64 (uint64_t *out, uint64_t *x, uint64_t *y, unsigned n) { for (unsigned i = 0; i < n; i++) out[i] = (x[i] - y[i]) & (-(uint64_t)(x[i] >= y[i])); } Before this patch: void vec_sat_sub_u64 (uint64_t *out, uint64_t *x, uint64_t *y, unsigned n) { ... _77 = .SELECT_VL (ivtmp_75, POLY_INT_CST [2, 2]); ivtmp_56 = _77 * 8; vect__4.7_59 = .MASK_LEN_LOAD (vectp_x.5_57, 64B, { -1, ... }, _77, 0); vect__6.10_63 = .MASK_LEN_LOAD (vectp_y.8_61, 64B, { -1, ... }, _77, 0); mask__7.11_64 = vect__4.7_59 >= vect__6.10_63; _66 = .COND_SUB (mask__7.11_64, vect__4.7_59, vect__6.10_63, { 0, ... }); .MASK_LEN_STORE (vectp_out.15_71, 64B, { -1, ... }, _77, 0, _66); vectp_x.5_58 = vectp_x.5_57 + ivtmp_56; vectp_y.8_62 = vectp_y.8_61 + ivtmp_56; vectp_out.15_72 = vectp_out.15_71 + ivtmp_56; ivtmp_76 = ivtmp_75 - _77; ... } After this patch: void vec_sat_sub_u64 (uint64_t *out, uint64_t *x, uint64_t *y, unsigned n) { ... _76 = .SELECT_VL (ivtmp_74, POLY_INT_CST [2, 2]); ivtmp_60 = _76 * 8; vect__4.7_63 = .MASK_LEN_LOAD (vectp_x.5_61, 64B, { -1, ... }, _76, 0); vect__6.10_67 = .MASK_LEN_LOAD (vectp_y.8_65, 64B, { -1, ... }, _76, 0); vect_patt_37.11_68 = .SAT_SUB (vect__4.7_63, vect__6.10_67); .MASK_LEN_STORE (vectp_out.12_70, 64B, { -1, ... }, _76, 0, vect_patt_37.11_68); vectp_x.5_62 = vectp_x.5_61 + ivtmp_60; vectp_y.8_66 = vectp_y.8_65 + ivtmp_60; vectp_out.12_71 = vectp_out.12_70 + ivtmp_60; ivtmp_75 = ivtmp_74 - _76; ... } The below test suites are passed for this patch * The x86 bootstrap test. * The x86 fully regression test. * The riscv fully regression tests. gcc/ChangeLog: * match.pd: Add new form for vector mode recog. * tree-vect-patterns.cc (gimple_unsigned_integer_sat_sub): Add new match func decl; (vect_recog_build_binary_gimple_call): Extract helper func to build gcall with given internal_fn. 
(vect_recog_sat_sub_pattern): Add new func impl to recog .SAT_SUB. Signed-off-by: Pan Li <pan2.li@intel.com>
Diffstat (limited to 'gcc/match.pd')
-rw-r--r--gcc/match.pd14
1 file changed, 14 insertions, 0 deletions
diff --git a/gcc/match.pd b/gcc/match.pd
index 7c1ad42..ebc60eb 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -3110,6 +3110,20 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (INTEGRAL_TYPE_P (type) && TYPE_UNSIGNED (type)
&& types_match (type, @0, @1))))
+/* Unsigned saturation sub, case 3 (branchless with gt):
+ SAT_U_SUB = (X - Y) * (X > Y). */
+(match (unsigned_integer_sat_sub @0 @1)
+ (mult:c (minus @0 @1) (convert (gt @0 @1)))
+ (if (INTEGRAL_TYPE_P (type) && TYPE_UNSIGNED (type)
+ && types_match (type, @0, @1))))
+
+/* Unsigned saturation sub, case 4 (branchless with ge):
+ SAT_U_SUB = (X - Y) * (X >= Y). */
+(match (unsigned_integer_sat_sub @0 @1)
+ (mult:c (minus @0 @1) (convert (ge @0 @1)))
+ (if (INTEGRAL_TYPE_P (type) && TYPE_UNSIGNED (type)
+ && types_match (type, @0, @1))))
+
/* x > y && x != XXX_MIN --> x > y
x > y && x == XXX_MIN --> false . */
(for eqne (eq ne)