author     Jakub Jelinek <jakub@redhat.com>    2023-06-15 08:49:27 +0200
committer  Jakub Jelinek <jakub@redhat.com>    2023-06-15 08:49:27 +0200
commit     4e31e63ea7edffd1212fc17ce8d50672035bb64b (patch)
tree       6d8b849025561968fa1b7aa91dbdd92bf196b349 /gcc
parent     ea616f687dccbe42012f786c0ebade5b05850206 (diff)
middle-end: Move constant args folding of .UBSAN_CHECK_* and .*_OVERFLOW into fold-const-call.cc
Here is an incremental patch to handle constant folding of these
in fold-const-call.cc rather than gimple-fold.cc.  I am not really sure
whether that is the way to go, because it replaces 28 lines of former code
with 65 lines of new code, for the overall benefit that, say,

int
foo (long long *p)
{
  int one = 1;
  long long max = __LONG_LONG_MAX__;
  return __builtin_add_overflow (one, max, p);
}

can now be fully folded already in the ccp1 pass, while before it was
only cleaned up in the forwprop1 pass right after it.

On Wed, Jun 14, 2023 at 12:25:46PM +0000, Richard Biener wrote:
> I think that's still very much desirable so this followup looks OK.
> Maybe you can re-base it as prerequesite though?

Rebased then (of course with the UADDC/USUBC handling removed from this
first patch; it will be added in the second one).

2023-06-15  Jakub Jelinek  <jakub@redhat.com>

	* gimple-fold.cc (gimple_fold_call): Move handling of arg0
	as well as arg1 INTEGER_CSTs for .UBSAN_CHECK_{ADD,SUB,MUL}
	and .{ADD,SUB,MUL}_OVERFLOW calls from here...
	* fold-const-call.cc (fold_const_call): ... here.
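The same folding now also covers the .UBSAN_CHECK_* internal calls that
-fsanitize=signed-integer-overflow emits.  As a sketch (this example is not
part of the original patch; the behavior described follows from the new
CFN_UBSAN_CHECK_ADD handling, assuming both operands have become
INTEGER_CSTs by the time constant propagation runs):

/* ubsan-fold.c: compile with
   gcc -O2 -fsanitize=signed-integer-overflow -c ubsan-fold.c
   The .UBSAN_CHECK_ADD call generated for the addition sees two constant
   operands whose sum does not overflow, so the new arith_overflow code can
   fold the whole call to the constant 3 (itype == type and ovf is false,
   hence "return r").  On overflow it returns NULL_TREE instead, keeping
   the call and therefore the runtime diagnostic.  */
int
bar (void)
{
  int one = 1, two = 2;
  return one + two;
}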
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/fold-const-call.cc  41
-rw-r--r--  gcc/gimple-fold.cc      16
2 files changed, 41 insertions, 16 deletions
diff --git a/gcc/fold-const-call.cc b/gcc/fold-const-call.cc
index 663eae2..00ff4e4 100644
--- a/gcc/fold-const-call.cc
+++ b/gcc/fold-const-call.cc
@@ -1669,6 +1669,7 @@ fold_const_call (combined_fn fn, tree type, tree arg0, tree arg1)
 {
   const char *p0, *p1;
   char c;
+  tree_code subcode;
   switch (fn)
     {
     case CFN_BUILT_IN_STRSPN:
@@ -1738,6 +1739,46 @@ fold_const_call (combined_fn fn, tree type, tree arg0, tree arg1)
     case CFN_FOLD_LEFT_PLUS:
       return fold_const_fold_left (type, arg0, arg1, PLUS_EXPR);
 
+    case CFN_UBSAN_CHECK_ADD:
+    case CFN_ADD_OVERFLOW:
+      subcode = PLUS_EXPR;
+      goto arith_overflow;
+
+    case CFN_UBSAN_CHECK_SUB:
+    case CFN_SUB_OVERFLOW:
+      subcode = MINUS_EXPR;
+      goto arith_overflow;
+
+    case CFN_UBSAN_CHECK_MUL:
+    case CFN_MUL_OVERFLOW:
+      subcode = MULT_EXPR;
+      goto arith_overflow;
+
+    arith_overflow:
+      if (integer_cst_p (arg0) && integer_cst_p (arg1))
+        {
+          tree itype
+            = TREE_CODE (type) == COMPLEX_TYPE ? TREE_TYPE (type) : type;
+          bool ovf = false;
+          tree r = int_const_binop (subcode, fold_convert (itype, arg0),
+                                    fold_convert (itype, arg1));
+          if (!r || TREE_CODE (r) != INTEGER_CST)
+            return NULL_TREE;
+          if (arith_overflowed_p (subcode, itype, arg0, arg1))
+            ovf = true;
+          if (TREE_OVERFLOW (r))
+            r = drop_tree_overflow (r);
+          if (itype == type)
+            {
+              if (ovf)
+                return NULL_TREE;
+              return r;
+            }
+          else
+            return build_complex (type, r, build_int_cst (itype, ovf));
+        }
+      return NULL_TREE;
+
     default:
       return fold_const_call_1 (fn, type, arg0, arg1);
     }
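In the new arith_overflow block, itype strips the COMPLEX_TYPE wrapper used
by the .{ADD,SUB,MUL}_OVERFLOW internal functions: for .UBSAN_CHECK_*
(where itype == type) an overflowing result must not be folded, so the
runtime check survives, while for the _OVERFLOW forms the wrapped value and
the overflow flag are packaged via build_complex.  A rough C analogue of
that value/flag pair (the struct and helper below are purely illustrative,
not GCC source code):

#include <stdbool.h>

/* Mimics what fold_const_call now computes for .ADD_OVERFLOW (a, b) on
   two constant operands: the wrapped result plus an overflow flag, the
   same pair that build_complex (type, r, build_int_cst (itype, ovf))
   encodes as a complex constant.  */
struct add_overflow_pair
{
  long long value;  /* the wrapped result, like r after drop_tree_overflow */
  bool ovf;         /* like the answer from arith_overflowed_p */
};

static struct add_overflow_pair
fold_add_overflow_const (long long a, long long b)
{
  struct add_overflow_pair res;
  /* __builtin_add_overflow stores the wrapped value and reports whether
     the infinite-precision sum fit the result type, which is exactly the
     pair the folder encodes.  */
  res.ovf = __builtin_add_overflow (a, b, &res.value);
  return res;
}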
diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index 581575b..df88ad7 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -5702,22 +5702,6 @@ gimple_fold_call (gimple_stmt_iterator *gsi, bool inplace)
 	    result = arg0;
 	  else if (subcode == MULT_EXPR && integer_onep (arg0))
 	    result = arg1;
-	  else if (TREE_CODE (arg0) == INTEGER_CST
-		   && TREE_CODE (arg1) == INTEGER_CST)
-	    {
-	      if (cplx_result)
-		result = int_const_binop (subcode, fold_convert (type, arg0),
-					  fold_convert (type, arg1));
-	      else
-		result = int_const_binop (subcode, arg0, arg1);
-	      if (result && arith_overflowed_p (subcode, type, arg0, arg1))
-		{
-		  if (cplx_result)
-		    overflow = build_one_cst (type);
-		  else
-		    result = NULL_TREE;
-		}
-	    }
 	  if (result)
 	    {
 	      if (result == integer_zero_node)