/* Fold a constant sub-tree into a single node for C-compiler
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not, see
<http://www.gnu.org/licenses/>. */
/*@@ This file should be rewritten to use an arbitrary precision
@@ representation for "struct tree_int_cst" and "struct tree_real_cst".
@@ Perhaps the routines could also be used for bc/dc, and made a lib.
@@ The routines that translate from the ap rep should
@@ warn if precision etc. is lost.
@@ This would also make life easier when this technology is used
@@ for cross-compilers. */
/* The entry points in this file are fold, size_int_wide, size_binop
and force_fit_type_double.
fold takes a tree as argument and returns a simplified tree.
size_binop takes a tree code for an arithmetic operation
and two operands that are trees, and produces a tree for the
result, assuming the type comes from `sizetype'.
size_int takes an integer value, and creates a tree constant
with type from `sizetype'.
force_fit_type_double takes a constant, an overflowable flag and a
prior overflow indicator. It forces the value to fit the type and
sets TREE_OVERFLOW.
Note: Since the folders get called on non-gimple code as well as
gimple code, we need to handle GIMPLE tuples as well as their
corresponding tree equivalents. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "flags.h"
#include "tree.h"
#include "real.h"
#include "fixed-value.h"
#include "rtl.h"
#include "expr.h"
#include "tm_p.h"
#include "target.h"
#include "toplev.h"
#include "intl.h"
#include "ggc.h"
#include "hashtab.h"
#include "langhooks.h"
#include "md5.h"
#include "gimple.h"
/* Nonzero if we are folding constants inside an initializer; zero
otherwise. */
int folding_initializer = 0;
/* The following constants represent a bit-based encoding of GCC's
comparison operators. This encoding simplifies transformations
that combine relational comparisons, such as ANDing and ORing
them together. */
enum comparison_code {
COMPCODE_FALSE = 0,
COMPCODE_LT = 1,
COMPCODE_EQ = 2,
COMPCODE_LE = 3,
COMPCODE_GT = 4,
COMPCODE_LTGT = 5,
COMPCODE_GE = 6,
COMPCODE_ORD = 7,
COMPCODE_UNORD = 8,
COMPCODE_UNLT = 9,
COMPCODE_UNEQ = 10,
COMPCODE_UNLE = 11,
COMPCODE_UNGT = 12,
COMPCODE_NE = 13,
COMPCODE_UNGE = 14,
COMPCODE_TRUE = 15
};
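/* The encoding above is a bitmask: bit 0 is "less than", bit 1 is
"equal", bit 2 is "greater than" and bit 3 is "unordered". Every
compound code is the inclusive OR of its components; for example
(derived from the values above, for illustration only):

COMPCODE_LE == COMPCODE_LT | COMPCODE_EQ (1|2 == 3)
COMPCODE_NE == COMPCODE_LT | COMPCODE_GT | COMPCODE_UNORD (1|4|8 == 13)

which is what lets ANDing or ORing two comparisons of the same
operands reduce to bitwise AND/OR of their compcodes. */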
static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
static bool negate_mathfn_p (enum built_in_function);
static bool negate_expr_p (tree);
static tree negate_expr (tree);
static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int);
static tree associate_trees (tree, tree, enum tree_code, tree);
static tree const_binop (enum tree_code, tree, tree, int);
static enum comparison_code comparison_to_compcode (enum tree_code);
static enum tree_code compcode_to_comparison (enum comparison_code);
static tree combine_comparisons (enum tree_code, enum tree_code,
enum tree_code, tree, tree, tree);
static int truth_value_p (enum tree_code);
static int operand_equal_for_comparison_p (tree, tree, tree);
static int twoval_comparison_p (tree, tree *, tree *, int *);
static tree eval_subst (tree, tree, tree, tree, tree);
static tree pedantic_omit_one_operand (tree, tree, tree);
static tree distribute_bit_expr (enum tree_code, tree, tree, tree);
static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
enum machine_mode *, int *, int *,
tree *, tree *);
static tree sign_bit_p (tree, const_tree);
static int simple_operand_p (const_tree);
static tree range_binop (enum tree_code, tree, tree, int, tree, int);
static tree range_predecessor (tree);
static tree range_successor (tree);
static tree make_range (tree, int *, tree *, tree *, bool *);
static tree build_range_check (tree, tree, int, tree, tree);
static int merge_ranges (int *, tree *, tree *, int, tree, tree, int, tree,
tree);
static tree fold_range_test (enum tree_code, tree, tree, tree);
static tree fold_cond_expr_with_comparison (tree, tree, tree, tree);
static tree unextend (tree, int, int, tree);
static tree fold_truthop (enum tree_code, tree, tree, tree);
static tree optimize_minmax_comparison (enum tree_code, tree, tree, tree);
static tree extract_muldiv (tree, tree, enum tree_code, tree, bool *);
static tree extract_muldiv_1 (tree, tree, enum tree_code, tree, bool *);
static tree fold_binary_op_with_conditional_arg (enum tree_code, tree,
tree, tree,
tree, tree, int);
static tree fold_mathfn_compare (enum built_in_function, enum tree_code,
tree, tree, tree);
static tree fold_inf_compare (enum tree_code, tree, tree, tree);
static tree fold_div_compare (enum tree_code, tree, tree, tree);
static bool reorder_operands_p (const_tree, const_tree);
static tree fold_negate_const (tree, tree);
static tree fold_not_const (tree, tree);
static tree fold_relational_const (enum tree_code, tree, tree, tree);
/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
overflow. Suppose A, B and SUM have the same respective signs as A1, B1,
and SUM1. Then this yields nonzero if overflow occurred during the
addition.
Overflow occurs if A and B have the same sign, but A and SUM differ in
sign. Use `^' to test whether signs differ, and `< 0' to isolate the
sign. */
#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
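/* Worked example (illustrative only), using 8-bit values for brevity:
a = 0x70, b = 0x70, sum = 0xE0. Then a ^ b == 0, so ~(a ^ b) has the
sign bit set (the signs of A and B agree), and a ^ sum == 0x90 also
has the sign bit set (the signs of A and SUM differ); their AND is
negative, so OVERFLOW_SUM_SIGN reports overflow, as expected for
0x70 + 0x70 in signed 8-bit arithmetic. */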
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
We do that by representing the two-word integer in 4 words, with only
HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
number. Each word X decomposes as LOWPART (X) + HIGHPART (X) * BASE. */
#define LOWPART(x) \
((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
#define HIGHPART(x) \
((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
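/* For illustration, assume HOST_BITS_PER_WIDE_INT == 32, so BASE is
2^16 == 0x10000: LOWPART (0x12345678) == 0x5678,
HIGHPART (0x12345678) == 0x1234, and 0x5678 + 0x1234 * BASE recovers
0x12345678. */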
/* Unpack a two-word integer into 4 words.
LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
WORDS points to the array of HOST_WIDE_INTs. */
static void
encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
{
words[0] = LOWPART (low);
words[1] = HIGHPART (low);
words[2] = LOWPART (hi);
words[3] = HIGHPART (hi);
}
/* Pack an array of 4 words into a two-word integer.
WORDS points to the array of words.
The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
static void
decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
HOST_WIDE_INT *hi)
{
*low = words[0] + words[1] * BASE;
*hi = words[2] + words[3] * BASE;
}
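/* Illustrative round trip, again assuming HOST_BITS_PER_WIDE_INT == 32:
encode (words, 0x89ABCDEF, 0x01234567) yields
words == { 0xCDEF, 0x89AB, 0x4567, 0x0123 }, and decode on that array
reconstructs low == 0x89ABCDEF and hi == 0x01234567. */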
/* Force the double-word integer L1, H1 to be within the range of the
integer type TYPE. Stores the properly truncated and sign-extended
double-word integer in *LV, *HV. Returns true if the operation
overflows, that is, argument and result are different. */
int
fit_double_type (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, const_tree type)
{
unsigned HOST_WIDE_INT low0 = l1;
HOST_WIDE_INT high0 = h1;
unsigned int prec;
int sign_extended_type;
if (POINTER_TYPE_P (type)
|| TREE_CODE (type) == OFFSET_TYPE)
prec = POINTER_SIZE;
else
prec = TYPE_PRECISION (type);
/* Size types *are* sign extended. */
sign_extended_type = (!TYPE_UNSIGNED (type)
|| (TREE_CODE (type) == INTEGER_TYPE
&& TYPE_IS_SIZETYPE (type)));
/* First clear all bits that are beyond the type's precision. */
if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
;
else if (prec > HOST_BITS_PER_WIDE_INT)
h1 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
else
{
h1 = 0;
if (prec < HOST_BITS_PER_WIDE_INT)
l1 &= ~((HOST_WIDE_INT) (-1) << prec);
}
/* Then do sign extension if necessary. */
if (!sign_extended_type)
/* No sign extension */;
else if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
/* Correct width already. */;
else if (prec > HOST_BITS_PER_WIDE_INT)
{
/* Sign extend top half? */
if (h1 & ((unsigned HOST_WIDE_INT)1
<< (prec - HOST_BITS_PER_WIDE_INT - 1)))
h1 |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
}
else if (prec == HOST_BITS_PER_WIDE_INT)
{
if ((HOST_WIDE_INT)l1 < 0)
h1 = -1;
}
else
{
/* Sign extend bottom half? */
if (l1 & ((unsigned HOST_WIDE_INT)1 << (prec - 1)))
{
h1 = -1;
l1 |= (HOST_WIDE_INT)(-1) << prec;
}
}
*lv = l1;
*hv = h1;
/* If the value didn't fit, signal overflow. */
return l1 != low0 || h1 != high0;
}
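/* Example (illustrative, assuming a 64-bit HOST_WIDE_INT): for a
signed 16-bit TYPE, fit_double_type (0x8000, 0, &lv, &hv, type) first
masks the value to 16 bits, then sign extends from bit 15, producing
lv == 0xFFFFFFFFFFFF8000, hv == -1 (i.e. the value -32768) and
returning nonzero, because 32768 does not fit the type. */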
/* We force the double-int HIGH:LOW into the range of the type TYPE by
sign or zero extending it.
OVERFLOWABLE indicates whether we are interested
in overflow of the value: when > 0 we are only interested in signed
overflow, when < 0 we are interested in any overflow. OVERFLOWED
indicates whether overflow has already occurred. We force the
value to be within range of TYPE (by setting to 0 or 1 all
the bits outside the type's range). We set TREE_OVERFLOW if
OVERFLOWED is nonzero,
or OVERFLOWABLE is > 0 and signed overflow occurs,
or OVERFLOWABLE is < 0 and any overflow occurs.
We return a new tree node for the extended double-int. The node
is shared if no overflow flags are set. */
tree
force_fit_type_double (tree type, unsigned HOST_WIDE_INT low,
HOST_WIDE_INT high, int overflowable,
bool overflowed)
{
int sign_extended_type;
bool overflow;
/* Size types *are* sign extended. */
sign_extended_type = (!TYPE_UNSIGNED (type)
|| (TREE_CODE (type) == INTEGER_TYPE
&& TYPE_IS_SIZETYPE (type)));
overflow = fit_double_type (low, high, &low, &high, type);
/* If we need to set overflow flags, return a new unshared node. */
if (overflowed || overflow)
{
if (overflowed
|| overflowable < 0
|| (overflowable > 0 && sign_extended_type))
{
tree t = make_node (INTEGER_CST);
TREE_INT_CST_LOW (t) = low;
TREE_INT_CST_HIGH (t) = high;
TREE_TYPE (t) = type;
TREE_OVERFLOW (t) = 1;
return t;
}
}
/* Else build a shared node. */
return build_int_cst_wide (type, low, high);
}
/* Add two doubleword integers with doubleword result.
Return nonzero if the operation overflows according to UNSIGNED_P.
Each argument is given as two `HOST_WIDE_INT' pieces.
One argument is L1 and H1; the other, L2 and H2.
The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
int
add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
bool unsigned_p)
{
unsigned HOST_WIDE_INT l;
HOST_WIDE_INT h;
l = l1 + l2;
h = h1 + h2 + (l < l1);
*lv = l;
*hv = h;
if (unsigned_p)
return (unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1;
else
return OVERFLOW_SUM_SIGN (h1, h2, h);
}
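/* Illustrative carry case (assuming a 64-bit HOST_WIDE_INT): adding
the double-words 0:0xFFFFFFFFFFFFFFFF and 0:1 (written high:low)
gives l == 0, and because l < l1 the carry propagates, so h == 1.
Neither the unsigned nor the signed test reports overflow, since the
result 2^64 is representable in the double-word. */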
/* Negate a doubleword integer with doubleword result.
Return nonzero if the operation overflows, assuming it's signed.
The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
int
neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
if (l1 == 0)
{
*lv = 0;
*hv = - h1;
return (*hv & h1) < 0;
}
else
{
*lv = -l1;
*hv = ~h1;
return 0;
}
}
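/* Only the most negative double-word value overflows under negation:
that is the case l1 == 0 with only the sign bit of h1 set, where
*hv == h1 again and (*hv & h1) < 0 detects it. When l1 != 0 the
identity -x == ~x + 1 applies with no carry out of the low word
(since ~l1 is not all ones), so *hv is simply ~h1 and overflow is
impossible. */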
/* Multiply two doubleword integers with doubleword result.
Return nonzero if the operation overflows according to UNSIGNED_P.
Each argument is given as two `HOST_WIDE_INT' pieces.
One argument is L1 and H1; the other, L2 and H2.
The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
int
mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
bool unsigned_p)
{
HOST_WIDE_INT arg1[4];
HOST_WIDE_INT arg2[4];
HOST_WIDE_INT prod[4 * 2];
unsigned HOST_WIDE_INT carry;
int i, j, k;
unsigned HOST_WIDE_INT toplow, neglow;
HOST_WIDE_INT tophigh, neghigh;
encode (arg1, l1, h1);
encode (arg2, l2, h2);
memset (prod, 0, sizeof prod);
for (i = 0; i < 4; i++)
{
carry = 0;
for (j = 0; j < 4; j++)
{
k = i + j;
/* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
carry += arg1[i] * arg2[j];
/* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
carry += prod[k];
prod[k] = LOWPART (carry);
carry = HIGHPART (carry);
}
prod[i + 4] = carry;
}
decode (prod, lv, hv);
decode (prod + 4, &toplow, &tophigh);
/* Unsigned overflow is immediate. */
if (unsigned_p)
return (toplow | tophigh) != 0;
/* Check for signed overflow by calculating the signed representation of the
top half of the result; it should agree with the low half's sign bit. */
if (h1 < 0)
{
neg_double (l2, h2, &neglow, &neghigh);
add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
}
if (h2 < 0)
{
neg_double (l1, h1, &neglow, &neghigh);
add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
}
return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
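/* Schoolbook multiplication, illustrated assuming a 32-bit
HOST_WIDE_INT (digits in base 2^16): for 0x10001 * 0x10001,
arg1 == arg2 == { 1, 1, 0, 0 }, the digit products accumulate into
prod == { 1, 2, 1, 0, ... }, and decoding gives *lv == 0x00020001,
*hv == 1, i.e. 0x100020001; the discarded top half is zero, so no
overflow is reported. */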
/* Shift the doubleword integer in L1, H1 left by COUNT places
keeping only PREC bits of result.
Shift right if COUNT is negative.
ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
void
lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
HOST_WIDE_INT count, unsigned int prec,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith)
{
unsigned HOST_WIDE_INT signmask;
if (count < 0)
{
rshift_double (l1, h1, -count, prec, lv, hv, arith);
return;
}
if (SHIFT_COUNT_TRUNCATED)
count %= prec;
if (count >= 2 * HOST_BITS_PER_WIDE_INT)
{
/* Shifting by the host word size is undefined according to the
ANSI standard, so we must handle this as a special case. */
*hv = 0;
*lv = 0;
}
else if (count >= HOST_BITS_PER_WIDE_INT)
{
*hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
*lv = 0;
}
else
{
*hv = (((unsigned HOST_WIDE_INT) h1 << count)
| (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
*lv = l1 << count;
}
/* Sign extend all bits that are beyond the precision. */
signmask = -((prec > HOST_BITS_PER_WIDE_INT
? ((unsigned HOST_WIDE_INT) *hv
>> (prec - HOST_BITS_PER_WIDE_INT - 1))
: (*lv >> (prec - 1))) & 1);
if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
;
else if (prec >= HOST_BITS_PER_WIDE_INT)
{
*hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
*hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
}
else
{
*hv = signmask;
*lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
*lv |= signmask << prec;
}
}
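/* Example (illustrative): lshift_double (0x1234, 0, 4, 16, &lv, &hv, 0)
computes 0x12340, then truncates to the 16 low bits and sign extends
from bit 15 (which is clear here), leaving lv == 0x2340 and hv == 0. */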
/* Shift the doubleword integer in L1, H1 right by COUNT places
keeping only PREC bits of result. COUNT must be positive.
ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
void
rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
HOST_WIDE_INT count, unsigned int prec,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
int arith)
{
unsigned HOST_WIDE_INT signmask;
signmask = (arith
? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
: 0);
if (SHIFT_COUNT_TRUNCATED)
count %= prec;
if (count >= 2 * HOST_BITS_PER_WIDE_INT)
{
/* Shifting by the host word size is undefined according to the
ANSI standard, so we must handle this as a special case. */
*hv = 0;
*lv = 0;
}
else if (count >= HOST_BITS_PER_WIDE_INT)
{
*hv = 0;
*lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
}
else
{
*hv = (unsigned HOST_WIDE_INT) h1 >> count;
*lv = ((l1 >> count)
| ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
}
/* Zero / sign extend all bits that are beyond the precision. */
if (count >= (HOST_WIDE_INT)prec)
{
*hv = signmask;
*lv = signmask;
}
else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
;
else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
{
*hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
*hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
}
else
{
*hv = signmask;
*lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
*lv |= signmask << (prec - count);
}
}
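/* Note that for ARITH shifts the sign comes from bit
2 * HOST_BITS_PER_WIDE_INT - 1 of H1, so the input is expected to be
sign extended through the high word. E.g. (illustrative) -8, stored
as l1 == ~7 and h1 == -1, shifted right arithmetically by 1 with any
PREC >= 4 yields -4: signmask == -1 refills the bits vacated at the
top. */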
/* Rotate the doubleword integer in L1, H1 left by COUNT places
keeping only PREC bits of result.
Rotate right if COUNT is negative.
Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
void
lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
HOST_WIDE_INT count, unsigned int prec,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
unsigned HOST_WIDE_INT s1l, s2l;
HOST_WIDE_INT s1h, s2h;
count %= prec;
if (count < 0)
count += prec;
lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
*lv = s1l | s2l;
*hv = s1h | s2h;
}
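/* A rotate is composed from the two complementary shifts; e.g. with
PREC == 8, rotating 0x81 left by 1 ORs together 0x02 (the left shift,
truncated to 8 bits) and 0x01 (the logical right shift by 7), giving
0x03. */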
/* Rotate the doubleword integer in L1, H1 right by COUNT places
keeping only PREC bits of result. COUNT must be positive.
Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
void
rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
HOST_WIDE_INT count, unsigned int prec,
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
unsigned HOST_WIDE_INT s1l, s2l;
HOST_WIDE_INT s1h, s2h;
count %= prec;
if (count < 0)
count += prec;
rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
*lv = s1l | s2l;
*hv = s1h | s2h;
}
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
CODE is a tree code for a kind of division, one of
TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
or EXACT_DIV_EXPR.
It controls how the quotient is rounded to an integer.
Return nonzero if the operation overflows.
UNS nonzero says do unsigned division. */
int
div_and_round_double (enum tree_code code, int uns,
unsigned HOST_WIDE_INT lnum_orig, /* num == numerator == dividend */
HOST_WIDE_INT hnum_orig,
unsigned HOST_WIDE_INT lden_orig, /* den == denominator == divisor */
HOST_WIDE_INT hden_orig,
unsigned HOST_WIDE_INT *lquo,
HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
HOST_WIDE_INT *hrem)
{
int quo_neg = 0;
HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
HOST_WIDE_INT den[4], quo[4];
int i, j;
unsigned HOST_WIDE_INT work;
unsigned HOST_WIDE_INT carry = 0;
unsigned HOST_WIDE_INT lnum = lnum_orig;
HOST_WIDE_INT hnum = hnum_orig;
unsigned HOST_WIDE_INT lden = lden_orig;
HOST_WIDE_INT hden = hden_orig;
int overflow = 0;
if (hden == 0 && lden == 0)
overflow = 1, lden = 1;
/* Calculate quotient sign and convert operands to unsigned. */
if (!uns)
{
if (hnum < 0)
{
quo_neg = ~ quo_neg;
/* (minimum integer) / (-1) is the only overflow case. */
if (neg_double (lnum, hnum, &lnum, &hnum)
&& ((HOST_WIDE_INT) lden & hden) == -1)
overflow = 1;
}
if (hden < 0)
{
quo_neg = ~ quo_neg;
neg_double (lden, hden, &lden, &hden);
}
}
if (hnum == 0 && hden == 0)
{ /* single precision */
*hquo = *hrem = 0;
/* This unsigned division rounds toward zero. */
*lquo = lnum / lden;
goto finish_up;
}
if (hnum == 0)
{ /* trivial case: dividend < divisor */
/* hden != 0 already checked. */
*hquo = *lquo = 0;
*hrem = hnum;
*lrem = lnum;
goto finish_up;
}
memset (quo, 0, sizeof quo);
memset (num, 0, sizeof num); /* to zero the extra scaling element */
memset (den, 0, sizeof den);
encode (num, lnum, hnum);
encode (den, lden, hden);
/* Special code for when the divisor < BASE. */
if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
{
/* hnum != 0 already checked. */
for (i = 4 - 1; i >= 0; i--)
{
work = num[i] + carry * BASE;
quo[i] = work / lden;
carry = work % lden;
}
}
else
{
/* Full double precision division,
with thanks to Don Knuth's "Seminumerical Algorithms". */
int num_hi_sig, den_hi_sig;
unsigned HOST_WIDE_INT quo_est, scale;
/* Find the highest nonzero divisor digit. */
for (i = 4 - 1;; i--)
if (den[i] != 0)
{
den_hi_sig = i;
break;
}
/* Ensure that the first digit of the divisor is at least BASE/2.
This is required by the quotient digit estimation algorithm. */
scale = BASE / (den[den_hi_sig] + 1);
if (scale > 1)
{ /* scale divisor and dividend */
carry = 0;
for (i = 0; i <= 4 - 1; i++)
{
work = (num[i] * scale) + carry;
num[i] = LOWPART (work);
carry = HIGHPART (work);
}
num[4] = carry;
carry = 0;
for (i = 0; i <= 4 - 1; i++)
{
work = (den[i] * scale) + carry;
den[i] = LOWPART (work);
carry = HIGHPART (work);
if (den[i] != 0) den_hi_sig = i;
}
}
num_hi_sig = 4;
/* Main loop */
for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
{
/* Guess the next quotient digit, quo_est, by dividing the first
two remaining dividend digits by the high order quotient digit.
quo_est is never low and is at most 2 high. */
unsigned HOST_WIDE_INT tmp;
num_hi_sig = i + den_hi_sig + 1;
work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
if (num[num_hi_sig] != den[den_hi_sig])
quo_est = work / den[den_hi_sig];
else
quo_est = BASE - 1;
/* Refine quo_est so it's usually correct, and at most one high. */
tmp = work - quo_est * den[den_hi_sig];
if (tmp < BASE
&& (den[den_hi_sig - 1] * quo_est
> (tmp * BASE + num[num_hi_sig - 2])))
quo_est--;
/* Try QUO_EST as the quotient digit, by multiplying the
divisor by QUO_EST and subtracting from the remaining dividend.
Keep in mind that QUO_EST is the Ith quotient digit. */
carry = 0;
for (j = 0; j <= den_hi_sig; j++)
{
work = quo_est * den[j] + carry;
carry = HIGHPART (work);
work = num[i + j] - LOWPART (work);
num[i + j] = LOWPART (work);
carry += HIGHPART (work) != 0;
}
/* If quo_est was high by one, then num[i] went negative and
we need to correct things. */
if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
{
quo_est--;
carry = 0; /* add divisor back in */
for (j = 0; j <= den_hi_sig; j++)
{
work = num[i + j] + den[j] + carry;
carry = HIGHPART (work);
num[i + j] = LOWPART (work);
}
num [num_hi_sig] += carry;
}
/* Store the quotient digit. */
quo[i] = quo_est;
}
}
decode (quo, lquo, hquo);
finish_up:
/* If the result should be negative, negate it: the quotient was
computed from the operands' absolute values. */
if (quo_neg)
neg_double (*lquo, *hquo, lquo, hquo);
/* Compute trial remainder: rem = num - (quo * den) */
mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
neg_double (*lrem, *hrem, lrem, hrem);
add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
switch (code)
{
case TRUNC_DIV_EXPR:
case TRUNC_MOD_EXPR: /* round toward zero */
case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
return overflow;
case FLOOR_DIV_EXPR:
case FLOOR_MOD_EXPR: /* round toward negative infinity */
if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
{
/* quo = quo - 1; */
add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
lquo, hquo);
}
else
return overflow;
break;
case CEIL_DIV_EXPR:
case CEIL_MOD_EXPR: /* round toward positive infinity */
if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
{
add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
lquo, hquo);
}
else
return overflow;
break;
case ROUND_DIV_EXPR:
case ROUND_MOD_EXPR: /* round to closest integer */
{
unsigned HOST_WIDE_INT labs_rem = *lrem;
HOST_WIDE_INT habs_rem = *hrem;
unsigned HOST_WIDE_INT labs_den = lden, ltwice;
HOST_WIDE_INT habs_den = hden, htwice;
/* Get absolute values. */
if (*hrem < 0)
neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
if (hden < 0)
neg_double (lden, hden, &labs_den, &habs_den);
/* If (2 * abs (lrem) >= abs (lden)), adjust the quotient. */
mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
labs_rem, habs_rem, <wice, &htwice);
if (((unsigned HOST_WIDE_INT) habs_den
< (unsigned HOST_WIDE_INT) htwice)
|| (((unsigned HOST_WIDE_INT) habs_den
== (unsigned HOST_WIDE_INT) htwice)
&& (labs_den <= ltwice)))
{
if (*hquo < 0)
/* quo = quo - 1; */
add_double (*lquo, *hquo,
(HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
else
/* quo = quo + 1; */
add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
lquo, hquo);
}
else
return overflow;
}
break;
default:
gcc_unreachable ();
}
/* Compute true remainder: rem = num - (quo * den) */
mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
neg_double (*lrem, *hrem, lrem, hrem);
add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
return overflow;
}
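/* How the rounding adjustments play out for 7 / 2 and -7 / 2
(illustrative): TRUNC gives 3 rem 1 and -3 rem -1; FLOOR leaves
7 / 2 alone but turns -7 / 2 into -4 rem 1; CEIL turns 7 / 2 into
4 rem -1 and leaves -7 / 2 alone; ROUND, since 2 * |rem| >= |den|,
rounds both away from zero, to 4 rem -1 and -4 rem 1. */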
/* If ARG2 divides ARG1 with zero remainder, carries out the division
of type CODE and returns the quotient.
Otherwise returns NULL_TREE. */
static tree
div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2)
{
unsigned HOST_WIDE_INT int1l, int2l;
HOST_WIDE_INT int1h, int2h;
unsigned HOST_WIDE_INT quol, reml;
HOST_WIDE_INT quoh, remh;
tree type = TREE_TYPE (arg1);
int uns = TYPE_UNSIGNED (type);
int1l = TREE_INT_CST_LOW (arg1);
int1h = TREE_INT_CST_HIGH (arg1);
/* &obj[0] + -128 really should be compiled as &obj[-8] rather than
&obj[some_exotic_number]. */
if (POINTER_TYPE_P (type))
{
uns = false;
type = signed_type_for (type);
fit_double_type (int1l, int1h, &int1l, &int1h, type);
}
else
fit_double_type (int1l, int1h, &int1l, &int1h, type);
int2l = TREE_INT_CST_LOW (arg2);
int2h = TREE_INT_CST_HIGH (arg2);
div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
&quol, &quoh, &reml, &remh);
if (remh != 0 || reml != 0)
return NULL_TREE;
return build_int_cst_wide (type, quol, quoh);
}
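/* For example (illustrative), with CODE == TRUNC_DIV_EXPR this returns
the constant 3 for 12 / 4 but NULL_TREE for 13 / 4, since the latter
leaves remainder 1. */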
/* This is nonzero if we should defer warnings about undefined
overflow. This facility exists because these warnings are a
special case. The code to estimate loop iterations does not want
to issue any warnings, since it works with expressions which do not
occur in user code. Various bits of cleanup code call fold(), but
only use the result if it has certain characteristics (e.g., is a
constant); that code only wants to issue a warning if the result is
used. */
static int fold_deferring_overflow_warnings;
/* If a warning about undefined overflow is deferred, this is the
warning. Note that this may cause us to turn two warnings into
one, but that is fine since it is sufficient to only give one
warning per expression. */
static const char* fold_deferred_overflow_warning;
/* If a warning about undefined overflow is deferred, this is the
level at which the warning should be emitted. */
static enum warn_strict_overflow_code fold_deferred_overflow_code;
/* Start deferring overflow warnings. We could use a stack here to
permit nested calls, but at present it is not necessary. */
void
fold_defer_overflow_warnings (void)
{
++fold_deferring_overflow_warnings;
}
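/* A typical usage pattern for the deferral machinery (a sketch, not
code from this file; RESULT_USED stands for whatever condition the
caller has for keeping the folded result):

fold_defer_overflow_warnings ();
tem = fold_binary (code, type, op0, op1);
fold_undefer_overflow_warnings (tem != NULL_TREE && result_used,
stmt, 0);

so a deferred warning about undefined signed overflow is only issued
when the folded result is actually used. */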
/* Stop deferring overflow warnings. If there is a pending warning,
and ISSUE is true, then issue the warning if appropriate. STMT is
the statement with which the warning should be associated (used for
location information); STMT may be NULL. CODE is the level of the
warning--a warn_strict_overflow_code value. This function will use
the smaller of CODE and the deferred code when deciding whether to
issue the warning. CODE may be zero to mean to always use the
deferred code. */
void
fold_undefer_overflow_warnings (bool issue, const_gimple stmt, int code)
{
const char *warnmsg;
location_t locus;
gcc_assert (fold_deferring_overflow_warnings > 0);
--fold_deferring_overflow_warnings;
if (fold_deferring_overflow_warnings > 0)
{
if (fold_deferred_overflow_warning != NULL
&& code != 0
&& code < (int) fold_deferred_overflow_code)
fold_deferred_overflow_code = code;
return;
}
warnmsg = fold_deferred_overflow_warning;
fold_deferred_overflow_warning = NULL;
if (!issue || warnmsg == NULL)
return;
if (gimple_no_warning_p (stmt))
return;
/* Use the smallest code level when deciding to issue the
warning. */
if (code == 0 || code > (int) fold_deferred_overflow_code)
code = fold_deferred_overflow_code;
if (!issue_strict_overflow_warning (code))
return;
if (stmt == NULL)
locus = input_location;
else
locus = gimple_location (stmt);
warning (OPT_Wstrict_overflow, "%H%s", &locus, warnmsg);
}
/* Stop deferring overflow warnings, ignoring any deferred
warnings. */
void
fold_undefer_and_ignore_overflow_warnings (void)
{
fold_undefer_overflow_warnings (false, NULL, 0);
}
/* Whether we are deferring overflow warnings. */
bool
fold_deferring_overflow_warnings_p (void)
{
return fold_deferring_overflow_warnings > 0;
}
/* This is called when we fold something based on the fact that signed
overflow is undefined. */
static void
fold_overflow_warning (const char* gmsgid, enum warn_strict_overflow_code wc)
{
if (fold_deferring_overflow_warnings > 0)
{
if (fold_deferred_overflow_warning == NULL
|| wc < fold_deferred_overflow_code)
{
fold_deferred_overflow_warning = gmsgid;
fold_deferred_overflow_code = wc;
}
}
else if (issue_strict_overflow_warning (wc))
warning (OPT_Wstrict_overflow, gmsgid);
}
/* Return true if the built-in mathematical function specified by CODE
is odd, i.e. -f(x) == f(-x). */
static bool
negate_mathfn_p (enum built_in_function code)
{
switch (code)
{
CASE_FLT_FN (BUILT_IN_ASIN):
CASE_FLT_FN (BUILT_IN_ASINH):
CASE_FLT_FN (BUILT_IN_ATAN):
CASE_FLT_FN (BUILT_IN_ATANH):
CASE_FLT_FN (BUILT_IN_CASIN):
CASE_FLT_FN (BUILT_IN_CASINH):
CASE_FLT_FN (BUILT_IN_CATAN):
CASE_FLT_FN (BUILT_IN_CATANH):
CASE_FLT_FN (BUILT_IN_CBRT):
CASE_FLT_FN (BUILT_IN_CPROJ):
CASE_FLT_FN (BUILT_IN_CSIN):
CASE_FLT_FN (BUILT_IN_CSINH):
CASE_FLT_FN (BUILT_IN_CTAN):
CASE_FLT_FN (BUILT_IN_CTANH):
CASE_FLT_FN (BUILT_IN_ERF):
CASE_FLT_FN (BUILT_IN_LLROUND):
CASE_FLT_FN (BUILT_IN_LROUND):
CASE_FLT_FN (BUILT_IN_ROUND):
CASE_FLT_FN (BUILT_IN_SIN):
CASE_FLT_FN (BUILT_IN_SINH):
CASE_FLT_FN (BUILT_IN_TAN):
CASE_FLT_FN (BUILT_IN_TANH):
CASE_FLT_FN (BUILT_IN_TRUNC):
return true;
CASE_FLT_FN (BUILT_IN_LLRINT):
CASE_FLT_FN (BUILT_IN_LRINT):
CASE_FLT_FN (BUILT_IN_NEARBYINT):
CASE_FLT_FN (BUILT_IN_RINT):
return !flag_rounding_math;
default:
break;
}
return false;
}
/* Check whether we may negate an integer constant T without causing
overflow. */
bool
may_negate_without_overflow_p (const_tree t)
{
unsigned HOST_WIDE_INT val;
unsigned int prec;
tree type;
gcc_assert (TREE_CODE (t) == INTEGER_CST);
type = TREE_TYPE (t);
if (TYPE_UNSIGNED (type))
return false;
prec = TYPE_PRECISION (type);
if (prec > HOST_BITS_PER_WIDE_INT)
{
if (TREE_INT_CST_LOW (t) != 0)
return true;
prec -= HOST_BITS_PER_WIDE_INT;
val = TREE_INT_CST_HIGH (t);
}
else
val = TREE_INT_CST_LOW (t);
if (prec < HOST_BITS_PER_WIDE_INT)
val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1));
}
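/* E.g. (illustrative) for a signed 32-bit type this returns false only
for the single value 0x80000000 (INT_MIN, whose negation is not
representable) and true for every other constant. */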
/* Determine whether an expression T can be cheaply negated using
the function negate_expr without introducing undefined overflow. */
static bool
negate_expr_p (tree t)
{
tree type;
if (t == 0)
return false;
type = TREE_TYPE (t);
STRIP_SIGN_NOPS (t);
switch (TREE_CODE (t))
{
case INTEGER_CST:
if (TYPE_OVERFLOW_WRAPS (type))
return true;
/* Check that -CST will not overflow type. */
return may_negate_without_overflow_p (t);
case BIT_NOT_EXPR:
return (INTEGRAL_TYPE_P (type)
&& TYPE_OVERFLOW_WRAPS (type));
case FIXED_CST:
case REAL_CST:
case NEGATE_EXPR:
return true;
case COMPLEX_CST:
return negate_expr_p (TREE_REALPART (t))
&& negate_expr_p (TREE_IMAGPART (t));
case COMPLEX_EXPR:
return negate_expr_p (TREE_OPERAND (t, 0))
&& negate_expr_p (TREE_OPERAND (t, 1));
case CONJ_EXPR:
return negate_expr_p (TREE_OPERAND (t, 0));
case PLUS_EXPR:
if (HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
|| HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
return false;
/* -(A + B) -> (-B) - A. */
if (negate_expr_p (TREE_OPERAND (t, 1))
&& reorder_operands_p (TREE_OPERAND (t, 0),
TREE_OPERAND (t, 1)))
return true;
/* -(A + B) -> (-A) - B. */
return negate_expr_p (TREE_OPERAND (t, 0));
case MINUS_EXPR:
/* We can't turn -(A-B) into B-A when we honor signed zeros. */
return !HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
&& !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
&& reorder_operands_p (TREE_OPERAND (t, 0),
TREE_OPERAND (t, 1));
case MULT_EXPR:
if (TYPE_UNSIGNED (TREE_TYPE (t)))
break;
/* Fall through. */
case RDIV_EXPR:
if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
return negate_expr_p (TREE_OPERAND (t, 1))
|| negate_expr_p (TREE_OPERAND (t, 0));
break;
case TRUNC_DIV_EXPR:
case ROUND_DIV_EXPR:
case FLOOR_DIV_EXPR:
case CEIL_DIV_EXPR:
case EXACT_DIV_EXPR:
/* In general we can't negate A / B, because if A is INT_MIN and
B is 1, we may turn this into INT_MIN / -1 which is undefined
and actually traps on some architectures. But if overflow is
undefined, we can negate, because - (INT_MIN / 1) is an
overflow. */
if (INTEGRAL_TYPE_P (TREE_TYPE (t))
&& !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t)))
break;
return negate_expr_p (TREE_OPERAND (t, 1))
|| negate_expr_p (TREE_OPERAND (t, 0));
case NOP_EXPR:
/* Negate -((double)float) as (double)(-float). */
if (TREE_CODE (type) == REAL_TYPE)
{
tree tem = strip_float_extensions (t);
if (tem != t)
return negate_expr_p (tem);
}
break;
case CALL_EXPR:
/* Negate -f(x) as f(-x). */
if (negate_mathfn_p (builtin_mathfn_code (t)))
return negate_expr_p (CALL_EXPR_ARG (t, 0));
break;
case RSHIFT_EXPR:
/* Optimize -((int)x >> 31) into (unsigned)x >> 31. */
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
if (TREE_INT_CST_HIGH (op1) == 0
&& (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
== TREE_INT_CST_LOW (op1))
return true;
}
break;
default:
break;
}
return false;
}
/* Given T, an expression, return a folded tree for -T, or NULL_TREE if
no simplification is possible.
If negate_expr_p would return true for T, NULL_TREE will never be
returned. */
static tree
fold_negate_expr (tree t)
{
tree type = TREE_TYPE (t);
tree tem;
switch (TREE_CODE (t))
{
/* Convert - (~A) to A + 1. */
case BIT_NOT_EXPR:
if (INTEGRAL_TYPE_P (type))
return fold_build2 (PLUS_EXPR, type, TREE_OPERAND (t, 0),
build_int_cst (type, 1));
break;
case INTEGER_CST:
tem = fold_negate_const (t, type);
if (TREE_OVERFLOW (tem) == TREE_OVERFLOW (t)
|| !TYPE_OVERFLOW_TRAPS (type))
return tem;
break;
case REAL_CST:
tem = fold_negate_const (t, type);
/* Two's complement FP formats, such as c4x, may overflow. */
if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
return tem;
break;
case FIXED_CST:
tem = fold_negate_const (t, type);
return tem;
case COMPLEX_CST:
{
tree rpart = negate_expr (TREE_REALPART (t));
tree ipart = negate_expr (TREE_IMAGPART (t));
if ((TREE_CODE (rpart) == REAL_CST
&& TREE_CODE (ipart) == REAL_CST)
|| (TREE_CODE (rpart) == INTEGER_CST
&& TREE_CODE (ipart) == INTEGER_CST))
return build_complex (type, rpart, ipart);
}
break;
case COMPLEX_EXPR:
if (negate_expr_p (t))
return fold_build2 (COMPLEX_EXPR, type,
fold_negate_expr (TREE_OPERAND (t, 0)),
fold_negate_expr (TREE_OPERAND (t, 1)));
break;
case CONJ_EXPR:
if (negate_expr_p (t))
return fold_build1 (CONJ_EXPR, type,
fold_negate_expr (TREE_OPERAND (t, 0)));
break;
case NEGATE_EXPR:
return TREE_OPERAND (t, 0);
case PLUS_EXPR:
if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
|