Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog                |  12
-rw-r--r--  gcc/tree-chrec.c             |  10
-rw-r--r--  gcc/tree-scalar-evolution.c  | 183
-rw-r--r--  gcc/tree-ssa-loop-ivopts.c   |   6
-rw-r--r--  gcc/tree-vrp.c               |  40
5 files changed, 231 insertions(+), 20 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index d2bc8c5..e8c816a 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,15 @@
+2006-08-16  Zdenek Dvorak  <dvorakz@suse.cz>
+
+	PR tree-optimization/27865
+	* tree-vrp.c (adjust_range_with_scev): Do not use TYPE_{MIN,MAX}_VALUE
+	for pointer types.
+	* tree-scalar-evolution.c (fold_used_pointer_cast, pointer_offset_p,
+	fold_used_pointer, pointer_used_p): New functions.
+	(analyze_scalar_evolution_1): Use fold_used_pointer.
+	* tree-chrec.c (convert_affine_scev): Convert no-op casts correctly.
+	* tree-ssa-loop-ivopts.c (generic_type_for): Return integral type
+	for pointers.
+
2006-08-17  Paolo Bonzini  <bonzini@gnu.org>

	PR c++/28573
diff --git a/gcc/tree-chrec.c b/gcc/tree-chrec.c
index f36fc9b..a74a49c 100644
--- a/gcc/tree-chrec.c
+++ b/gcc/tree-chrec.c
@@ -1162,7 +1162,10 @@ convert_affine_scev (struct loop *loop, tree type,
     -- must_check_src_overflow is true, and the range of TYPE is a superset
        of the range of CT -- i.e., in all cases except when CT is signed and
        TYPE unsigned.
-     -- both CT and TYPE have the same precision and signedness.  */
+     -- both CT and TYPE have the same precision and signedness, and we
+        verify instead that the source does not overflow (this may be
+        easier than verifying it for the result, as we may use the
+        information about the semantics of overflow in CT).  */
  if (must_check_src_overflow)
    {
      if (TYPE_UNSIGNED (type) && !TYPE_UNSIGNED (ct))
@@ -1172,7 +1175,10 @@ convert_affine_scev (struct loop *loop, tree type,
	}
      else if (TYPE_UNSIGNED (ct) == TYPE_UNSIGNED (type)
	       && TYPE_PRECISION (ct) == TYPE_PRECISION (type))
-	must_check_rslt_overflow = false;
+	{
+	  must_check_rslt_overflow = false;
+	  must_check_src_overflow = true;
+	}
      else
	must_check_rslt_overflow = true;
    }
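
To make the new branch above concrete: when CT and TYPE have the same precision and signedness, the conversion preserves values exactly, so it suffices to prove that the evolution does not wrap in the source type CT, whose overflow semantics are known. Below is a minimal sketch of the decision in plain C, with a hypothetical styp record standing in for GCC's type nodes; the body of the first branch is cut off by the hunk boundary above, so it is only approximated here.

#include <stdbool.h>

/* Hypothetical stand-in for the properties of a GCC scalar type that
   matter for this decision.  */
struct styp
{
  unsigned precision;
  bool is_unsigned;
};

/* Given that a source-overflow check is initially required, decide
   which no-wrap proofs are needed when converting an affine scev
   from CT to TYPE (mirrors the branch added above).  */
static void
select_overflow_checks (struct styp ct, struct styp type,
                        bool *check_src, bool *check_rslt)
{
  *check_src = true;

  if (type.is_unsigned && !ct.is_unsigned)
    /* The range of TYPE is not a superset of the range of CT; this
       case is handled by the elided branch above, approximated here
       by requiring both checks.  */
    *check_rslt = true;
  else if (ct.is_unsigned == type.is_unsigned
           && ct.precision == type.precision)
    /* The cast is value-preserving: proving that the source does not
       wrap is sufficient, and often easier.  */
    *check_rslt = false;
  else
    *check_rslt = true;
}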
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index 52b0ba3..13cbe42 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -1718,6 +1718,184 @@ compute_scalar_evolution_in_loop (struct loop *wrto_loop,
  return analyze_scalar_evolution_1 (wrto_loop, res, chrec_not_analyzed_yet);
}

+/* Folds EXPR, if it is a cast of a polynomial chrec to a pointer type,
+   assuming that the created polynomial_chrec does not wrap.  */
+
+static tree
+fold_used_pointer_cast (tree expr)
+{
+  tree op;
+  tree type, inner_type;
+
+  if (TREE_CODE (expr) != NOP_EXPR && TREE_CODE (expr) != CONVERT_EXPR)
+    return expr;
+
+  op = TREE_OPERAND (expr, 0);
+  if (TREE_CODE (op) != POLYNOMIAL_CHREC)
+    return expr;
+
+  type = TREE_TYPE (expr);
+  inner_type = TREE_TYPE (op);
+
+  if (!INTEGRAL_TYPE_P (inner_type)
+      || TYPE_PRECISION (inner_type) != TYPE_PRECISION (type))
+    return expr;
+
+  return build_polynomial_chrec (CHREC_VARIABLE (op),
+				 chrec_convert (type, CHREC_LEFT (op), NULL_TREE),
+				 chrec_convert (type, CHREC_RIGHT (op), NULL_TREE));
+}
+
+/* Returns true if EXPR is an expression corresponding to the offset of a
+   pointer in a computation of the form p + offset.  */
+
+static bool
+pointer_offset_p (tree expr)
+{
+  if (TREE_CODE (expr) == INTEGER_CST)
+    return true;
+
+  if ((TREE_CODE (expr) == NOP_EXPR || TREE_CODE (expr) == CONVERT_EXPR)
+      && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0))))
+    return true;
+
+  return false;
+}
+
+/* EXPR is a scalar evolution of a pointer that is dereferenced or used in
+   a comparison.  This means that it must point to a part of some object in
+   memory, which enables us to argue about overflows and possibly simplify
+   EXPR.  Returns the simplified value.
+
+   Currently, for
+
+   int i, n;
+   int *p;
+
+   for (i = -n; i < n; i++)
+     *(p + i) = ...;
+
+   we generate the following code (assuming that the size of int and size_t
+   is 4 bytes):
+
+   for (i = -n; i < n; i++)
+     {
+       size_t tmp1, tmp2;
+       int *tmp3, *tmp4;
+
+       tmp1 = (size_t) i;       (1)
+       tmp2 = 4 * tmp1;         (2)
+       tmp3 = (int *) tmp2;     (3)
+       tmp4 = p + tmp3;         (4)
+
+       *tmp4 = ...;
+     }
+
+   In general, we assume that pointer arithmetic does not overflow (since
+   its behavior is undefined in that case).  One of the problems is that
+   our translation does not capture this property very well -- (int *) is
+   considered unsigned, hence the computation in (4) does overflow if i is
+   negative.
+
+   This imprecision creates complications in scev analysis.  The scalar
+   evolution of i is [-n, +, 1].  Since int and size_t have the same
+   precision (in this example), and size_t is unsigned (so we do not care
+   about overflows), we succeed in deriving that the scev of tmp1 is
+   [(size_t) -n, +, 1] and the scev of tmp2 is [4 * (size_t) -n, +, 4].
+   With tmp3, we run into a problem -- [(int *) (4 * (size_t) -n), +, 4]
+   wraps, and since in several places we assume that this is not the case
+   for scevs with pointer type, we cannot use this scev for tmp3; hence,
+   its scev is (int *) [(4 * (size_t) -n), +, 4], and the scev of tmp4 is
+   p + (int *) [(4 * (size_t) -n), +, 4].  Most of the optimizers are
+   unable to work with scevs of this shape.
+
+   However, since tmp4 is dereferenced, all its values must belong to a
+   single object, and taking into account that the precision of int * and
+   size_t is the same, it is impossible for its scev to wrap.  Hence, we
+   can derive that its evolution is [p + (int *) (4 * (size_t) -n), +, 4],
+   which the optimizers can work with.
+
+   ??? Maybe we should use a different representation for pointer
+   arithmetic; however, that is a long-term project with a lot of
+   potential for creating bugs.  */
+
+static tree
+fold_used_pointer (tree expr)
+{
+  tree op0, op1, new0, new1;
+  enum tree_code code = TREE_CODE (expr);
+
+  if (code == PLUS_EXPR
+      || code == MINUS_EXPR)
+    {
+      op0 = TREE_OPERAND (expr, 0);
+      op1 = TREE_OPERAND (expr, 1);
+
+      if (pointer_offset_p (op1))
+	{
+	  new0 = fold_used_pointer (op0);
+	  new1 = fold_used_pointer_cast (op1);
+	}
+      else if (code == PLUS_EXPR && pointer_offset_p (op0))
+	{
+	  new0 = fold_used_pointer_cast (op0);
+	  new1 = fold_used_pointer (op1);
+	}
+      else
+	return expr;
+
+      if (new0 == op0 && new1 == op1)
+	return expr;
+
+      if (code == PLUS_EXPR)
+	expr = chrec_fold_plus (TREE_TYPE (expr), new0, new1);
+      else
+	expr = chrec_fold_minus (TREE_TYPE (expr), new0, new1);
+
+      return expr;
+    }
+  else
+    return fold_used_pointer_cast (expr);
+}
+
+/* Returns true if PTR is dereferenced or used in a comparison.  */
+
+static bool
+pointer_used_p (tree ptr)
+{
+  use_operand_p use_p;
+  imm_use_iterator imm_iter;
+  tree stmt, rhs;
+  struct ptr_info_def *pi = get_ptr_info (ptr);
+  var_ann_t v_ann = var_ann (SSA_NAME_VAR (ptr));
+
+  /* Check whether the pointer has a memory tag; if it does, it is
+     (or at least used to be) dereferenced.  */
+  if ((pi != NULL && pi->name_mem_tag != NULL)
+      || v_ann->symbol_mem_tag)
+    return true;
+
+  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, ptr)
+    {
+      stmt = USE_STMT (use_p);
+      if (TREE_CODE (stmt) == COND_EXPR)
+	return true;
+
+      if (TREE_CODE (stmt) != MODIFY_EXPR)
+	continue;
+
+      rhs = TREE_OPERAND (stmt, 1);
+      if (!COMPARISON_CLASS_P (rhs))
+	continue;
+
+      /* Look at the operands of the comparison RHS, not of the
+	 assignment itself.  */
+      if (TREE_OPERAND (rhs, 0) == ptr
+	  || TREE_OPERAND (rhs, 1) == ptr)
+	return true;
+    }
+
+  return false;
+}
+
/* Helper recursive function. */
static tree
@@ -1766,6 +1944,11 @@ analyze_scalar_evolution_1 (struct loop *loop, tree var, tree res)
    {
    case MODIFY_EXPR:
      res = interpret_rhs_modify_expr (loop, def, TREE_OPERAND (def, 1), type);
+
+      if (POINTER_TYPE_P (type)
+	  && !automatically_generated_chrec_p (res)
+	  && pointer_used_p (var))
+	res = fold_used_pointer (res);
      break;

    case PHI_NODE:
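
A self-contained version of the loop from the fold_used_pointer comment may help (the buffer size and stored values are invented for the demonstration). Every value of p + i stays inside buf, so the pointer evolution cannot wrap even though the intermediate unsigned byte offset wraps for negative i; that containment is exactly what pointer_used_p establishes before fold_used_pointer rewrites the scev.

#include <stdio.h>

int
main (void)
{
  int buf[8] = { 0 };
  int *p = &buf[4];   /* points into the middle of BUF */
  int n = 4;
  int i;

  /* gimple lowers the address computation below into steps (1)-(4)
     of the comment: the byte offset sizeof (int) * (size_t) i wraps
     for i < 0, but the resulting pointer never leaves BUF.  */
  for (i = -n; i < n; i++)
    *(p + i) = i;

  for (i = 0; i < 8; i++)
    printf ("%d ", buf[i]);   /* prints: -4 -3 -2 -1 0 1 2 3 */
  printf ("\n");
  return 0;
}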
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index 0e8fa94..2ead549 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -1925,14 +1925,14 @@ strip_offset (tree expr, unsigned HOST_WIDE_INT *offset)
}
/* Returns a variant of TYPE that can be used as a base for different uses.
-   For integer types, we return unsigned variant of the type, which
-   avoids problems with overflows.  For pointer types, we return void *.  */
+   We return an unsigned type with the same precision, which avoids
+   problems with overflows.  */

static tree
generic_type_for (tree type)
{
  if (POINTER_TYPE_P (type))
-    return ptr_type_node;
+    return unsigned_type_for (type);

  if (TYPE_UNSIGNED (type))
    return type;
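
The code shape this enables, hand-written here as an approximation rather than actual ivopts output: the induction variable lives in an unsigned integer of pointer width (uintptr_t stands in for whatever type unsigned_type_for picks on the target), so stepping it wraps modulo 2^N by definition instead of invoking undefined pointer overflow, and each use casts back to the pointer type.

#include <stdint.h>

void
fill (int *p, long n)
{
  /* Pointer IV kept in an unsigned integral type; the round trip
     through uintptr_t is valid on the usual flat-address targets.  */
  uintptr_t iv = (uintptr_t) p;
  long i;

  for (i = 0; i < n; i++)
    {
      *(int *) iv = 0;
      iv += sizeof (int);
    }
}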
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 335ae61..5f53211 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -2015,7 +2015,7 @@ static void
adjust_range_with_scev (value_range_t *vr, struct loop *loop, tree stmt,
			tree var)
{
-  tree init, step, chrec;
+  tree init, step, chrec, tmin, tmax, min, max, type;
  enum ev_direction dir;

  /* TODO.  Don't adjust anti-ranges.  An anti-range may provide
@@ -2049,13 +2049,23 @@ adjust_range_with_scev (value_range_t *vr, struct loop *loop, tree stmt,
			  true))
    return;

-  if (!POINTER_TYPE_P (TREE_TYPE (init))
-      && (vr->type == VR_VARYING || vr->type == VR_UNDEFINED))
+  type = TREE_TYPE (var);
+  if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
+    tmin = lower_bound_in_type (type, type);
+  else
+    tmin = TYPE_MIN_VALUE (type);
+  if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
+    tmax = upper_bound_in_type (type, type);
+  else
+    tmax = TYPE_MAX_VALUE (type);
+
+  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    {
+      min = tmin;
+      max = tmax;
+
      /* For VARYING or UNDEFINED ranges, just about anything we get
	 from scalar evolutions should be better.  */
-      tree min = TYPE_MIN_VALUE (TREE_TYPE (init));
-      tree max = TYPE_MAX_VALUE (TREE_TYPE (init));

      if (dir == EV_DIR_DECREASES)
	max = init;
@@ -2064,7 +2074,8 @@ adjust_range_with_scev (value_range_t *vr, struct loop *loop, tree stmt,
      /* If we would create an invalid range, then just assume we
	 know absolutely nothing.  This may be over-conservative,
-	 but it's clearly safe.  */
+	 but it's clearly safe, and should happen only in unreachable
+	 parts of the code or for invalid programs.  */
      if (compare_values (min, max) == 1)
	return;
@@ -2072,8 +2083,8 @@ adjust_range_with_scev (value_range_t *vr, struct loop *loop, tree stmt,
    }
  else if (vr->type == VR_RANGE)
    {
-      tree min = vr->min;
-      tree max = vr->max;
+      min = vr->min;
+      max = vr->max;

      if (dir == EV_DIR_DECREASES)
	{
@@ -2084,10 +2095,11 @@ adjust_range_with_scev (value_range_t *vr, struct loop *loop, tree stmt,
	      max = init;

	      /* If we just created an invalid range with the minimum
-		 greater than the maximum, take the minimum all the
-		 way to -INF.  */
+		 greater than the maximum, we fail conservatively.
+		 This should happen only in unreachable parts of the
+		 code or for invalid programs.  */
	      if (compare_values (min, max) == 1)
-		min = TYPE_MIN_VALUE (TREE_TYPE (min));
+		return;
	    }
	}
      else
@@ -2097,11 +2109,9 @@ adjust_range_with_scev (value_range_t *vr, struct loop *loop, tree stmt,
	    {
	      min = init;

-	      /* If we just created an invalid range with the minimum
-		 greater than the maximum, take the maximum all the
-		 way to +INF.  */
+	      /* Again, avoid creating an invalid range by failing.  */
	      if (compare_values (min, max) == 1)
-		max = TYPE_MAX_VALUE (TREE_TYPE (max));
+		return;
	    }
	}
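
Finally, the reason for the tmin/tmax computation above: pointer types carry no TYPE_MIN_VALUE/TYPE_MAX_VALUE to read, so the bounds must be synthesized from the type's precision, which is what lower_bound_in_type and upper_bound_in_type do. Below is a rough sketch of that computation, assuming two's complement and at most 64 bits of precision (hypothetical helpers, not the GCC functions themselves); pointers take the unsigned pair [0, 2^prec - 1].

#include <stdbool.h>
#include <stdint.h>

/* Largest value of a type with PREC bits: 0111...1 if signed,
   1111...1 if unsigned (pointers count as unsigned).  */
uint64_t
upper_bound_for (unsigned prec, bool is_signed)
{
  uint64_t ones = prec >= 64 ? UINT64_MAX : ((uint64_t) 1 << prec) - 1;
  return is_signed ? ones >> 1 : ones;
}

/* Smallest value: -(2^(prec-1)) if signed, 0 otherwise.  Written so
   that no intermediate signed operation overflows.  */
int64_t
lower_bound_for (unsigned prec, bool is_signed)
{
  if (!is_signed)
    return 0;
  return -(int64_t) (((uint64_t) 1 << (prec - 1)) - 1) - 1;
}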