Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog                                        7
-rw-r--r--  gcc/testsuite/gcc.c-torture/execute/20040703-1.c   147
-rw-r--r--  gcc/tree-sra.c                                       9
3 files changed, 160 insertions(+), 3 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 5ab89e1a..4bf082f 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,12 @@
 2004-07-03  Richard Henderson  <rth@redhat.com>
 
+	PR tree-optimization/16341
+	* tree-sra.c (sra_walk_function): Increment to next stmt before
+	processing the current stmt.
+	(sra_insert_after): Always use BSI_SAME_STMT.
+
+2004-07-03  Richard Henderson  <rth@redhat.com>
+
 	* tree-ssa-dom.c (simplify_rhs_and_lookup_avail_expr): Don't fold
 	fp plus with minus.
 
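
The ChangeLog entry describes the shape of the fix: sra_walk_function used to advance its iterator in the for-loop header, after the per-statement callbacks had run, and those callbacks may insert new statements after the statement being processed. The patch instead snapshots the successor (ni) before doing any work on the current statement, so insertions cannot perturb the traversal. A minimal standalone sketch of the same pattern, using a hypothetical linked list rather than GCC's statement iterators:

    #include <stdio.h>
    #include <stdlib.h>

    /* A singly linked list standing in for a statement chain.  */
    struct node { int v; struct node *next; };

    static struct node *
    cons (int v, struct node *next)
    {
      struct node *n = malloc (sizeof *n);
      n->v = v;
      n->next = next;
      return n;
    }

    int
    main (void)
    {
      struct node *head = cons (1, cons (2, cons (3, 0)));
      struct node *cur, *nxt;

      for (cur = head; cur; cur = nxt)
        {
          /* Snapshot the successor first, as the patched walk does with
             NI, so splicing after CUR cannot perturb the traversal.  */
          nxt = cur->next;
          if (cur->v == 2)
            cur->next = cons (20, cur->next);  /* insert after current */
          printf ("%d ", cur->v);
        }
      printf ("\n");
      return 0;
    }

Run on the list 1 -> 2 -> 3, this prints "1 2 3": the node spliced in mid-walk is never visited, which mirrors how the si/ni pair keeps the SRA walk stable while it rewrites statements.
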
diff --git a/gcc/testsuite/gcc.c-torture/execute/20040703-1.c b/gcc/testsuite/gcc.c-torture/execute/20040703-1.c
new file mode 100644
index 0000000..eba358d
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/20040703-1.c
@@ -0,0 +1,147 @@
+/* PR 16341 */
+
+#define PART_PRECISION (sizeof (cpp_num_part) * 8)
+
+typedef unsigned int cpp_num_part;
+typedef struct cpp_num cpp_num;
+struct cpp_num
+{
+  cpp_num_part high;
+  cpp_num_part low;
+  int unsignedp;  /* True if value should be treated as unsigned.  */
+  int overflow;   /* True if the most recent calculation overflowed.  */
+};
+
+static int
+num_positive (cpp_num num, unsigned int precision)
+{
+  if (precision > PART_PRECISION)
+    {
+      precision -= PART_PRECISION;
+      return (num.high & (cpp_num_part) 1 << (precision - 1)) == 0;
+    }
+
+  return (num.low & (cpp_num_part) 1 << (precision - 1)) == 0;
+}
+
+static cpp_num
+num_trim (cpp_num num, unsigned int precision)
+{
+  if (precision > PART_PRECISION)
+    {
+      precision -= PART_PRECISION;
+      if (precision < PART_PRECISION)
+        num.high &= ((cpp_num_part) 1 << precision) - 1;
+    }
+  else
+    {
+      if (precision < PART_PRECISION)
+        num.low &= ((cpp_num_part) 1 << precision) - 1;
+      num.high = 0;
+    }
+
+  return num;
+}
+
+/* Shift NUM, of width PRECISION, right by N bits.  */
+static cpp_num
+num_rshift (cpp_num num, unsigned int precision, unsigned int n)
+{
+  cpp_num_part sign_mask;
+  int x = num_positive (num, precision);
+
+  if (num.unsignedp || x)
+    sign_mask = 0;
+  else
+    sign_mask = ~(cpp_num_part) 0;
+
+  if (n >= precision)
+    num.high = num.low = sign_mask;
+  else
+    {
+      /* Sign-extend.  */
+      if (precision < PART_PRECISION)
+        num.high = sign_mask, num.low |= sign_mask << precision;
+      else if (precision < 2 * PART_PRECISION)
+        num.high |= sign_mask << (precision - PART_PRECISION);
+
+      if (n >= PART_PRECISION)
+        {
+          n -= PART_PRECISION;
+          num.low = num.high;
+          num.high = sign_mask;
+        }
+
+      if (n)
+        {
+          num.low = (num.low >> n) | (num.high << (PART_PRECISION - n));
+          num.high = (num.high >> n) | (sign_mask << (PART_PRECISION - n));
+        }
+    }
+
+  num = num_trim (num, precision);
+  num.overflow = 0;
+  return num;
+}
+#define num_zerop(num) ((num.low | num.high) == 0)
+#define num_eq(num1, num2) (num1.low == num2.low && num1.high == num2.high)
+
+cpp_num
+num_lshift (cpp_num num, unsigned int precision, unsigned int n)
+{
+  if (n >= precision)
+    {
+      num.overflow = !num.unsignedp && !num_zerop (num);
+      num.high = num.low = 0;
+    }
+  else
+    {
+      cpp_num orig;
+      unsigned int m = n;
+
+      orig = num;
+      if (m >= PART_PRECISION)
+        {
+          m -= PART_PRECISION;
+          num.high = num.low;
+          num.low = 0;
+        }
+      if (m)
+        {
+          num.high = (num.high << m) | (num.low >> (PART_PRECISION - m));
+          num.low <<= m;
+        }
+      num = num_trim (num, precision);
+
+      if (num.unsignedp)
+        num.overflow = 0;
+      else
+        {
+          cpp_num maybe_orig = num_rshift (num, precision, n);
+          num.overflow = !num_eq (orig, maybe_orig);
+        }
+    }
+
+  return num;
+}
+
+unsigned int precision = 64;
+unsigned int n = 16;
+
+cpp_num num = { 0, 3, 0, 0 };
+
+int main()
+{
+  cpp_num res = num_lshift (num, 64, n);
+
+  if (res.low != 0x30000)
+    abort ();
+
+  if (res.high != 0)
+    abort ();
+
+  if (res.overflow != 0)
+    abort ();
+
+  exit (0);
+}
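
For reference, the values the test checks follow from ordinary 64-bit arithmetic: with a 32-bit cpp_num_part (PART_PRECISION == 32), num = { high 0, low 3 } represents the value 3, and 3 << 16 == 0x30000, which still fits in the low part, so high stays 0 and no overflow is flagged. A quick standalone cross-check of that arithmetic (not part of the commit; it assumes unsigned int is 32 bits, as PART_PRECISION does here):

    #include <assert.h>
    #include <stdint.h>

    int
    main (void)
    {
      /* num = { high 0, low 3 } viewed as one 64-bit quantity.  */
      uint64_t wide = ((uint64_t) 0 << 32) | 3;

      wide <<= 16;  /* the shift the test performs via num_lshift */

      assert ((uint32_t) wide == 0x30000);    /* matches res.low  */
      assert ((uint32_t) (wide >> 32) == 0);  /* matches res.high */
      return 0;
    }
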
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 0f04fcb..f315ad9 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -828,13 +828,13 @@ static void
 sra_walk_function (const struct sra_walk_fns *fns)
 {
   basic_block bb;
-  block_stmt_iterator si;
+  block_stmt_iterator si, ni;
 
   /* ??? Phase 4 could derive some benefit to walking the function in
      dominator tree order.  */
 
   FOR_EACH_BB (bb)
-    for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
+    for (si = bsi_start (bb); !bsi_end_p (si); si = ni)
       {
         tree stmt, t;
         stmt_ann_t ann;
@@ -842,6 +842,9 @@ sra_walk_function (const struct sra_walk_fns *fns)
         stmt = bsi_stmt (si);
         ann = stmt_ann (stmt);
 
+        ni = si;
+        bsi_next (&ni);
+
         /* If the statement has no virtual operands, then it doesn't
            make any structure references that we care about.  */
         if (NUM_V_MAY_DEFS (V_MAY_DEF_OPS (ann)) == 0
@@ -1616,7 +1619,7 @@ sra_insert_after (block_stmt_iterator *bsi, tree list)
   if (stmt_ends_bb_p (stmt))
     insert_edge_copies (list, bsi->bb);
   else
-    bsi_insert_after (bsi, list, BSI_CONTINUE_LINKING);
+    bsi_insert_after (bsi, list, BSI_SAME_STMT);
 }
 
 /* Similarly, but replace the statement at BSI.  */
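
On the last hunk: in the block_stmt_iterator API of this era, the final argument to bsi_insert_after says where to leave the iterator. As I understand the old API, BSI_SAME_STMT keeps it pointing at the statement it already named, while BSI_CONTINUE_LINKING moves it so that subsequent insertions keep chaining after the newly added statements. Since the walker now steps via its saved ni, letting insertions drift the iterator would be unsafe, hence the switch. A toy model of the two modes (hypothetical names, not the GCC API):

    #include <stdio.h>

    /* Toy cursor over a statement list; models only where the cursor
       ends up after an insertion, in the spirit of the bsi modes.  */
    enum update_mode { SAME_STMT, CONTINUE_LINKING };

    static int
    insert_after (int cursor, int n_inserted, enum update_mode mode)
    {
      /* SAME_STMT: cursor still names the original statement.
         CONTINUE_LINKING: cursor ends past the new statements.  */
      return mode == CONTINUE_LINKING ? cursor + n_inserted : cursor;
    }

    int
    main (void)
    {
      printf ("SAME_STMT:        %d\n", insert_after (4, 2, SAME_STMT));         /* 4 */
      printf ("CONTINUE_LINKING: %d\n", insert_after (4, 2, CONTINUE_LINKING));  /* 6 */
      return 0;
    }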