author      Richard Sandiford <richard.sandiford@linaro.org>   2017-12-20 12:55:57 +0000
committer   Richard Sandiford <rsandifo@gcc.gnu.org>           2017-12-20 12:55:57 +0000
commit      4a022c701b1dca1bfc3d73dc82dd5858525876ef (patch)
tree        b5141fcb9c93c0793610d79602133e6aa7a1ff4c /gcc
parent      cc8bea091633989bef6d665c40193a9e255ceb81 (diff)
poly_int: symbolic_number
This patch changes symbolic_number::bytepos from a HOST_WIDE_INT to a
poly_int64.  perform_symbolic_merge can cope with symbolic offsets as long
as the difference between the two offsets is constant.  (This could happen
for a constant-sized field that occurs at a variable offset, for example.)

2017-12-20  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* gimple-ssa-store-merging.c (symbolic_number::bytepos): Change from
	HOST_WIDE_INT to poly_int64_pod.
	(perform_symbolic_merge): Update accordingly.
	(bswap_replace): Likewise.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r255889
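For readers unfamiliar with the poly_int64 idiom used in the hunks below, here
is a minimal standalone sketch of the constant-difference check that
perform_symbolic_merge now performs.  It does not use GCC's poly-int.h; the
poly_offset struct, the is_constant helper and mergeable_bytepos are stand-ins
invented for illustration, modelling a byte position of the form C0 + C1*X
where X is unknown at compile time.

// Stand-in for the idea behind poly_int64: an offset C0 + C1 * X, where X
// is a runtime quantity.  NOT GCC's poly-int.h, purely illustrative.
#include <cstdint>
#include <cstdio>

struct poly_offset
{
  int64_t coeff0;  // compile-time constant part
  int64_t coeff1;  // multiple of the unknown runtime value X
};

// Analogue of poly_int64's is_constant (&c): succeed only if the offset has
// no runtime component, returning the constant part through *konst.
static bool
is_constant (const poly_offset &p, int64_t *konst)
{
  if (p.coeff1 != 0)
    return false;
  *konst = p.coeff0;
  return true;
}

// Sketch of the new guard: two loads at symbolic byte positions can still be
// merged when their *difference* is a known constant, e.g. two fixed-size
// fields at offsets X + 4 and X + 8.
static bool
mergeable_bytepos (const poly_offset &bytepos1, const poly_offset &bytepos2,
                   int64_t *start2)
{
  poly_offset diff = { bytepos2.coeff0 - bytepos1.coeff0,
                       bytepos2.coeff1 - bytepos1.coeff1 };
  return is_constant (diff, start2);
}

int
main ()
{
  poly_offset a = { 4, 1 };  // X + 4
  poly_offset b = { 8, 1 };  // X + 8
  poly_offset c = { 0, 2 };  // 2 * X

  int64_t start2;
  if (mergeable_bytepos (a, b, &start2))
    printf ("a/b: constant difference %lld, merge can proceed\n",
            (long long) start2);
  if (!mergeable_bytepos (a, c, &start2))
    printf ("a/c: difference depends on X, bail out (return NULL)\n");
  return 0;
}

With inputs X + 4 and X + 8 the difference is the constant 4, so the merge can
go ahead even though neither position is constant on its own; with X + 4 and
2*X the difference still depends on X, which corresponds to the early
"return NULL" added in the patch.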
Diffstat (limited to 'gcc')
-rw-r--r--   gcc/ChangeLog                      9
-rw-r--r--   gcc/gimple-ssa-store-merging.c    22
2 files changed, 22 insertions, 9 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 1c2a7be..d3b8c8d 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -2,6 +2,15 @@
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
+ * gimple-ssa-store-merging.c (symbolic_number::bytepos): Change from
+ HOST_WIDE_INT to poly_int64_pod.
+ (perform_symbolic_merge): Update accordingly.
+ (bswap_replace): Likewise.
+
+2017-12-20 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
* tree-affine.h (aff_tree::offset): Change from widest_int
to poly_widest_int.
(wide_int_ext_for_comb): Delete.
diff --git a/gcc/gimple-ssa-store-merging.c b/gcc/gimple-ssa-store-merging.c
index 90d8c81..9c1d97a 100644
--- a/gcc/gimple-ssa-store-merging.c
+++ b/gcc/gimple-ssa-store-merging.c
@@ -216,7 +216,7 @@ struct symbolic_number {
tree type;
tree base_addr;
tree offset;
- HOST_WIDE_INT bytepos;
+ poly_int64_pod bytepos;
tree src;
tree alias_set;
tree vuse;
@@ -452,7 +452,7 @@ perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
if (rhs1 != rhs2)
{
uint64_t inc;
- HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
+ HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
struct symbolic_number *toinc_n_ptr, *n_end;
basic_block bb1, bb2;
@@ -464,15 +464,19 @@ perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
|| (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
return NULL;
- if (n1->bytepos < n2->bytepos)
+ start1 = 0;
+ if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
+ return NULL;
+
+ if (start1 < start2)
{
n_start = n1;
- start_sub = n2->bytepos - n1->bytepos;
+ start_sub = start2 - start1;
}
else
{
n_start = n2;
- start_sub = n1->bytepos - n2->bytepos;
+ start_sub = start1 - start2;
}
bb1 = gimple_bb (source_stmt1);
@@ -484,8 +488,8 @@ perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
/* Find the highest address at which a load is performed and
compute related info. */
- end1 = n1->bytepos + (n1->range - 1);
- end2 = n2->bytepos + (n2->range - 1);
+ end1 = start1 + (n1->range - 1);
+ end2 = start2 + (n2->range - 1);
if (end1 < end2)
{
end = end2;
@@ -504,7 +508,7 @@ perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
else
toinc_n_ptr = (n_start == n1) ? n2 : n1;
- n->range = end - n_start->bytepos + 1;
+ n->range = end - MIN (start1, start2) + 1;
/* Check that the range of memory covered can be represented by
a symbolic number. */
@@ -933,7 +937,7 @@ bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
tree load_offset_ptr, aligned_load_type;
gimple *load_stmt;
unsigned align = get_object_alignment (src);
- HOST_WIDE_INT load_offset = 0;
+ poly_int64 load_offset = 0;
if (cur_stmt)
{