author    Andrew Pinski <andrew.pinski@oss.qualcomm.com>  2025-08-30 15:16:20 -0700
committer Andrew Pinski <andrew.pinski@oss.qualcomm.com>  2025-09-03 00:01:26 -0700
commit    9f94029829daf3b83bb95226dcfe003c5ddcade2 (patch)
tree      71ea18aa334133e0e0a935e5b1ce3272b5d67312
parent    23d5056310bda8da6a277f42f57226301809ca89 (diff)
download  gcc-9f94029829daf3b83bb95226dcfe003c5ddcade2.zip
          gcc-9f94029829daf3b83bb95226dcfe003c5ddcade2.tar.gz
          gcc-9f94029829daf3b83bb95226dcfe003c5ddcade2.tar.bz2
fold: Unwrap MEM_REF after get_inner_reference in split_address_to_core_and_offset [PR121355]
split_address_to_core_and_offset calls get_inner_reference. Take:
```
_6 = t_3(D) + 12;
_8 = &MEM[(struct s1 *)t_3(D) + 4B].t;
_1 = _6 - _8;
```
On the assignment of _8, get_inner_reference will return
`MEM[(struct s1 *)t_3(D) + 4B]` and an offset, but that does not match up
with `t_3(D)`, which is how split_address_to_core_and_offset handles
POINTER_PLUS_EXPR. So this patch unwraps the MEM_REF after the call to
get_inner_reference and has it act like a pointer-plus.

Changes since v1:
* v2: Remove the check on operand 1 for poly_int_tree_p; it always holds.
  Do the addition before the check that the result fits in a shwi,
  instead of after.

Bootstrapped and tested on x86_64-linux-gnu.

	PR tree-optimization/121355

gcc/ChangeLog:

	* fold-const.cc (split_address_to_core_and_offset): Handle a
	MEM_REF after the call to get_inner_reference.

gcc/testsuite/ChangeLog:

	* gcc.dg/tree-ssa/ptrdiff-1.c: New test.

Signed-off-by: Andrew Pinski <andrew.pinski@oss.qualcomm.com>
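For context, here is a minimal sketch (not part of the patch) of the offset
arithmetic the message describes, using the struct layout from the new
testcase below; the concrete numbers are assumptions based on a 4-byte int
and no padding inside struct s2.

```c
/* Hypothetical illustration only: the byte offsets the two SSA names
   above carry, assuming sizeof (int) == 4 and no extra padding.  */
#include <assert.h>
#include <stddef.h>

struct s1 { int t[2]; };
struct s2 { int p; struct s1 t; };

int main (void)
{
  /* _8 = &MEM[(struct s1 *)t_3(D) + 4B].t: after the patch the core is
     t_3(D) and the 4-byte MEM_REF offset is folded into pbitpos
     (4 << LOG2_BITS_PER_UNIT = 32 bits; offsetof (struct s1, t) adds 0).  */
  assert (offsetof (struct s2, t) == 4);
  /* _6 = t_3(D) + 12: core t_3(D), offset 12 bytes = 96 bits.  */
  assert (offsetof (struct s2, t) + sizeof (struct s1) == 12);
  /* _1 = _6 - _8 can therefore fold to 12 - 4 = 8 bytes, i.e. the
     array_size of 2 ints that the test checks.  */
  assert ((12 - 4) / sizeof (int) == 2);
  return 0;
}
```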
-rw-r--r--	gcc/fold-const.cc                          | 11
-rw-r--r--	gcc/testsuite/gcc.dg/tree-ssa/ptrdiff-1.c  | 45
2 files changed, 56 insertions(+), 0 deletions(-)
diff --git a/gcc/fold-const.cc b/gcc/fold-const.cc
index 8867540..fe7a5fe 100644
--- a/gcc/fold-const.cc
+++ b/gcc/fold-const.cc
@@ -16514,6 +16514,17 @@ split_address_to_core_and_offset (tree exp,
core = get_inner_reference (TREE_OPERAND (exp, 0), &bitsize, pbitpos,
poffset, &mode, &unsignedp, &reversep,
&volatilep);
+ /* If we are left with MEM[a + CST] strip that and add it to the
+ pbitpos and return a. */
+ if (TREE_CODE (core) == MEM_REF)
+ {
+ poly_offset_int tem;
+ tem = wi::to_poly_offset (TREE_OPERAND (core, 1));
+ tem <<= LOG2_BITS_PER_UNIT;
+ tem += *pbitpos;
+ if (tem.to_shwi (pbitpos))
+ return TREE_OPERAND (core, 0);
+ }
core = build_fold_addr_expr_loc (loc, core);
}
else if (TREE_CODE (exp) == POINTER_PLUS_EXPR)
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ptrdiff-1.c b/gcc/testsuite/gcc.dg/tree-ssa/ptrdiff-1.c
new file mode 100644
index 0000000..af9291c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/ptrdiff-1.c
@@ -0,0 +1,45 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+
+/* PR tree-optimization/121355 */
+
+#define array_size 2
+struct s1
+{
+ int t[array_size];
+};
+
+struct s2
+{
+ int p;
+ struct s1 t;
+};
+static inline int *b(struct s1 *t)
+{
+ return t->t;
+}
+static inline int *e(struct s1 *t)
+{
+ return b(t) + array_size;
+}
+void g(struct s2 *t)
+{
+ struct s1 *t2 = &t->t;
+ int *te = e(t2);
+ int *ts = b(t2);
+ int tt = te - ts;
+/*
+ _6 = t_3(D) + 12;
+ _8 = &MEM[(struct s1 *)t_3(D) + 4B].t;
+ _1 = _6 - _8;
+
+ _1 should be optimized to 2*sizeof(int) == 8.
+ */
+
+ if (tt != array_size)
+ __builtin_abort();
+}
+
+/* the call to abort should be removed. */
+
+/* { dg-final { scan-tree-dump-not "abort " "optimized" } } */