author	Eric Botcazou <ebotcazou@gcc.gnu.org>	2020-07-09 00:13:50 +0200
committer	Eric Botcazou <ebotcazou@gcc.gnu.org>	2020-07-09 00:59:59 +0200
commit	b541b871135cb8f261d079006c79698a82e3594d (patch)
tree	ea67db21fcba13c246749cf5e8eed64e59f77442
parent	a8b522311beef5e02de15427e924752ea02def2a (diff)
Make memory copy functions scalar storage order barriers
This addresses the issue raised about the usage of memory copy functions
to toggle the scalar storage order.  Recall that you cannot (the compiler
errors out) take the address of a scalar which is stored in reverse order,
but you can do it for the enclosing aggregate type, which means that you
can also pass it to the memory copy functions.  In this case, the optimizer
may rewrite the copy into a scalar copy, which is a no-no.

gcc/c-family/ChangeLog:
	* c.opt (Wscalar-storage-order): Add explicit variable.

gcc/c/ChangeLog:
	* c-typeck.c (convert_for_assignment): If -Wscalar-storage-order
	is set, warn for conversion between pointers that point to
	incompatible scalar storage orders.

gcc/ChangeLog:
	* gimple-fold.c (gimple_fold_builtin_memory_op): Do not fold if
	either type has reverse scalar storage order.
	* tree-ssa-sccvn.c (vn_reference_lookup_3): Do not propagate
	through a memory copy if either type has reverse scalar storage
	order.

gcc/testsuite/ChangeLog:
	* gcc.dg/sso-11.c: New test.
	* gcc.dg/sso/sso.exp: Pass -Wno-scalar-storage-order.
	* gcc.dg/sso/memcpy-1.c: New test.
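For illustration, a minimal sketch of the pattern the message describes (a
hypothetical example, not part of the patch): the address of a reverse-order
scalar cannot be taken, but the address of the enclosing aggregate can, so the
aggregate may be handed to memcpy; if the optimizer were to rewrite that copy
as a plain scalar assignment, the byte swap implied by the storage order would
be lost.

	/* Hypothetical illustration of the issue, assuming a little-endian
	   target; not taken from the patch.  */
	struct __attribute__((scalar_storage_order("big-endian"))) be_int
	{
	  unsigned int val;   /* stored in reverse (big-endian) order */
	};

	unsigned int load_be (const unsigned char *buf)
	{
	  struct be_int tmp;
	  /* &tmp is accepted even though &tmp.val would be rejected.  If this
	     memcpy were folded into a scalar copy, the byte swap implied by the
	     storage order would be dropped; the patch makes such copies scalar
	     storage order barriers instead.  */
	  __builtin_memcpy (&tmp, buf, sizeof tmp);
	  return tmp.val;     /* this read performs the byte swap */
	}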
-rw-r--r--	gcc/c-family/c.opt	2
-rw-r--r--	gcc/c/c-typeck.c	35
-rw-r--r--	gcc/gimple-fold.c	38
-rw-r--r--	gcc/testsuite/gcc.dg/sso-11.c	36
-rw-r--r--	gcc/testsuite/gcc.dg/sso/memcpy-1.c	59
-rw-r--r--	gcc/testsuite/gcc.dg/sso/sso.exp	12
-rw-r--r--	gcc/tree-ssa-sccvn.c	12
7 files changed, 175 insertions, 19 deletions
diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt
index 89a5828..21df0c1 100644
--- a/gcc/c-family/c.opt
+++ b/gcc/c-family/c.opt
@@ -1072,7 +1072,7 @@ C ObjC C++ ObjC++ Var(warn_return_type) Warning LangEnabledBy(C ObjC C++ ObjC++,
Warn whenever a function's return type defaults to \"int\" (C), or about inconsistent return types (C++).
Wscalar-storage-order
-C ObjC C++ ObjC++ Init(1) Warning
+C ObjC C++ ObjC++ Var(warn_scalar_storage_order) Init(1) Warning
Warn on suspicious constructs involving reverse scalar storage order.
Wselector
diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c
index 3be3690c..b28c2c5 100644
--- a/gcc/c/c-typeck.c
+++ b/gcc/c/c-typeck.c
@@ -7151,6 +7151,41 @@ convert_for_assignment (location_t location, location_t expr_loc, tree type,
}
}
+ /* See if the pointers point to incompatible scalar storage orders. */
+ if (warn_scalar_storage_order
+ && (AGGREGATE_TYPE_P (ttl) && TYPE_REVERSE_STORAGE_ORDER (ttl))
+ != (AGGREGATE_TYPE_P (ttr) && TYPE_REVERSE_STORAGE_ORDER (ttr)))
+ {
+ switch (errtype)
+ {
+ case ic_argpass:
+ /* Do not warn for built-in functions, for example memcpy, since we
+ control how they behave and they can be useful in this area. */
+ if (TREE_CODE (rname) != FUNCTION_DECL || !DECL_IS_BUILTIN (rname))
+ warning_at (location, OPT_Wscalar_storage_order,
+ "passing argument %d of %qE from incompatible "
+ "scalar storage order", parmnum, rname);
+ break;
+ case ic_assign:
+ warning_at (location, OPT_Wscalar_storage_order,
+ "assignment to %qT from pointer type %qT with "
+ "incompatible scalar storage order", type, rhstype);
+ break;
+ case ic_init:
+ warning_at (location, OPT_Wscalar_storage_order,
+ "initialization of %qT from pointer type %qT with "
+ "incompatible scalar storage order", type, rhstype);
+ break;
+ case ic_return:
+ warning_at (location, OPT_Wscalar_storage_order,
+ "returning %qT from pointer type with incompatible "
+ "scalar storage order %qT", rhstype, type);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+
/* Any non-function converts to a [const][volatile] void *
and vice versa; otherwise, targets must be the same.
Meanwhile, the lhs target must have all the qualifiers of the rhs. */
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index 72c5e43..41b84ba 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -740,15 +740,24 @@ gimple_fold_builtin_memory_op (gimple_stmt_iterator *gsi,
}
else
{
- tree srctype, desttype, destvar, srcvar, srcoff;
+ /* We cannot (easily) change the type of the copy if it is a storage
+ order barrier, i.e. is equivalent to a VIEW_CONVERT_EXPR that can
+ modify the storage order of objects (see storage_order_barrier_p). */
+ tree srctype
+ = POINTER_TYPE_P (TREE_TYPE (src))
+ ? TREE_TYPE (TREE_TYPE (src)) : NULL_TREE;
+ tree desttype
+ = POINTER_TYPE_P (TREE_TYPE (dest))
+ ? TREE_TYPE (TREE_TYPE (dest)) : NULL_TREE;
+ tree destvar, srcvar, srcoff;
unsigned int src_align, dest_align;
- tree off0;
- const char *tmp_str;
unsigned HOST_WIDE_INT tmp_len;
+ const char *tmp_str;
/* Build accesses at offset zero with a ref-all character type. */
- off0 = build_int_cst (build_pointer_type_for_mode (char_type_node,
- ptr_mode, true), 0);
+ tree off0
+ = build_int_cst (build_pointer_type_for_mode (char_type_node,
+ ptr_mode, true), 0);
/* If we can perform the copy efficiently with first doing all loads
and then all stores inline it that way. Currently efficiently
@@ -766,7 +775,13 @@ gimple_fold_builtin_memory_op (gimple_stmt_iterator *gsi,
hack can be removed. */
&& !c_strlen (src, 1)
&& !((tmp_str = c_getstr (src, &tmp_len)) != NULL
- && memchr (tmp_str, 0, tmp_len) == NULL))
+ && memchr (tmp_str, 0, tmp_len) == NULL)
+ && !(srctype
+ && AGGREGATE_TYPE_P (srctype)
+ && TYPE_REVERSE_STORAGE_ORDER (srctype))
+ && !(desttype
+ && AGGREGATE_TYPE_P (desttype)
+ && TYPE_REVERSE_STORAGE_ORDER (desttype)))
{
unsigned ilen = tree_to_uhwi (len);
if (pow2p_hwi (ilen))
@@ -946,8 +961,13 @@ gimple_fold_builtin_memory_op (gimple_stmt_iterator *gsi,
if (!tree_fits_shwi_p (len))
return false;
- if (!POINTER_TYPE_P (TREE_TYPE (src))
- || !POINTER_TYPE_P (TREE_TYPE (dest)))
+ if (!srctype
+ || (AGGREGATE_TYPE_P (srctype)
+ && TYPE_REVERSE_STORAGE_ORDER (srctype)))
+ return false;
+ if (!desttype
+ || (AGGREGATE_TYPE_P (desttype)
+ && TYPE_REVERSE_STORAGE_ORDER (desttype)))
return false;
/* In the following try to find a type that is most natural to be
used for the memcpy source and destination and that allows
@@ -955,11 +975,9 @@ gimple_fold_builtin_memory_op (gimple_stmt_iterator *gsi,
using that type. In theory we could always use a char[len] type
but that only gains us that the destination and source possibly
no longer will have their address taken. */
- srctype = TREE_TYPE (TREE_TYPE (src));
if (TREE_CODE (srctype) == ARRAY_TYPE
&& !tree_int_cst_equal (TYPE_SIZE_UNIT (srctype), len))
srctype = TREE_TYPE (srctype);
- desttype = TREE_TYPE (TREE_TYPE (dest));
if (TREE_CODE (desttype) == ARRAY_TYPE
&& !tree_int_cst_equal (TYPE_SIZE_UNIT (desttype), len))
desttype = TREE_TYPE (desttype);
diff --git a/gcc/testsuite/gcc.dg/sso-11.c b/gcc/testsuite/gcc.dg/sso-11.c
new file mode 100644
index 0000000..6b50a25
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/sso-11.c
@@ -0,0 +1,36 @@
+/* Test support of scalar_storage_order attribute */
+
+/* { dg-do compile } */
+
+struct __attribute__((scalar_storage_order("big-endian"))) S1
+{
+ int i;
+};
+
+struct __attribute__((scalar_storage_order("little-endian"))) S2
+{
+ int i;
+};
+
+extern int foo (void *);
+
+int incompatible_call (int which, struct S1 *s1, struct S2 *s2)
+{
+ if (which == 1) return foo (s1); else return foo (s2); /* { dg-warning "incompatible scalar storage order" } */
+}
+
+void incompatible_assign (struct S1 *s1, struct S2 *s2)
+{
+ void *p1, *p2;
+ p1 = s1, p2 = s2; /* { dg-warning "incompatible scalar storage order" } */
+}
+
+void incompatible_init (struct S1 *s1, struct S2 *s2)
+{
+ void *p1 = s1, *p2 = s2; /* { dg-warning "incompatible scalar storage order" } */
+}
+
+void *incompatible_return (int which, struct S1 *s1, struct S2 *s2)
+{
+ if (which == 1) return s1; else return s2; /* { dg-warning "incompatible scalar storage order" } */
+}
diff --git a/gcc/testsuite/gcc.dg/sso/memcpy-1.c b/gcc/testsuite/gcc.dg/sso/memcpy-1.c
new file mode 100644
index 0000000..b4e1c87
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/sso/memcpy-1.c
@@ -0,0 +1,59 @@
+/* { dg-do run } */
+
+typedef unsigned char uint8_t;
+typedef unsigned int uint32_t;
+
+#define __big_endian__ scalar_storage_order("big-endian")
+#define __little_endian__ scalar_storage_order("little-endian")
+
+typedef union
+{
+ uint32_t val;
+ uint8_t v[4];
+} __attribute__((__big_endian__)) upal_u32be_t;
+
+typedef union
+{
+ uint32_t val;
+ uint8_t v[4];
+} __attribute__((__little_endian__)) upal_u32le_t;
+
+static inline uint32_t native_to_big_endian(uint32_t t)
+{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return t;
+#else
+ return __builtin_bswap32(t);
+#endif
+}
+static inline uint32_t native_to_little_endian(uint32_t t)
+{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return __builtin_bswap32(t);
+#else
+ return t;
+#endif
+}
+#define test(p, p1, i) do { if (p[i] != p1[i]) __builtin_abort (); } while (0)
+
+#define tests(p, p1) do { test(p, p1, 0); test(p, p1, 1); \
+ test(p, p1, 2); test(p, p1, 3); } while (0)
+
+int main(void)
+{
+ const uint32_t u = 0x12345678;
+
+ upal_u32be_t tempb;
+ __builtin_memcpy (&tempb, &u, sizeof(uint32_t));
+ uint32_t bu = tempb.val;
+ uint32_t b1u = native_to_big_endian(u);
+ tests (((uint8_t*)&bu), ((uint8_t*)&b1u));
+
+ upal_u32le_t templ;
+ __builtin_memcpy (&templ, &u, sizeof(uint32_t));
+ uint32_t lu = templ.val;
+ uint32_t l1u = native_to_little_endian(u);
+ tests (((uint8_t*)&lu), ((uint8_t*)&l1u));
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/sso/sso.exp b/gcc/testsuite/gcc.dg/sso/sso.exp
index a017f3e..80f534b 100644
--- a/gcc/testsuite/gcc.dg/sso/sso.exp
+++ b/gcc/testsuite/gcc.dg/sso/sso.exp
@@ -27,12 +27,12 @@ torture-init
dg-init
set SSO_TORTURE_OPTIONS [list \
- { -O0 } \
- { -O1 -fno-inline } \
- { -O2 } \
- { -O3 -finline-functions } \
- { -Os } \
- { -Og -g } ]
+ { -Wno-scalar-storage-order -O0 } \
+ { -Wno-scalar-storage-order -O1 -fno-inline } \
+ { -Wno-scalar-storage-order -O2 } \
+ { -Wno-scalar-storage-order -O3 -finline-functions } \
+ { -Wno-scalar-storage-order -Os } \
+ { -Wno-scalar-storage-order -Og -g } ]
set-torture-options $SSO_TORTURE_OPTIONS
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index 4b3f31c..e269f78 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -3224,8 +3224,10 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *data_,
return NULL;
}
- /* 6) For memcpy copies translate the reference through them if
- the copy kills ref. */
+ /* 6) For memcpy copies translate the reference through them if the copy
+ kills ref. But we cannot (easily) do this translation if the memcpy is
+ a storage order barrier, i.e. is equivalent to a VIEW_CONVERT_EXPR that
+ can modify the storage order of objects (see storage_order_barrier_p). */
else if (data->vn_walk_kind == VN_WALKREWRITE
&& is_gimple_reg_type (vr->type)
/* ??? Handle BCOPY as well. */
@@ -3275,6 +3277,9 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *data_,
}
if (TREE_CODE (lhs) == ADDR_EXPR)
{
+ if (AGGREGATE_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
+ && TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_TYPE (lhs))))
+ return (void *)-1;
tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (lhs, 0),
&lhs_offset);
if (!tem)
@@ -3303,6 +3308,9 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *data_,
rhs = vn_valueize (rhs);
if (TREE_CODE (rhs) == ADDR_EXPR)
{
+ if (AGGREGATE_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
+ && TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_TYPE (rhs))))
+ return (void *)-1;
tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs, 0),
&rhs_offset);
if (!tem)