Diffstat (limited to 'gcc/ada/gcc-interface/utils.c')
-rw-r--r--  gcc/ada/gcc-interface/utils.c | 84
 1 file changed, 71 insertions(+), 13 deletions(-)
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index 3b893b8..224dc00 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -957,6 +957,7 @@ make_packable_type (tree type, bool in_record)
TYPE_NAME (new_type) = TYPE_NAME (type);
TYPE_JUSTIFIED_MODULAR_P (new_type) = TYPE_JUSTIFIED_MODULAR_P (type);
TYPE_CONTAINS_TEMPLATE_P (new_type) = TYPE_CONTAINS_TEMPLATE_P (type);
+ TYPE_REVERSE_STORAGE_ORDER (new_type) = TYPE_REVERSE_STORAGE_ORDER (type);
if (TREE_CODE (type) == RECORD_TYPE)
TYPE_PADDING_P (new_type) = TYPE_PADDING_P (type);
@@ -1175,14 +1176,15 @@ pad_type_hasher::equal (pad_type_hash *t1, pad_type_hash *t2)
type1 = t1->type;
type2 = t2->type;
- /* We consider that the padded types are equivalent if they pad the same
- type and have the same size, alignment and RM size. Taking the mode
- into account is redundant since it is determined by the others. */
+ /* We consider that the padded types are equivalent if they pad the same type
+ and have the same size, alignment, RM size and storage order. Taking the
+ mode into account is redundant since it is determined by the others. */
return
TREE_TYPE (TYPE_FIELDS (type1)) == TREE_TYPE (TYPE_FIELDS (type2))
&& TYPE_SIZE (type1) == TYPE_SIZE (type2)
&& TYPE_ALIGN (type1) == TYPE_ALIGN (type2)
- && TYPE_ADA_SIZE (type1) == TYPE_ADA_SIZE (type2);
+ && TYPE_ADA_SIZE (type1) == TYPE_ADA_SIZE (type2)
+ && TYPE_REVERSE_STORAGE_ORDER (type1) == TYPE_REVERSE_STORAGE_ORDER (type2);
}
/* Look up the padded TYPE in the hash table and return its canonical version
@@ -1452,6 +1454,31 @@ built:
return record;
}
+
+/* Return a copy of the padded TYPE but with reverse storage order. */
+
+tree
+set_reverse_storage_order_on_pad_type (tree type)
+{
+ tree field, canonical_pad_type;
+
+#ifdef ENABLE_CHECKING
+ /* If the inner type is not scalar then the function does nothing. */
+ tree inner_type = TREE_TYPE (TYPE_FIELDS (type));
+ gcc_assert (!AGGREGATE_TYPE_P (inner_type) && !VECTOR_TYPE_P (inner_type));
+#endif
+
+ /* This is required for the canonicalization. */
+ gcc_assert (TREE_CONSTANT (TYPE_SIZE (type)));
+
+ field = copy_node (TYPE_FIELDS (type));
+ type = copy_type (type);
+ DECL_CONTEXT (field) = type;
+ TYPE_FIELDS (type) = field;
+ TYPE_REVERSE_STORAGE_ORDER (type) = 1;
+ canonical_pad_type = lookup_and_insert_pad_type (type);
+ return canonical_pad_type ? canonical_pad_type : type;
+}
/* Relate the alias sets of GNU_NEW_TYPE and GNU_OLD_TYPE according to OP.
If this is a multi-dimensional array type, do this recursively.
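
For illustration only, the following minimal stand-alone C program (not part of this patch and not GCC code; type_desc, canon_table, lookup_or_insert and with_reverse_order are hypothetical names) sketches the canonicalization idiom used by set_reverse_storage_order_on_pad_type above: the original type is never modified in place, the flag is set on a copy, and the copy is then looked up so that structurally identical padded types are shared.

/* Stand-alone sketch of the copy-and-canonicalize idiom; all names are
   hypothetical and only mirror the pattern used in the patch above.  */
#include <stdbool.h>
#include <stdlib.h>

struct type_desc { unsigned size; unsigned align; bool reverse_order; };

#define CANON_MAX 64
static struct type_desc *canon_table[CANON_MAX];
static int canon_count;

/* Field-by-field equality, in the spirit of pad_type_hasher::equal.  */
static bool
desc_equal (const struct type_desc *a, const struct type_desc *b)
{
  return a->size == b->size
	 && a->align == b->align
	 && a->reverse_order == b->reverse_order;
}

/* Return the canonical descriptor equal to *d, recording d if it is new.  */
static struct type_desc *
lookup_or_insert (struct type_desc *d)
{
  for (int i = 0; i < canon_count; i++)
    if (desc_equal (canon_table[i], d))
      return canon_table[i];
  if (canon_count < CANON_MAX)
    canon_table[canon_count++] = d;
  return d;
}

/* Analogue of the function above: copy, set the property, canonicalize.  */
static struct type_desc *
with_reverse_order (const struct type_desc *orig)
{
  struct type_desc *copy = malloc (sizeof *copy);
  if (!copy)
    abort ();
  *copy = *orig;
  copy->reverse_order = true;

  struct type_desc *canon = lookup_or_insert (copy);
  if (canon != copy)
    free (copy);	/* An equivalent descriptor already exists.  */
  return canon;
}
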
@@ -3357,7 +3384,7 @@ gnat_types_compatible_p (tree t1, tree t2)
return 1;
/* Array types are also compatible if they are constrained and have the same
- domain(s) and the same component type. */
+ domain(s), the same component type and the same scalar storage order. */
if (code == ARRAY_TYPE
&& (TYPE_DOMAIN (t1) == TYPE_DOMAIN (t2)
|| (TYPE_DOMAIN (t1)
@@ -3368,7 +3395,8 @@ gnat_types_compatible_p (tree t1, tree t2)
TYPE_MAX_VALUE (TYPE_DOMAIN (t2)))))
&& (TREE_TYPE (t1) == TREE_TYPE (t2)
|| (TREE_CODE (TREE_TYPE (t1)) == ARRAY_TYPE
- && gnat_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2)))))
+ && gnat_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2))))
+ && TYPE_REVERSE_STORAGE_ORDER (t1) == TYPE_REVERSE_STORAGE_ORDER (t2))
return 1;
return 0;
@@ -4849,17 +4877,38 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
}
/* If we are converting to an integral type whose precision is not equal
- to its size, first unchecked convert to a record type that contains an
- field of the given precision. Then extract the field. */
+ to its size, first unchecked convert to a record type that contains a
+ field of the given precision. Then extract the result from the field.
+
+ There is a subtlety if the source type is an aggregate type with reverse
+ storage order because its representation is not contiguous in the native
+ storage order, i.e. a direct unchecked conversion to an integral type
+ with N bits of precision cannot read the first N bits of the aggregate
+ type. To overcome it, we do an unchecked conversion to an integral type
+ with reverse storage order and return the resulting value. This also
+ ensures that the result of the unchecked conversion doesn't depend on
+ the endianness of the target machine, but only on the storage order of
+ the aggregate type.
+
+ Finally, for the sake of consistency, we do the unchecked conversion
+ to an integral type with reverse storage order as soon as the source
+ type is an aggregate type with reverse storage order, even if there
+ are no considerations of precision or size involved. */
else if (INTEGRAL_TYPE_P (type)
&& TYPE_RM_SIZE (type)
- && 0 != compare_tree_int (TYPE_RM_SIZE (type),
- GET_MODE_BITSIZE (TYPE_MODE (type))))
+ && (0 != compare_tree_int (TYPE_RM_SIZE (type),
+ GET_MODE_BITSIZE (TYPE_MODE (type)))
+ || (AGGREGATE_TYPE_P (etype)
+ && TYPE_REVERSE_STORAGE_ORDER (etype))))
{
tree rec_type = make_node (RECORD_TYPE);
unsigned HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_RM_SIZE (type));
tree field_type, field;
+ if (AGGREGATE_TYPE_P (etype))
+ TYPE_REVERSE_STORAGE_ORDER (rec_type)
+ = TYPE_REVERSE_STORAGE_ORDER (etype);
+
if (TYPE_UNSIGNED (type))
field_type = make_unsigned_type (prec);
else
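
For illustration only, the following minimal stand-alone C program (not part of this patch; struct rev_rec and read_reverse are hypothetical names, and a little-endian host is assumed for the values given in the comments) makes the subtlety described in the comment above concrete: the bytes of an aggregate stored with reverse (here big-endian) storage order cannot simply be reinterpreted as a native-order integer; they must be reassembled in the declared order, which is what the intermediate record type with TYPE_REVERSE_STORAGE_ORDER set achieves.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A 4-byte aggregate whose contents are laid out in big-endian (reverse)
   order regardless of the host.  */
struct rev_rec { unsigned char b[4]; };

/* Reassemble the value most-significant byte first, i.e. honor the declared
   storage order, which is what a reverse-order access must do.  */
static uint32_t
read_reverse (const struct rev_rec *r)
{
  return ((uint32_t) r->b[0] << 24) | ((uint32_t) r->b[1] << 16)
	 | ((uint32_t) r->b[2] << 8) | (uint32_t) r->b[3];
}

int
main (void)
{
  struct rev_rec r = { { 0x12, 0x34, 0x56, 0x78 } };
  uint32_t naive;

  /* Naive reinterpretation of the first 32 bits in native order: on a
     little-endian host this yields 0x78563412, not the stored value.  */
  memcpy (&naive, r.b, sizeof naive);

  printf ("native reinterpretation: 0x%08" PRIx32 "\n", naive);
  printf ("reverse-order read:      0x%08" PRIx32 "\n", read_reverse (&r));
  return 0;
}
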
@@ -4878,11 +4927,16 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
/* Similarly if we are converting from an integral type whose precision is
not equal to its size, first copy into a field of the given precision
- and unchecked convert the record type. */
+ and unchecked convert the record type.
+
+ The same considerations as above apply if the target type is an aggregate
+ type with reverse storage order and we also proceed similarly. */
else if (INTEGRAL_TYPE_P (etype)
&& TYPE_RM_SIZE (etype)
- && 0 != compare_tree_int (TYPE_RM_SIZE (etype),
- GET_MODE_BITSIZE (TYPE_MODE (etype))))
+ && (0 != compare_tree_int (TYPE_RM_SIZE (etype),
+ GET_MODE_BITSIZE (TYPE_MODE (etype)))
+ || (AGGREGATE_TYPE_P (type)
+ && TYPE_REVERSE_STORAGE_ORDER (type))))
{
tree rec_type = make_node (RECORD_TYPE);
unsigned HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_RM_SIZE (etype));
@@ -4890,6 +4944,10 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
vec_alloc (v, 1);
tree field_type, field;
+ if (AGGREGATE_TYPE_P (type))
+ TYPE_REVERSE_STORAGE_ORDER (rec_type)
+ = TYPE_REVERSE_STORAGE_ORDER (type);
+
if (TYPE_UNSIGNED (etype))
field_type = make_unsigned_type (prec);
else