author     Eric Botcazou <ebotcazou@adacore.com>    2018-10-22 11:03:17 +0000
committer  Eric Botcazou <ebotcazou@gcc.gnu.org>    2018-10-22 11:03:17 +0000
commit     39c61276fd8577bec79eff31da4080398a373a24 (patch)
tree       31b807b3b2ffeae973e2c61ccf6bfd7edd998317 /gcc/ada/gcc-interface/utils.c
parent     9e4cacfab2d836d4dabc0b2aac095e635288bd27 (diff)
utils.c (unchecked_convert): Use local variables for the biased and reverse SSO attributes of both types.
	* gcc-interface/utils.c (unchecked_convert): Use local variables
	for the biased and reverse SSO attributes of both types.  Further
	extend the processing of integral types in the presence of reverse
	SSO to all scalar types.

From-SVN: r265381
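Background for the "biased" handling in this change, as a minimal standalone C sketch (not GCC code; the subtype bounds and function names are made up for illustration): GNAT's biased representation stores an integer as the value minus the subtype's lower bound so it fits in fewer bits, which is why unchecked_convert must strip or reapply the bias, as the ebiased/biased paths in the patch do at the tree level.

/* Illustrative sketch, not GCC code: a subtype with range 100 .. 103
   (made up) fits in 2 bits once the lower bound is subtracted out.  */
#include <stdio.h>

enum { BIAS = 100 };  /* hypothetical lower bound of the subtype */

static unsigned encode_biased (int v)      { return (unsigned) (v - BIAS); }
static int      decode_biased (unsigned b) { return (int) b + BIAS; }

int main (void)
{
  for (int v = 100; v <= 103; v++)
    printf ("%d stored as %u, read back as %d\n",
            v, encode_biased (v), decode_biased (encode_biased (v)));
  return 0;
}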
Diffstat (limited to 'gcc/ada/gcc-interface/utils.c')
-rw-r--r--  gcc/ada/gcc-interface/utils.c | 87
1 file changed, 52 insertions(+), 35 deletions(-)
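And a second standalone C sketch (again illustrative, not GCC code; the helper name is made up) of what reverse storage order means for a scalar: the value's bytes are laid out in the opposite endianness, so an unchecked conversion between the native and reverse-SSO views amounts to a byte swap of the value's image. The patch below extends this handling from integral types to all scalar types, floating-point included, by routing the conversion through a wrapper record.

/* Illustrative sketch, not GCC code: reading a float whose storage
   image has reversed byte order, as a reverse-SSO component would.  */
#include <stdio.h>
#include <string.h>

static float load_reversed_float (const unsigned char *buf)
{
  unsigned char tmp[sizeof (float)];
  for (size_t i = 0; i < sizeof (float); i++)
    tmp[i] = buf[sizeof (float) - 1 - i];
  float f;
  memcpy (&f, tmp, sizeof f);
  return f;
}

int main (void)
{
  float f = 1.5f;
  unsigned char image[sizeof (float)];
  const unsigned char *p = (const unsigned char *) &f;

  /* Store the value with its bytes reversed...  */
  for (size_t i = 0; i < sizeof (float); i++)
    image[i] = p[sizeof (float) - 1 - i];

  /* ...and recover it through the byte-reversing view.  */
  printf ("%f\n", load_reversed_float (image));
  return 0;
}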
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index 1ce2f72..e7dfb41 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -5092,8 +5092,16 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
tree etype = TREE_TYPE (expr);
enum tree_code ecode = TREE_CODE (etype);
enum tree_code code = TREE_CODE (type);
+ const bool ebiased
+ = (ecode == INTEGER_TYPE && TYPE_BIASED_REPRESENTATION_P (etype));
+ const bool biased
+ = (code == INTEGER_TYPE && TYPE_BIASED_REPRESENTATION_P (type));
+ const bool ereverse
+ = (AGGREGATE_TYPE_P (etype) && TYPE_REVERSE_STORAGE_ORDER (etype));
+ const bool reverse
+ = (AGGREGATE_TYPE_P (type) && TYPE_REVERSE_STORAGE_ORDER (type));
tree tem;
- int c;
+ int c = 0;
/* If the expression is already of the right type, we are done. */
if (etype == type)
@@ -5109,7 +5117,7 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
|| (ecode == RECORD_TYPE && TYPE_JUSTIFIED_MODULAR_P (etype))))
|| code == UNCONSTRAINED_ARRAY_TYPE)
{
- if (ecode == INTEGER_TYPE && TYPE_BIASED_REPRESENTATION_P (etype))
+ if (ebiased)
{
tree ntype = copy_type (etype);
TYPE_BIASED_REPRESENTATION_P (ntype) = 0;
@@ -5117,7 +5125,7 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
expr = build1 (NOP_EXPR, ntype, expr);
}
- if (code == INTEGER_TYPE && TYPE_BIASED_REPRESENTATION_P (type))
+ if (biased)
{
tree rtype = copy_type (type);
TYPE_BIASED_REPRESENTATION_P (rtype) = 0;
@@ -5146,30 +5154,35 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
Finally, for the sake of consistency, we do the unchecked conversion
to an integral type with reverse storage order as soon as the source
type is an aggregate type with reverse storage order, even if there
- are no considerations of precision or size involved. */
- else if (INTEGRAL_TYPE_P (type)
- && TYPE_RM_SIZE (type)
- && (tree_int_cst_compare (TYPE_RM_SIZE (type),
- TYPE_SIZE (type)) < 0
- || (AGGREGATE_TYPE_P (etype)
- && TYPE_REVERSE_STORAGE_ORDER (etype))))
+ are no considerations of precision or size involved. Ultimately, we
+ further extend this processing to any scalar type. */
+ else if ((INTEGRAL_TYPE_P (type)
+ && TYPE_RM_SIZE (type)
+ && ((c = tree_int_cst_compare (TYPE_RM_SIZE (type),
+ TYPE_SIZE (type))) < 0
+ || ereverse))
+ || (SCALAR_FLOAT_TYPE_P (type) && ereverse))
{
tree rec_type = make_node (RECORD_TYPE);
- unsigned HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_RM_SIZE (type));
tree field_type, field;
- if (AGGREGATE_TYPE_P (etype))
- TYPE_REVERSE_STORAGE_ORDER (rec_type)
- = TYPE_REVERSE_STORAGE_ORDER (etype);
+ TYPE_REVERSE_STORAGE_ORDER (rec_type) = ereverse;
- if (type_unsigned_for_rm (type))
- field_type = make_unsigned_type (prec);
+ if (c < 0)
+ {
+ const unsigned HOST_WIDE_INT prec
+ = TREE_INT_CST_LOW (TYPE_RM_SIZE (type));
+ if (type_unsigned_for_rm (type))
+ field_type = make_unsigned_type (prec);
+ else
+ field_type = make_signed_type (prec);
+ SET_TYPE_RM_SIZE (field_type, TYPE_RM_SIZE (type));
+ }
else
- field_type = make_signed_type (prec);
- SET_TYPE_RM_SIZE (field_type, TYPE_RM_SIZE (type));
+ field_type = type;
field = create_field_decl (get_identifier ("OBJ"), field_type, rec_type,
- NULL_TREE, bitsize_zero_node, 1, 0);
+ NULL_TREE, bitsize_zero_node, c < 0, 0);
finish_record_type (rec_type, field, 1, false);
@@ -5184,31 +5197,35 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
The same considerations as above apply if the target type is an aggregate
type with reverse storage order and we also proceed similarly. */
- else if (INTEGRAL_TYPE_P (etype)
- && TYPE_RM_SIZE (etype)
- && (tree_int_cst_compare (TYPE_RM_SIZE (etype),
- TYPE_SIZE (etype)) < 0
- || (AGGREGATE_TYPE_P (type)
- && TYPE_REVERSE_STORAGE_ORDER (type))))
+ else if ((INTEGRAL_TYPE_P (etype)
+ && TYPE_RM_SIZE (etype)
+ && ((c = tree_int_cst_compare (TYPE_RM_SIZE (etype),
+ TYPE_SIZE (etype))) < 0
+ || reverse))
+ || (SCALAR_FLOAT_TYPE_P (etype) && reverse))
{
tree rec_type = make_node (RECORD_TYPE);
- unsigned HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_RM_SIZE (etype));
vec<constructor_elt, va_gc> *v;
vec_alloc (v, 1);
tree field_type, field;
- if (AGGREGATE_TYPE_P (type))
- TYPE_REVERSE_STORAGE_ORDER (rec_type)
- = TYPE_REVERSE_STORAGE_ORDER (type);
+ TYPE_REVERSE_STORAGE_ORDER (rec_type) = reverse;
- if (type_unsigned_for_rm (etype))
- field_type = make_unsigned_type (prec);
+ if (c < 0)
+ {
+ const unsigned HOST_WIDE_INT prec
+ = TREE_INT_CST_LOW (TYPE_RM_SIZE (etype));
+ if (type_unsigned_for_rm (etype))
+ field_type = make_unsigned_type (prec);
+ else
+ field_type = make_signed_type (prec);
+ SET_TYPE_RM_SIZE (field_type, TYPE_RM_SIZE (etype));
+ }
else
- field_type = make_signed_type (prec);
- SET_TYPE_RM_SIZE (field_type, TYPE_RM_SIZE (etype));
+ field_type = etype;
field = create_field_decl (get_identifier ("OBJ"), field_type, rec_type,
- NULL_TREE, bitsize_zero_node, 1, 0);
+ NULL_TREE, bitsize_zero_node, c < 0, 0);
finish_record_type (rec_type, field, 1, false);
@@ -5308,8 +5325,8 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
signed and have the same precision. */
tree type_rm_size;
if (!notrunc_p
+ && !biased
&& INTEGRAL_TYPE_P (type)
- && !(code == INTEGER_TYPE && TYPE_BIASED_REPRESENTATION_P (type))
&& (type_rm_size = TYPE_RM_SIZE (type))
&& tree_int_cst_compare (type_rm_size, TYPE_SIZE (type)) < 0
&& !(INTEGRAL_TYPE_P (etype)