about summary refs log tree commit diff
path: root/gcc/ada/gcc-interface/utils.c
diff options
context:
space:
mode:
Diffstat (limited to 'gcc/ada/gcc-interface/utils.c')
-rw-r--r--  gcc/ada/gcc-interface/utils.c | 64
1 files changed, 40 insertions, 24 deletions
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index 8db92e5..221b0b5 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -1070,6 +1070,25 @@ make_packable_type (tree type, bool in_record, unsigned int max_align)
return new_type;
}
+/* Return true if TYPE has an unsigned representation. This needs to be used
+ when the representation of types whose precision is not equal to their size
+ is manipulated based on the RM size. */
+
+static inline bool
+type_unsigned_for_rm (tree type)
+{
+ /* This is the common case. */
+ if (TYPE_UNSIGNED (type))
+ return true;
+
+ /* See the E_Signed_Integer_Subtype case of gnat_to_gnu_entity. */
+ if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
+ && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
+ return true;
+
+ return false;
+}
+
/* Given a type TYPE, return a new type whose size is appropriate for SIZE.
If TYPE is the best type, return it. Otherwise, make a new type. We
only support new integral and pointer types. FOR_BIASED is true if
@@ -1113,10 +1132,7 @@ make_type_from_size (tree type, tree size_tree, bool for_biased)
/* The type should be an unsigned type if the original type is unsigned
or if the lower bound is constant and non-negative or if the type is
biased, see E_Signed_Integer_Subtype case of gnat_to_gnu_entity. */
- if (TYPE_UNSIGNED (type)
- || (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
- && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
- || biased_p)
+ if (type_unsigned_for_rm (type) || biased_p)
new_type = make_unsigned_type (size);
else
new_type = make_signed_type (size);
@@ -4913,7 +4929,12 @@ can_fold_for_view_convert_p (tree expr)
we expect the 8 bits at Vbits'Address to always contain Value, while
their original location depends on the endianness, at Value'Address
- on a little-endian architecture but not on a big-endian one. */
+ on a little-endian architecture but not on a big-endian one.
+
+ One pitfall is that we cannot use TYPE_UNSIGNED directly to decide how
+ the bits between the precision and the size are filled, because of the
+ trick used in the E_Signed_Integer_Subtype case of gnat_to_gnu_entity.
+ So we use the special predicate type_unsigned_for_rm above. */
tree
unchecked_convert (tree type, tree expr, bool notrunc_p)
@@ -4991,7 +5012,7 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
TYPE_REVERSE_STORAGE_ORDER (rec_type)
= TYPE_REVERSE_STORAGE_ORDER (etype);
- if (TYPE_UNSIGNED (type))
+ if (type_unsigned_for_rm (type))
field_type = make_unsigned_type (prec);
else
field_type = make_signed_type (prec);
@@ -5030,7 +5051,7 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
TYPE_REVERSE_STORAGE_ORDER (rec_type)
= TYPE_REVERSE_STORAGE_ORDER (type);
- if (TYPE_UNSIGNED (etype))
+ if (type_unsigned_for_rm (etype))
field_type = make_unsigned_type (prec);
else
field_type = make_signed_type (prec);
@@ -5131,31 +5152,26 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
expr = build1 (VIEW_CONVERT_EXPR, type, expr);
}
- /* If the result is an integral type whose precision is not equal to its
- size, sign- or zero-extend the result. We need not do this if the input
- is an integral type of the same precision and signedness or if the output
- is a biased type or if both the input and output are unsigned, or if the
- lower bound is constant and non-negative, see E_Signed_Integer_Subtype
- case of gnat_to_gnu_entity. */
+ /* If the result is a non-biased integral type whose precision is not equal
+ to its size, sign- or zero-extend the result. But we need not do this
+ if the input is also an integral type and both are unsigned or both are
+ signed and have the same precision. */
if (!notrunc_p
&& INTEGRAL_TYPE_P (type)
+ && !(code == INTEGER_TYPE && TYPE_BIASED_REPRESENTATION_P (type))
&& TYPE_RM_SIZE (type)
&& tree_int_cst_compare (TYPE_RM_SIZE (type), TYPE_SIZE (type)) < 0
&& !(INTEGRAL_TYPE_P (etype)
- && TYPE_UNSIGNED (type) == TYPE_UNSIGNED (etype)
- && tree_int_cst_compare (TYPE_RM_SIZE (type),
- TYPE_RM_SIZE (etype)
- ? TYPE_RM_SIZE (etype)
- : TYPE_SIZE (etype)) == 0)
- && !(code == INTEGER_TYPE && TYPE_BIASED_REPRESENTATION_P (type))
- && !((TYPE_UNSIGNED (type)
- || (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
- && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0))
- && TYPE_UNSIGNED (etype)))
+ && type_unsigned_for_rm (type) == type_unsigned_for_rm (etype)
+ && (type_unsigned_for_rm (type)
+ || tree_int_cst_compare (TYPE_RM_SIZE (type),
+ TYPE_RM_SIZE (etype)
+ ? TYPE_RM_SIZE (etype)
+ : TYPE_SIZE (etype)) == 0)))
{
tree base_type
= gnat_type_for_size (TREE_INT_CST_LOW (TYPE_SIZE (type)),
- TYPE_UNSIGNED (type));
+ type_unsigned_for_rm (type));
tree shift_expr
= convert (base_type,
size_binop (MINUS_EXPR,