author     Richard Sandiford <richard.sandiford@linaro.org>   2017-12-21 06:57:18 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>           2017-12-21 06:57:18 +0000
commit     39bb8924559d0487fb7cb6d4dc33d6b1e9c41004 (patch)
tree       9887f91a47e5c46de73b822b210ba630c6b62cf2 /gcc/expr.c
parent     a97d8b982793ba5bf1e54a41d9bb110182b8baf8 (diff)
poly_int: get_bit_range
This patch makes get_bit_range return the range and position as poly_ints.
2017-12-21 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
gcc/
* expr.h (get_bit_range): Return the bitstart and bitend as
poly_uint64s rather than unsigned HOST_WIDE_INTs. Return the bitpos
as a poly_int64 rather than a HOST_WIDE_INT.
* expr.c (get_bit_range): Likewise.
(expand_assignment): Update call accordingly.
* fold-const.c (optimize_bit_field_compare): Likewise.
Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>
From-SVN: r255912
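
[Editorial note, not part of the commit message.]  The essence of the conversion is that quantities such as bit positions and sizes may now carry a runtime component (the poly_int series was introduced for scalable vector support), so plain comparisons and modulo tests are replaced by predicates that ask whether a relation might hold for some runtime value (maybe_lt, maybe_gt, maybe_ne) or is known to hold for all runtime values (known_eq, known_ge, known_le, multiple_p).  The standalone sketch below is only an illustrative model of that idea: poly_like and its behaviour are hypothetical simplifications, not GCC's real poly-int.h.

    /* Illustrative sketch only: a hypothetical stand-in for the poly_int
       idea, not GCC's poly-int.h.  A value is modelled as a + b*N, where N
       is a runtime quantity known only to be non-negative (for example a
       scalable vector length multiplier).  */

    #include <cassert>
    #include <cstdint>

    struct poly_like
    {
      int64_t a;  /* constant term */
      int64_t b;  /* coefficient of the unknown N, N >= 0 */
    };

    /* Might the value be < c for some N?  (models maybe_lt)  */
    static bool maybe_lt (poly_like p, int64_t c)
    {
      /* With b < 0 a large enough N drives the value below any bound;
	 otherwise the minimum is taken at N == 0.  */
      return p.b < 0 || p.a < c;
    }

    /* Is the value >= c for every N?  (models known_ge)  */
    static bool known_ge (poly_like p, int64_t c)
    {
      return p.b >= 0 && p.a >= c;
    }

    /* Is the value a multiple of f for every N?  (models the two-operand
       form of multiple_p)  */
    static bool multiple_p (poly_like p, int64_t f)
    {
      return p.a % f == 0 && p.b % f == 0;
    }

    int main ()
    {
      poly_like bitpos = { 16, 64 };    /* e.g. 16 + 64*N bits */
      assert (!maybe_lt (bitpos, 0));   /* never negative: no adjustment */
      assert (known_ge (bitpos, 16));   /* holds for every N */
      assert (multiple_p (bitpos, 8));  /* byte-aligned for every N */
      return 0;
    }

This is why, in the diff below, a test such as bitpos < 0 becomes maybe_lt (bitpos, 0) (the conservative path is taken whenever the position could be negative), while alignment checks such as bitpos % BITS_PER_UNIT == 0 become multiple_p (bitpos, BITS_PER_UNIT) (the optimisation fires only when the property holds for all runtime values).
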
Diffstat (limited to 'gcc/expr.c')
-rw-r--r--   gcc/expr.c   88
1 file changed, 43 insertions(+), 45 deletions(-)
@@ -4809,13 +4809,10 @@ optimize_bitfield_assignment_op (poly_uint64 pbitsize,
    *BITSTART and *BITEND.  */
 
 void
-get_bit_range (unsigned HOST_WIDE_INT *bitstart,
-	       unsigned HOST_WIDE_INT *bitend,
-	       tree exp,
-	       HOST_WIDE_INT *bitpos,
-	       tree *offset)
+get_bit_range (poly_uint64_pod *bitstart, poly_uint64_pod *bitend, tree exp,
+	       poly_int64_pod *bitpos, tree *offset)
 {
-  HOST_WIDE_INT bitoffset;
+  poly_int64 bitoffset;
   tree field, repr;
 
   gcc_assert (TREE_CODE (exp) == COMPONENT_REF);
@@ -4836,13 +4833,13 @@ get_bit_range (unsigned HOST_WIDE_INT *bitstart,
   if (handled_component_p (TREE_OPERAND (exp, 0)))
     {
       machine_mode rmode;
-      HOST_WIDE_INT rbitsize, rbitpos;
+      poly_int64 rbitsize, rbitpos;
       tree roffset;
       int unsignedp, reversep, volatilep = 0;
       get_inner_reference (TREE_OPERAND (exp, 0), &rbitsize, &rbitpos,
			   &roffset, &rmode, &unsignedp, &reversep,
			   &volatilep);
-      if ((rbitpos % BITS_PER_UNIT) != 0)
+      if (!multiple_p (rbitpos, BITS_PER_UNIT))
	{
	  *bitstart = *bitend = 0;
	  return;
@@ -4853,10 +4850,10 @@ get_bit_range (unsigned HOST_WIDE_INT *bitstart,
      relative to the representative.  DECL_FIELD_OFFSET of field and
      repr are the same by construction if they are not constants,
      see finish_bitfield_layout.  */
-  if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
-      && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
-    bitoffset = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
-		 - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
+  poly_uint64 field_offset, repr_offset;
+  if (poly_int_tree_p (DECL_FIELD_OFFSET (field), &field_offset)
+      && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
+    bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
   else
     bitoffset = 0;
   bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
@@ -4865,17 +4862,16 @@ get_bit_range (unsigned HOST_WIDE_INT *bitstart,
   /* If the adjustment is larger than bitpos, we would have a negative bit
      position for the lower bound and this may wreak havoc later.  Adjust
      offset and bitpos to make the lower bound non-negative in that case.  */
-  if (bitoffset > *bitpos)
+  if (maybe_gt (bitoffset, *bitpos))
    {
-      HOST_WIDE_INT adjust = bitoffset - *bitpos;
-      gcc_assert ((adjust % BITS_PER_UNIT) == 0);
+      poly_int64 adjust_bits = upper_bound (bitoffset, *bitpos) - *bitpos;
+      poly_int64 adjust_bytes = exact_div (adjust_bits, BITS_PER_UNIT);
 
-      *bitpos += adjust;
+      *bitpos += adjust_bits;
      if (*offset == NULL_TREE)
-	*offset = size_int (-adjust / BITS_PER_UNIT);
+	*offset = size_int (-adjust_bytes);
      else
-	*offset
-	  = size_binop (MINUS_EXPR, *offset, size_int (adjust / BITS_PER_UNIT));
+	*offset = size_binop (MINUS_EXPR, *offset, size_int (adjust_bytes));
      *bitstart = 0;
    }
  else
@@ -4988,9 +4984,9 @@ expand_assignment (tree to, tree from, bool nontemporal)
      || TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
    {
      machine_mode mode1;
-      HOST_WIDE_INT bitsize, bitpos;
-      unsigned HOST_WIDE_INT bitregion_start = 0;
-      unsigned HOST_WIDE_INT bitregion_end = 0;
+      poly_int64 bitsize, bitpos;
+      poly_uint64 bitregion_start = 0;
+      poly_uint64 bitregion_end = 0;
      tree offset;
      int unsignedp, reversep, volatilep = 0;
      tree tem;
@@ -5000,11 +4996,11 @@ expand_assignment (tree to, tree from, bool nontemporal)
				 &unsignedp, &reversep, &volatilep);
 
      /* Make sure bitpos is not negative, it can wreak havoc later.  */
-      if (bitpos < 0)
+      if (maybe_lt (bitpos, 0))
	{
	  gcc_assert (offset == NULL_TREE);
-	  offset = size_int (bitpos >> LOG2_BITS_PER_UNIT);
-	  bitpos &= BITS_PER_UNIT - 1;
+	  offset = size_int (bits_to_bytes_round_down (bitpos));
+	  bitpos = num_trailing_bits (bitpos);
	}
 
      if (TREE_CODE (to) == COMPONENT_REF
@@ -5014,9 +5010,9 @@ expand_assignment (tree to, tree from, bool nontemporal)
	 However, if we do not have a DECL_BIT_FIELD_TYPE but BITPOS or
	 BITSIZE are not byte-aligned, there is no need to limit the range
	 we can access.  This can occur with packed structures in Ada.  */
-      else if (bitsize > 0
-	       && bitsize % BITS_PER_UNIT == 0
-	       && bitpos % BITS_PER_UNIT == 0)
+      else if (maybe_gt (bitsize, 0)
+	       && multiple_p (bitsize, BITS_PER_UNIT)
+	       && multiple_p (bitpos, BITS_PER_UNIT))
	{
	  bitregion_start = bitpos;
	  bitregion_end = bitpos + bitsize - 1;
@@ -5078,16 +5074,18 @@ expand_assignment (tree to, tree from, bool nontemporal)
	     This is only done for aligned data values, as these can
	     be expected to result in single move instructions.  */
 
+	  poly_int64 bytepos;
	  if (mode1 != VOIDmode
-	      && bitpos != 0
-	      && bitsize > 0
-	      && (bitpos % bitsize) == 0
-	      && (bitsize % GET_MODE_ALIGNMENT (mode1)) == 0
+	      && maybe_ne (bitpos, 0)
+	      && maybe_gt (bitsize, 0)
+	      && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
+	      && multiple_p (bitpos, bitsize)
+	      && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
	      && MEM_ALIGN (to_rtx) >= GET_MODE_ALIGNMENT (mode1))
	    {
-	      to_rtx = adjust_address (to_rtx, mode1, bitpos / BITS_PER_UNIT);
+	      to_rtx = adjust_address (to_rtx, mode1, bytepos);
	      bitregion_start = 0;
-	      if (bitregion_end >= (unsigned HOST_WIDE_INT) bitpos)
+	      if (known_ge (bitregion_end, poly_uint64 (bitpos)))
		bitregion_end -= bitpos;
	      bitpos = 0;
	    }
@@ -5102,8 +5100,7 @@ expand_assignment (tree to, tree from, bool nontemporal)
	     code contains an out-of-bounds access to a small array.  */
	  if (!MEM_P (to_rtx)
	      && GET_MODE (to_rtx) != BLKmode
-	      && (unsigned HOST_WIDE_INT) bitpos
-		 >= GET_MODE_PRECISION (GET_MODE (to_rtx)))
+	      && known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (to_rtx))))
	    {
	      expand_normal (from);
	      result = NULL;
@@ -5114,25 +5111,26 @@ expand_assignment (tree to, tree from, bool nontemporal)
	  unsigned short mode_bitsize = GET_MODE_BITSIZE (GET_MODE (to_rtx));
	  if (TYPE_MODE (TREE_TYPE (from)) == GET_MODE (to_rtx)
	      && COMPLEX_MODE_P (GET_MODE (to_rtx))
-	      && bitpos == 0
-	      && bitsize == mode_bitsize)
+	      && known_eq (bitpos, 0)
+	      && known_eq (bitsize, mode_bitsize))
	    result = store_expr (from, to_rtx, false, nontemporal, reversep);
-	  else if (bitsize == mode_bitsize / 2
-		   && (bitpos == 0 || bitpos == mode_bitsize / 2))
-	    result = store_expr (from, XEXP (to_rtx, bitpos != 0), false,
-				 nontemporal, reversep);
-	  else if (bitpos + bitsize <= mode_bitsize / 2)
+	  else if (known_eq (bitsize, mode_bitsize / 2)
+		   && (known_eq (bitpos, 0)
+		       || known_eq (bitpos, mode_bitsize / 2)))
+	    result = store_expr (from, XEXP (to_rtx, maybe_ne (bitpos, 0)),
+				 false, nontemporal, reversep);
+	  else if (known_le (bitpos + bitsize, mode_bitsize / 2))
	    result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
				  bitregion_start, bitregion_end,
				  mode1, from, get_alias_set (to),
				  nontemporal, reversep);
-	  else if (bitpos >= mode_bitsize / 2)
+	  else if (known_ge (bitpos, mode_bitsize / 2))
	    result = store_field (XEXP (to_rtx, 1), bitsize,
				  bitpos - mode_bitsize / 2,
				  bitregion_start, bitregion_end,
				  mode1, from, get_alias_set (to),
				  nontemporal, reversep);
-	  else if (bitpos == 0 && bitsize == mode_bitsize)
+	  else if (known_eq (bitpos, 0) && known_eq (bitsize, mode_bitsize))
	    {
	      result = expand_normal (from);
	      if (GET_CODE (result) == CONCAT)
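
[Editorial note.]  One detail in the expand_assignment hunk above is worth calling out: the old code divided bitpos by BITS_PER_UNIT directly, whereas the new code uses the three-operand form of multiple_p, which succeeds only when the division is exact for every runtime value and in that case also hands back the byte position (bytepos) for adjust_address.  A rough standalone model of that overload, again using the same hypothetical stand-in rather than GCC's real poly-int.h:

    /* Illustrative stand-in only, not GCC's poly-int.h: the overload of
       multiple_p that also returns the quotient when the division is exact
       for every N.  */

    #include <cassert>
    #include <cstdint>

    struct poly_like { int64_t a, b; };  /* value = a + b*N, N >= 0 */

    static bool multiple_p (poly_like p, int64_t f, poly_like *quotient)
    {
      if (p.a % f != 0 || p.b % f != 0)
	return false;
      quotient->a = p.a / f;
      quotient->b = p.b / f;
      return true;
    }

    int main ()
    {
      poly_like bitpos = { 32, 128 };             /* 32 + 128*N bits */
      poly_like bytepos;
      assert (multiple_p (bitpos, 8, &bytepos));  /* exact for every N */
      assert (bytepos.a == 4 && bytepos.b == 16); /* 4 + 16*N bytes */
      return 0;
    }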