about summary refs log tree commit diff
path: root/gcc/stor-layout.c
diff options
context:
space:
mode:
authorRichard Sandiford <rdsandiford@googlemail.com>2012-11-28 20:13:22 +0000
committerRichard Sandiford <rsandifo@gcc.gnu.org>2012-11-28 20:13:22 +0000
commit00efe3ea3c907b4d7b090da19add210432b3de2c (patch)
treeaaafc068a028daa1ab2ecddaa3afa63e83019d13 /gcc/stor-layout.c
parent1099bb0a712134f8a43e06407292aa95f27c0e54 (diff)
downloadgcc-00efe3ea3c907b4d7b090da19add210432b3de2c.zip
gcc-00efe3ea3c907b4d7b090da19add210432b3de2c.tar.gz
gcc-00efe3ea3c907b4d7b090da19add210432b3de2c.tar.bz2
re PR middle-end/55438 (tmpdir-gcc.dg-struct-layout-1/t001 - t028, gcc.c-torture/execute/991118-1.c, gcc.c-torture/execute/bf64-1.c, ICE)
gcc/
	PR middle-end/55438
	* expmed.c (simple_mem_bitfield_p): New function, extracted from
	store_bit_field_1 and extract_bit_field_1.  Use GET_MODE_ALIGNMENT
	rather than bitsize when checking the alignment.
	(store_bit_field_1, extract_bit_field_1): Call it.
	* stor-layout.c (bit_field_mode_iterator::bit_field_mode_iterator):
	Don't limit ALIGN_.  Assume that memory is mapped in chunks of at
	least word size, regardless of BIGGEST_ALIGNMENT.
	(bit_field_mode_iterator::get_mode): Use GET_MODE_ALIGNMENT rather
	than unit when checking the alignment.
	(get_best_mode): Use GET_MODE_ALIGNMENT.

From-SVN: r193905
Diffstat (limited to 'gcc/stor-layout.c')
-rw-r--r--  gcc/stor-layout.c | 20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index d0c093f..3d97796 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -2643,15 +2643,17 @@ bit_field_mode_iterator
unsigned int align, bool volatilep)
: mode_ (GET_CLASS_NARROWEST_MODE (MODE_INT)), bitsize_ (bitsize),
bitpos_ (bitpos), bitregion_start_ (bitregion_start),
- bitregion_end_ (bitregion_end), align_ (MIN (align, BIGGEST_ALIGNMENT)),
+ bitregion_end_ (bitregion_end), align_ (align),
volatilep_ (volatilep), count_ (0)
{
if (!bitregion_end_)
{
- /* We can assume that any aligned chunk of ALIGN_ bits that overlaps
+ /* We can assume that any aligned chunk of UNITS bits that overlaps
the bitfield is mapped and won't trap. */
- bitregion_end_ = bitpos + bitsize + align_ - 1;
- bitregion_end_ -= bitregion_end_ % align_ + 1;
+ unsigned HOST_WIDE_INT units = MIN (align, MAX (BIGGEST_ALIGNMENT,
+ BITS_PER_WORD));
+ bitregion_end_ = bitpos + bitsize + units - 1;
+ bitregion_end_ -= bitregion_end_ % units + 1;
}
}
@@ -2694,7 +2696,8 @@ bit_field_mode_iterator::next_mode (enum machine_mode *out_mode)
break;
/* Stop if the mode requires too much alignment. */
- if (unit > align_ && SLOW_UNALIGNED_ACCESS (mode_, align_))
+ if (GET_MODE_ALIGNMENT (mode_) > align_
+ && SLOW_UNALIGNED_ACCESS (mode_, align_))
break;
*out_mode = mode_;
@@ -2753,8 +2756,9 @@ get_best_mode (int bitsize, int bitpos,
enum machine_mode widest_mode = VOIDmode;
enum machine_mode mode;
while (iter.next_mode (&mode)
- /* ??? For historical reasons, reject modes that are wider than
- the alignment. This has both advantages and disadvantages.
+ /* ??? For historical reasons, reject modes that would normally
+ receive greater alignment, even if unaligned accesses are
+ acceptable. This has both advantages and disadvantages.
Removing this check means that something like:
struct s { unsigned int x; unsigned int y; };
@@ -2808,7 +2812,7 @@ get_best_mode (int bitsize, int bitpos,
causes store_bit_field to keep a 128-bit memory reference,
so that the final bitfield reference still has a MEM_EXPR
and MEM_OFFSET. */
- && GET_MODE_BITSIZE (mode) <= align
+ && GET_MODE_ALIGNMENT (mode) <= align
&& (largest_mode == VOIDmode
|| GET_MODE_SIZE (mode) <= GET_MODE_SIZE (largest_mode)))
{