path: root/gcc/lra-spills.c
author     Richard Sandiford <richard.sandiford@arm.com>	2016-11-25 08:23:08 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>	2016-11-25 08:23:08 +0000
commit     83d0488b79a97fa38a46b00fbce2c72c4af91ed9 (patch)
tree       a6a2d07e1d1291391656c78cb3726365e7c7a9a1 /gcc/lra-spills.c
parent     664e69688d7681be99aaa900ef193d3196e4b7d1 (diff)
Tweak LRA handling of shared spill slots
The previous code processed the users of a stack slot in order of
decreasing size and allocated the slot based on the first user.  This
seems a bit dangerous, since the ordering is based on the mode of the
biggest reference while the allocation is based also on the size of
the register itself (which I think could be larger).  That scheme
doesn't scale well to polynomial sizes, since there's no guarantee
that the order of the sizes is known at compile time.

This patch instead records an upper bound on the size required by all
users of a slot.  It also records the maximum alignment requirement.

gcc/
2016-11-15  Richard Sandiford  <richard.sandiford@arm.com>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

	* function.h (spill_slot_alignment): Declare.
	* function.c (spill_slot_alignment): New function.
	* lra-spills.c (slot): Add align and size fields.
	(assign_mem_slot): Use them in the call to assign_stack_local.
	(add_pseudo_to_slot): Update the fields.
	(assign_stack_slot_num_and_sort_pseudos): Initialise the fields.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r242863
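To illustrate the bookkeeping the patch switches to, here is a minimal
standalone C sketch (the struct and function names are hypothetical, not
GCC internals): each slot only keeps a running maximum of the size and
alignment its users require, so no ordering of the users by size is
needed.

/* Minimal sketch, hypothetical names only: per-slot bookkeeping that
   records the requirements of every pseudo sharing a spill slot.  */
#include <stddef.h>

struct slot_req
{
  size_t size;   /* Upper bound on the bytes any user needs.  */
  size_t align;  /* Strictest alignment (in bits) any user requires.  */
};

/* Fold one user into SLOT.  The slot must be at least as large and at
   least as strictly aligned as each of its users, so keeping a running
   maximum of both quantities is sufficient.  */
void
record_slot_user (struct slot_req *slot, size_t user_size, size_t user_align)
{
  if (user_size > slot->size)
    slot->size = user_size;
  if (user_align > slot->align)
    slot->align = user_align;
}

Once every pseudo has been folded in this way, a single allocation with
the accumulated size and alignment serves all of them, which is what the
new BLKmode call to assign_stack_local in the patch relies on.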
Diffstat (limited to 'gcc/lra-spills.c')
-rw-r--r--  gcc/lra-spills.c | 70
1 file changed, 30 insertions(+), 40 deletions(-)
diff --git a/gcc/lra-spills.c b/gcc/lra-spills.c
index 6e044cd..9f1d5e9 100644
--- a/gcc/lra-spills.c
+++ b/gcc/lra-spills.c
@@ -104,6 +104,10 @@ struct slot
/* Hard reg into which the slot pseudos are spilled. The value is
negative for pseudos spilled into memory. */
int hard_regno;
+ /* Maximum alignment required by all users of the slot. */
+ unsigned int align;
+ /* Maximum size required by all users of the slot. */
+ HOST_WIDE_INT size;
/* Memory representing the all stack slot. It can be different from
memory representing a pseudo belonging to give stack slot because
pseudo can be placed in a part of the corresponding stack slot.
@@ -128,51 +132,23 @@ assign_mem_slot (int i)
{
rtx x = NULL_RTX;
machine_mode mode = GET_MODE (regno_reg_rtx[i]);
- unsigned int inherent_size = PSEUDO_REGNO_BYTES (i);
- unsigned int inherent_align = GET_MODE_ALIGNMENT (mode);
- unsigned int max_ref_width = GET_MODE_SIZE (lra_reg_info[i].biggest_mode);
- unsigned int total_size = MAX (inherent_size, max_ref_width);
- unsigned int min_align = max_ref_width * BITS_PER_UNIT;
- int adjust = 0;
+ HOST_WIDE_INT inherent_size = PSEUDO_REGNO_BYTES (i);
+ machine_mode wider_mode
+ = (GET_MODE_SIZE (mode) >= GET_MODE_SIZE (lra_reg_info[i].biggest_mode)
+ ? mode : lra_reg_info[i].biggest_mode);
+ HOST_WIDE_INT total_size = GET_MODE_SIZE (wider_mode);
+ HOST_WIDE_INT adjust = 0;
lra_assert (regno_reg_rtx[i] != NULL_RTX && REG_P (regno_reg_rtx[i])
&& lra_reg_info[i].nrefs != 0 && reg_renumber[i] < 0);
- x = slots[pseudo_slots[i].slot_num].mem;
-
- /* We can use a slot already allocated because it is guaranteed the
- slot provides both enough inherent space and enough total
- space. */
- if (x)
- ;
- /* Each pseudo has an inherent size which comes from its own mode,
- and a total size which provides room for paradoxical subregs
- which refer to the pseudo reg in wider modes. We allocate a new
- slot, making sure that it has enough inherent space and total
- space. */
- else
+ unsigned int slot_num = pseudo_slots[i].slot_num;
+ x = slots[slot_num].mem;
+ if (!x)
{
- rtx stack_slot;
-
- /* No known place to spill from => no slot to reuse. */
- x = assign_stack_local (mode, total_size,
- min_align > inherent_align
- || total_size > inherent_size ? -1 : 0);
- stack_slot = x;
- /* Cancel the big-endian correction done in assign_stack_local.
- Get the address of the beginning of the slot. This is so we
- can do a big-endian correction unconditionally below. */
- if (BYTES_BIG_ENDIAN)
- {
- adjust = inherent_size - total_size;
- if (adjust)
- stack_slot
- = adjust_address_nv (x,
- mode_for_size (total_size * BITS_PER_UNIT,
- MODE_INT, 1),
- adjust);
- }
- slots[pseudo_slots[i].slot_num].mem = stack_slot;
+ x = assign_stack_local (BLKmode, slots[slot_num].size,
+ slots[slot_num].align);
+ slots[slot_num].mem = x;
}
/* On a big endian machine, the "address" of the slot is the address
@@ -335,6 +311,18 @@ add_pseudo_to_slot (int regno, int slot_num)
{
struct pseudo_slot *first;
+ /* Each pseudo has an inherent size which comes from its own mode,
+ and a total size which provides room for paradoxical subregs.
+ We need to make sure the size and alignment of the slot are
+ sufficient for both. */
+ machine_mode mode = (GET_MODE_SIZE (PSEUDO_REGNO_MODE (regno))
+ >= GET_MODE_SIZE (lra_reg_info[regno].biggest_mode)
+ ? PSEUDO_REGNO_MODE (regno)
+ : lra_reg_info[regno].biggest_mode);
+ unsigned int align = spill_slot_alignment (mode);
+ slots[slot_num].align = MAX (slots[slot_num].align, align);
+ slots[slot_num].size = MAX (slots[slot_num].size, GET_MODE_SIZE (mode));
+
if (slots[slot_num].regno < 0)
{
/* It is the first pseudo in the slot. */
@@ -385,6 +373,8 @@ assign_stack_slot_num_and_sort_pseudos (int *pseudo_regnos, int n)
{
/* New slot. */
slots[j].live_ranges = NULL;
+ slots[j].size = 0;
+ slots[j].align = BITS_PER_UNIT;
slots[j].regno = slots[j].hard_regno = -1;
slots[j].mem = NULL_RTX;
slots_num++;