author     Richard Sandiford <richard.sandiford@linaro.org>  2017-12-21 06:58:16 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>          2017-12-21 06:58:16 +0000
commit     f075bd950c5ad3e2baeb3d8f82fe962efc8e4f7a (patch)
tree       8b895dc76d9b5a9544b28dcb10c3ac925ccea4dd
parent     7ee216163faf80d2a55f1c08abb971b7da34a793 (diff)
poly_int: frame allocations
This patch converts the frame allocation code (mostly in function.c)
to use poly_int64 rather than HOST_WIDE_INT for frame offsets and
sizes.

2017-12-21  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* function.h (frame_space): Change start and length from
	HOST_WIDE_INT to poly_int64.
	(get_frame_size): Return the size as a poly_int64 rather than a
	HOST_WIDE_INT.
	(frame_offset_overflow): Take the offset as a poly_int64 rather
	than a HOST_WIDE_INT.
	(assign_stack_local_1, assign_stack_local, assign_stack_temp_for_type)
	(assign_stack_temp): Likewise for the size.
	* function.c (get_frame_size): Return a poly_int64 rather than
	a HOST_WIDE_INT.
	(frame_offset_overflow): Take the offset as a poly_int64 rather
	than a HOST_WIDE_INT.
	(try_fit_stack_local): Take the start, length and size as poly_int64s
	rather than HOST_WIDE_INTs.  Return the offset as a poly_int64_pod
	rather than a HOST_WIDE_INT.
	(add_frame_space): Take the start and end as poly_int64s rather than
	HOST_WIDE_INTs.
	(assign_stack_local_1, assign_stack_local, assign_stack_temp_for_type)
	(assign_stack_temp): Likewise for the size.
	(temp_slot): Change size, base_offset and full_size from HOST_WIDE_INT
	to poly_int64.
	(find_temp_slot_from_address): Handle polynomial offsets.
	(combine_temp_slots): Likewise.
	* emit-rtl.h (rtl_data::x_frame_offset): Change from HOST_WIDE_INT
	to poly_int64.
	* cfgexpand.c (alloc_stack_frame_space): Return the offset as a
	poly_int64 rather than a HOST_WIDE_INT.
	(expand_one_stack_var_at): Take the offset as a poly_int64 rather
	than a HOST_WIDE_INT.
	(expand_stack_vars, expand_one_stack_var_1, expand_used_vars): Handle
	polynomial frame offsets.
	* config/m32r/m32r-protos.h (m32r_compute_frame_size): Take the size
	as a poly_int64 rather than an int.
	* config/m32r/m32r.c (m32r_compute_frame_size): Likewise.
	* config/v850/v850-protos.h (compute_frame_size): Likewise.
	* config/v850/v850.c (compute_frame_size): Likewise.
	* config/xtensa/xtensa-protos.h (compute_frame_size): Likewise.
	* config/xtensa/xtensa.c (compute_frame_size): Likewise.
	* config/pa/pa-protos.h (pa_compute_frame_size): Likewise.
	* config/pa/pa.c (pa_compute_frame_size): Likewise.
	* explow.h (get_dynamic_stack_base): Take the offset as a poly_int64
	rather than a HOST_WIDE_INT.
	* explow.c (get_dynamic_stack_base): Likewise.
	* final.c (final_start_function): Use the constant lower bound
	of the frame size for -Wframe-larger-than.
	* ira.c (do_reload): Adjust for new get_frame_size return type.
	* lra.c (lra): Likewise.
	* reload1.c (reload): Likewise.
	* config/avr/avr.c (avr_asm_function_end_prologue): Likewise.
	* config/pa/pa.h (EXIT_IGNORE_STACK): Likewise.
	* rtlanal.c (get_initial_register_offset): Return the offset as
	a poly_int64 rather than a HOST_WIDE_INT.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r255917
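Background note: the patch replaces direct ==/!=/< comparisons on frame offsets with GCC's poly_int predicates (known_eq, maybe_ne, maybe_lt, known_gt and friends) because a polynomial offset such as 16 + 8*N is neither always equal to nor always ordered with respect to a constant. The snippet below is only a minimal standalone sketch of that idea in plain C++; the PolyOffset type and its helpers are invented for illustration and are not GCC's poly_int implementation.

#include <cstdint>
#include <iostream>

// Toy stand-in for poly_int64: value = constant + coeff * N, where N is
// a runtime parameter known only to be a non-negative integer.
struct PolyOffset
{
  int64_t constant;
  int64_t coeff;
};

// known_eq: the two values are equal for every possible N.
static bool known_eq (PolyOffset a, PolyOffset b)
{
  return a.constant == b.constant && a.coeff == b.coeff;
}

// maybe_ne: the two values differ for at least one N.
static bool maybe_ne (PolyOffset a, PolyOffset b)
{
  return !known_eq (a, b);
}

// maybe_lt: a < b for at least one N >= 0.
static bool maybe_lt (PolyOffset a, PolyOffset b)
{
  return a.constant < b.constant || a.coeff < b.coeff;
}

int main ()
{
  PolyOffset frame = { 16, 8 };  // 16 + 8N bytes (scalable part present)
  PolyOffset fixed = { 16, 0 };  // plain 16 bytes

  std::cout << known_eq (frame, fixed) << '\n';  // 0: they differ once N > 0
  std::cout << maybe_ne (frame, fixed) << '\n';  // 1
  std::cout << maybe_lt (fixed, frame) << '\n';  // 1: 16 < 16 + 8N when N > 0
  return 0;
}

In the patch itself, "known" predicates must hold for every runtime value of the polynomial parameter and "maybe" predicates for at least one, which is why tests such as get_frame_size () != 0 become maybe_ne (get_frame_size (), 0).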
-rw-r--r--  gcc/ChangeLog                      |  57
-rw-r--r--  gcc/cfgexpand.c                    |  64
-rw-r--r--  gcc/config/avr/avr.c               |   2
-rw-r--r--  gcc/config/m32r/m32r-protos.h      |   2
-rw-r--r--  gcc/config/m32r/m32r.c             |   2
-rw-r--r--  gcc/config/pa/pa-protos.h          |   2
-rw-r--r--  gcc/config/pa/pa.c                 |   2
-rw-r--r--  gcc/config/pa/pa.h                 |   2
-rw-r--r--  gcc/config/v850/v850-protos.h      |   2
-rw-r--r--  gcc/config/v850/v850.c             |   2
-rw-r--r--  gcc/config/xtensa/xtensa-protos.h  |   2
-rw-r--r--  gcc/config/xtensa/xtensa.c         |   2
-rw-r--r--  gcc/emit-rtl.h                     |   2
-rw-r--r--  gcc/explow.c                       |   2
-rw-r--r--  gcc/explow.h                       |   3
-rw-r--r--  gcc/final.c                        |  11
-rw-r--r--  gcc/function.c                     | 155
-rw-r--r--  gcc/function.h                     |  16
-rw-r--r--  gcc/ira.c                          |   4
-rw-r--r--  gcc/lra.c                          |   2
-rw-r--r--  gcc/reload1.c                      |  14
-rw-r--r--  gcc/rtlanal.c                      |   4
22 files changed, 216 insertions, 138 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 2d8d866..7039a3d 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -2,6 +2,63 @@
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
+ * function.h (frame_space): Change start and length from HOST_WIDE_INT
+ to poly_int64.
+ (get_frame_size): Return the size as a poly_int64 rather than a
+ HOST_WIDE_INT.
+ (frame_offset_overflow): Take the offset as a poly_int64 rather
+ than a HOST_WIDE_INT.
+ (assign_stack_local_1, assign_stack_local, assign_stack_temp_for_type)
+ (assign_stack_temp): Likewise for the size.
+ * function.c (get_frame_size): Return a poly_int64 rather than
+ a HOST_WIDE_INT.
+ (frame_offset_overflow): Take the offset as a poly_int64 rather
+ than a HOST_WIDE_INT.
+ (try_fit_stack_local): Take the start, length and size as poly_int64s
+ rather than HOST_WIDE_INTs. Return the offset as a poly_int64_pod
+ rather than a HOST_WIDE_INT.
+ (add_frame_space): Take the start and end as poly_int64s rather than
+ HOST_WIDE_INTs.
+ (assign_stack_local_1, assign_stack_local, assign_stack_temp_for_type)
+ (assign_stack_temp): Likewise for the size.
+ (temp_slot): Change size, base_offset and full_size from HOST_WIDE_INT
+ to poly_int64.
+ (find_temp_slot_from_address): Handle polynomial offsets.
+ (combine_temp_slots): Likewise.
+ * emit-rtl.h (rtl_data::x_frame_offset): Change from HOST_WIDE_INT
+ to poly_int64.
+ * cfgexpand.c (alloc_stack_frame_space): Return the offset as a
+ poly_int64 rather than a HOST_WIDE_INT.
+ (expand_one_stack_var_at): Take the offset as a poly_int64 rather
+ than a HOST_WIDE_INT.
+ (expand_stack_vars, expand_one_stack_var_1, expand_used_vars): Handle
+ polynomial frame offsets.
+ * config/m32r/m32r-protos.h (m32r_compute_frame_size): Take the size
+ as a poly_int64 rather than an int.
+ * config/m32r/m32r.c (m32r_compute_frame_size): Likewise.
+ * config/v850/v850-protos.h (compute_frame_size): Likewise.
+ * config/v850/v850.c (compute_frame_size): Likewise.
+ * config/xtensa/xtensa-protos.h (compute_frame_size): Likewise.
+ * config/xtensa/xtensa.c (compute_frame_size): Likewise.
+ * config/pa/pa-protos.h (pa_compute_frame_size): Likewise.
+ * config/pa/pa.c (pa_compute_frame_size): Likewise.
+ * explow.h (get_dynamic_stack_base): Take the offset as a poly_int64
+ rather than a HOST_WIDE_INT.
+ * explow.c (get_dynamic_stack_base): Likewise.
+ * final.c (final_start_function): Use the constant lower bound
+ of the frame size for -Wframe-larger-than.
+ * ira.c (do_reload): Adjust for new get_frame_size return type.
+ * lra.c (lra): Likewise.
+ * reload1.c (reload): Likewise.
+ * config/avr/avr.c (avr_asm_function_end_prologue): Likewise.
+ * config/pa/pa.h (EXIT_IGNORE_STACK): Likewise.
+ * rtlanal.c (get_initial_register_offset): Return the offset as
+ a poly_int64 rather than a HOST_WIDE_INT.
+
+2017-12-21 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
* reload1.c (elim_table): Change initial_offset, offset and
previous_offset from HOST_WIDE_INT to poly_int64_pod.
(offsets_at): Change the target array's element type from
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index e616ec1..32c48ac 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -389,22 +389,23 @@ align_base (HOST_WIDE_INT base, unsigned HOST_WIDE_INT align, bool align_up)
/* Allocate SIZE bytes at byte alignment ALIGN from the stack frame.
Return the frame offset. */
-static HOST_WIDE_INT
+static poly_int64
alloc_stack_frame_space (HOST_WIDE_INT size, unsigned HOST_WIDE_INT align)
{
- HOST_WIDE_INT offset, new_frame_offset;
+ poly_int64 offset, new_frame_offset;
if (FRAME_GROWS_DOWNWARD)
{
new_frame_offset
- = align_base (frame_offset - frame_phase - size,
- align, false) + frame_phase;
+ = aligned_lower_bound (frame_offset - frame_phase - size,
+ align) + frame_phase;
offset = new_frame_offset;
}
else
{
new_frame_offset
- = align_base (frame_offset - frame_phase, align, true) + frame_phase;
+ = aligned_upper_bound (frame_offset - frame_phase,
+ align) + frame_phase;
offset = new_frame_offset;
new_frame_offset += size;
}
@@ -980,13 +981,13 @@ dump_stack_var_partition (void)
static void
expand_one_stack_var_at (tree decl, rtx base, unsigned base_align,
- HOST_WIDE_INT offset)
+ poly_int64 offset)
{
unsigned align;
rtx x;
/* If this fails, we've overflowed the stack frame. Error nicely? */
- gcc_assert (offset == trunc_int_for_mode (offset, Pmode));
+ gcc_assert (known_eq (offset, trunc_int_for_mode (offset, Pmode)));
x = plus_constant (Pmode, base, offset);
x = gen_rtx_MEM (TREE_CODE (decl) == SSA_NAME
@@ -1000,7 +1001,7 @@ expand_one_stack_var_at (tree decl, rtx base, unsigned base_align,
important, we'll simply use the alignment that is already set. */
if (base == virtual_stack_vars_rtx)
offset -= frame_phase;
- align = least_bit_hwi (offset);
+ align = known_alignment (offset);
align *= BITS_PER_UNIT;
if (align == 0 || align > base_align)
align = base_align;
@@ -1094,7 +1095,7 @@ expand_stack_vars (bool (*pred) (size_t), struct stack_vars_data *data)
{
rtx base;
unsigned base_align, alignb;
- HOST_WIDE_INT offset;
+ poly_int64 offset;
i = stack_vars_sorted[si];
@@ -1119,13 +1120,16 @@ expand_stack_vars (bool (*pred) (size_t), struct stack_vars_data *data)
if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
{
base = virtual_stack_vars_rtx;
- if ((asan_sanitize_stack_p ())
- && pred)
+ /* ASAN description strings don't yet have a syntax for expressing
+ polynomial offsets. */
+ HOST_WIDE_INT prev_offset;
+ if (asan_sanitize_stack_p ()
+ && pred
+ && frame_offset.is_constant (&prev_offset))
{
- HOST_WIDE_INT prev_offset
- = align_base (frame_offset,
- MAX (alignb, ASAN_RED_ZONE_SIZE),
- !FRAME_GROWS_DOWNWARD);
+ prev_offset = align_base (prev_offset,
+ MAX (alignb, ASAN_RED_ZONE_SIZE),
+ !FRAME_GROWS_DOWNWARD);
tree repr_decl = NULL_TREE;
offset
= alloc_stack_frame_space (stack_vars[i].size
@@ -1133,7 +1137,10 @@ expand_stack_vars (bool (*pred) (size_t), struct stack_vars_data *data)
MAX (alignb, ASAN_RED_ZONE_SIZE));
data->asan_vec.safe_push (prev_offset);
- data->asan_vec.safe_push (offset + stack_vars[i].size);
+ /* Allocating a constant amount of space from a constant
+ starting offset must give a constant result. */
+ data->asan_vec.safe_push ((offset + stack_vars[i].size)
+ .to_constant ());
/* Find best representative of the partition.
Prefer those with DECL_NAME, even better
satisfying asan_protect_stack_decl predicate. */
@@ -1179,7 +1186,7 @@ expand_stack_vars (bool (*pred) (size_t), struct stack_vars_data *data)
space. */
if (large_size > 0 && ! large_allocation_done)
{
- HOST_WIDE_INT loffset;
+ poly_int64 loffset;
rtx large_allocsize;
large_allocsize = GEN_INT (large_size);
@@ -1282,7 +1289,8 @@ set_parm_rtl (tree parm, rtx x)
static void
expand_one_stack_var_1 (tree var)
{
- HOST_WIDE_INT size, offset;
+ HOST_WIDE_INT size;
+ poly_int64 offset;
unsigned byte_align;
if (TREE_CODE (var) == SSA_NAME)
@@ -2218,9 +2226,12 @@ expand_used_vars (void)
in addition to phase 1 and 2. */
expand_stack_vars (asan_decl_phase_3, &data);
- if (!data.asan_vec.is_empty ())
+ /* ASAN description strings don't yet have a syntax for expressing
+ polynomial offsets. */
+ HOST_WIDE_INT prev_offset;
+ if (!data.asan_vec.is_empty ()
+ && frame_offset.is_constant (&prev_offset))
{
- HOST_WIDE_INT prev_offset = frame_offset;
HOST_WIDE_INT offset, sz, redzonesz;
redzonesz = ASAN_RED_ZONE_SIZE;
sz = data.asan_vec[0] - prev_offset;
@@ -2229,8 +2240,10 @@ expand_used_vars (void)
&& sz + ASAN_RED_ZONE_SIZE >= (int) data.asan_alignb)
redzonesz = ((sz + ASAN_RED_ZONE_SIZE + data.asan_alignb - 1)
& ~(data.asan_alignb - HOST_WIDE_INT_1)) - sz;
- offset
- = alloc_stack_frame_space (redzonesz, ASAN_RED_ZONE_SIZE);
+ /* Allocating a constant amount of space from a constant
+ starting offset must give a constant result. */
+ offset = (alloc_stack_frame_space (redzonesz, ASAN_RED_ZONE_SIZE)
+ .to_constant ());
data.asan_vec.safe_push (prev_offset);
data.asan_vec.safe_push (offset);
/* Leave space for alignment if STRICT_ALIGNMENT. */
@@ -2275,9 +2288,10 @@ expand_used_vars (void)
if (STACK_ALIGNMENT_NEEDED)
{
HOST_WIDE_INT align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
- if (!FRAME_GROWS_DOWNWARD)
- frame_offset += align - 1;
- frame_offset &= -align;
+ if (FRAME_GROWS_DOWNWARD)
+ frame_offset = aligned_lower_bound (frame_offset, align);
+ else
+ frame_offset = aligned_upper_bound (frame_offset, align);
}
return var_end_seq;
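The cfgexpand.c hunks above also show why the old bit-mask alignment idioms (align_base, frame_offset &= -align, and function.c's FLOOR_ROUND/CEIL_ROUND) give way to aligned_lower_bound and aligned_upper_bound: masking is only meaningful on a single integer, whereas the poly_int helpers round each coefficient of the offset. As a rough sketch of the scalar rounding involved, assuming a power-of-two alignment (the helper names below are illustrative stand-ins, not GCC's functions):

#include <cassert>
#include <cstdint>

// Scalar illustrations of the rounding behaviour; GCC's
// aligned_lower_bound/aligned_upper_bound apply the same rounding
// to each poly_int64 coefficient.  ALIGN is assumed to be a power of two.
static int64_t round_down_aligned (int64_t x, int64_t align)
{
  return x & -align;                // round towards -infinity
}

static int64_t round_up_aligned (int64_t x, int64_t align)
{
  return (x + align - 1) & -align;  // round towards +infinity
}

int main ()
{
  // FRAME_GROWS_DOWNWARD: frame offsets become more negative, so round down.
  assert (round_down_aligned (-44, 16) == -48);
  // !FRAME_GROWS_DOWNWARD: frame offsets grow upwards, so round up.
  assert (round_up_aligned (44, 16) == 48);
  return 0;
}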
diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
index ff6672f..e2db38d 100644
--- a/gcc/config/avr/avr.c
+++ b/gcc/config/avr/avr.c
@@ -2044,7 +2044,7 @@ avr_asm_function_end_prologue (FILE *file)
avr_outgoing_args_size());
fprintf (file, "/* frame size = " HOST_WIDE_INT_PRINT_DEC " */\n",
- get_frame_size());
+ (HOST_WIDE_INT) get_frame_size());
if (!cfun->machine->gasisr.yes)
{
diff --git a/gcc/config/m32r/m32r-protos.h b/gcc/config/m32r/m32r-protos.h
index 30dbc1d..83192da 100644
--- a/gcc/config/m32r/m32r-protos.h
+++ b/gcc/config/m32r/m32r-protos.h
@@ -22,7 +22,7 @@
extern void m32r_init (void);
extern void m32r_init_expanders (void);
-extern unsigned m32r_compute_frame_size (int);
+extern unsigned m32r_compute_frame_size (poly_int64);
extern void m32r_expand_prologue (void);
extern void m32r_expand_epilogue (void);
extern int direct_return (void);
diff --git a/gcc/config/m32r/m32r.c b/gcc/config/m32r/m32r.c
index dbc1bae..7f98dd5 100644
--- a/gcc/config/m32r/m32r.c
+++ b/gcc/config/m32r/m32r.c
@@ -1555,7 +1555,7 @@ static struct m32r_frame_info zero_frame_info;
SIZE is the size needed for local variables. */
unsigned int
-m32r_compute_frame_size (int size) /* # of var. bytes allocated. */
+m32r_compute_frame_size (poly_int64 size) /* # of var. bytes allocated. */
{
unsigned int regno;
unsigned int total_size, var_size, args_size, pretend_size, extra_size;
diff --git a/gcc/config/pa/pa-protos.h b/gcc/config/pa/pa-protos.h
index cbf25af..7c38c60 100644
--- a/gcc/config/pa/pa-protos.h
+++ b/gcc/config/pa/pa-protos.h
@@ -85,7 +85,7 @@ extern int pa_shadd_constant_p (int);
extern int pa_zdepi_cint_p (unsigned HOST_WIDE_INT);
extern void pa_output_ascii (FILE *, const char *, int);
-extern HOST_WIDE_INT pa_compute_frame_size (HOST_WIDE_INT, int *);
+extern HOST_WIDE_INT pa_compute_frame_size (poly_int64, int *);
extern void pa_expand_prologue (void);
extern void pa_expand_epilogue (void);
extern bool pa_can_use_return_insn (void);
diff --git a/gcc/config/pa/pa.c b/gcc/config/pa/pa.c
index 415d23e..15f22fc 100644
--- a/gcc/config/pa/pa.c
+++ b/gcc/config/pa/pa.c
@@ -3771,7 +3771,7 @@ set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
}
HOST_WIDE_INT
-pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
+pa_compute_frame_size (poly_int64 size, int *fregs_live)
{
int freg_saved = 0;
int i, j;
diff --git a/gcc/config/pa/pa.h b/gcc/config/pa/pa.h
index 7bd1f11..044f65e 100644
--- a/gcc/config/pa/pa.h
+++ b/gcc/config/pa/pa.h
@@ -691,7 +691,7 @@ void hppa_profile_hook (int label_no);
extern int may_call_alloca;
#define EXIT_IGNORE_STACK \
- (get_frame_size () != 0 \
+ (maybe_ne (get_frame_size (), 0) \
|| cfun->calls_alloca || crtl->outgoing_args_size)
/* Length in units of the trampoline for entering a nested function. */
diff --git a/gcc/config/v850/v850-protos.h b/gcc/config/v850/v850-protos.h
index 7cd4f8b..b59ca77 100644
--- a/gcc/config/v850/v850-protos.h
+++ b/gcc/config/v850/v850-protos.h
@@ -26,7 +26,7 @@ extern void expand_prologue (void);
extern void expand_epilogue (void);
extern int v850_handle_pragma (int (*)(void), void (*)(int), char *);
extern int compute_register_save_size (long *);
-extern int compute_frame_size (int, long *);
+extern int compute_frame_size (poly_int64, long *);
extern void v850_init_expanders (void);
#ifdef RTX_CODE
diff --git a/gcc/config/v850/v850.c b/gcc/config/v850/v850.c
index ab096ea..4f602f4 100644
--- a/gcc/config/v850/v850.c
+++ b/gcc/config/v850/v850.c
@@ -1574,7 +1574,7 @@ compute_register_save_size (long * p_reg_saved)
-------------------------- ---- ------------------ V */
int
-compute_frame_size (int size, long * p_reg_saved)
+compute_frame_size (poly_int64 size, long * p_reg_saved)
{
return (size
+ compute_register_save_size (p_reg_saved)
diff --git a/gcc/config/xtensa/xtensa-protos.h b/gcc/config/xtensa/xtensa-protos.h
index dac5657..3764340 100644
--- a/gcc/config/xtensa/xtensa-protos.h
+++ b/gcc/config/xtensa/xtensa-protos.h
@@ -67,7 +67,7 @@ extern rtx xtensa_return_addr (int, rtx);
extern void xtensa_setup_frame_addresses (void);
extern int xtensa_dbx_register_number (int);
-extern long compute_frame_size (int);
+extern long compute_frame_size (poly_int64);
extern bool xtensa_use_return_instruction_p (void);
extern void xtensa_expand_prologue (void);
extern void xtensa_expand_epilogue (void);
diff --git a/gcc/config/xtensa/xtensa.c b/gcc/config/xtensa/xtensa.c
index 665fe95..18b349c 100644
--- a/gcc/config/xtensa/xtensa.c
+++ b/gcc/config/xtensa/xtensa.c
@@ -2698,7 +2698,7 @@ xtensa_call_save_reg(int regno)
#define XTENSA_STACK_ALIGN(LOC) (((LOC) + STACK_BYTES-1) & ~(STACK_BYTES-1))
long
-compute_frame_size (int size)
+compute_frame_size (poly_int64 size)
{
int regno;
diff --git a/gcc/emit-rtl.h b/gcc/emit-rtl.h
index 3e01921..c46ebb4 100644
--- a/gcc/emit-rtl.h
+++ b/gcc/emit-rtl.h
@@ -126,7 +126,7 @@ struct GTY(()) rtl_data {
/* Offset to end of allocated area of stack frame.
If stack grows down, this is the address of the last stack slot allocated.
If stack grows up, this is the address for the next slot. */
- HOST_WIDE_INT x_frame_offset;
+ poly_int64_pod x_frame_offset;
/* Insn after which register parms and SAVE_EXPRs are born, if nonopt. */
rtx_insn *x_parm_birth_insn;
diff --git a/gcc/explow.c b/gcc/explow.c
index 1629595..4f3e0d4 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -1587,7 +1587,7 @@ allocate_dynamic_stack_space (rtx size, unsigned size_align,
of memory. */
rtx
-get_dynamic_stack_base (HOST_WIDE_INT offset, unsigned required_align)
+get_dynamic_stack_base (poly_int64 offset, unsigned required_align)
{
rtx target;
diff --git a/gcc/explow.h b/gcc/explow.h
index 8eca20b..e981a65 100644
--- a/gcc/explow.h
+++ b/gcc/explow.h
@@ -102,8 +102,7 @@ extern rtx allocate_dynamic_stack_space (rtx, unsigned, unsigned,
extern void get_dynamic_stack_size (rtx *, unsigned, unsigned, HOST_WIDE_INT *);
/* Returns the address of the dynamic stack space without allocating it. */
-extern rtx get_dynamic_stack_base (HOST_WIDE_INT offset,
- unsigned required_align);
+extern rtx get_dynamic_stack_base (poly_int64, unsigned);
/* Emit one stack probe at ADDRESS, an address within the stack. */
extern void emit_stack_probe (rtx);
diff --git a/gcc/final.c b/gcc/final.c
index 29a5c1e..1969ccb 100644
--- a/gcc/final.c
+++ b/gcc/final.c
@@ -1846,14 +1846,15 @@ final_start_function (rtx_insn *first, FILE *file,
TREE_ASM_WRITTEN (DECL_INITIAL (current_function_decl)) = 1;
}
+ HOST_WIDE_INT min_frame_size = constant_lower_bound (get_frame_size ());
if (warn_frame_larger_than
- && get_frame_size () > frame_larger_than_size)
- {
+ && min_frame_size > frame_larger_than_size)
+ {
/* Issue a warning */
warning (OPT_Wframe_larger_than_,
- "the frame size of %wd bytes is larger than %wd bytes",
- get_frame_size (), frame_larger_than_size);
- }
+ "the frame size of %wd bytes is larger than %wd bytes",
+ min_frame_size, frame_larger_than_size);
+ }
/* First output the function prologue: code to set up the stack frame. */
targetm.asm_out.function_prologue (file);
diff --git a/gcc/function.c b/gcc/function.c
index 3d27015..743d421 100644
--- a/gcc/function.c
+++ b/gcc/function.c
@@ -218,7 +218,7 @@ free_after_compilation (struct function *f)
This size counts from zero. It is not rounded to PREFERRED_STACK_BOUNDARY;
the caller may have to do that. */
-HOST_WIDE_INT
+poly_int64
get_frame_size (void)
{
if (FRAME_GROWS_DOWNWARD)
@@ -232,20 +232,22 @@ get_frame_size (void)
return FALSE. */
bool
-frame_offset_overflow (HOST_WIDE_INT offset, tree func)
+frame_offset_overflow (poly_int64 offset, tree func)
{
- unsigned HOST_WIDE_INT size = FRAME_GROWS_DOWNWARD ? -offset : offset;
+ poly_uint64 size = FRAME_GROWS_DOWNWARD ? -offset : offset;
+ unsigned HOST_WIDE_INT limit
+ = ((HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (Pmode) - 1))
+ /* Leave room for the fixed part of the frame. */
+ - 64 * UNITS_PER_WORD);
- if (size > (HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (Pmode) - 1))
- /* Leave room for the fixed part of the frame. */
- - 64 * UNITS_PER_WORD)
+ if (!coeffs_in_range_p (size, 0U, limit))
{
error_at (DECL_SOURCE_LOCATION (func),
"total size of local objects too large");
- return TRUE;
+ return true;
}
- return FALSE;
+ return false;
}
/* Return the minimum spill slot alignment for a register of mode MODE. */
@@ -284,11 +286,11 @@ get_stack_local_alignment (tree type, machine_mode mode)
given a start/length pair that lies at the end of the frame. */
static bool
-try_fit_stack_local (HOST_WIDE_INT start, HOST_WIDE_INT length,
- HOST_WIDE_INT size, unsigned int alignment,
- HOST_WIDE_INT *poffset)
+try_fit_stack_local (poly_int64 start, poly_int64 length,
+ poly_int64 size, unsigned int alignment,
+ poly_int64_pod *poffset)
{
- HOST_WIDE_INT this_frame_offset;
+ poly_int64 this_frame_offset;
int frame_off, frame_alignment, frame_phase;
/* Calculate how many bytes the start of local variables is off from
@@ -299,33 +301,31 @@ try_fit_stack_local (HOST_WIDE_INT start, HOST_WIDE_INT length,
/* Round the frame offset to the specified alignment. */
- /* We must be careful here, since FRAME_OFFSET might be negative and
- division with a negative dividend isn't as well defined as we might
- like. So we instead assume that ALIGNMENT is a power of two and
- use logical operations which are unambiguous. */
if (FRAME_GROWS_DOWNWARD)
this_frame_offset
- = (FLOOR_ROUND (start + length - size - frame_phase,
- (unsigned HOST_WIDE_INT) alignment)
+ = (aligned_lower_bound (start + length - size - frame_phase, alignment)
+ frame_phase);
else
this_frame_offset
- = (CEIL_ROUND (start - frame_phase,
- (unsigned HOST_WIDE_INT) alignment)
- + frame_phase);
+ = aligned_upper_bound (start - frame_phase, alignment) + frame_phase;
/* See if it fits. If this space is at the edge of the frame,
consider extending the frame to make it fit. Our caller relies on
this when allocating a new slot. */
- if (frame_offset == start && this_frame_offset < frame_offset)
- frame_offset = this_frame_offset;
- else if (this_frame_offset < start)
- return false;
- else if (start + length == frame_offset
- && this_frame_offset + size > start + length)
- frame_offset = this_frame_offset + size;
- else if (this_frame_offset + size > start + length)
- return false;
+ if (maybe_lt (this_frame_offset, start))
+ {
+ if (known_eq (frame_offset, start))
+ frame_offset = this_frame_offset;
+ else
+ return false;
+ }
+ else if (maybe_gt (this_frame_offset + size, start + length))
+ {
+ if (known_eq (frame_offset, start + length))
+ frame_offset = this_frame_offset + size;
+ else
+ return false;
+ }
*poffset = this_frame_offset;
return true;
@@ -336,7 +336,7 @@ try_fit_stack_local (HOST_WIDE_INT start, HOST_WIDE_INT length,
function's frame_space_list. */
static void
-add_frame_space (HOST_WIDE_INT start, HOST_WIDE_INT end)
+add_frame_space (poly_int64 start, poly_int64 end)
{
struct frame_space *space = ggc_alloc<frame_space> ();
space->next = crtl->frame_space_list;
@@ -363,12 +363,12 @@ add_frame_space (HOST_WIDE_INT start, HOST_WIDE_INT end)
We do not round to stack_boundary here. */
rtx
-assign_stack_local_1 (machine_mode mode, HOST_WIDE_INT size,
+assign_stack_local_1 (machine_mode mode, poly_int64 size,
int align, int kind)
{
rtx x, addr;
- int bigend_correction = 0;
- HOST_WIDE_INT slot_offset = 0, old_frame_offset;
+ poly_int64 bigend_correction = 0;
+ poly_int64 slot_offset = 0, old_frame_offset;
unsigned int alignment, alignment_in_bits;
if (align == 0)
@@ -379,7 +379,7 @@ assign_stack_local_1 (machine_mode mode, HOST_WIDE_INT size,
else if (align == -1)
{
alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
- size = CEIL_ROUND (size, alignment);
+ size = aligned_upper_bound (size, alignment);
}
else if (align == -2)
alignment = 1; /* BITS_PER_UNIT / BITS_PER_UNIT */
@@ -415,7 +415,7 @@ assign_stack_local_1 (machine_mode mode, HOST_WIDE_INT size,
requested size is 0 or the estimated stack
alignment >= mode alignment. */
gcc_assert ((kind & ASLK_REDUCE_ALIGN)
- || size == 0
+ || known_eq (size, 0)
|| (crtl->stack_alignment_estimated
>= GET_MODE_ALIGNMENT (mode)));
alignment_in_bits = crtl->stack_alignment_estimated;
@@ -430,7 +430,7 @@ assign_stack_local_1 (machine_mode mode, HOST_WIDE_INT size,
if (crtl->max_used_stack_slot_alignment < alignment_in_bits)
crtl->max_used_stack_slot_alignment = alignment_in_bits;
- if (mode != BLKmode || size != 0)
+ if (mode != BLKmode || maybe_ne (size, 0))
{
if (kind & ASLK_RECORD_PAD)
{
@@ -443,9 +443,9 @@ assign_stack_local_1 (machine_mode mode, HOST_WIDE_INT size,
alignment, &slot_offset))
continue;
*psp = space->next;
- if (slot_offset > space->start)
+ if (known_gt (slot_offset, space->start))
add_frame_space (space->start, slot_offset);
- if (slot_offset + size < space->start + space->length)
+ if (known_lt (slot_offset + size, space->start + space->length))
add_frame_space (slot_offset + size,
space->start + space->length);
goto found_space;
@@ -467,9 +467,9 @@ assign_stack_local_1 (machine_mode mode, HOST_WIDE_INT size,
if (kind & ASLK_RECORD_PAD)
{
- if (slot_offset > frame_offset)
+ if (known_gt (slot_offset, frame_offset))
add_frame_space (frame_offset, slot_offset);
- if (slot_offset + size < old_frame_offset)
+ if (known_lt (slot_offset + size, old_frame_offset))
add_frame_space (slot_offset + size, old_frame_offset);
}
}
@@ -480,9 +480,9 @@ assign_stack_local_1 (machine_mode mode, HOST_WIDE_INT size,
if (kind & ASLK_RECORD_PAD)
{
- if (slot_offset > old_frame_offset)
+ if (known_gt (slot_offset, old_frame_offset))
add_frame_space (old_frame_offset, slot_offset);
- if (slot_offset + size < frame_offset)
+ if (known_lt (slot_offset + size, frame_offset))
add_frame_space (slot_offset + size, frame_offset);
}
}
@@ -490,8 +490,17 @@ assign_stack_local_1 (machine_mode mode, HOST_WIDE_INT size,
found_space:
/* On a big-endian machine, if we are allocating more space than we will use,
use the least significant bytes of those that are allocated. */
- if (BYTES_BIG_ENDIAN && mode != BLKmode && GET_MODE_SIZE (mode) < size)
- bigend_correction = size - GET_MODE_SIZE (mode);
+ if (mode != BLKmode)
+ {
+ /* The slot size can sometimes be smaller than the mode size;
+ e.g. the rs6000 port allocates slots with a vector mode
+ that have the size of only one element. However, the slot
+ size must always be ordered wrt to the mode size, in the
+ same way as for a subreg. */
+ gcc_checking_assert (ordered_p (GET_MODE_SIZE (mode), size));
+ if (BYTES_BIG_ENDIAN && maybe_lt (GET_MODE_SIZE (mode), size))
+ bigend_correction = size - GET_MODE_SIZE (mode);
+ }
/* If we have already instantiated virtual registers, return the actual
address relative to the frame pointer. */
@@ -521,7 +530,7 @@ assign_stack_local_1 (machine_mode mode, HOST_WIDE_INT size,
/* Wrap up assign_stack_local_1 with last parameter as false. */
rtx
-assign_stack_local (machine_mode mode, HOST_WIDE_INT size, int align)
+assign_stack_local (machine_mode mode, poly_int64 size, int align)
{
return assign_stack_local_1 (mode, size, align, ASLK_RECORD_PAD);
}
@@ -548,7 +557,7 @@ struct GTY(()) temp_slot {
/* The rtx to used to reference the slot. */
rtx slot;
/* The size, in units, of the slot. */
- HOST_WIDE_INT size;
+ poly_int64 size;
/* The type of the object in the slot, or zero if it doesn't correspond
to a type. We use this to determine whether a slot can be reused.
It can be reused if objects of the type of the new slot will always
@@ -562,10 +571,10 @@ struct GTY(()) temp_slot {
int level;
/* The offset of the slot from the frame_pointer, including extra space
for alignment. This info is for combine_temp_slots. */
- HOST_WIDE_INT base_offset;
+ poly_int64 base_offset;
/* The size of the slot, including extra space for alignment. This
info is for combine_temp_slots. */
- HOST_WIDE_INT full_size;
+ poly_int64 full_size;
};
/* Entry for the below hash table. */
@@ -743,18 +752,14 @@ find_temp_slot_from_address (rtx x)
return p;
/* Last resort: Address is a virtual stack var address. */
- if (GET_CODE (x) == PLUS
- && XEXP (x, 0) == virtual_stack_vars_rtx
- && CONST_INT_P (XEXP (x, 1)))
+ poly_int64 offset;
+ if (strip_offset (x, &offset) == virtual_stack_vars_rtx)
{
int i;
for (i = max_slot_level (); i >= 0; i--)
for (p = *temp_slots_at_level (i); p; p = p->next)
- {
- if (INTVAL (XEXP (x, 1)) >= p->base_offset
- && INTVAL (XEXP (x, 1)) < p->base_offset + p->full_size)
- return p;
- }
+ if (known_in_range_p (offset, p->base_offset, p->full_size))
+ return p;
}
return NULL;
@@ -771,16 +776,13 @@ find_temp_slot_from_address (rtx x)
TYPE is the type that will be used for the stack slot. */
rtx
-assign_stack_temp_for_type (machine_mode mode, HOST_WIDE_INT size,
- tree type)
+assign_stack_temp_for_type (machine_mode mode, poly_int64 size, tree type)
{
unsigned int align;
struct temp_slot *p, *best_p = 0, *selected = NULL, **pp;
rtx slot;
- /* If SIZE is -1 it means that somebody tried to allocate a temporary
- of a variable size. */
- gcc_assert (size != -1);
+ gcc_assert (known_size_p (size));
align = get_stack_local_alignment (type, mode);
@@ -795,13 +797,16 @@ assign_stack_temp_for_type (machine_mode mode, HOST_WIDE_INT size,
{
for (p = avail_temp_slots; p; p = p->next)
{
- if (p->align >= align && p->size >= size
+ if (p->align >= align
+ && known_ge (p->size, size)
&& GET_MODE (p->slot) == mode
&& objects_must_conflict_p (p->type, type)
- && (best_p == 0 || best_p->size > p->size
- || (best_p->size == p->size && best_p->align > p->align)))
+ && (best_p == 0
+ || (known_eq (best_p->size, p->size)
+ ? best_p->align > p->align
+ : known_ge (best_p->size, p->size))))
{
- if (p->align == align && p->size == size)
+ if (p->align == align && known_eq (p->size, size))
{
selected = p;
cut_slot_from_list (selected, &avail_temp_slots);
@@ -825,9 +830,9 @@ assign_stack_temp_for_type (machine_mode mode, HOST_WIDE_INT size,
if (GET_MODE (best_p->slot) == BLKmode)
{
int alignment = best_p->align / BITS_PER_UNIT;
- HOST_WIDE_INT rounded_size = CEIL_ROUND (size, alignment);
+ poly_int64 rounded_size = aligned_upper_bound (size, alignment);
- if (best_p->size - rounded_size >= alignment)
+ if (known_ge (best_p->size - rounded_size, alignment))
{
p = ggc_alloc<temp_slot> ();
p->in_use = 0;
@@ -850,7 +855,7 @@ assign_stack_temp_for_type (machine_mode mode, HOST_WIDE_INT size,
/* If we still didn't find one, make a new temporary. */
if (selected == 0)
{
- HOST_WIDE_INT frame_offset_old = frame_offset;
+ poly_int64 frame_offset_old = frame_offset;
p = ggc_alloc<temp_slot> ();
@@ -864,9 +869,9 @@ assign_stack_temp_for_type (machine_mode mode, HOST_WIDE_INT size,
gcc_assert (mode != BLKmode || align == BIGGEST_ALIGNMENT);
p->slot = assign_stack_local_1 (mode,
(mode == BLKmode
- ? CEIL_ROUND (size,
- (int) align
- / BITS_PER_UNIT)
+ ? aligned_upper_bound (size,
+ (int) align
+ / BITS_PER_UNIT)
: size),
align, 0);
@@ -931,7 +936,7 @@ assign_stack_temp_for_type (machine_mode mode, HOST_WIDE_INT size,
reuse. First two arguments are same as in preceding function. */
rtx
-assign_stack_temp (machine_mode mode, HOST_WIDE_INT size)
+assign_stack_temp (machine_mode mode, poly_int64 size)
{
return assign_stack_temp_for_type (mode, size, NULL_TREE);
}
@@ -1050,14 +1055,14 @@ combine_temp_slots (void)
if (GET_MODE (q->slot) != BLKmode)
continue;
- if (p->base_offset + p->full_size == q->base_offset)
+ if (known_eq (p->base_offset + p->full_size, q->base_offset))
{
/* Q comes after P; combine Q into P. */
p->size += q->size;
p->full_size += q->full_size;
delete_q = 1;
}
- else if (q->base_offset + q->full_size == p->base_offset)
+ else if (known_eq (q->base_offset + q->full_size, p->base_offset))
{
/* P comes after Q; combine P into Q. */
q->size += p->size;
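In the function.c changes above, find_temp_slot_from_address now strips the constant part of the address with strip_offset and asks known_in_range_p whether that offset falls inside a slot's [base_offset, base_offset + full_size) range. A scalar sketch of that half-open containment test, with hypothetical values, is shown below; GCC's known_in_range_p additionally requires the containment to hold for every runtime value of the polynomial coefficients.

#include <cstdint>
#include <iostream>

// Plain-integer version of the half-open range check used when mapping
// a virtual-stack-vars address back to its temp slot: is VAL in
// [POS, POS + SIZE)?
static bool in_range_p (int64_t val, int64_t pos, int64_t size)
{
  return val >= pos && val < pos + size;
}

int main ()
{
  // Hypothetical slot: base_offset = -32, full_size = 16, i.e. the bytes
  // at virtual_stack_vars_rtx + [-32, -16).
  std::cout << in_range_p (-24, -32, 16) << '\n';  // 1: inside the slot
  std::cout << in_range_p (-16, -32, 16) << '\n';  // 0: one past the end
  return 0;
}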
diff --git a/gcc/function.h b/gcc/function.h
index b94abb6..95cb2bd 100644
--- a/gcc/function.h
+++ b/gcc/function.h
@@ -187,8 +187,8 @@ struct GTY(()) frame_space
{
struct frame_space *next;
- HOST_WIDE_INT start;
- HOST_WIDE_INT length;
+ poly_int64 start;
+ poly_int64 length;
};
struct GTY(()) stack_usage
@@ -575,19 +575,19 @@ extern void free_after_compilation (struct function *);
/* Return size needed for stack frame based on slots so far allocated.
This size counts from zero. It is not rounded to STACK_BOUNDARY;
the caller may have to do that. */
-extern HOST_WIDE_INT get_frame_size (void);
+extern poly_int64 get_frame_size (void);
/* Issue an error message and return TRUE if frame OFFSET overflows in
the signed target pointer arithmetics for function FUNC. Otherwise
return FALSE. */
-extern bool frame_offset_overflow (HOST_WIDE_INT, tree);
+extern bool frame_offset_overflow (poly_int64, tree);
extern unsigned int spill_slot_alignment (machine_mode);
-extern rtx assign_stack_local_1 (machine_mode, HOST_WIDE_INT, int, int);
-extern rtx assign_stack_local (machine_mode, HOST_WIDE_INT, int);
-extern rtx assign_stack_temp_for_type (machine_mode, HOST_WIDE_INT, tree);
-extern rtx assign_stack_temp (machine_mode, HOST_WIDE_INT);
+extern rtx assign_stack_local_1 (machine_mode, poly_int64, int, int);
+extern rtx assign_stack_local (machine_mode, poly_int64, int);
+extern rtx assign_stack_temp_for_type (machine_mode, poly_int64, tree);
+extern rtx assign_stack_temp (machine_mode, poly_int64);
extern rtx assign_temp (tree, int, int);
extern void update_temp_slot_address (rtx, rtx);
extern void preserve_temp_slots (rtx);
diff --git a/gcc/ira.c b/gcc/ira.c
index 765a1d0..b3905b6 100644
--- a/gcc/ira.c
+++ b/gcc/ira.c
@@ -5553,13 +5553,13 @@ do_reload (void)
function's frame size is larger than we expect. */
if (flag_stack_check == GENERIC_STACK_CHECK)
{
- HOST_WIDE_INT size = get_frame_size () + STACK_CHECK_FIXED_FRAME_SIZE;
+ poly_int64 size = get_frame_size () + STACK_CHECK_FIXED_FRAME_SIZE;
for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (df_regs_ever_live_p (i) && !fixed_regs[i] && call_used_regs[i])
size += UNITS_PER_WORD;
- if (size > STACK_CHECK_MAX_FRAME_SIZE)
+ if (constant_lower_bound (size) > STACK_CHECK_MAX_FRAME_SIZE)
warning (0, "frame size too large for reliable stack checking");
}
diff --git a/gcc/lra.c b/gcc/lra.c
index c48ab72..fec9886 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -2397,7 +2397,7 @@ lra (FILE *f)
bitmap_initialize (&lra_optional_reload_pseudos, &reg_obstack);
bitmap_initialize (&lra_subreg_reload_pseudos, &reg_obstack);
live_p = false;
- if (get_frame_size () != 0 && crtl->stack_alignment_needed)
+ if (maybe_ne (get_frame_size (), 0) && crtl->stack_alignment_needed)
/* If we have a stack frame, we must align it now. The stack size
may be a part of the offset computation for register
elimination. */
diff --git a/gcc/reload1.c b/gcc/reload1.c
index 0f90a4c..7cf6412 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -887,7 +887,7 @@ reload (rtx_insn *first, int global)
for (;;)
{
int something_changed;
- HOST_WIDE_INT starting_frame_size;
+ poly_int64 starting_frame_size;
starting_frame_size = get_frame_size ();
something_was_spilled = false;
@@ -955,7 +955,7 @@ reload (rtx_insn *first, int global)
if (caller_save_needed)
setup_save_areas ();
- if (starting_frame_size && crtl->stack_alignment_needed)
+ if (maybe_ne (starting_frame_size, 0) && crtl->stack_alignment_needed)
{
/* If we have a stack frame, we must align it now. The
stack size may be a part of the offset computation for
@@ -968,7 +968,8 @@ reload (rtx_insn *first, int global)
assign_stack_local (BLKmode, 0, crtl->stack_alignment_needed);
}
/* If we allocated another stack slot, redo elimination bookkeeping. */
- if (something_was_spilled || starting_frame_size != get_frame_size ())
+ if (something_was_spilled
+ || maybe_ne (starting_frame_size, get_frame_size ()))
{
if (update_eliminables_and_spill ())
finish_spills (0);
@@ -994,7 +995,8 @@ reload (rtx_insn *first, int global)
/* If we allocated any new memory locations, make another pass
since it might have changed elimination offsets. */
- if (something_was_spilled || starting_frame_size != get_frame_size ())
+ if (something_was_spilled
+ || maybe_ne (starting_frame_size, get_frame_size ()))
something_changed = 1;
/* Even if the frame size remained the same, we might still have
@@ -1043,11 +1045,11 @@ reload (rtx_insn *first, int global)
if (insns_need_reload != 0 || something_needs_elimination
|| something_needs_operands_changed)
{
- HOST_WIDE_INT old_frame_size = get_frame_size ();
+ poly_int64 old_frame_size = get_frame_size ();
reload_as_needed (global);
- gcc_assert (old_frame_size == get_frame_size ());
+ gcc_assert (known_eq (old_frame_size, get_frame_size ()));
gcc_assert (verify_initial_elim_offsets ());
}
diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c
index 5a9da9e..859754d 100644
--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -344,7 +344,7 @@ rtx_varies_p (const_rtx x, bool for_alias)
FROM and TO for the current function, as it was at the start
of the routine. */
-static HOST_WIDE_INT
+static poly_int64
get_initial_register_offset (int from, int to)
{
static const struct elim_table_t
@@ -352,7 +352,7 @@ get_initial_register_offset (int from, int to)
const int from;
const int to;
} table[] = ELIMINABLE_REGS;
- HOST_WIDE_INT offset1, offset2;
+ poly_int64 offset1, offset2;
unsigned int i, j;
if (to == from)