author     Jeff Law <law@redhat.com>     2017-09-19 23:35:07 -0600
committer  Jeff Law <law@gcc.gnu.org>    2017-09-19 23:35:07 -0600
commit     8e7a09c353842f38c05b8cb171efa9784ab84862 (patch)
tree       4113182962c97b316c32e9cfba060b8d769d0a98 /gcc/config
parent     8a502a808ec70c87e7a0a4bc7719115859a9dd7a (diff)
download   gcc-8e7a09c353842f38c05b8cb171efa9784ab84862.zip
           gcc-8e7a09c353842f38c05b8cb171efa9784ab84862.tar.gz
           gcc-8e7a09c353842f38c05b8cb171efa9784ab84862.tar.bz2
i386.c (ix86_adjust_stack_and_probe_stack_clash): New.
	* config/i386/i386.c (ix86_adjust_stack_and_probe_stack_clash): New.
	(ix86_expand_prologue): Dump stack clash info as needed.
	Call ix86_adjust_stack_and_probe_stack_clash as needed.

	* gcc.dg/stack-check-4.c: New test.
	* gcc.dg/stack-check-5.c: New test.
	* gcc.dg/stack-check-6.c: New test.
	* gcc.dg/stack-check-6a.c: New test.
	* gcc.dg/stack-check-7.c: New test.
	* gcc.dg/stack-check-8.c: New test.
	* gcc.dg/stack-check-9.c: New test.
	* gcc.dg/stack-check-10.c: New test.
	* lib/target-supports.exp
	(check_effective_target_supports_stack_clash_protection): Enable for
	x86 and x86_64 targets.

From-SVN: r252998
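The new probing path is exercised by the gcc.dg/stack-check-*.c tests listed
above.  As a rough illustration only (this is not the contents of any of
those tests, and the function name, array size, and compile command are
assumptions), a function whose frame exceeds the stack-clash guard size
(typically 4 kB, controlled by --param stack-clash-protection-guard-size)
forces the prologue to allocate and probe the frame in probe-interval-sized
pieces instead of moving the stack pointer past the guard page in one step:

/* Hypothetical demo, not part of this commit.  Compile with, e.g.,

       gcc -O2 -fstack-clash-protection -S clash-demo.c

   and inspect the prologue of big_frame: the frame is allocated in
   chunks no larger than the probe interval, with a store to the stack
   after each chunk.  */

extern void consume (char *);

void
big_frame (void)
{
  char buf[256 * 1024];		/* well beyond any default guard size */
  consume (buf);		/* keep BUF live so the frame is not elided */
}

Frames smaller than the guard size take the NO_PROBE_SMALL_FRAME path in the
patch below and are allocated without any probing.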
Diffstat (limited to 'gcc/config')
-rw-r--r--  gcc/config/i386/i386.c | 152
1 file changed, 150 insertions, 2 deletions
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 05b0520..fdfe595 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -13924,6 +13924,147 @@ release_scratch_register_on_entry (struct scratch_reg *sr)
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
+/* Emit code to adjust the stack pointer by SIZE bytes while probing it.
+
+ This differs from the next routine in that it tries hard to prevent
+ attacks that jump the stack guard. Thus it is never allowed to allocate
+ more than PROBE_INTERVAL bytes of stack space without a suitable
+ probe. */
+
+static void
+ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size)
+{
+ struct machine_function *m = cfun->machine;
+
+ /* If this function does not statically allocate stack space, then
+ no probes are needed. */
+ if (!size)
+ {
+ dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
+ return;
+ }
+
+ /* If we are a noreturn function, then we have to consider the
+ possibility that we're called via a jump rather than a call.
+
+ Thus we don't have the implicit probe generated by saving the
+ return address into the stack at the call. Thus, the stack
+ pointer could be anywhere in the guard page. The safe thing
+ to do is emit a probe now.
+
+ ?!? This should be revamped to work like aarch64 and s390 where
+ we track the offset from the most recent probe. Normally that
+ offset would be zero. For a non-return function we would reset
+ it to PROBE_INTERVAL - (STACK_BOUNDARY / BITS_PER_UNIT). Then
+ we just probe when we cross PROBE_INTERVAL. */
+ if (TREE_THIS_VOLATILE (cfun->decl))
+ {
+ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+ -GET_MODE_SIZE (word_mode)));
+ emit_insn (gen_blockage ());
+ }
+
+ /* If we allocate less than the size of the guard statically,
+ then no probing is necessary, but we do need to allocate
+ the stack. */
+ if (size < (1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE)))
+ {
+ pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-size), -1,
+ m->fs.cfa_reg == stack_pointer_rtx);
+ dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
+ return;
+ }
+
+ /* We're allocating a large enough stack frame that we need to
+ emit probes. Either emit them inline or in a loop depending
+ on the size. */
+ HOST_WIDE_INT probe_interval
+ = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ if (size <= 4 * probe_interval)
+ {
+ HOST_WIDE_INT i;
+ for (i = probe_interval; i <= size; i += probe_interval)
+ {
+ /* Allocate PROBE_INTERVAL bytes. */
+ pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-probe_interval), -1,
+ m->fs.cfa_reg == stack_pointer_rtx);
+
+ /* And probe at *sp. */
+ emit_stack_probe (stack_pointer_rtx);
+ emit_insn (gen_blockage ());
+ }
+
+ /* We need to allocate space for the residual, but we do not need
+ to probe the residual. */
+ HOST_WIDE_INT residual = (i - probe_interval - size);
+ if (residual)
+ pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (residual), -1,
+ m->fs.cfa_reg == stack_pointer_rtx);
+ dump_stack_clash_frame_info (PROBE_INLINE, residual != 0);
+ }
+ else
+ {
+ struct scratch_reg sr;
+ get_scratch_register_on_entry (&sr);
+
+ /* Step 1: round SIZE down to a multiple of the interval. */
+ HOST_WIDE_INT rounded_size = size & -probe_interval;
+
+ /* Step 2: compute final value of the loop counter. Use lea if
+ possible. */
+ rtx addr = plus_constant (Pmode, stack_pointer_rtx, -rounded_size);
+ rtx insn;
+ if (address_no_seg_operand (addr, Pmode))
+ insn = emit_insn (gen_rtx_SET (sr.reg, addr));
+ else
+ {
+ emit_move_insn (sr.reg, GEN_INT (-rounded_size));
+ insn = emit_insn (gen_rtx_SET (sr.reg,
+ gen_rtx_PLUS (Pmode, sr.reg,
+ stack_pointer_rtx)));
+ }
+ if (m->fs.cfa_reg == stack_pointer_rtx)
+ {
+ add_reg_note (insn, REG_CFA_DEF_CFA,
+ plus_constant (Pmode, sr.reg,
+ m->fs.cfa_offset + rounded_size));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* Step 3: the loop. */
+ rtx size_rtx = GEN_INT (rounded_size);
+ insn = emit_insn (ix86_gen_adjust_stack_and_probe (sr.reg, sr.reg,
+ size_rtx));
+ if (m->fs.cfa_reg == stack_pointer_rtx)
+ {
+ m->fs.cfa_offset += rounded_size;
+ add_reg_note (insn, REG_CFA_DEF_CFA,
+ plus_constant (Pmode, stack_pointer_rtx,
+ m->fs.cfa_offset));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ m->fs.sp_offset += rounded_size;
+ emit_insn (gen_blockage ());
+
+ /* Step 4: adjust SP if we cannot assert at compile-time that SIZE
+ is equal to ROUNDED_SIZE. */
+
+ if (size != rounded_size)
+ pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (rounded_size - size), -1,
+ m->fs.cfa_reg == stack_pointer_rtx);
+ dump_stack_clash_frame_info (PROBE_LOOP, size != rounded_size);
+
+ release_scratch_register_on_entry (&sr);
+ }
+
+ /* Make sure nothing is scheduled before we are done. */
+ emit_insn (gen_blockage ());
+}
+
/* Emit code to adjust the stack pointer by SIZE bytes while probing it. */
static void
@@ -14852,12 +14993,19 @@ ix86_expand_prologue (void)
/* The stack has already been decremented by the instruction calling us
so probe if the size is non-negative to preserve the protection area. */
- if (allocate >= 0 && flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
+ if (allocate >= 0
+ && (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
+ || flag_stack_clash_protection))
{
/* We expect the GP registers to be saved when probes are used. */
gcc_assert (int_registers_saved);
- if (STACK_CHECK_MOVING_SP)
+ if (flag_stack_clash_protection)
+ {
+ ix86_adjust_stack_and_probe_stack_clash (allocate);
+ allocate = 0;
+ }
+ else if (STACK_CHECK_MOVING_SP)
{
if (!(crtl->is_leaf && !cfun->calls_alloca
&& allocate <= PROBE_INTERVAL))