author    | Martin Liska <mliska@suse.cz>     | 2018-11-30 15:25:15 +0100
committer | Martin Liska <marxin@gcc.gnu.org> | 2018-11-30 14:25:15 +0000
commit    | 6e644a50045f8032b6d6ff19eb70d8b33dfc4dae (patch)
tree      | b6ab18da4eaeb67ce5e78a93ed2b4b84bfa0b637 /gcc/asan.c
parent    | b49f1a7e7c85add83637cf7df46b8fc9b5b299e9 (diff)
Make red zone size more flexible for stack variables (PR sanitizer/81715).
2018-11-30  Martin Liska  <mliska@suse.cz>

        PR sanitizer/81715
        * asan.c (asan_shadow_cst): Remove, partially transform
        into flush_redzone_payload.
        (RZ_BUFFER_SIZE): New.
        (struct asan_redzone_buffer): New.
        (asan_redzone_buffer::emit_redzone_byte): Likewise.
        (asan_redzone_buffer::flush_redzone_payload): Likewise.
        (asan_redzone_buffer::flush_if_full): Likewise.
        (asan_emit_stack_protection): Use asan_redzone_buffer class
        that is responsible for proper aligned stores and flushing
        of shadow memory payload.
        * asan.h (ASAN_MIN_RED_ZONE_SIZE): New.
        (asan_var_and_redzone_size): Likewise.
        * cfgexpand.c (expand_stack_vars): Use smaller alignment
        (ASAN_MIN_RED_ZONE_SIZE) in order to make shadow memory
        for automatic variables more compact.

2018-11-30  Martin Liska  <mliska@suse.cz>

        PR sanitizer/81715
        * c-c++-common/asan/asan-stack-small.c: New test.
From-SVN: r266664
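The effect is easiest to see on a function with several very small locals, which is what the new c-c++-common/asan/asan-stack-small.c test exercises. The snippet below is only an illustrative stand-in for such a test case, not the actual file contents; the function and variable names are invented.

/* Hypothetical -fsanitize=address example in the spirit of the new test
   (not the actual asan-stack-small.c).  Three 1-byte locals: before this
   change the red zone after each protected variable was rounded up to the
   fixed ASAN_RED_ZONE_SIZE (32 bytes); with ASAN_MIN_RED_ZONE_SIZE the red
   zones around such tiny variables can stay much smaller, so both the
   protected frame and the shadow bytes emitted by
   asan_emit_stack_protection shrink.  */
char
sum (void)
{
  char a = 1;
  char b = 2;
  char c = 3;
  return a + b + c;
}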
Diffstat (limited to 'gcc/asan.c')
-rw-r--r-- | gcc/asan.c | 202
1 file changed, 157 insertions, 45 deletions
@@ -1155,20 +1155,6 @@ asan_pp_string (pretty_printer *pp)
   return build1 (ADDR_EXPR, shadow_ptr_types[0], ret);
 }
 
-/* Return a CONST_INT representing 4 subsequent shadow memory bytes.  */
-
-static rtx
-asan_shadow_cst (unsigned char shadow_bytes[4])
-{
-  int i;
-  unsigned HOST_WIDE_INT val = 0;
-  gcc_assert (WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN);
-  for (i = 0; i < 4; i++)
-    val |= (unsigned HOST_WIDE_INT) shadow_bytes[BYTES_BIG_ENDIAN ? 3 - i : i]
-           << (BITS_PER_UNIT * i);
-  return gen_int_mode (val, SImode);
-}
-
 /* Clear shadow memory at SHADOW_MEM, LEN bytes.  Can't call a library call here
    though.  */
 
@@ -1235,6 +1221,136 @@ shadow_mem_size (unsigned HOST_WIDE_INT size)
   return ROUND_UP (size, ASAN_SHADOW_GRANULARITY) / ASAN_SHADOW_GRANULARITY;
 }
 
+/* Always emit 4 bytes at a time.  */
+#define RZ_BUFFER_SIZE 4
+
+/* ASAN redzone buffer container that handles emission of shadow bytes.  */
+struct asan_redzone_buffer
+{
+  /* Constructor.  */
+  asan_redzone_buffer (rtx shadow_mem, HOST_WIDE_INT prev_offset):
+    m_shadow_mem (shadow_mem), m_prev_offset (prev_offset),
+    m_original_offset (prev_offset), m_shadow_bytes (RZ_BUFFER_SIZE)
+  {}
+
+  /* Emit VALUE shadow byte at a given OFFSET.  */
+  void emit_redzone_byte (HOST_WIDE_INT offset, unsigned char value);
+
+  /* Emit RTX emission of the content of the buffer.  */
+  void flush_redzone_payload (void);
+
+private:
+  /* Flush if the content of the buffer is full
+     (equal to RZ_BUFFER_SIZE).  */
+  void flush_if_full (void);
+
+  /* Memory where we last emitted a redzone payload.  */
+  rtx m_shadow_mem;
+
+  /* Relative offset where we last emitted a redzone payload.  */
+  HOST_WIDE_INT m_prev_offset;
+
+  /* Relative original offset.  Used for checking only.  */
+  HOST_WIDE_INT m_original_offset;
+
+public:
+  /* Buffer with redzone payload.  */
+  auto_vec<unsigned char> m_shadow_bytes;
+};
+
+/* Emit VALUE shadow byte at a given OFFSET.  */
+
+void
+asan_redzone_buffer::emit_redzone_byte (HOST_WIDE_INT offset,
+                                        unsigned char value)
+{
+  gcc_assert ((offset & (ASAN_SHADOW_GRANULARITY - 1)) == 0);
+  gcc_assert (offset >= m_prev_offset);
+
+  HOST_WIDE_INT off
+    = m_prev_offset + ASAN_SHADOW_GRANULARITY * m_shadow_bytes.length ();
+  if (off == offset)
+    {
+      /* Consecutive shadow memory byte.  */
+      m_shadow_bytes.safe_push (value);
+      flush_if_full ();
+    }
+  else
+    {
+      if (!m_shadow_bytes.is_empty ())
+        flush_redzone_payload ();
+
+      /* Maybe start earlier in order to use aligned store.  */
+      HOST_WIDE_INT align = (offset - m_prev_offset) % ASAN_RED_ZONE_SIZE;
+      if (align)
+        {
+          offset -= align;
+          for (unsigned i = 0; i < align / BITS_PER_UNIT; i++)
+            m_shadow_bytes.safe_push (0);
+        }
+
+      /* Adjust m_prev_offset and m_shadow_mem.  */
+      HOST_WIDE_INT diff = offset - m_prev_offset;
+      m_shadow_mem = adjust_address (m_shadow_mem, VOIDmode,
+                                     diff >> ASAN_SHADOW_SHIFT);
+      m_prev_offset = offset;
+      m_shadow_bytes.safe_push (value);
+      flush_if_full ();
+    }
+}
+
+/* Emit RTX emission of the content of the buffer.  */
+
+void
+asan_redzone_buffer::flush_redzone_payload (void)
+{
+  gcc_assert (WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN);
+
+  if (m_shadow_bytes.is_empty ())
+    return;
+
+  /* Be sure we always emit to an aligned address.  */
+  gcc_assert (((m_prev_offset - m_original_offset)
+               & (ASAN_RED_ZONE_SIZE - 1)) == 0);
+
+  /* Fill it to RZ_BUFFER_SIZE bytes with zeros if needed.  */
+  unsigned l = m_shadow_bytes.length ();
+  for (unsigned i = 0; i <= RZ_BUFFER_SIZE - l; i++)
+    m_shadow_bytes.safe_push (0);
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    fprintf (dump_file,
+             "Flushing rzbuffer at offset %" PRId64 " with: ", m_prev_offset);
+
+  unsigned HOST_WIDE_INT val = 0;
+  for (unsigned i = 0; i < RZ_BUFFER_SIZE; i++)
+    {
+      unsigned char v
+        = m_shadow_bytes[BYTES_BIG_ENDIAN ? RZ_BUFFER_SIZE - i : i];
+      val |= (unsigned HOST_WIDE_INT)v << (BITS_PER_UNIT * i);
+      if (dump_file && (dump_flags & TDF_DETAILS))
+        fprintf (dump_file, "%02x ", v);
+    }
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    fprintf (dump_file, "\n");
+
+  rtx c = gen_int_mode (val, SImode);
+  m_shadow_mem = adjust_address (m_shadow_mem, SImode, 0);
+  emit_move_insn (m_shadow_mem, c);
+  m_shadow_bytes.truncate (0);
+}
+
+/* Flush if the content of the buffer is full
+   (equal to RZ_BUFFER_SIZE).  */
+
+void
+asan_redzone_buffer::flush_if_full (void)
+{
+  if (m_shadow_bytes.length () == RZ_BUFFER_SIZE)
+    flush_redzone_payload ();
+}
+
 /* Insert code to protect stack vars.  The prologue sequence should be emitted
    directly, epilogue sequence returned.  BASE is the register holding the
    stack base, against which OFFSETS array offsets are relative to, OFFSETS
@@ -1256,7 +1372,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   rtx_code_label *lab;
   rtx_insn *insns;
   char buf[32];
-  unsigned char shadow_bytes[4];
   HOST_WIDE_INT base_offset = offsets[length - 1];
   HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
   HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
@@ -1421,46 +1536,43 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
   prev_offset = base_offset;
+
+  asan_redzone_buffer rz_buffer (shadow_mem, prev_offset);
   for (l = length; l; l -= 2)
     {
       if (l == 2)
        cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
       offset = offsets[l - 1];
-      if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
+
+      bool extra_byte = (offset - base_offset) & (ASAN_SHADOW_GRANULARITY - 1);
+      /* If a red-zone is not aligned to ASAN_SHADOW_GRANULARITY then
+         the previous stack variable has size % ASAN_SHADOW_GRANULARITY != 0.
+         In that case we have to emit one extra byte that will describe
+         how many bytes (our of ASAN_SHADOW_GRANULARITY) can be accessed.  */
+      if (extra_byte)
        {
-         int i;
          HOST_WIDE_INT aoff
           = base_offset + ((offset - base_offset)
-                           & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
-         shadow_mem = adjust_address (shadow_mem, VOIDmode,
-                                      (aoff - prev_offset)
-                                      >> ASAN_SHADOW_SHIFT);
-         prev_offset = aoff;
-         for (i = 0; i < 4; i++, aoff += ASAN_SHADOW_GRANULARITY)
-           if (aoff < offset)
-             {
-               if (aoff < offset - (HOST_WIDE_INT)ASAN_SHADOW_GRANULARITY + 1)
-                 shadow_bytes[i] = 0;
-               else
-                 shadow_bytes[i] = offset - aoff;
-             }
-           else
-             shadow_bytes[i] = ASAN_STACK_MAGIC_MIDDLE;
-         emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
-         offset = aoff;
+                           & ~(ASAN_SHADOW_GRANULARITY - HOST_WIDE_INT_1));
+         rz_buffer.emit_redzone_byte (aoff, offset - aoff);
+         offset = aoff + ASAN_SHADOW_GRANULARITY;
        }
-      while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
+
+      /* Calculate size of red zone payload.  */
+      while (offset < offsets[l - 2])
        {
-         shadow_mem = adjust_address (shadow_mem, VOIDmode,
-                                      (offset - prev_offset)
-                                      >> ASAN_SHADOW_SHIFT);
-         prev_offset = offset;
-         memset (shadow_bytes, cur_shadow_byte, 4);
-         emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
-         offset += ASAN_RED_ZONE_SIZE;
+         rz_buffer.emit_redzone_byte (offset, cur_shadow_byte);
+         offset += ASAN_SHADOW_GRANULARITY;
        }
+
       cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
     }
+
+  /* As the automatic variables are aligned to
+     ASAN_RED_ZONE_SIZE / ASAN_SHADOW_GRANULARITY, the buffer should be
+     flushed here.  */
+  gcc_assert (rz_buffer.m_shadow_bytes.is_empty ());
+
   do_pending_stack_adjust ();
 
   /* Construct epilogue sequence.  */
@@ -1519,7 +1631,7 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   for (l = length; l; l -= 2)
     {
       offset = base_offset + ((offsets[l - 1] - base_offset)
-                              & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
+                              & ~(ASAN_MIN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
       if (last_offset + last_size != offset)
        {
         shadow_mem = adjust_address (shadow_mem, VOIDmode,
@@ -1531,7 +1643,7 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
         last_size = 0;
        }
       last_size += base_offset + ((offsets[l - 2] - base_offset)
-                                  & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
+                                  & ~(ASAN_MIN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
                   - offset;
 
       /* Unpoison shadow memory that corresponds to a variable that is
@@ -1552,7 +1664,7 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
                    "%s (%" PRId64 " B)\n", n, size);
            }
 
-         last_size += size & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1);
+         last_size += size & ~(ASAN_MIN_RED_ZONE_SIZE - HOST_WIDE_INT_1);
        }
     }
 }
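To experiment with the buffering scheme outside of GCC, the sketch below models what asan_redzone_buffer does in the patch above: collect one shadow byte per 8-byte granule and store the bytes back four at a time as a single 32-bit value. It is a minimal model under simplifying assumptions: all names are invented, the RTL store is replaced by a printf, and the backward re-alignment and big-endian handling of the real code are omitted.

// Simplified, self-contained model of the shadow-byte buffering idea.
// Not GCC code: granule/offset handling is reduced to the essentials.
#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <vector>

static const unsigned granularity = 8;  // mirrors ASAN_SHADOW_GRANULARITY
static const unsigned buffer_size = 4;  // mirrors RZ_BUFFER_SIZE

struct redzone_buffer
{
  explicit redzone_buffer (int64_t start_offset) : m_prev_offset (start_offset) {}

  // Record VALUE as the shadow byte describing the granule at OFFSET.
  void emit_redzone_byte (int64_t offset, unsigned char value)
  {
    int64_t next = m_prev_offset + granularity * (int64_t) m_bytes.size ();
    if (offset != next)
      {
        // Non-consecutive granule: flush what we have and restart there
        // (the real code also pads backwards to keep the store aligned).
        flush ();
        m_prev_offset = offset;
      }
    m_bytes.push_back (value);
    if (m_bytes.size () == buffer_size)
      flush ();
  }

  // Pack the pending shadow bytes into one little-endian 32-bit value,
  // padding with zeros, and "store" it (printed instead of emitted as RTL).
  void flush ()
  {
    if (m_bytes.empty ())
      return;
    while (m_bytes.size () < buffer_size)
      m_bytes.push_back (0);
    uint32_t val = 0;
    for (unsigned i = 0; i < buffer_size; i++)
      val |= (uint32_t) m_bytes[i] << (8 * i);
    printf ("store 0x%08" PRIx32 " at shadow offset %" PRId64 "\n",
            val, m_prev_offset);
    m_prev_offset += granularity * buffer_size;
    m_bytes.clear ();
  }

  int64_t m_prev_offset;               // offset of the first buffered granule
  std::vector<unsigned char> m_bytes;  // pending shadow bytes
};

int main ()
{
  // A 1-byte variable followed by a 16-byte red zone: one partial-granule
  // byte (0x01: only the first byte is addressable) and two red-zone bytes
  // (0xf2 is ASAN_STACK_MAGIC_MIDDLE).  All three end up in a single store.
  redzone_buffer buf (0);
  buf.emit_redzone_byte (0, 0x01);
  buf.emit_redzone_byte (8, 0xf2);
  buf.emit_redzone_byte (16, 0xf2);
  buf.flush ();
  return 0;
}

Compiled and run, this prints "store 0x00f2f201 at shadow offset 0": the three shadow bytes land in one aligned 4-byte store rather than three separate ones, which mirrors the saving the real asan_redzone_buffer class is after.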