path: root/libsanitizer/asan
Diffstat (limited to 'libsanitizer/asan')
-rw-r--r--  libsanitizer/asan/asan_allocator.cpp          440
-rw-r--r--  libsanitizer/asan/asan_allocator.h              11
-rw-r--r--  libsanitizer/asan/asan_flags.cpp                10
-rw-r--r--  libsanitizer/asan/asan_fuchsia.cpp               6
-rw-r--r--  libsanitizer/asan/asan_globals.cpp              19
-rw-r--r--  libsanitizer/asan/asan_interceptors.h           15
-rw-r--r--  libsanitizer/asan/asan_interceptors_vfork.S      3
-rw-r--r--  libsanitizer/asan/asan_interface_internal.h      4
-rw-r--r--  libsanitizer/asan/asan_internal.h               12
-rw-r--r--  libsanitizer/asan/asan_linux.cpp                42
-rw-r--r--  libsanitizer/asan/asan_mac.cpp                  48
-rw-r--r--  libsanitizer/asan/asan_malloc_linux.cpp         10
-rw-r--r--  libsanitizer/asan/asan_malloc_local.h            2
-rw-r--r--  libsanitizer/asan/asan_mapping.h                23
-rw-r--r--  libsanitizer/asan/asan_poisoning.cpp             6
-rw-r--r--  libsanitizer/asan/asan_posix.cpp                28
-rw-r--r--  libsanitizer/asan/asan_premap_shadow.cpp        18
-rw-r--r--  libsanitizer/asan/asan_report.cpp                2
-rw-r--r--  libsanitizer/asan/asan_rtems.cpp                 8
-rw-r--r--  libsanitizer/asan/asan_rtl.cpp                  75
-rw-r--r--  libsanitizer/asan/asan_shadow_setup.cpp         45
-rw-r--r--  libsanitizer/asan/asan_stack.h                   5
-rw-r--r--  libsanitizer/asan/asan_win.cpp                  19
23 files changed, 470 insertions(+), 381 deletions(-)
diff --git a/libsanitizer/asan/asan_allocator.cpp b/libsanitizer/asan/asan_allocator.cpp
index 65c51fb..58b496a 100644
--- a/libsanitizer/asan/asan_allocator.cpp
+++ b/libsanitizer/asan/asan_allocator.cpp
@@ -15,20 +15,21 @@
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
+
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
+#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
-#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
@@ -50,6 +51,22 @@ static u32 RZSize2Log(u32 rz_size) {
static AsanAllocator &get_allocator();
+static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
+ u32 tid, u32 stack) {
+ u64 context = tid;
+ context <<= 32;
+ context += stack;
+ atomic_store(atomic_context, context, memory_order_relaxed);
+}
+
+static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
+ u32 &tid, u32 &stack) {
+ u64 context = atomic_load(atomic_context, memory_order_relaxed);
+ stack = context;
+ context >>= 32;
+ tid = context;
+}
+
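
The two helpers above pack a thread id and a stack-depot id into one 64-bit value so both halves of an allocation context can be written and read atomically, without tearing. A minimal standalone sketch of the same packing, using std::atomic instead of the runtime's own atomics (names are illustrative, not ASan's):

  #include <atomic>
  #include <cstdint>

  // tid goes in the high 32 bits, stack-depot id in the low 32 bits.
  static void StoreContext(std::atomic<uint64_t> &ctx, uint32_t tid, uint32_t stack) {
    ctx.store((uint64_t{tid} << 32) | stack, std::memory_order_relaxed);
  }
  static void LoadContext(const std::atomic<uint64_t> &ctx, uint32_t &tid, uint32_t &stack) {
    uint64_t v = ctx.load(std::memory_order_relaxed);
    stack = static_cast<uint32_t>(v);      // low half
    tid = static_cast<uint32_t>(v >> 32);  // high half
  }

Relaxed ordering appears to be sufficient because, later in this patch, the context is always written before the release store of chunk_state that publishes the chunk.
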
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
// L -- left redzone words (0 or more bytes)
@@ -67,32 +84,59 @@ static AsanAllocator &get_allocator();
// ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'
-static const uptr kAllocBegMagic = 0xCC6E96B9;
-
-struct ChunkHeader {
- // 1-st 8 bytes.
- u32 chunk_state : 8; // Must be first.
- u32 alloc_tid : 24;
-
- u32 free_tid : 24;
- u32 from_memalign : 1;
- u32 alloc_type : 2;
- u32 rz_log : 3;
- u32 lsan_tag : 2;
- // 2-nd 8 bytes
- // This field is used for small sizes. For large sizes it is equal to
- // SizeClassMap::kMaxSize and the actual size is stored in the
- // SecondaryAllocator's metadata.
- u32 user_requested_size : 29;
+
+class ChunkHeader {
+ public:
+ atomic_uint8_t chunk_state;
+ u8 alloc_type : 2;
+ u8 lsan_tag : 2;
+
// align < 8 -> 0
// else -> log2(min(align, 512)) - 2
- u32 user_requested_alignment_log : 3;
- u32 alloc_context_id;
+ u8 user_requested_alignment_log : 3;
+
+ private:
+ u16 user_requested_size_hi;
+ u32 user_requested_size_lo;
+ atomic_uint64_t alloc_context_id;
+
+ public:
+ uptr UsedSize() const {
+ uptr R = user_requested_size_lo;
+ if (sizeof(uptr) > sizeof(user_requested_size_lo))
+ R += (uptr)user_requested_size_hi << (8 * sizeof(user_requested_size_lo));
+ return R;
+ }
+
+ void SetUsedSize(uptr size) {
+ user_requested_size_lo = size;
+ if (sizeof(uptr) > sizeof(user_requested_size_lo)) {
+ size >>= (8 * sizeof(user_requested_size_lo));
+ user_requested_size_hi = size;
+ CHECK_EQ(user_requested_size_hi, size);
+ }
+ }
+
+ void SetAllocContext(u32 tid, u32 stack) {
+ AtomicContextStore(&alloc_context_id, tid, stack);
+ }
+
+ void GetAllocContext(u32 &tid, u32 &stack) const {
+ AtomicContextLoad(&alloc_context_id, tid, stack);
+ }
};
-struct ChunkBase : ChunkHeader {
- // Header2, intersects with user memory.
- u32 free_context_id;
+class ChunkBase : public ChunkHeader {
+ atomic_uint64_t free_context_id;
+
+ public:
+ void SetFreeContext(u32 tid, u32 stack) {
+ AtomicContextStore(&free_context_id, tid, stack);
+ }
+
+ void GetFreeContext(u32 &tid, u32 &stack) const {
+ AtomicContextLoad(&free_context_id, tid, stack);
+ }
};
static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
@@ -100,35 +144,50 @@ static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
-// Every chunk of memory allocated by this allocator can be in one of 3 states:
-// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
-// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
-// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
- CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
- CHUNK_ALLOCATED = 2,
- CHUNK_QUARANTINE = 3
+  // Either just allocated by the underlying allocator, but AsanChunk is not
+  // yet ready, or almost returned to the underlying allocator and AsanChunk
+  // is already meaningless.
+ CHUNK_INVALID = 0,
+ // The chunk is allocated and not yet freed.
+ CHUNK_ALLOCATED = 2,
+ // The chunk was freed and put into quarantine zone.
+ CHUNK_QUARANTINE = 3,
};
-struct AsanChunk: ChunkBase {
+class AsanChunk : public ChunkBase {
+ public:
uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
- uptr UsedSize(bool locked_version = false) {
- if (user_requested_size != SizeClassMap::kMaxSize)
- return user_requested_size;
- return *reinterpret_cast<uptr *>(
- get_allocator().GetMetaData(AllocBeg(locked_version)));
+ bool AddrIsInside(uptr addr) {
+ return (addr >= Beg()) && (addr < Beg() + UsedSize());
}
- void *AllocBeg(bool locked_version = false) {
- if (from_memalign) {
- if (locked_version)
- return get_allocator().GetBlockBeginFastLocked(
- reinterpret_cast<void *>(this));
- return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
- }
- return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
+};
+
+class LargeChunkHeader {
+ static constexpr uptr kAllocBegMagic =
+ FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
+ atomic_uintptr_t magic;
+ AsanChunk *chunk_header;
+
+ public:
+ AsanChunk *Get() const {
+ return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
+ ? chunk_header
+ : nullptr;
}
- bool AddrIsInside(uptr addr, bool locked_version = false) {
- return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
+
+ void Set(AsanChunk *p) {
+ if (p) {
+ chunk_header = p;
+ atomic_store(&magic, kAllocBegMagic, memory_order_release);
+ return;
+ }
+
+ uptr old = kAllocBegMagic;
+ if (!atomic_compare_exchange_strong(&magic, &old, 0,
+ memory_order_release)) {
+ CHECK_EQ(old, kAllocBegMagic);
+ }
}
};
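
LargeChunkHeader replaces the old two-word magic/pointer pair with an explicit publish protocol: Set() writes the chunk pointer first and then release-stores the magic, so a reader that acquire-loads the magic and sees it set may safely read the pointer. A minimal sketch of the same idiom with std::atomic (simplified, not the runtime's types):

  #include <atomic>
  #include <cstdint>

  struct Published {
    std::atomic<uint64_t> magic{0};
    void *payload = nullptr;

    void Set(void *p) {
      payload = p;                                         // write the payload first
      magic.store(0xCC6E96B9, std::memory_order_release);  // then publish it
    }
    void *Get() const {
      // The acquire pairs with the release in Set(); if the magic matches,
      // the payload write is guaranteed to be visible.
      return magic.load(std::memory_order_acquire) == 0xCC6E96B9 ? payload : nullptr;
    }
  };
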
@@ -139,23 +198,23 @@ struct QuarantineCallback {
}
void Recycle(AsanChunk *m) {
- CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
- atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
- CHECK_NE(m->alloc_tid, kInvalidTid);
- CHECK_NE(m->free_tid, kInvalidTid);
- PoisonShadow(m->Beg(),
- RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
- kAsanHeapLeftRedzoneMagic);
- void *p = reinterpret_cast<void *>(m->AllocBeg());
+ void *p = get_allocator().GetBlockBegin(m);
if (p != m) {
- uptr *alloc_magic = reinterpret_cast<uptr *>(p);
- CHECK_EQ(alloc_magic[0], kAllocBegMagic);
// Clear the magic value, as allocator internals may overwrite the
// contents of deallocated chunk, confusing GetAsanChunk lookup.
- alloc_magic[0] = 0;
- CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
+ reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
+ }
+
+ u8 old_chunk_state = CHUNK_QUARANTINE;
+ if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
+ CHUNK_INVALID, memory_order_acquire)) {
+ CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
}
+ PoisonShadow(m->Beg(),
+ RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ kAsanHeapLeftRedzoneMagic);
+
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.real_frees++;
@@ -299,23 +358,26 @@ struct Allocator {
// This could be a user-facing chunk (with redzones), or some internal
// housekeeping chunk, like TransferBatch. Start by assuming the former.
AsanChunk *ac = GetAsanChunk((void *)chunk);
- uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
- uptr beg = ac->Beg();
- uptr end = ac->Beg() + ac->UsedSize(true);
- uptr chunk_end = chunk + allocated_size;
- if (chunk < beg && beg < end && end <= chunk_end &&
- ac->chunk_state == CHUNK_ALLOCATED) {
- // Looks like a valid AsanChunk in use, poison redzones only.
- PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
- uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
- FastPoisonShadowPartialRightRedzone(
- end_aligned_down, end - end_aligned_down,
- chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
- } else {
- // This is either not an AsanChunk or freed or quarantined AsanChunk.
- // In either case, poison everything.
- PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
+ uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
+ if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
+ CHUNK_ALLOCATED) {
+ uptr beg = ac->Beg();
+ uptr end = ac->Beg() + ac->UsedSize();
+ uptr chunk_end = chunk + allocated_size;
+ if (chunk < beg && beg < end && end <= chunk_end) {
+ // Looks like a valid AsanChunk in use, poison redzones only.
+ PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
+ uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
+ FastPoisonShadowPartialRightRedzone(
+ end_aligned_down, end - end_aligned_down,
+ chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
+ return;
+ }
}
+
+ // This is either not an AsanChunk or freed or quarantined AsanChunk.
+ // In either case, poison everything.
+ PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
}
void ReInitialize(const AllocatorOptions &options) {
@@ -348,17 +410,18 @@ struct Allocator {
// -------------------- Helper methods. -------------------------
uptr ComputeRZLog(uptr user_requested_size) {
- u32 rz_log =
- user_requested_size <= 64 - 16 ? 0 :
- user_requested_size <= 128 - 32 ? 1 :
- user_requested_size <= 512 - 64 ? 2 :
- user_requested_size <= 4096 - 128 ? 3 :
- user_requested_size <= (1 << 14) - 256 ? 4 :
- user_requested_size <= (1 << 15) - 512 ? 5 :
- user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
- u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
- u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
- return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
+ u32 rz_log = user_requested_size <= 64 - 16 ? 0
+ : user_requested_size <= 128 - 32 ? 1
+ : user_requested_size <= 512 - 64 ? 2
+ : user_requested_size <= 4096 - 128 ? 3
+ : user_requested_size <= (1 << 14) - 256 ? 4
+ : user_requested_size <= (1 << 15) - 512 ? 5
+ : user_requested_size <= (1 << 16) - 1024 ? 6
+ : 7;
+ u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
+ u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
+ u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
+ return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
}
static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
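
ComputeRZLog above first picks a redzone size class from the request size, then clamps it against the redzone flags and, new in this patch, against the header size so that ChunkHeader always fits in the left redzone. A rough standalone sketch of the size-class step, assuming the usual RZLog2Size(l) == 16 << l mapping (so rz_log 0..7 means 16..2048 redzone bytes):

  // Size-class selection only; the real code additionally clamps by
  // min_redzone/max_redzone and by RoundUpToPowerOfTwo(sizeof(ChunkHeader)).
  static unsigned RedzoneLogForSize(unsigned long n) {
    return n <= 64 - 16            ? 0
         : n <= 128 - 32           ? 1
         : n <= 512 - 64           ? 2
         : n <= 4096 - 128         ? 3
         : n <= (1ul << 14) - 256  ? 4
         : n <= (1ul << 15) - 512  ? 5
         : n <= (1ul << 16) - 1024 ? 6
                                   : 7;
  }
  // Example: RedzoneLogForSize(80) == 1, i.e. a 32-byte redzone before clamping.
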
@@ -378,16 +441,23 @@ struct Allocator {
// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
AsanChunk *right_chunk) {
+ if (!left_chunk)
+ return right_chunk;
+ if (!right_chunk)
+ return left_chunk;
// Prefer an allocated chunk over freed chunk and freed chunk
// over available chunk.
- if (left_chunk->chunk_state != right_chunk->chunk_state) {
- if (left_chunk->chunk_state == CHUNK_ALLOCATED)
+ u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
+ u8 right_state =
+ atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
+ if (left_state != right_state) {
+ if (left_state == CHUNK_ALLOCATED)
return left_chunk;
- if (right_chunk->chunk_state == CHUNK_ALLOCATED)
+ if (right_state == CHUNK_ALLOCATED)
return right_chunk;
- if (left_chunk->chunk_state == CHUNK_QUARANTINE)
+ if (left_state == CHUNK_QUARANTINE)
return left_chunk;
- if (right_chunk->chunk_state == CHUNK_QUARANTINE)
+ if (right_state == CHUNK_QUARANTINE)
return right_chunk;
}
// Same chunk_state: choose based on offset.
@@ -402,10 +472,11 @@ struct Allocator {
bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
AsanChunk *m = GetAsanChunkByAddr(addr);
if (!m) return false;
- if (m->chunk_state != CHUNK_ALLOCATED) return false;
+ if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
+ return false;
if (m->Beg() != addr) return false;
- atomic_store((atomic_uint32_t *)&m->alloc_context_id, StackDepotPut(*stack),
- memory_order_relaxed);
+ AsanThread *t = GetCurrentThread();
+ m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
return true;
}
@@ -442,13 +513,10 @@ struct Allocator {
uptr needed_size = rounded_size + rz_size;
if (alignment > min_alignment)
needed_size += alignment;
- bool using_primary_allocator = true;
// If we are allocating from the secondary allocator, there will be no
// automatic right redzone, so add the right redzone manually.
- if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
+ if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
needed_size += rz_size;
- using_primary_allocator = false;
- }
CHECK(IsAligned(needed_size, min_alignment));
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
size > max_user_defined_malloc_size) {
@@ -490,8 +558,7 @@ struct Allocator {
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
uptr alloc_end = alloc_beg + needed_size;
- uptr beg_plus_redzone = alloc_beg + rz_size;
- uptr user_beg = beg_plus_redzone;
+ uptr user_beg = alloc_beg + rz_size;
if (!IsAligned(user_beg, alignment))
user_beg = RoundUpTo(user_beg, alignment);
uptr user_end = user_beg + size;
@@ -499,31 +566,11 @@ struct Allocator {
uptr chunk_beg = user_beg - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
m->alloc_type = alloc_type;
- m->rz_log = rz_log;
- u32 alloc_tid = t ? t->tid() : 0;
- m->alloc_tid = alloc_tid;
- CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
- m->free_tid = kInvalidTid;
- m->from_memalign = user_beg != beg_plus_redzone;
- if (alloc_beg != chunk_beg) {
- CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg);
- reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
- reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
- }
- if (using_primary_allocator) {
- CHECK(size);
- m->user_requested_size = size;
- CHECK(allocator.FromPrimary(allocated));
- } else {
- CHECK(!allocator.FromPrimary(allocated));
- m->user_requested_size = SizeClassMap::kMaxSize;
- uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
- meta[0] = size;
- meta[1] = chunk_beg;
- }
+ CHECK(size);
+ m->SetUsedSize(size);
m->user_requested_alignment_log = user_requested_alignment_log;
- m->alloc_context_id = StackDepotPut(*stack);
+ m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
uptr size_rounded_down_to_granularity =
RoundDownTo(size, SHADOW_GRANULARITY);
@@ -556,7 +603,11 @@ struct Allocator {
: __lsan::kDirectlyLeaked;
#endif
// Must be the last mutation of metadata in this function.
- atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
+ atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
+ if (alloc_beg != chunk_beg) {
+ CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
+ reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
+ }
ASAN_MALLOC_HOOK(res, size);
return res;
}
@@ -564,10 +615,10 @@ struct Allocator {
// Set quarantine flag if chunk is allocated, issue ASan error report on
// available and quarantined chunks. Return true on success, false otherwise.
bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
- BufferedStackTrace *stack) {
+ BufferedStackTrace *stack) {
u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
- if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
+ if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
CHUNK_QUARANTINE,
memory_order_acquire)) {
ReportInvalidFree(ptr, old_chunk_state, stack);
@@ -575,19 +626,18 @@ struct Allocator {
return false;
}
CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
+ // It was user data.
+ m->SetFreeContext(kInvalidTid, 0);
return true;
}
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlagIfAllocated.
void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
- CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
- CHECK_GE(m->alloc_tid, 0);
- if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
- CHECK_EQ(m->free_tid, kInvalidTid);
+ CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
+ CHUNK_QUARANTINE);
AsanThread *t = GetCurrentThread();
- m->free_tid = t ? t->tid() : 0;
- m->free_context_id = StackDepotPut(*stack);
+ m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));
Flags &fl = *flags();
if (fl.max_free_fill_size > 0) {
@@ -676,7 +726,7 @@ struct Allocator {
void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
if (new_ptr) {
- u8 chunk_state = m->chunk_state;
+ u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
if (chunk_state != CHUNK_ALLOCATED)
ReportInvalidFree(old_ptr, chunk_state, stack);
CHECK_NE(REAL(memcpy), nullptr);
@@ -719,17 +769,24 @@ struct Allocator {
// -------------------------- Chunk lookup ----------------------
// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
+ // Returns nullptr if AsanChunk is not yet initialized just after
+ // get_allocator().Allocate(), or is being destroyed just before
+ // get_allocator().Deallocate().
AsanChunk *GetAsanChunk(void *alloc_beg) {
- if (!alloc_beg) return nullptr;
- if (!allocator.FromPrimary(alloc_beg)) {
- uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
- AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
- return m;
+ if (!alloc_beg)
+ return nullptr;
+ AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
+ if (!p) {
+ if (!allocator.FromPrimary(alloc_beg))
+ return nullptr;
+ p = reinterpret_cast<AsanChunk *>(alloc_beg);
}
- uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
- if (alloc_magic[0] == kAllocBegMagic)
- return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
- return reinterpret_cast<AsanChunk *>(alloc_beg);
+ u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
+ // This does not guarantee that the chunk is initialized, but it is
+ // definitely not a valid chunk for any other state value.
+ if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
+ return p;
+ return nullptr;
}
AsanChunk *GetAsanChunkByAddr(uptr p) {
@@ -747,16 +804,16 @@ struct Allocator {
uptr AllocationSize(uptr p) {
AsanChunk *m = GetAsanChunkByAddr(p);
if (!m) return 0;
- if (m->chunk_state != CHUNK_ALLOCATED) return 0;
+ if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
+ return 0;
if (m->Beg() != p) return 0;
return m->UsedSize();
}
AsanChunkView FindHeapChunkByAddress(uptr addr) {
AsanChunk *m1 = GetAsanChunkByAddr(addr);
- if (!m1) return AsanChunkView(m1);
sptr offset = 0;
- if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
+ if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
// The address is in the chunk's left redzone, so maybe it is actually
// a right buffer overflow from the other chunk to the left.
// Search a bit to the left to see if there is another chunk.
@@ -813,13 +870,16 @@ static AsanAllocator &get_allocator() {
}
bool AsanChunkView::IsValid() const {
- return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
+ return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
+ CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
- return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
+ return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
+ CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
- return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
+ return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
+ CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
@@ -827,8 +887,23 @@ uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
-uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
-uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
+
+uptr AsanChunkView::AllocTid() const {
+ u32 tid = 0;
+ u32 stack = 0;
+ chunk_->GetAllocContext(tid, stack);
+ return tid;
+}
+
+uptr AsanChunkView::FreeTid() const {
+ if (!IsQuarantined())
+ return kInvalidTid;
+ u32 tid = 0;
+ u32 stack = 0;
+ chunk_->GetFreeContext(tid, stack);
+ return tid;
+}
+
AllocType AsanChunkView::GetAllocType() const {
return (AllocType)chunk_->alloc_type;
}
@@ -840,8 +915,21 @@ static StackTrace GetStackTraceFromId(u32 id) {
return res;
}
-u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
-u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
+u32 AsanChunkView::GetAllocStackId() const {
+ u32 tid = 0;
+ u32 stack = 0;
+ chunk_->GetAllocContext(tid, stack);
+ return stack;
+}
+
+u32 AsanChunkView::GetFreeStackId() const {
+ if (!IsQuarantined())
+ return 0;
+ u32 tid = 0;
+ u32 stack = 0;
+ chunk_->GetFreeContext(tid, stack);
+ return stack;
+}
StackTrace AsanChunkView::GetAllocStack() const {
return GetStackTraceFromId(GetAllocStackId());
@@ -1005,7 +1093,7 @@ void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
instance.SetRssLimitExceeded(limit_exceeded);
}
-} // namespace __asan
+} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
@@ -1022,34 +1110,36 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
*end = *begin + sizeof(__asan::get_allocator());
}
-uptr PointsIntoChunk(void* p) {
+uptr PointsIntoChunk(void *p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
- if (!m) return 0;
- uptr chunk = m->Beg();
- if (m->chunk_state != __asan::CHUNK_ALLOCATED)
+ if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
+ __asan::CHUNK_ALLOCATED)
return 0;
- if (m->AddrIsInside(addr, /*locked_version=*/true))
+ uptr chunk = m->Beg();
+ if (m->AddrIsInside(addr))
return chunk;
- if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
- addr))
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
return chunk;
return 0;
}
uptr GetUserBegin(uptr chunk) {
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
- CHECK(m);
- return m->Beg();
+ return m ? m->Beg() : 0;
}
LsanMetadata::LsanMetadata(uptr chunk) {
- metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
+ metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
+ : nullptr;
}
bool LsanMetadata::allocated() const {
+ if (!metadata_)
+ return false;
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->chunk_state == __asan::CHUNK_ALLOCATED;
+ return atomic_load(&m->chunk_state, memory_order_relaxed) ==
+ __asan::CHUNK_ALLOCATED;
}
ChunkTag LsanMetadata::tag() const {
@@ -1064,12 +1154,15 @@ void LsanMetadata::set_tag(ChunkTag value) {
uptr LsanMetadata::requested_size() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->UsedSize(/*locked_version=*/true);
+ return m->UsedSize();
}
u32 LsanMetadata::stack_trace_id() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->alloc_context_id;
+ u32 tid = 0;
+ u32 stack = 0;
+ m->GetAllocContext(tid, stack);
+ return stack;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
@@ -1079,15 +1172,16 @@ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
- if (!m) return kIgnoreObjectInvalid;
- if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
- if (m->lsan_tag == kIgnored)
- return kIgnoreObjectAlreadyIgnored;
- m->lsan_tag = __lsan::kIgnored;
- return kIgnoreObjectSuccess;
- } else {
+ if (!m ||
+ (atomic_load(&m->chunk_state, memory_order_acquire) !=
+ __asan::CHUNK_ALLOCATED) ||
+ !m->AddrIsInside(addr)) {
return kIgnoreObjectInvalid;
}
+ if (m->lsan_tag == kIgnored)
+ return kIgnoreObjectAlreadyIgnored;
+ m->lsan_tag = __lsan::kIgnored;
+ return kIgnoreObjectSuccess;
}
} // namespace __lsan
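
A recurring pattern in the allocator changes above: chunk_state is now an atomic_uint8_t and every transition goes through an atomic load or compare-exchange, so a double free simply loses the CHUNK_ALLOCATED -> CHUNK_QUARANTINE race and the stale state can be reported. A minimal sketch of that transition (simplified, not the runtime's API):

  #include <atomic>
  #include <cstdio>

  enum : unsigned char { CHUNK_INVALID = 0, CHUNK_ALLOCATED = 2, CHUNK_QUARANTINE = 3 };

  // Returns true only for the single thread that wins the free transition.
  static bool TryQuarantine(std::atomic<unsigned char> &state) {
    unsigned char expected = CHUNK_ALLOCATED;
    if (state.compare_exchange_strong(expected, CHUNK_QUARANTINE,
                                      std::memory_order_acquire))
      return true;  // we own the chunk now: poison it and push it to quarantine
    // Lost the race: a double free or a free of a never-allocated chunk.
    std::printf("invalid free, chunk state was %u\n", static_cast<unsigned>(expected));
    return false;
  }
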
diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h
index b37d8ef..2963e97 100644
--- a/libsanitizer/asan/asan_allocator.h
+++ b/libsanitizer/asan/asan_allocator.h
@@ -15,10 +15,11 @@
#define ASAN_ALLOCATOR_H
#include "asan_flags.h"
-#include "asan_internal.h"
#include "asan_interceptors.h"
+#include "asan_internal.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
+#include "sanitizer_common/sanitizer_platform.h"
namespace __asan {
@@ -28,7 +29,7 @@ enum AllocType {
FROM_NEW_BR = 3 // Memory block came from operator new [ ]
};
-struct AsanChunk;
+class AsanChunk;
struct AllocatorOptions {
u32 quarantine_size_mb;
@@ -132,6 +133,10 @@ typedef DefaultSizeClassMap SizeClassMap;
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
+#elif SANITIZER_RISCV64
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
+typedef VeryDenseSizeClassMap SizeClassMap;
# elif defined(__aarch64__)
// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA
// so no need to different values for different VMA.
@@ -171,7 +176,7 @@ template <typename AddressSpaceViewTy>
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
- static const uptr kMetadataSize = 16;
+ static const uptr kMetadataSize = 0;
typedef __asan::SizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
using AddressSpaceView = AddressSpaceViewTy;
diff --git a/libsanitizer/asan/asan_flags.cpp b/libsanitizer/asan/asan_flags.cpp
index c5c70ea..cb6a89f 100644
--- a/libsanitizer/asan/asan_flags.cpp
+++ b/libsanitizer/asan/asan_flags.cpp
@@ -26,10 +26,6 @@ namespace __asan {
Flags asan_flags_dont_use_directly; // use via flags().
-static const char *MaybeCallAsanDefaultOptions() {
- return (&__asan_default_options) ? __asan_default_options() : "";
-}
-
static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
return SANITIZER_STRINGIFY(ASAN_DEFAULT_OPTIONS);
@@ -108,14 +104,14 @@ void InitializeFlags() {
asan_parser.ParseString(asan_compile_def);
// Override from user-specified string.
- const char *asan_default_options = MaybeCallAsanDefaultOptions();
+ const char *asan_default_options = __asan_default_options();
asan_parser.ParseString(asan_default_options);
#if CAN_SANITIZE_UB
- const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ const char *ubsan_default_options = __ubsan_default_options();
ubsan_parser.ParseString(ubsan_default_options);
#endif
#if CAN_SANITIZE_LEAKS
- const char *lsan_default_options = __lsan::MaybeCallLsanDefaultOptions();
+ const char *lsan_default_options = __lsan_default_options();
lsan_parser.ParseString(lsan_default_options);
#endif
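
The removed MaybeCallAsanDefaultOptions() existed only to test the weak __asan_default_options symbol for null; with the runtime now supplying a default definition, the function can be called unconditionally. User programs keep overriding it the usual way, for example (an illustrative override, not part of this patch):

  // Link this into the instrumented program to change ASan's default flags.
  extern "C" const char *__asan_default_options() {
    return "verbosity=1:halt_on_error=0";
  }
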
diff --git a/libsanitizer/asan/asan_fuchsia.cpp b/libsanitizer/asan/asan_fuchsia.cpp
index f8b2d5f..ec15abf 100644
--- a/libsanitizer/asan/asan_fuchsia.cpp
+++ b/libsanitizer/asan/asan_fuchsia.cpp
@@ -62,6 +62,8 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
UNIMPLEMENTED();
}
+bool PlatformUnpoisonStacks() { return false; }
+
// We can use a plain thread_local variable for TSD.
static thread_local void *per_thread;
@@ -196,6 +198,10 @@ bool HandleDlopenInit() {
return false;
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ __sanitizer_fill_shadow(p, size, 0, 0);
+}
+
} // namespace __asan
// These are declared (in extern "C") by <zircon/sanitizer.h>.
diff --git a/libsanitizer/asan/asan_globals.cpp b/libsanitizer/asan/asan_globals.cpp
index e045c31..9d7dbc6 100644
--- a/libsanitizer/asan/asan_globals.cpp
+++ b/libsanitizer/asan/asan_globals.cpp
@@ -154,6 +154,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
}
}
+// Check ODR violation for given global G by checking if it's already poisoned.
+// We use this method in case compiler doesn't use private aliases for global
+// variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+ if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+ // This check may not be enough: if the first global is much larger
+ // the entire redzone of the second global may be within the first global.
+ for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+ if (g->beg == l->g->beg &&
+ (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+ !IsODRViolationSuppressed(g->name))
+ ReportODRViolation(g, FindRegistrationSite(g),
+ l->g, FindRegistrationSite(l->g));
+ }
+ }
+}
+
// Clang provides two different ways for global variables protection:
// it can poison the global itself or its private alias. In former
// case we may poison same symbol multiple times, that can help us to
@@ -199,6 +216,8 @@ static void RegisterGlobal(const Global *g) {
// where two globals with the same name are defined in different modules.
if (UseODRIndicator(g))
CheckODRViolationViaIndicator(g);
+ else
+ CheckODRViolationViaPoisoning(g);
}
if (CanPoisonMemory())
PoisonRedZones(*g);
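
The poisoning-based check added above fires when the global being registered already lies in poisoned shadow, i.e. another instrumented module has registered a global at the same address. The classic trigger is roughly the following (a hypothetical header, shown only to illustrate the failure mode):

  // config.h, included and compiled into two instrumented shared libraries
  // that are loaded into the same process:
  int config_table[16];  // a definition (not a declaration) with external linkage;
                         // both copies resolve to one address, so the second
                         // registration finds the redzone already poisoned.
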
diff --git a/libsanitizer/asan/asan_interceptors.h b/libsanitizer/asan/asan_interceptors.h
index b7a85fe..8e95256 100644
--- a/libsanitizer/asan/asan_interceptors.h
+++ b/libsanitizer/asan/asan_interceptors.h
@@ -13,9 +13,10 @@
#ifndef ASAN_INTERCEPTORS_H
#define ASAN_INTERCEPTORS_H
-#include "asan_internal.h"
#include "asan_interceptors_memintrinsics.h"
+#include "asan_internal.h"
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
namespace __asan {
@@ -80,12 +81,7 @@ void InitializePlatformInterceptors();
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
!SANITIZER_NETBSD
# define ASAN_INTERCEPT___CXA_THROW 1
-# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
- || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
-# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
-# else
-# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
-# endif
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
# else
@@ -116,8 +112,9 @@ void InitializePlatformInterceptors();
# define ASAN_INTERCEPT___STRDUP 0
#endif
-#if SANITIZER_LINUX && (defined(__arm__) || defined(__aarch64__) || \
- defined(__i386__) || defined(__x86_64__))
+#if SANITIZER_LINUX && \
+ (defined(__arm__) || defined(__aarch64__) || defined(__i386__) || \
+ defined(__x86_64__) || SANITIZER_RISCV64)
# define ASAN_INTERCEPT_VFORK 1
#else
# define ASAN_INTERCEPT_VFORK 0
diff --git a/libsanitizer/asan/asan_interceptors_vfork.S b/libsanitizer/asan/asan_interceptors_vfork.S
index 90a169d..3ae5503 100644
--- a/libsanitizer/asan/asan_interceptors_vfork.S
+++ b/libsanitizer/asan/asan_interceptors_vfork.S
@@ -5,8 +5,9 @@
#define COMMON_INTERCEPTOR_HANDLE_VFORK __asan_handle_vfork
#include "sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S"
-#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
#endif
NO_EXEC_STACK_DIRECTIVE
diff --git a/libsanitizer/asan/asan_interface_internal.h b/libsanitizer/asan/asan_interface_internal.h
index f14cbbcb..3e6e660 100644
--- a/libsanitizer/asan/asan_interface_internal.h
+++ b/libsanitizer/asan/asan_interface_internal.h
@@ -173,8 +173,8 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- const char* __asan_default_options();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const char *__asan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr __asan_shadow_memory_dynamic_address;
diff --git a/libsanitizer/asan/asan_internal.h b/libsanitizer/asan/asan_internal.h
index 72a4c3f..cfb5492 100644
--- a/libsanitizer/asan/asan_internal.h
+++ b/libsanitizer/asan/asan_internal.h
@@ -83,6 +83,16 @@ void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();
+// Unpoisons platform-specific stacks.
+// Returns true if all stacks have been unpoisoned.
+bool PlatformUnpoisonStacks();
+
+// asan_rtl.cpp
+// Unpoison a region containing a stack.
+// Performs a sanity check and warns if the bounds don't look right.
+// The warning contains the type string to identify the stack type.
+void UnpoisonStack(uptr bottom, uptr top, const char *type);
+
// asan_thread.cpp
AsanThread *CreateMainThread();
@@ -108,8 +118,6 @@ void AppendToErrorMessageBuffer(const char *buffer);
void *AsanDlSymNext(const char *sym);
-void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
-
// Returns `true` iff most of ASan init process should be skipped due to the
// ASan library being loaded via `dlopen()`. Platforms may perform any
// `dlopen()` specific initialization inside this function.
diff --git a/libsanitizer/asan/asan_linux.cpp b/libsanitizer/asan/asan_linux.cpp
index ce5e873..fb1a442 100644
--- a/libsanitizer/asan/asan_linux.cpp
+++ b/libsanitizer/asan/asan_linux.cpp
@@ -87,25 +87,12 @@ void *AsanDoesNotSupportStaticLinkage() {
return &_DYNAMIC; // defined in link.h
}
-static void UnmapFromTo(uptr from, uptr to) {
- CHECK(to >= from);
- if (to == from) return;
- uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
- if (UNLIKELY(internal_iserror(res))) {
- Report(
- "ERROR: AddresSanitizer failed to unmap 0x%zx (%zd) bytes at address "
- "%p\n",
- to - from, to - from, from);
- CHECK("unable to unmap" && 0);
- }
-}
-
#if ASAN_PREMAP_SHADOW
-uptr FindPremappedShadowStart() {
+uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
uptr granularity = GetMmapGranularity();
uptr shadow_start = reinterpret_cast<uptr>(&__asan_shadow);
uptr premap_shadow_size = PremapShadowSize();
- uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
+ uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
// We may have mapped too much. Release extra memory.
UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
return shadow_start;
@@ -113,31 +100,26 @@ uptr FindPremappedShadowStart() {
#endif
uptr FindDynamicShadowStart() {
+ uptr shadow_size_bytes = MemToShadowSize(kHighMemEnd);
#if ASAN_PREMAP_SHADOW
if (!PremapShadowFailed())
- return FindPremappedShadowStart();
+ return FindPremappedShadowStart(shadow_size_bytes);
#endif
- uptr granularity = GetMmapGranularity();
- uptr alignment = granularity * 8;
- uptr left_padding = granularity;
- uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
- uptr map_size = shadow_size + left_padding + alignment;
-
- uptr map_start = (uptr)MmapNoAccess(map_size);
- CHECK_NE(map_start, ~(uptr)0);
-
- uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
- UnmapFromTo(map_start, shadow_start - left_padding);
- UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
-
- return shadow_start;
+ return MapDynamicShadow(shadow_size_bytes, SHADOW_SCALE,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may not be
+ // page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
#if SANITIZER_ANDROID
// FIXME: should we do anything for Android?
void AsanCheckDynamicRTPrereqs() {}
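
FlushUnneededASanShadowMemory is now defined per platform next to the other mapping code. The comment's point is that shadow memory is one eighth of the application range, so the shadow of a page-aligned region is generally not page-aligned itself. A small sketch of the arithmetic, assuming the typical Linux/x86_64 constants (SHADOW_SCALE == 3, shadow offset 0x7fff8000):

  constexpr unsigned long kShadowScale  = 3;
  constexpr unsigned long kShadowOffset = 0x7fff8000ul;  // typical Linux/x86_64 value
  inline unsigned long MemToShadowSketch(unsigned long addr) {
    return (addr >> kShadowScale) + kShadowOffset;
  }
  // [p, p + size) maps to a shadow range one eighth as large, so
  // ReleaseMemoryPagesToOS can only drop the whole pages that lie strictly
  // inside [MemToShadowSketch(p), MemToShadowSketch(p + size)).
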
diff --git a/libsanitizer/asan/asan_mac.cpp b/libsanitizer/asan/asan_mac.cpp
index a8d3f5d..c695054 100644
--- a/libsanitizer/asan/asan_mac.cpp
+++ b/libsanitizer/asan/asan_mac.cpp
@@ -55,46 +55,8 @@ void *AsanDoesNotSupportStaticLinkage() {
}
uptr FindDynamicShadowStart() {
- uptr granularity = GetMmapGranularity();
- uptr alignment = 8 * granularity;
- uptr left_padding = granularity;
- uptr space_size = kHighShadowEnd + left_padding;
-
- uptr largest_gap_found = 0;
- uptr max_occupied_addr = 0;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
- uptr shadow_start =
- FindAvailableMemoryRange(space_size, alignment, granularity,
- &largest_gap_found, &max_occupied_addr);
- // If the shadow doesn't fit, restrict the address space to make it fit.
- if (shadow_start == 0) {
- VReport(
- 2,
- "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
- largest_gap_found, max_occupied_addr);
- uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment);
- if (new_max_vm < max_occupied_addr) {
- Report("Unable to find a memory range for dynamic shadow.\n");
- Report(
- "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
- "new_max_vm = %p\n",
- space_size, largest_gap_found, max_occupied_addr, new_max_vm);
- CHECK(0 && "cannot place shadow");
- }
- RestrictMemoryToMaxAddress(new_max_vm);
- kHighMemEnd = new_max_vm - 1;
- space_size = kHighShadowEnd + left_padding;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
- shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
- nullptr, nullptr);
- if (shadow_start == 0) {
- Report("Unable to find a memory range after restricting VM.\n");
- CHECK(0 && "cannot place shadow after restricting vm");
- }
- }
- CHECK_NE((uptr)0, shadow_start);
- CHECK(IsAligned(shadow_start, alignment));
- return shadow_start;
+ return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
// No-op. Mac does not support static linkage anyway.
@@ -127,6 +89,12 @@ void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
op(globals, size / sizeof(__asan_global));
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may not be
+ // page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
diff --git a/libsanitizer/asan/asan_malloc_linux.cpp b/libsanitizer/asan/asan_malloc_linux.cpp
index faa8968..9c3f0a5 100644
--- a/libsanitizer/asan/asan_malloc_linux.cpp
+++ b/libsanitizer/asan/asan_malloc_linux.cpp
@@ -34,7 +34,7 @@ static uptr last_dlsym_alloc_size_in_words;
static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024;
static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-static INLINE bool IsInDlsymAllocPool(const void *ptr) {
+static inline bool IsInDlsymAllocPool(const void *ptr) {
uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]);
}
@@ -95,12 +95,12 @@ bool IsFromLocalPool(const void *ptr) {
}
#endif
-static INLINE bool MaybeInDlsym() {
+static inline bool MaybeInDlsym() {
// Fuchsia doesn't use dlsym-based interceptors.
return !SANITIZER_FUCHSIA && asan_init_is_running;
}
-static INLINE bool UseLocalPool() {
+static inline bool UseLocalPool() {
return EarlyMalloc() || MaybeInDlsym();
}
@@ -120,19 +120,19 @@ static void *ReallocFromLocalPool(void *ptr, uptr size) {
}
INTERCEPTOR(void, free, void *ptr) {
- GET_STACK_TRACE_FREE;
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
DeallocateFromLocalPool(ptr);
return;
}
+ GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
#if SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void, cfree, void *ptr) {
- GET_STACK_TRACE_FREE;
if (UNLIKELY(IsInDlsymAllocPool(ptr)))
return;
+ GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
#endif // SANITIZER_INTERCEPT_CFREE
diff --git a/libsanitizer/asan/asan_malloc_local.h b/libsanitizer/asan/asan_malloc_local.h
index 3f784b9..e2c9be0 100644
--- a/libsanitizer/asan/asan_malloc_local.h
+++ b/libsanitizer/asan/asan_malloc_local.h
@@ -17,7 +17,7 @@
#include "sanitizer_common/sanitizer_platform.h"
#include "asan_internal.h"
-static INLINE bool EarlyMalloc() {
+static inline bool EarlyMalloc() {
return SANITIZER_RTEMS &&
(!__asan::asan_inited || __asan::asan_init_is_running);
}
diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h
index 09be904..f239c3e 100644
--- a/libsanitizer/asan/asan_mapping.h
+++ b/libsanitizer/asan/asan_mapping.h
@@ -79,6 +79,20 @@
// || `[0x1000000000, 0x11ffffffff]` || lowshadow ||
// || `[0x0000000000, 0x0fffffffff]` || lowmem ||
//
+// RISC-V has only 38 bits for the task size.
+// The low memory size is set with kRiscv64_ShadowOffset64 in
+// compiler-rt/lib/asan/asan_allocator.h and in
+// llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp with
+// kRiscv64_ShadowOffset64. The high memory top boundary is set with
+// GetMaxVirtualAddress() in
+// compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp.
+// Default Linux/RISCV64 Sv39/Sv48 mapping:
+// || `[0x000820000000, 0x003fffffffff]` || HighMem ||
+// || `[0x000124000000, 0x00081fffffff]` || HighShadow ||
+// || `[0x000024000000, 0x000123ffffff]` || ShadowGap ||
+// || `[0x000020000000, 0x000023ffffff]` || LowShadow ||
+// || `[0x000000000000, 0x00001fffffff]` || LowMem ||
+//
// Default Linux/AArch64 (42-bit VMA) mapping:
// || `[0x10000000000, 0x3ffffffffff]` || highmem ||
// || `[0x0a000000000, 0x0ffffffffff]` || highshadow ||
@@ -161,9 +175,10 @@ static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset =
0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G.
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
+static const u64 kRiscv64_ShadowOffset64 = 0x20000000;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
-static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
+static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
@@ -206,6 +221,10 @@ static const u64 kMyriadCacheBitMask32 = 0x40000000ULL;
#else
# if SANITIZER_IOS
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# elif SANITIZER_MAC && defined(__aarch64__)
+# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# elif SANITIZER_RISCV64
+# define SHADOW_OFFSET kRiscv64_ShadowOffset64
# elif defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64
# elif defined(__powerpc64__)
@@ -355,6 +374,8 @@ static inline bool AddrIsInShadowGap(uptr a) {
namespace __asan {
+static inline uptr MemToShadowSize(uptr size) { return size >> SHADOW_SCALE; }
+
static inline bool AddrIsInMem(uptr a) {
PROFILE_ASAN_MAPPING();
return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
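
The new MemToShadowSize helper and the RISC-V constants can be cross-checked against the layout table in the comment above; a small compile-time sketch, assuming SHADOW_SCALE == 3 and SHADOW_OFFSET == kRiscv64_ShadowOffset64 (0x20000000):

  constexpr unsigned long long RiscvShadow(unsigned long long a) {
    return (a >> 3) + 0x20000000ull;
  }
  static_assert(RiscvShadow(0x000000000000ull) == 0x000020000000ull, "LowShadow begin");
  static_assert(RiscvShadow(0x00001fffffffull) == 0x000023ffffffull, "LowShadow end");
  static_assert(RiscvShadow(0x000820000000ull) == 0x000124000000ull, "HighShadow begin");
  static_assert(RiscvShadow(0x003fffffffffull) == 0x00081fffffffull, "HighShadow end");
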
diff --git a/libsanitizer/asan/asan_poisoning.cpp b/libsanitizer/asan/asan_poisoning.cpp
index f3fbe68..44f872e 100644
--- a/libsanitizer/asan/asan_poisoning.cpp
+++ b/libsanitizer/asan/asan_poisoning.cpp
@@ -62,12 +62,6 @@ struct ShadowSegmentEndpoint {
}
};
-void FlushUnneededASanShadowMemory(uptr p, uptr size) {
- // Since asan's mapping is compacting, the shadow chunk may be
- // not page-aligned, so we only flush the page-aligned portion.
- ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
-}
-
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
uptr end = ptr + size;
if (Verbosity()) {
diff --git a/libsanitizer/asan/asan_posix.cpp b/libsanitizer/asan/asan_posix.cpp
index 920d216..d7f19d8 100644
--- a/libsanitizer/asan/asan_posix.cpp
+++ b/libsanitizer/asan/asan_posix.cpp
@@ -17,6 +17,7 @@
#include "asan_internal.h"
#include "asan_interceptors.h"
#include "asan_mapping.h"
+#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_libc.h"
@@ -24,6 +25,7 @@
#include "sanitizer_common/sanitizer_procmaps.h"
#include <pthread.h>
+#include <signal.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
@@ -37,6 +39,32 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
ReportDeadlySignal(sig);
}
+bool PlatformUnpoisonStacks() {
+ stack_t signal_stack;
+ CHECK_EQ(0, sigaltstack(nullptr, &signal_stack));
+ uptr sigalt_bottom = (uptr)signal_stack.ss_sp;
+ uptr sigalt_top = (uptr)((char *)signal_stack.ss_sp + signal_stack.ss_size);
+ // If we're executing on the signal alternate stack AND the Linux flag
+ // SS_AUTODISARM was used, then we cannot get the signal alternate stack
+ // bounds from sigaltstack -- sigaltstack's output looks just as if no
+ // alternate stack has ever been set up.
+ // We're always unpoisoning the signal alternate stack to support jumping
+ // between the default stack and signal alternate stack.
+ if (signal_stack.ss_flags != SS_DISABLE)
+ UnpoisonStack(sigalt_bottom, sigalt_top, "sigalt");
+
+ if (signal_stack.ss_flags != SS_ONSTACK)
+ return false;
+
+ // Since we're on the signal alternate stack, we cannot find the DEFAULT
+ // stack bottom using a local variable.
+ uptr default_bottom, tls_addr, tls_size, stack_size;
+ GetThreadStackAndTls(/*main=*/false, &default_bottom, &stack_size, &tls_addr,
+ &tls_size);
+ UnpoisonStack(default_bottom, default_bottom + stack_size, "default");
+ return true;
+}
+
// ---------------------- TSD ---------------- {{{1
#if SANITIZER_NETBSD && !ASAN_DYNAMIC
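
PlatformUnpoisonStacks gets the alternate-stack bounds from sigaltstack() instead of tracking them itself, which is why the SS_AUTODISARM caveat matters. For reference, a typical user-side setup whose bounds that query reports back (illustrative only, not runtime code):

  #include <signal.h>
  #include <stdlib.h>

  static void SetupAltStack() {
    stack_t ss;
    ss.ss_sp = malloc(SIGSTKSZ);   // bounds later visible via sigaltstack(nullptr, &out)
    ss.ss_size = SIGSTKSZ;
    ss.ss_flags = 0;               // SS_ONSTACK is reported by the kernel, not set here
    sigaltstack(&ss, nullptr);     // handlers installed with SA_ONSTACK will run here
  }
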
diff --git a/libsanitizer/asan/asan_premap_shadow.cpp b/libsanitizer/asan/asan_premap_shadow.cpp
index 7835e99..666bb9b 100644
--- a/libsanitizer/asan/asan_premap_shadow.cpp
+++ b/libsanitizer/asan/asan_premap_shadow.cpp
@@ -32,22 +32,8 @@ uptr PremapShadowSize() {
// Returns an address aligned to 8 pages, such that one page on the left and
// PremapShadowSize() bytes on the right of it are mapped r/o.
uptr PremapShadow() {
- uptr granularity = GetMmapGranularity();
- uptr alignment = granularity * 8;
- uptr left_padding = granularity;
- uptr shadow_size = PremapShadowSize();
- uptr map_size = shadow_size + left_padding + alignment;
-
- uptr map_start = (uptr)MmapNoAccess(map_size);
- CHECK_NE(map_start, ~(uptr)0);
-
- uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
- uptr shadow_end = shadow_start + shadow_size;
- internal_munmap(reinterpret_cast<void *>(map_start),
- shadow_start - left_padding - map_start);
- internal_munmap(reinterpret_cast<void *>(shadow_end),
- map_start + map_size - shadow_end);
- return shadow_start;
+ return MapDynamicShadow(PremapShadowSize(), /*mmap_alignment_scale*/ 3,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
bool PremapShadowFailed() {
diff --git a/libsanitizer/asan/asan_report.cpp b/libsanitizer/asan/asan_report.cpp
index 99e8678..4b4db1d 100644
--- a/libsanitizer/asan/asan_report.cpp
+++ b/libsanitizer/asan/asan_report.cpp
@@ -411,7 +411,7 @@ static bool IsInvalidPointerPair(uptr a1, uptr a2) {
return false;
}
-static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
+static inline void CheckForInvalidPointerPair(void *p1, void *p2) {
switch (flags()->detect_invalid_pointer_pairs) {
case 0:
return;
diff --git a/libsanitizer/asan/asan_rtems.cpp b/libsanitizer/asan/asan_rtems.cpp
index ecd568c..ea0b4ad 100644
--- a/libsanitizer/asan/asan_rtems.cpp
+++ b/libsanitizer/asan/asan_rtems.cpp
@@ -50,6 +50,12 @@ void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may not be
+ // page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
void InitializeAsanInterceptors() {}
@@ -64,6 +70,8 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
UNIMPLEMENTED();
}
+bool PlatformUnpoisonStacks() { return false; }
+
void EarlyInit() {
// Provide early initialization of shadow memory so that
// instrumented code running before full initialzation will not
diff --git a/libsanitizer/asan/asan_rtl.cpp b/libsanitizer/asan/asan_rtl.cpp
index 594d775..115733c 100644
--- a/libsanitizer/asan/asan_rtl.cpp
+++ b/libsanitizer/asan/asan_rtl.cpp
@@ -319,7 +319,7 @@ static void InitializeHighMemEnd() {
kHighMemEnd = GetMaxUserVirtualAddress();
// Increase kHighMemEnd to make sure it's properly
// aligned together with kHighMemBeg:
- kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
+ kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
#endif // !ASAN_FIXED_MAPPING
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
#endif // !SANITIZER_MYRIAD2
@@ -551,22 +551,33 @@ class AsanInitializer {
static AsanInitializer asan_initializer;
#endif // ASAN_DYNAMIC
-} // namespace __asan
-
-// ---------------------- Interface ---------------- {{{1
-using namespace __asan;
-
-void NOINLINE __asan_handle_no_return() {
- if (asan_init_is_running)
+void UnpoisonStack(uptr bottom, uptr top, const char *type) {
+ static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
+ if (top - bottom > kMaxExpectedCleanupSize) {
+ static bool reported_warning = false;
+ if (reported_warning)
+ return;
+ reported_warning = true;
+ Report(
+ "WARNING: ASan is ignoring requested __asan_handle_no_return: "
+ "stack type: %s top: %p; bottom %p; size: %p (%zd)\n"
+ "False positive error reports may follow\n"
+ "For details see "
+ "https://github.com/google/sanitizers/issues/189\n",
+ type, top, bottom, top - bottom, top - bottom);
return;
+ }
+ PoisonShadow(bottom, top - bottom, 0);
+}
- int local_stack;
- AsanThread *curr_thread = GetCurrentThread();
- uptr PageSize = GetPageSizeCached();
- uptr top, bottom;
- if (curr_thread) {
+static void UnpoisonDefaultStack() {
+ uptr bottom, top;
+
+ if (AsanThread *curr_thread = GetCurrentThread()) {
+ int local_stack;
+ const uptr page_size = GetPageSizeCached();
top = curr_thread->stack_top();
- bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1);
+ bottom = ((uptr)&local_stack - page_size) & ~(page_size - 1);
} else if (SANITIZER_RTEMS) {
// Give up On RTEMS.
return;
@@ -578,25 +589,31 @@ void NOINLINE __asan_handle_no_return() {
&tls_size);
top = bottom + stack_size;
}
- static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
- if (top - bottom > kMaxExpectedCleanupSize) {
- static bool reported_warning = false;
- if (reported_warning)
- return;
- reported_warning = true;
- Report("WARNING: ASan is ignoring requested __asan_handle_no_return: "
- "stack top: %p; bottom %p; size: %p (%zd)\n"
- "False positive error reports may follow\n"
- "For details see "
- "https://github.com/google/sanitizers/issues/189\n",
- top, bottom, top - bottom, top - bottom);
- return;
- }
- PoisonShadow(bottom, top - bottom, 0);
+
+ UnpoisonStack(bottom, top, "default");
+}
+
+static void UnpoisonFakeStack() {
+ AsanThread *curr_thread = GetCurrentThread();
if (curr_thread && curr_thread->has_fake_stack())
curr_thread->fake_stack()->HandleNoReturn();
}
+} // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan;
+
+void NOINLINE __asan_handle_no_return() {
+ if (asan_init_is_running)
+ return;
+
+ if (!PlatformUnpoisonStacks())
+ UnpoisonDefaultStack();
+
+ UnpoisonFakeStack();
+}
+
extern "C" void *__asan_extra_spill_area() {
AsanThread *t = GetCurrentThread();
CHECK(t);
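
For context, __asan_handle_no_return is the hook the compiler calls before noreturn functions: frames abandoned by longjmp, fibers, or similar are never unwound, so their stack poisoning must be cleared to avoid false positives (the issue the 64M warning above is about). Code that abandons stacks by hand can also call it directly; a small sketch, assuming the public <sanitizer/asan_interface.h> header:

  #include <sanitizer/asan_interface.h>  // declares __asan_handle_no_return()
  #include <setjmp.h>

  static jmp_buf env;

  static void bail_out() {
    // The frames between here and the matching setjmp are abandoned, not
    // unwound; telling ASan clears their stale stack poisoning.
    __asan_handle_no_return();
    longjmp(env, 1);
  }
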
diff --git a/libsanitizer/asan/asan_shadow_setup.cpp b/libsanitizer/asan/asan_shadow_setup.cpp
index 1732493..2ead442 100644
--- a/libsanitizer/asan/asan_shadow_setup.cpp
+++ b/libsanitizer/asan/asan_shadow_setup.cpp
@@ -22,24 +22,6 @@
namespace __asan {
-// ---------------------- mmap -------------------- {{{1
-// Reserve memory range [beg, end].
-// We need to use inclusive range because end+1 may not be representable.
-void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
- CHECK_EQ((beg % GetMmapGranularity()), 0);
- CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
- uptr size = end - beg + 1;
- DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
- if (!MmapFixedSuperNoReserve(beg, size, name)) {
- Report(
- "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
- "Perhaps you're using ulimit -v\n",
- size);
- Abort();
- }
- if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size);
-}
-
static void ProtectGap(uptr addr, uptr size) {
if (!flags()->protect_shadow_gap) {
// The shadow gap is unprotected, so there is a chance that someone
@@ -57,30 +39,13 @@ static void ProtectGap(uptr addr, uptr size) {
"unprotected gap shadow");
return;
}
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res) return;
- // A few pages at the start of the address space can not be protected.
- // But we really want to protect as much as possible, to prevent this memory
- // being returned as a result of a non-FIXED mmap().
- if (addr == kZeroBaseShadowStart) {
- uptr step = GetMmapGranularity();
- while (size > step && addr < kZeroBaseMaxShadowStart) {
- addr += step;
- size -= step;
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res) return;
- }
- }
-
- Report(
- "ERROR: Failed to protect the shadow gap. "
- "ASan cannot proceed correctly. ABORTING.\n");
- DumpProcessMap();
- Die();
+ __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
+ kZeroBaseMaxShadowStart);
}
static void MaybeReportLinuxPIEBug() {
-#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__aarch64__))
+#if SANITIZER_LINUX && \
+ (defined(__x86_64__) || defined(__aarch64__) || SANITIZER_RISCV64)
Report("This might be related to ELF_ET_DYN_BASE change in Linux 4.12.\n");
Report(
"See https://github.com/google/sanitizers/issues/856 for possible "
@@ -99,8 +64,6 @@ void InitializeShadowMemory() {
// |kDefaultShadowSentinel|.
bool full_shadow_is_available = false;
if (shadow_start == kDefaultShadowSentinel) {
- __asan_shadow_memory_dynamic_address = 0;
- CHECK_EQ(0, kLowShadowBeg);
shadow_start = FindDynamicShadowStart();
if (SANITIZER_LINUX) full_shadow_is_available = true;
}
diff --git a/libsanitizer/asan/asan_stack.h b/libsanitizer/asan/asan_stack.h
index 4089d3d..47ca85a 100644
--- a/libsanitizer/asan/asan_stack.h
+++ b/libsanitizer/asan/asan_stack.h
@@ -51,11 +51,6 @@ u32 GetMallocContextSize();
stack.Unwind(pc, bp, nullptr, \
common_flags()->fast_unwind_on_fatal)
-#define GET_STACK_TRACE_SIGNAL(sig) \
- BufferedStackTrace stack; \
- stack.Unwind((sig).pc, (sig).bp, (sig).context, \
- common_flags()->fast_unwind_on_fatal)
-
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
diff --git a/libsanitizer/asan/asan_win.cpp b/libsanitizer/asan/asan_win.cpp
index 417892a..8044ae1 100644
--- a/libsanitizer/asan/asan_win.cpp
+++ b/libsanitizer/asan/asan_win.cpp
@@ -191,6 +191,12 @@ void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may not be
+ // page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
// ---------------------- TSD ---------------- {{{
static bool tsd_key_inited = false;
@@ -247,15 +253,8 @@ void *AsanDoesNotSupportStaticLinkage() {
}
uptr FindDynamicShadowStart() {
- uptr granularity = GetMmapGranularity();
- uptr alignment = 8 * granularity;
- uptr left_padding = granularity;
- uptr space_size = kHighShadowEnd + left_padding;
- uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
- granularity, nullptr, nullptr);
- CHECK_NE((uptr)0, shadow_start);
- CHECK(IsAligned(shadow_start, alignment));
- return shadow_start;
+ return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
void AsanCheckDynamicRTPrereqs() {}
@@ -268,6 +267,8 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
void AsanOnDeadlySignal(int, void *siginfo, void *context) { UNIMPLEMENTED(); }
+bool PlatformUnpoisonStacks() { return false; }
+
#if SANITIZER_WINDOWS64
// Exception handler for dealing with shadow memory.
static LONG CALLBACK