Diffstat (limited to 'libsanitizer/asan/asan_fake_stack.cc')
-rw-r--r--  libsanitizer/asan/asan_fake_stack.cc  283
1 file changed, 154 insertions(+), 129 deletions(-)
diff --git a/libsanitizer/asan/asan_fake_stack.cc b/libsanitizer/asan/asan_fake_stack.cc
index 1fc0415..b9cce88 100644
--- a/libsanitizer/asan/asan_fake_stack.cc
+++ b/libsanitizer/asan/asan_fake_stack.cc
@@ -10,170 +10,195 @@
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
+#include "asan_poisoning.h"
#include "asan_thread.h"
-#include "asan_thread_registry.h"
namespace __asan {
-FakeStack::FakeStack() {
- CHECK(REAL(memset) != 0);
- REAL(memset)(this, 0, sizeof(*this));
+static const u64 kMagic1 = kAsanStackAfterReturnMagic;
+static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
+static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
+static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;
+
+// For small size classes, inline PoisonShadow for better performance.
+ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
+ CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3.
+ u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
+ if (class_id <= 6) {
+ for (uptr i = 0; i < (1U << class_id); i++)
+ shadow[i] = magic;
+ } else {
+ // The size class is too big; it's cheaper to poison only size bytes.
+ PoisonShadow(ptr, size, static_cast<u8>(magic));
+ }
}
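
The kMagic constants above replicate the single shadow byte kAsanStackAfterReturnMagic (0xf5) across a u64, so the inline path can stamp eight shadow bytes per store. A minimal standalone sketch of that replication (names local to this example):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t kMagic1 = 0xf5;              // one shadow byte
      const uint64_t kMagic2 = (kMagic1 << 8)  | kMagic1;
      const uint64_t kMagic4 = (kMagic2 << 16) | kMagic2;
      const uint64_t kMagic8 = (kMagic4 << 32) | kMagic4;
      // Each doubling step copies the pattern into the next-wider half,
      // yielding eight poisoned shadow bytes written in a single store.
      assert(kMagic8 == 0xf5f5f5f5f5f5f5f5ULL);
      return 0;
    }

With SHADOW_SCALE == 3, the loop of (1 << class_id) u64 stores covers 64 << class_id frame bytes, so the cap at class_id <= 6 limits the inline path to 64 stores (a 4 KiB frame); larger classes fall back to PoisonShadow.
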
-bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
- uptr mem = allocated_size_classes_[size_class];
- uptr size = ClassMmapSize(size_class);
- bool res = mem && addr >= mem && addr < mem + size;
+FakeStack *FakeStack::Create(uptr stack_size_log) {
+ static uptr kMinStackSizeLog = 16;
+ static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
+ if (stack_size_log < kMinStackSizeLog)
+ stack_size_log = kMinStackSizeLog;
+ if (stack_size_log > kMaxStackSizeLog)
+ stack_size_log = kMaxStackSizeLog;
+ FakeStack *res = reinterpret_cast<FakeStack *>(
+ MmapOrDie(RequiredSize(stack_size_log), "FakeStack"));
+ res->stack_size_log_ = stack_size_log;
+ if (flags()->verbosity) {
+ u8 *p = reinterpret_cast<u8 *>(res);
+ Report("T%d: FakeStack created: %p -- %p stack_size_log: %zd \n",
+ GetCurrentTidOrInvalid(), p,
+ p + FakeStack::RequiredSize(stack_size_log), stack_size_log);
+ }
return res;
}
-uptr FakeStack::AddrIsInFakeStack(uptr addr) {
- for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
- if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
- }
- return 0;
+void FakeStack::Destroy() {
+ PoisonAll(0);
+ UnmapOrDie(this, RequiredSize(stack_size_log_));
}
-// We may want to compute this during compilation.
-inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
- uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
- uptr log = Log2(rounded_size);
- CHECK(alloc_size <= (1UL << log));
- if (!(alloc_size > (1UL << (log-1)))) {
- Printf("alloc_size %zu log %zu\n", alloc_size, log);
- }
- CHECK(alloc_size > (1UL << (log-1)));
- uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
- CHECK(res < kNumberOfSizeClasses);
- CHECK(ClassSize(res) >= rounded_size);
- return res;
+void FakeStack::PoisonAll(u8 magic) {
+ PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
+ magic);
}
-void FakeFrameFifo::FifoPush(FakeFrame *node) {
- CHECK(node);
- node->next = 0;
- if (first_ == 0 && last_ == 0) {
- first_ = last_ = node;
- } else {
- CHECK(first_);
- CHECK(last_);
- last_->next = node;
- last_ = node;
+ALWAYS_INLINE USED
+FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
+ uptr real_stack) {
+ CHECK_LT(class_id, kNumberOfSizeClasses);
+ if (needs_gc_)
+ GC(real_stack);
+ uptr &hint_position = hint_position_[class_id];
+ const int num_iter = NumberOfFrames(stack_size_log, class_id);
+ u8 *flags = GetFlags(stack_size_log, class_id);
+ for (int i = 0; i < num_iter; i++) {
+ uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
+ // This part is tricky. On one hand, checking and setting flags[pos]
+ // should be atomic to ensure async-signal safety. But on the other hand,
+ // if the signal arrives between checking and setting flags[pos], the
+ // signal handler's fake stack will start from a different hint_position
+ // and so will not touch this particular byte. So, it is safe to do this
+ // with regular non-atomic load and store (at least I was not able to make
+ // this code crash).
+ if (flags[pos]) continue;
+ flags[pos] = 1;
+ FakeFrame *res = reinterpret_cast<FakeFrame *>(
+ GetFrame(stack_size_log, class_id, pos));
+ res->real_stack = real_stack;
+ *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
+ return res;
}
+ return 0; // We are out of fake stack.
}
-FakeFrame *FakeFrameFifo::FifoPop() {
- CHECK(first_ && last_ && "Exhausted fake stack");
- FakeFrame *res = 0;
- if (first_ == last_) {
- res = first_;
- first_ = last_ = 0;
- } else {
- res = first_;
- first_ = first_->next;
- }
- return res;
+uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
+ uptr stack_size_log = this->stack_size_log();
+ uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
+ uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
+ if (ptr < beg || ptr >= end) return 0;
+ uptr class_id = (ptr - beg) >> stack_size_log;
+ uptr base = beg + (class_id << stack_size_log);
+ CHECK_LE(base, ptr);
+ CHECK_LT(ptr, base + (1UL << stack_size_log));
+ uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
+ return base + pos * BytesInSizeClass(class_id);
}
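
To make the address arithmetic above concrete, here is a worked example with assumed parameters (stack_size_log = 16, so each size class spans 64 KiB, and kMinStackFrameSizeLog = 6, i.e. 64-byte class-0 frames; all addresses are illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t stack_size_log = 16;        // each class spans 64 KiB
      const uintptr_t kMinStackFrameSizeLog = 6;  // class 0 = 64-byte frames
      const uintptr_t beg = 0x100000;             // start of class-0 frames
      const uintptr_t ptr = 0x112345;             // address being looked up

      uintptr_t class_id = (ptr - beg) >> stack_size_log;   // -> 1
      uintptr_t base = beg + (class_id << stack_size_log);  // -> 0x110000
      uintptr_t pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
      uintptr_t frame_size = 1UL << (kMinStackFrameSizeLog + class_id);
      // Prints: class 1, frame 70 starts at 0x112300
      printf("class %zu, frame %zu starts at 0x%zx\n", (size_t)class_id,
             (size_t)pos, (size_t)(base + pos * frame_size));
      return 0;
    }
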
-void FakeStack::Init(uptr stack_size) {
- stack_size_ = stack_size;
- alive_ = true;
+void FakeStack::HandleNoReturn() {
+ needs_gc_ = true;
}
-void FakeStack::Cleanup() {
- alive_ = false;
- for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
- uptr mem = allocated_size_classes_[i];
- if (mem) {
- PoisonShadow(mem, ClassMmapSize(i), 0);
- allocated_size_classes_[i] = 0;
- UnmapOrDie((void*)mem, ClassMmapSize(i));
+// When a throw, longjmp or some such happens, we don't call OnFree() and
+// as a result may leak one or more fake frames, but the good news is that
+// we are notified about all such events by HandleNoReturn().
+// If we recently had such a no-return event we need to collect garbage frames.
+// We do it based on their 'real_stack' values -- everything that is lower
+// than the current real_stack is garbage.
+NOINLINE void FakeStack::GC(uptr real_stack) {
+ uptr collected = 0;
+ for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
+ u8 *flags = GetFlags(stack_size_log(), class_id);
+ for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
+ i++) {
+ if (flags[i] == 0) continue; // not allocated.
+ FakeFrame *ff = reinterpret_cast<FakeFrame *>(
+ GetFrame(stack_size_log(), class_id, i));
+ if (ff->real_stack < real_stack) {
+ flags[i] = 0;
+ collected++;
+ }
}
}
+ needs_gc_ = false;
}
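
A minimal standalone model of this sweep (hypothetical 64-bit addresses; assumes a downward-growing native stack, so deeper calls record smaller real_stack values):

    #include <cstdint>
    #include <cstdio>

    struct ToyFrame { uintptr_t real_stack; };

    int main() {
      // Three allocated fake frames, flagged live.
      uint8_t flags[3] = {1, 1, 1};
      ToyFrame frames[3] = {
        {0x7fff00003000},  // shallower than the current frame: still live
        {0x7fff00002000},  // deeper call exited via longjmp: garbage
        {0x7fff00001000},  // deeper still: garbage
      };
      uintptr_t current_real_stack = 0x7fff00002800;
      for (int i = 0; i < 3; i++)
        if (flags[i] && frames[i].real_stack < current_real_stack)
          flags[i] = 0;  // collect: its native frame no longer exists
      for (int i = 0; i < 3; i++)
        printf("frame %d: %s\n", i, flags[i] ? "live" : "collected");
      return 0;
    }
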
-uptr FakeStack::ClassMmapSize(uptr size_class) {
- return RoundUpToPowerOfTwo(stack_size_);
-}
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+static THREADLOCAL FakeStack *fake_stack_tls;
-void FakeStack::AllocateOneSizeClass(uptr size_class) {
- CHECK(ClassMmapSize(size_class) >= GetPageSizeCached());
- uptr new_mem = (uptr)MmapOrDie(
- ClassMmapSize(size_class), __FUNCTION__);
- // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
- // asanThreadRegistry().GetCurrent()->tid(),
- // size_class, new_mem, new_mem + ClassMmapSize(size_class),
- // ClassMmapSize(size_class));
- uptr i;
- for (i = 0; i < ClassMmapSize(size_class);
- i += ClassSize(size_class)) {
- size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
- }
- CHECK(i == ClassMmapSize(size_class));
- allocated_size_classes_[size_class] = new_mem;
+FakeStack *GetTLSFakeStack() {
+ return fake_stack_tls;
+}
+void SetTLSFakeStack(FakeStack *fs) {
+ fake_stack_tls = fs;
+}
+#else
+FakeStack *GetTLSFakeStack() { return 0; }
+void SetTLSFakeStack(FakeStack *fs) { }
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+static FakeStack *GetFakeStack() {
+ AsanThread *t = GetCurrentThread();
+ if (!t) return 0;
+ return t->fake_stack();
}
-uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
- if (!alive_) return real_stack;
- CHECK(size <= kMaxStackMallocSize && size > 1);
- uptr size_class = ComputeSizeClass(size);
- if (!allocated_size_classes_[size_class]) {
- AllocateOneSizeClass(size_class);
- }
- FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
- CHECK(fake_frame);
- fake_frame->size_minus_one = size - 1;
- fake_frame->real_stack = real_stack;
- while (FakeFrame *top = call_stack_.top()) {
- if (top->real_stack > real_stack) break;
- call_stack_.LifoPop();
- DeallocateFrame(top);
- }
- call_stack_.LifoPush(fake_frame);
- uptr ptr = (uptr)fake_frame;
- PoisonShadow(ptr, size, 0);
- return ptr;
+static FakeStack *GetFakeStackFast() {
+ if (FakeStack *fs = GetTLSFakeStack())
+ return fs;
+ if (!__asan_option_detect_stack_use_after_return)
+ return 0;
+ return GetFakeStack();
}
-void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
- CHECK(alive_);
- uptr size = fake_frame->size_minus_one + 1;
- uptr size_class = ComputeSizeClass(size);
- CHECK(allocated_size_classes_[size_class]);
- uptr ptr = (uptr)fake_frame;
- CHECK(AddrIsInSizeClass(ptr, size_class));
- CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
- size_classes_[size_class].FifoPush(fake_frame);
+ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) {
+ FakeStack *fs = GetFakeStackFast();
+ if (!fs) return real_stack;
+ FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
+ if (!ff)
+ return real_stack; // Out of fake stack, return the real one.
+ uptr ptr = reinterpret_cast<uptr>(ff);
+ SetShadow(ptr, size, class_id, 0);
+ return ptr;
}
-void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
- FakeFrame *fake_frame = (FakeFrame*)ptr;
- CHECK(fake_frame->magic = kRetiredStackFrameMagic);
- CHECK(fake_frame->descr != 0);
- CHECK(fake_frame->size_minus_one == size - 1);
- PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
+ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
+ if (ptr == real_stack)
+ return;
+ FakeStack::Deallocate(ptr, class_id);
+ SetShadow(ptr, size, class_id, kMagic8);
}
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
-using namespace __asan; // NOLINT
-
-uptr __asan_stack_malloc(uptr size, uptr real_stack) {
- if (!flags()->use_fake_stack) return real_stack;
- AsanThread *t = asanThreadRegistry().GetCurrent();
- if (!t) {
- // TSD is gone, use the real stack.
- return real_stack;
+#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
+ __asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \
+ return __asan::OnMalloc(class_id, size, real_stack); \
+ } \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
+ uptr ptr, uptr size, uptr real_stack) { \
+ __asan::OnFree(ptr, class_id, size, real_stack); \
}
- uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
- // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
- return ptr;
-}
-void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
- if (!flags()->use_fake_stack) return;
- if (ptr != real_stack) {
- FakeStack::OnFree(ptr, size, real_stack);
- }
-}
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
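
For context, the compiler's use-after-return instrumentation pairs these entry points around each function body. Conceptually, a function with a 64-byte frame (class 0) is rewritten along these lines (a hand-written sketch, not actual compiler output; real instrumentation also relocates locals into the frame and poisons redzones):

    #include <cstdint>

    // Runtime entry points as declared by the macro above (uptr is a
    // pointer-sized integer; uintptr_t is used here as its stand-in).
    extern "C" uintptr_t __asan_stack_malloc_0(uintptr_t size,
                                               uintptr_t real_stack);
    extern "C" void __asan_stack_free_0(uintptr_t ptr, uintptr_t size,
                                        uintptr_t real_stack);

    void instrumented_foo() {
      char frame[64];  // the real (native) frame
      uintptr_t real_stack = reinterpret_cast<uintptr_t>(frame);
      // Try to place locals on the fake stack; returns real_stack when the
      // fake stack is disabled or exhausted.
      uintptr_t base = __asan_stack_malloc_0(64, real_stack);
      char *local = reinterpret_cast<char *>(base);
      local[0] = 42;  // ... function body uses 'local' ...
      // No-op when base == real_stack; otherwise releases the fake frame
      // and repoisons its shadow with the after-return magic (kMagic8).
      __asan_stack_free_0(base, 64, real_stack);
    }
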