Diffstat (limited to 'libsanitizer/sanitizer_common/sanitizer_allocator.cc')
 libsanitizer/sanitizer_common/sanitizer_allocator.cc | 105 +++++++++++++-----
 1 file changed, 87 insertions(+), 18 deletions(-)
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.cc b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
index a54de9d..9d38e94 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
@@ -7,44 +7,103 @@
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
-// This allocator that is used inside run-times.
+// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//
+#include "sanitizer_allocator.h"
+#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
-// FIXME: We should probably use more low-level allocator that would
-// mmap some pages and split them into chunks to fulfill requests.
-#if defined(__linux__) && !defined(__ANDROID__)
-extern "C" void *__libc_malloc(__sanitizer::uptr size);
+namespace __sanitizer {
+
+// ThreadSanitizer for Go uses libc malloc/free.
+#if defined(SANITIZER_GO)
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
+extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
-# define LIBC_MALLOC __libc_malloc
-# define LIBC_FREE __libc_free
-#else // __linux__ && !ANDROID
-# include <stdlib.h>
-# define LIBC_MALLOC malloc
-# define LIBC_FREE free
-#endif // __linux__ && !ANDROID
+# define LIBC_MALLOC __libc_malloc
+# define LIBC_FREE __libc_free
+# else
+# include <stdlib.h>
+# define LIBC_MALLOC malloc
+# define LIBC_FREE free
+# endif
-namespace __sanitizer {
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
+ (void)cache;
+ return LIBC_MALLOC(size);
+}
+
+static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
+ (void)cache;
+ LIBC_FREE(ptr);
+}
+
+InternalAllocator *internal_allocator() {
+ return 0;
+}
+
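In the Go branch the cache parameter is deliberately ignored, and internal_allocator() returning 0 signals that no allocator instance exists: every internal allocation degenerates to a plain libc call. A sketch of the effect on Linux (illustrative, not part of the patch):

    void *p = RawInternalAlloc(32, 0);  // forwards to __libc_malloc(32)
    RawInternalFree(p, 0);              // forwards to __libc_free(p)
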
+#else // SANITIZER_GO
+
+static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
+static atomic_uint8_t internal_allocator_initialized;
+static StaticSpinMutex internal_alloc_init_mu;
+
+static InternalAllocatorCache internal_allocator_cache;
+static StaticSpinMutex internal_allocator_cache_mu;
+
+InternalAllocator *internal_allocator() {
+ InternalAllocator *internal_allocator_instance =
+ reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
+ if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
+ SpinMutexLock l(&internal_alloc_init_mu);
+ if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
+ 0) {
+ internal_allocator_instance->Init();
+ atomic_store(&internal_allocator_initialized, 1, memory_order_release);
+ }
+ }
+ return internal_allocator_instance;
+}
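This is classic double-checked initialization: an acquire load on the fast path, a spin lock, a relaxed re-check under the lock, and a release store to publish the Init(). A minimal standalone sketch of the same pattern using standard C++ primitives instead of the sanitizer ones (all names here are illustrative):

    #include <atomic>
    #include <mutex>

    struct Widget { void Init() {} };

    alignas(64) static char placeholder[sizeof(Widget)];
    static std::atomic<int> inited{0};
    static std::mutex mu;

    Widget *instance() {
      Widget *w = reinterpret_cast<Widget *>(&placeholder);
      // Fast path: acquire pairs with the release store below, so a
      // thread that loads 1 also sees the fully initialized Widget.
      if (inited.load(std::memory_order_acquire) == 0) {
        std::lock_guard<std::mutex> lock(mu);
        // A relaxed re-check suffices here: the mutex already orders this
        // thread after any initializer that ran before it.
        if (inited.load(std::memory_order_relaxed) == 0) {
          w->Init();
          inited.store(1, std::memory_order_release);
        }
      }
      return w;
    }
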
+
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
+ if (cache == 0) {
+ SpinMutexLock l(&internal_allocator_cache_mu);
+ return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
+ false);
+ }
+ return internal_allocator()->Allocate(cache, size, 8, false);
+}
+
+static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
+ if (cache == 0) {
+ SpinMutexLock l(&internal_allocator_cache_mu);
+ return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
+ }
+ internal_allocator()->Deallocate(cache, ptr);
+}
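Note the asymmetry the cache parameter buys: a caller that owns a per-thread InternalAllocatorCache allocates without taking any lock, while cache == 0 callers are serialized on the single shared cache. Illustrative call shapes (RawInternalAlloc is file-static; external callers go through InternalAlloc below):

    InternalAllocatorCache local_cache = {};          // e.g. per-thread state
    void *fast = RawInternalAlloc(64, &local_cache);  // no lock taken
    void *slow = RawInternalAlloc(64, 0);   // locks internal_allocator_cache_mu
    RawInternalFree(fast, &local_cache);
    RawInternalFree(slow, 0);
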
+
+#endif // SANITIZER_GO
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
-void *InternalAlloc(uptr size) {
+void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
if (size + sizeof(u64) < size)
return 0;
- void *p = LIBC_MALLOC(size + sizeof(u64));
+ void *p = RawInternalAlloc(size + sizeof(u64), cache);
if (p == 0)
return 0;
((u64*)p)[0] = kBlockMagic;
return (char*)p + sizeof(u64);
}
-void InternalFree(void *addr) {
+void InternalFree(void *addr, InternalAllocatorCache *cache) {
if (addr == 0)
return;
addr = (char*)addr - sizeof(u64);
- CHECK_EQ(((u64*)addr)[0], kBlockMagic);
+ CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
((u64*)addr)[0] = 0;
- LIBC_FREE(addr);
+ RawInternalFree(addr, cache);
}
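Each internal allocation now carries an 8-byte header: InternalAlloc over-allocates by sizeof(u64), stamps kBlockMagic there, and hands the caller the bytes after it; InternalFree walks back, CHECKs the magic, and zeroes it, so double frees and frees of foreign pointers fail loudly. The `size + sizeof(u64) < size` test rejects sizes so large that the addition would wrap. A round trip, passing cache = 0 explicitly:

    void *p = InternalAlloc(100, 0);  // underlying block is 108 bytes;
                                      // the first u64 holds kBlockMagic
    InternalFree(p, 0);               // verifies the magic, then clears it,
                                      // so a second InternalFree(p) would
                                      // fail the CHECK_EQ
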
// LowLevelAllocator
@@ -79,4 +138,14 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}
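The overflow test divides instead of multiplying, so it can never wrap itself: with max the largest uptr value (defined just above this hunk), (max / size) < n holds exactly when n * size would exceed max. Worked on a 32-bit target:

    // max == 0xFFFFFFFF
    CallocShouldReturnNullDueToOverflow(0x10000, 0x10000);
    //   0xFFFFFFFF / 0x10000 == 0xFFFF < 0x10000 -> true  (2^32 bytes wraps)
    CallocShouldReturnNullDueToOverflow(0x10000, 0xFFFF);
    //   0xFFFF < 0xFFFF                          -> false (0xFFFF0000 fits)
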
+void *AllocatorReturnNull() {
+ if (common_flags()->allocator_may_return_null)
+ return 0;
+ Report("%s's allocator is terminating the process instead of returning 0\n",
+ SanitizerToolName);
+ Report("If you don't like this behavior set allocator_may_return_null=1\n");
+ CHECK(0);
+ return 0;
+}
+
} // namespace __sanitizer
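
AllocatorReturnNull() centralizes the out-of-memory policy behind the allocator_may_return_null common flag, settable at run time (e.g. ASAN_OPTIONS=allocator_may_return_null=1): return 0 if the user opted in, otherwise report and abort via CHECK(0). A hypothetical caller, just to show the intended shape (user_malloc, kMaxAllowedMallocSize, and actual_allocate are illustrative names, not from this patch):

    void *user_malloc(uptr size) {
      if (size > kMaxAllowedMallocSize)
        return AllocatorReturnNull();  // 0 or hard failure, per the flag
      return actual_allocate(size);    // illustrative stand-in
    }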