author     Kostya Serebryany <kcc@google.com>   2012-12-05 13:19:55 +0000
committer  Kostya Serebryany <kcc@gcc.gnu.org>  2012-12-05 13:19:55 +0000
commit     a04084545806300525bb07d0c827480f5282bb55
tree       1a9c1fa8fc461362f209a6c9b1abdadaacf74938
parent     cc4d934fa0d16330f29953d7ad14ff71e15f0d1b
[libsanitizer] merge from upstream r169371
From-SVN: r194221
Diffstat (limited to 'libsanitizer/sanitizer_common')
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator.cc | 7
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator.h (renamed from libsanitizer/sanitizer_common/sanitizer_allocator64.h) | 79
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common.h | 3
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux.cc | 56
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mac.cc | 9
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_posix.cc | 5
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_procmaps.h | 22
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace.cc | 2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer.cc | 15
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer.h | 1
10 files changed, 138 insertions, 61 deletions
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.cc b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
index 88905b7..d091127 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
@@ -47,13 +47,6 @@ void InternalFree(void *addr) {
LIBC_FREE(addr);
}
-void *InternalAllocBlock(void *p) {
- CHECK_NE(p, (void*)0);
- u64 *pp = (u64*)((uptr)p & ~0x7);
- for (; pp[0] != kBlockMagic; pp--) {}
- return pp + 1;
-}
-
// LowLevelAllocator
static LowLevelAllocateCallback low_level_alloc_callback;
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator64.h b/libsanitizer/sanitizer_common/sanitizer_allocator.h
index 222e3ad..63107bd 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator64.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.h
@@ -1,25 +1,18 @@
-//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
+//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-// Specialized allocator which works only in 64-bit address space.
-// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
-// The main feature of this allocator is that the header is located far away
-// from the user memory region, so that the tool does not use extra shadow
-// for the header.
//
-// Status: not yet ready.
+// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
+//
//===----------------------------------------------------------------------===//
+
#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H
#include "sanitizer_internal_defs.h"
-#if SANITIZER_WORDSIZE != 64
-# error "sanitizer_allocator64.h can only be used on 64-bit platforms"
-#endif
-
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
@@ -28,7 +21,10 @@
namespace __sanitizer {
// Maps size class id to size and back.
-class DefaultSizeClassMap {
+template <uptr l0, uptr l1, uptr l2, uptr l3, uptr l4, uptr l5,
+ uptr s0, uptr s1, uptr s2, uptr s3, uptr s4,
+ uptr c0, uptr c1, uptr c2, uptr c3, uptr c4>
+class SplineSizeClassMap {
private:
// Here we use a spline composed of 5 polynomials of order 1.
// The first size class is l0, then the classes go with step s0
@@ -36,38 +32,20 @@ class DefaultSizeClassMap {
// Steps should be powers of two for cheap division.
// The size of the last size class should be a power of two.
// There should be at most 256 size classes.
- static const uptr l0 = 1 << 4;
- static const uptr l1 = 1 << 9;
- static const uptr l2 = 1 << 12;
- static const uptr l3 = 1 << 15;
- static const uptr l4 = 1 << 18;
- static const uptr l5 = 1 << 21;
-
- static const uptr s0 = 1 << 4;
- static const uptr s1 = 1 << 6;
- static const uptr s2 = 1 << 9;
- static const uptr s3 = 1 << 12;
- static const uptr s4 = 1 << 15;
-
static const uptr u0 = 0 + (l1 - l0) / s0;
static const uptr u1 = u0 + (l2 - l1) / s1;
static const uptr u2 = u1 + (l3 - l2) / s2;
static const uptr u3 = u2 + (l4 - l3) / s3;
static const uptr u4 = u3 + (l5 - l4) / s4;
- // Max cached in local cache blocks.
- static const uptr c0 = 256;
- static const uptr c1 = 64;
- static const uptr c2 = 16;
- static const uptr c3 = 4;
- static const uptr c4 = 1;
-
public:
+ // The number of size classes should be a power of two for fast division.
static const uptr kNumClasses = u4 + 1;
static const uptr kMaxSize = l5;
static const uptr kMinSize = l0;
COMPILER_CHECK(kNumClasses <= 256);
+ COMPILER_CHECK((kNumClasses & (kNumClasses - 1)) == 0);
COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);
static uptr Size(uptr class_id) {
@@ -97,13 +75,30 @@ class DefaultSizeClassMap {
}
};
+class DefaultSizeClassMap: public SplineSizeClassMap<
+ /* l: */1 << 4, 1 << 9, 1 << 12, 1 << 15, 1 << 18, 1 << 21,
+ /* s: */1 << 4, 1 << 6, 1 << 9, 1 << 12, 1 << 15,
+ /* c: */256, 64, 16, 4, 1> {
+ private:
+ COMPILER_CHECK(kNumClasses == 256);
+};
+
+class CompactSizeClassMap: public SplineSizeClassMap<
+ /* l: */1 << 3, 1 << 4, 1 << 7, 1 << 8, 1 << 12, 1 << 15,
+ /* s: */1 << 3, 1 << 4, 1 << 7, 1 << 8, 1 << 12,
+ /* c: */256, 64, 16, 4, 1> {
+ private:
+ COMPILER_CHECK(kNumClasses <= 32);
+};
+
struct AllocatorListNode {
AllocatorListNode *next;
};
typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
-
+// SizeClassAllocator64 -- allocator for 64-bit address space.
+//
// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
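
As a quick cross-check of the spline parameters in the hunk above, each segment contributes (l[i+1] - l[i]) / s[i] classes and the map adds one more for class 0. A standalone sketch (editor's illustration, not part of the patch) that re-derives both class counts:

#include <cstddef>

// Re-derives SplineSizeClassMap::kNumClasses (u4 + 1) from the l*/s* parameters.
constexpr size_t NumClasses(size_t l0, size_t l1, size_t l2, size_t l3,
                            size_t l4, size_t l5, size_t s0, size_t s1,
                            size_t s2, size_t s3, size_t s4) {
  return (l1 - l0) / s0 + (l2 - l1) / s1 + (l3 - l2) / s2 +
         (l4 - l3) / s3 + (l5 - l4) / s4 + 1;
}

// DefaultSizeClassMap: 31 + 56 + 56 + 56 + 56 + 1 = 256 classes (a power of two).
static_assert(NumClasses(1 << 4, 1 << 9, 1 << 12, 1 << 15, 1 << 18, 1 << 21,
                         1 << 4, 1 << 6, 1 << 9, 1 << 12, 1 << 15) == 256, "");

// CompactSizeClassMap: 1 + 7 + 1 + 15 + 7 + 1 = 32 classes, within the <= 32 check.
static_assert(NumClasses(1 << 3, 1 << 4, 1 << 7, 1 << 8, 1 << 12, 1 << 15,
                         1 << 3, 1 << 4, 1 << 7, 1 << 8, 1 << 12) == 32, "");
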
@@ -217,14 +212,15 @@ class SizeClassAllocator64 {
static uptr AllocBeg() { return kSpaceBeg; }
static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
- static const uptr kNumClasses = 256; // Power of two <= 256
typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses; // 2^k <= 256
private:
+ static const uptr kRegionSize = kSpaceSize / kNumClasses;
COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
- static const uptr kRegionSize = kSpaceSize / kNumClasses;
- COMPILER_CHECK((kRegionSize >> 32) > 0); // kRegionSize must be >= 2^32.
+ // kRegionSize must be >= 2^32.
+ COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
// Populate the free list with at most this number of bytes at once
// or with one element if its size is greater.
static const uptr kPopulateSize = 1 << 18;
@@ -239,8 +235,9 @@ class SizeClassAllocator64 {
COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);
static uptr AdditionalSize() {
- uptr res = sizeof(RegionInfo) * kNumClasses;
- CHECK_EQ(res % GetPageSizeCached(), 0);
+ uptr PageSize = GetPageSizeCached();
+ uptr res = Max(sizeof(RegionInfo) * kNumClasses, PageSize);
+ CHECK_EQ(res % PageSize, 0);
return res;
}
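
The new Max() against the page size is what keeps the metadata area page-sized when the class count is small. With CompactSizeClassMap's 32 classes and a cache-line-sized RegionInfo (assumed 64 bytes here, per the COMPILER_CHECK against kCacheLineSize), the raw metadata is only 2048 bytes, so the old CHECK_EQ(res % GetPageSizeCached(), 0) would have failed on a 4 KiB page. A small sketch of that arithmetic; the page and cache-line sizes are assumptions:

// Illustrative values only: both are platform-dependent.
constexpr unsigned long kAssumedRegionInfoSize = 64;   // == kCacheLineSize on common targets
constexpr unsigned long kAssumedPageSize = 4096;
constexpr unsigned long kCompactClasses = 32;          // CompactSizeClassMap::kNumClasses

constexpr unsigned long kRaw = kAssumedRegionInfoSize * kCompactClasses;              // 2048
constexpr unsigned long kPadded = kRaw > kAssumedPageSize ? kRaw : kAssumedPageSize;  // Max(...)

static_assert(kRaw % kAssumedPageSize != 0, "the old CHECK_EQ would have failed here");
static_assert(kPadded % kAssumedPageSize == 0, "the new CHECK_EQ still holds");
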
@@ -305,8 +302,10 @@ class SizeClassAllocator64 {
// Objects of this type should be used as local caches for SizeClassAllocator64.
// Since the typical use of this class is to have one object per thread in TLS,
// it has to be POD.
-template<const uptr kNumClasses, class SizeClassAllocator>
+template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
+ typedef SizeClassAllocator Allocator;
+ static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
// Don't need to call Init if the object is a global (i.e. zero-initialized).
void Init() {
internal_memset(this, 0, sizeof(*this));
@@ -458,11 +457,13 @@ class LargeMmapAllocator {
};
Header *GetHeader(uptr p) {
+ CHECK_EQ(p % page_size_, 0);
return reinterpret_cast<Header*>(p - page_size_);
}
Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }
void *GetUser(Header *h) {
+ CHECK_EQ((uptr)h % page_size_, 0);
return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h
index 18b1e1a..5639134 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.h
+++ b/libsanitizer/sanitizer_common/sanitizer_common.h
@@ -49,9 +49,6 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Internal allocator
void *InternalAlloc(uptr size);
void InternalFree(void *p);
-// Given the pointer p into a valid allocated block,
-// returns a pointer to the beginning of the block.
-void *InternalAllocBlock(void *p);
// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cc b/libsanitizer/sanitizer_common/sanitizer_linux.cc
index 2145aa0..75f2ee1 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.cc
@@ -14,6 +14,7 @@
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
@@ -215,21 +216,60 @@ void ReExec() {
}
// ----------------- sanitizer_procmaps.h
+// Linker initialized.
+ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
+StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
+
MemoryMappingLayout::MemoryMappingLayout() {
- proc_self_maps_buff_len_ =
- ReadFileToBuffer("/proc/self/maps", &proc_self_maps_buff_,
- &proc_self_maps_buff_mmaped_size_, 1 << 26);
- CHECK_GT(proc_self_maps_buff_len_, 0);
- // internal_write(2, proc_self_maps_buff_, proc_self_maps_buff_len_);
+ proc_self_maps_.len =
+ ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
+ &proc_self_maps_.mmaped_size, 1 << 26);
+ if (proc_self_maps_.mmaped_size == 0) {
+ LoadFromCache();
+ CHECK_GT(proc_self_maps_.len, 0);
+ }
+ // internal_write(2, proc_self_maps_.data, proc_self_maps_.len);
Reset();
+ // FIXME: in the future we may want to cache the mappings on demand only.
+ CacheMemoryMappings();
}
MemoryMappingLayout::~MemoryMappingLayout() {
- UnmapOrDie(proc_self_maps_buff_, proc_self_maps_buff_mmaped_size_);
+ // Only unmap the buffer if it is different from the cached one. Otherwise
+ // it will be unmapped when the cache is refreshed.
+ if (proc_self_maps_.data != cached_proc_self_maps_.data) {
+ UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
+ }
}
void MemoryMappingLayout::Reset() {
- current_ = proc_self_maps_buff_;
+ current_ = proc_self_maps_.data;
+}
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ SpinMutexLock l(&cache_lock_);
+ // Don't invalidate the cache if the mappings are unavailable.
+ ProcSelfMapsBuff old_proc_self_maps;
+ old_proc_self_maps = cached_proc_self_maps_;
+ cached_proc_self_maps_.len =
+ ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_.data,
+ &cached_proc_self_maps_.mmaped_size, 1 << 26);
+ if (cached_proc_self_maps_.mmaped_size == 0) {
+ cached_proc_self_maps_ = old_proc_self_maps;
+ } else {
+ if (old_proc_self_maps.mmaped_size) {
+ UnmapOrDie(old_proc_self_maps.data,
+ old_proc_self_maps.mmaped_size);
+ }
+ }
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ SpinMutexLock l(&cache_lock_);
+ if (cached_proc_self_maps_.data) {
+ proc_self_maps_ = cached_proc_self_maps_;
+ }
}
// Parse a hex value in str and update str.
@@ -263,7 +303,7 @@ static bool IsDecimal(char c) {
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size) {
- char *last = proc_self_maps_buff_ + proc_self_maps_buff_len_;
+ char *last = proc_self_maps_.data + proc_self_maps_.len;
if (current_ >= last) return false;
uptr dummy;
if (!start) start = &dummy;
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cc b/libsanitizer/sanitizer_common/sanitizer_mac.cc
index aa313ba..465d0a3 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.cc
@@ -160,6 +160,15 @@ void MemoryMappingLayout::Reset() {
current_filetype_ = 0;
}
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ // No-op on Mac for now.
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ // No-op on Mac for now.
+}
+
// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
// Google Perftools, http://code.google.com/p/google-perftools.
diff --git a/libsanitizer/sanitizer_common/sanitizer_posix.cc b/libsanitizer/sanitizer_common/sanitizer_posix.cc
index 75d1147..b9601eaa 100644
--- a/libsanitizer/sanitizer_common/sanitizer_posix.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_posix.cc
@@ -167,7 +167,10 @@ void SetStackSizeLimitInBytes(uptr limit) {
struct rlimit rlim;
rlim.rlim_cur = limit;
rlim.rlim_max = limit;
- CHECK_EQ(0, setrlimit(RLIMIT_STACK, &rlim));
+ if (setrlimit(RLIMIT_STACK, &rlim)) {
+ Report("setrlimit() failed %d\n", errno);
+ Die();
+ }
CHECK(!StackSizeIsUnlimited());
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps.h b/libsanitizer/sanitizer_common/sanitizer_procmaps.h
index 5541cfc..400fd7a 100644
--- a/libsanitizer/sanitizer_common/sanitizer_procmaps.h
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps.h
@@ -13,6 +13,7 @@
#define SANITIZER_PROCMAPS_H
#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
namespace __sanitizer {
@@ -27,6 +28,14 @@ class MemoryMappingLayout {
};
#else // _WIN32
+#if defined(__linux__)
+struct ProcSelfMapsBuff {
+ char *data;
+ uptr mmaped_size;
+ uptr len;
+};
+#endif // defined(__linux__)
+
class MemoryMappingLayout {
public:
MemoryMappingLayout();
@@ -37,9 +46,14 @@ class MemoryMappingLayout {
// address 'addr'. Returns true on success.
bool GetObjectNameAndOffset(uptr addr, uptr *offset,
char filename[], uptr filename_size);
+ // In some cases, e.g. when running under a sandbox on Linux, ASan is unable
+ // to obtain the memory mappings. It should fall back to pre-cached data
+ // instead of aborting.
+ static void CacheMemoryMappings();
~MemoryMappingLayout();
private:
+ void LoadFromCache();
// Default implementation of GetObjectNameAndOffset.
// Quite slow, because it iterates through the whole process map for each
// lookup.
@@ -71,10 +85,12 @@ class MemoryMappingLayout {
}
# if defined __linux__
- char *proc_self_maps_buff_;
- uptr proc_self_maps_buff_mmaped_size_;
- uptr proc_self_maps_buff_len_;
+ ProcSelfMapsBuff proc_self_maps_;
char *current_;
+
+ // Static mappings cache.
+ static ProcSelfMapsBuff cached_proc_self_maps_;
+ static StaticSpinMutex cache_lock_; // protects cached_proc_self_maps_.
# elif defined __APPLE__
template<u32 kLCSegment, typename SegmentCommand>
bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
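
The comment added above spells out the intended protocol: snapshot the mappings while /proc/self/maps is still readable (e.g. before entering a sandbox), and later MemoryMappingLayout objects fall back to that snapshot when the read comes back empty. A hedged caller-side sketch; EnterSandbox() is a hypothetical placeholder, not part of the patch:

#include "sanitizer_procmaps.h"

using namespace __sanitizer;

void PrepareForSandbox() {
  // Take the snapshot while /proc/self/maps can still be read.
  MemoryMappingLayout::CacheMemoryMappings();
  // EnterSandbox();  // hypothetical: after this, ReadFileToBuffer() returns nothing
}

void DumpMappingsInsideSandbox() {
  // The constructor tries /proc/self/maps first; if mmaped_size comes back 0
  // it calls LoadFromCache() and iterates over the cached buffer instead.
  MemoryMappingLayout layout;
  uptr start, end, offset;
  char filename[256];
  while (layout.Next(&start, &end, &offset, filename, sizeof(filename))) {
    // ... consume each cached mapping ...
  }
}
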
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
index 368d05d..308c2d9 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
@@ -34,6 +34,8 @@ static uptr patch_pc(uptr pc) {
#if defined(__powerpc__) || defined(__powerpc64__)
// PCs are always 4 byte aligned.
return pc - 4;
+#elif defined(__sparc__)
+ return pc - 8;
#else
return pc - 1;
#endif
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
index efd1e81..11393e4 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
@@ -254,6 +254,17 @@ class Symbolizer {
// Otherwise, the data was filled by external symbolizer.
return actual_frames;
}
+
+ bool SymbolizeData(uptr addr, AddressInfo *frame) {
+ LoadedModule *module = FindModuleForAddress(addr);
+ if (module == 0)
+ return false;
+ const char *module_name = module->full_name();
+ uptr module_offset = addr - module->base_address();
+ frame->FillAddressAndModuleInfo(addr, module_name, module_offset);
+ return true;
+ }
+
bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
int input_fd, output_fd;
if (!StartSymbolizerSubprocess(path_to_symbolizer, &input_fd, &output_fd))
@@ -305,6 +316,10 @@ uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames) {
return symbolizer.SymbolizeCode(address, frames, max_frames);
}
+bool SymbolizeData(uptr address, AddressInfo *frame) {
+ return symbolizer.SymbolizeData(address, frame);
+}
+
bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
return symbolizer.InitializeExternalSymbolizer(path_to_symbolizer);
}
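
SymbolizeData() is the data-address counterpart of SymbolizeCode(): it resolves only the containing module and the offset inside it, with no external-symbolizer round-trip, which is enough to describe a global in a report. A hedged usage sketch; the AddressInfo field names are assumptions inferred from FillAddressAndModuleInfo() above:

#include "sanitizer_symbolizer.h"

using namespace __sanitizer;

// Returns true and reports module/offset for a data address (e.g. a global
// variable mentioned in an error report); false if no loaded module owns it.
bool DescribeDataAddress(uptr addr, const char **module_name, uptr *module_offset) {
  AddressInfo info;
  if (!SymbolizeData(addr, &info))
    return false;
  *module_name = info.module;          // assumed field filled by FillAddressAndModuleInfo()
  *module_offset = info.module_offset;
  return true;
}
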
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
index 13ec83f..4d7ec17 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
@@ -56,6 +56,7 @@ struct AddressInfo {
// of descriptions actually filled.
// This function should NOT be called from two threads simultaneously.
uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames);
+bool SymbolizeData(uptr address, AddressInfo *frame);
// Starts external symbolizer program in a subprocess. Sanitizer communicates
// with external symbolizer via pipes.