author    H.J. Lu <hjl.tools@gmail.com>  2021-07-20 10:44:37 -0700
committer H.J. Lu <hjl.tools@gmail.com>  2021-07-20 14:21:51 -0700
commit    90e46074e6b3561ae7d8ebd205127f286cc0c6b6
tree      6f21ee7eafae85d0aacc994e221c48d3bb172df0 /libsanitizer/sanitizer_common
parent    8bf5b49ebd2176b8c535147377381dd07fbdd643
libsanitizer: Merge with upstream
Merged revision: 7704fedfff6ef5676adb6415f3be0ac927d1a746
Diffstat (limited to 'libsanitizer/sanitizer_common')
-rw-r--r--  libsanitizer/sanitizer_common/Makefile.am | 2
-rw-r--r--  libsanitizer/sanitizer_common/Makefile.in | 19
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_addrhashmap.h | 106
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator.cpp | 38
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_combined.h | 4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h | 19
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h | 4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h | 170
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h | 8
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common.cpp | 15
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common.h | 23
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc | 84
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp | 4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_nolibc.cpp | 1
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cpp | 2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cpp | 32
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_errno.h | 3
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp | 33
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_fuchsia.h | 2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_libc.h | 3
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_libignore.cpp | 25
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_libignore.h | 35
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux.cpp | 46
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp | 41
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mac.cpp | 25
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mac.h | 20
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mutex.cpp | 39
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mutex.h | 272
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_netbsd.cpp | 11
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform.h | 25
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h | 12
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp | 7
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h | 12
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp | 15
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_printf.cpp | 27
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_quarantine.h | 3
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_rtems.cpp | 281
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_rtems.h | 20
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_solaris.cpp | 18
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp | 17
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace.h | 36
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cpp | 185
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_markup.cpp | 15
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_report.cpp | 22
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_rtems.h | 40
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp | 29
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_thread_registry.h | 18
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_thread_safety.h | 42
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_win.cpp | 26
49 files changed, 994 insertions(+), 942 deletions(-)
diff --git a/libsanitizer/sanitizer_common/Makefile.am b/libsanitizer/sanitizer_common/Makefile.am
index 7cba534..d04f2d8 100644
--- a/libsanitizer/sanitizer_common/Makefile.am
+++ b/libsanitizer/sanitizer_common/Makefile.am
@@ -41,6 +41,7 @@ sanitizer_common_files = \
sanitizer_linux_s390.cpp \
sanitizer_mac.cpp \
sanitizer_mac_libcdep.cpp \
+ sanitizer_mutex.cpp \
sanitizer_netbsd.cpp \
sanitizer_openbsd.cpp \
sanitizer_persistent_allocator.cpp \
@@ -57,7 +58,6 @@ sanitizer_common_files = \
sanitizer_procmaps_linux.cpp \
sanitizer_procmaps_mac.cpp \
sanitizer_procmaps_solaris.cpp \
- sanitizer_rtems.cpp \
sanitizer_solaris.cpp \
sanitizer_stackdepot.cpp \
sanitizer_stacktrace.cpp \
diff --git a/libsanitizer/sanitizer_common/Makefile.in b/libsanitizer/sanitizer_common/Makefile.in
index 7e5555c..2856894 100644
--- a/libsanitizer/sanitizer_common/Makefile.in
+++ b/libsanitizer/sanitizer_common/Makefile.in
@@ -128,8 +128,9 @@ am__objects_1 = sancov_flags.lo sanitizer_allocator.lo \
sanitizer_file.lo sanitizer_flags.lo sanitizer_flag_parser.lo \
sanitizer_libc.lo sanitizer_libignore.lo sanitizer_linux.lo \
sanitizer_linux_libcdep.lo sanitizer_linux_s390.lo \
- sanitizer_mac.lo sanitizer_mac_libcdep.lo sanitizer_netbsd.lo \
- sanitizer_openbsd.lo sanitizer_persistent_allocator.lo \
+ sanitizer_mac.lo sanitizer_mac_libcdep.lo sanitizer_mutex.lo \
+ sanitizer_netbsd.lo sanitizer_openbsd.lo \
+ sanitizer_persistent_allocator.lo \
sanitizer_platform_limits_freebsd.lo \
sanitizer_platform_limits_linux.lo \
sanitizer_platform_limits_openbsd.lo \
@@ -138,11 +139,11 @@ am__objects_1 = sancov_flags.lo sanitizer_allocator.lo \
sanitizer_posix_libcdep.lo sanitizer_printf.lo \
sanitizer_procmaps_bsd.lo sanitizer_procmaps_common.lo \
sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \
- sanitizer_procmaps_solaris.lo sanitizer_rtems.lo \
- sanitizer_solaris.lo sanitizer_stackdepot.lo \
- sanitizer_stacktrace.lo sanitizer_stacktrace_libcdep.lo \
- sanitizer_stacktrace_sparc.lo sanitizer_symbolizer_mac.lo \
- sanitizer_symbolizer_report.lo sanitizer_stacktrace_printer.lo \
+ sanitizer_procmaps_solaris.lo sanitizer_solaris.lo \
+ sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
+ sanitizer_stacktrace_libcdep.lo sanitizer_stacktrace_sparc.lo \
+ sanitizer_symbolizer_mac.lo sanitizer_symbolizer_report.lo \
+ sanitizer_stacktrace_printer.lo \
sanitizer_stoptheworld_linux_libcdep.lo \
sanitizer_stoptheworld_mac.lo sanitizer_suppressions.lo \
sanitizer_symbolizer.lo sanitizer_symbolizer_libbacktrace.lo \
@@ -400,6 +401,7 @@ sanitizer_common_files = \
sanitizer_linux_s390.cpp \
sanitizer_mac.cpp \
sanitizer_mac_libcdep.cpp \
+ sanitizer_mutex.cpp \
sanitizer_netbsd.cpp \
sanitizer_openbsd.cpp \
sanitizer_persistent_allocator.cpp \
@@ -416,7 +418,6 @@ sanitizer_common_files = \
sanitizer_procmaps_linux.cpp \
sanitizer_procmaps_mac.cpp \
sanitizer_procmaps_solaris.cpp \
- sanitizer_rtems.cpp \
sanitizer_solaris.cpp \
sanitizer_stackdepot.cpp \
sanitizer_stacktrace.cpp \
@@ -557,6 +558,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_s390.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mutex.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_netbsd.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_openbsd.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_persistent_allocator.Plo@am__quote@
@@ -573,7 +575,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_solaris.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_rtems.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_solaris.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
diff --git a/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h b/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h
index a033e78..15f81a0 100644
--- a/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h
+++ b/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h
@@ -162,8 +162,8 @@ AddrHashMap<T, kSize>::AddrHashMap() {
table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
}
-template<typename T, uptr kSize>
-void AddrHashMap<T, kSize>::acquire(Handle *h) {
+template <typename T, uptr kSize>
+void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
uptr addr = h->addr_;
uptr hash = calcHash(addr);
Bucket *b = &table_[hash];
@@ -289,57 +289,57 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) {
CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
h->addidx_ = i;
h->cell_ = c;
-}
-
-template<typename T, uptr kSize>
-void AddrHashMap<T, kSize>::release(Handle *h) {
- if (!h->cell_)
- return;
- Bucket *b = h->bucket_;
- Cell *c = h->cell_;
- uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
- if (h->created_) {
- // Denote completion of insertion.
- CHECK_EQ(addr1, 0);
- // After the following store, the element becomes available
- // for lock-free reads.
- atomic_store(&c->addr, h->addr_, memory_order_release);
- b->mtx.Unlock();
- } else if (h->remove_) {
- // Denote that the cell is empty now.
- CHECK_EQ(addr1, h->addr_);
- atomic_store(&c->addr, 0, memory_order_release);
- // See if we need to compact the bucket.
- AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
- if (h->addidx_ == -1U) {
- // Removed from embed array, move an add element into the freed cell.
- if (add && add->size != 0) {
- uptr last = --add->size;
- Cell *c1 = &add->cells[last];
- c->val = c1->val;
- uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
- atomic_store(&c->addr, addr1, memory_order_release);
- atomic_store(&c1->addr, 0, memory_order_release);
- }
- } else {
- // Removed from add array, compact it.
- uptr last = --add->size;
- Cell *c1 = &add->cells[last];
- if (c != c1) {
- *c = *c1;
- atomic_store(&c1->addr, 0, memory_order_relaxed);
- }
- }
- if (add && add->size == 0) {
- // FIXME(dvyukov): free add?
- }
- b->mtx.Unlock();
- } else {
- CHECK_EQ(addr1, h->addr_);
- if (h->addidx_ != -1U)
- b->mtx.ReadUnlock();
- }
-}
+ }
+
+ template <typename T, uptr kSize>
+ void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+ if (!h->cell_)
+ return;
+ Bucket *b = h->bucket_;
+ Cell *c = h->cell_;
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (h->created_) {
+ // Denote completion of insertion.
+ CHECK_EQ(addr1, 0);
+ // After the following store, the element becomes available
+ // for lock-free reads.
+ atomic_store(&c->addr, h->addr_, memory_order_release);
+ b->mtx.Unlock();
+ } else if (h->remove_) {
+ // Denote that the cell is empty now.
+ CHECK_EQ(addr1, h->addr_);
+ atomic_store(&c->addr, 0, memory_order_release);
+ // See if we need to compact the bucket.
+ AddBucket *add = (AddBucket *)atomic_load(&b->add, memory_order_relaxed);
+ if (h->addidx_ == -1U) {
+ // Removed from embed array, move an add element into the freed cell.
+ if (add && add->size != 0) {
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ c->val = c1->val;
+ uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
+ atomic_store(&c->addr, addr1, memory_order_release);
+ atomic_store(&c1->addr, 0, memory_order_release);
+ }
+ } else {
+ // Removed from add array, compact it.
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ if (c != c1) {
+ *c = *c1;
+ atomic_store(&c1->addr, 0, memory_order_relaxed);
+ }
+ }
+ if (add && add->size == 0) {
+ // FIXME(dvyukov): free add?
+ }
+ b->mtx.Unlock();
+ } else {
+ CHECK_EQ(addr1, h->addr_);
+ if (h->addidx_ != -1U)
+ b->mtx.ReadUnlock();
+ }
+ }
template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
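
Note on the NO_THREAD_SAFETY_ANALYSIS annotations added above: acquire() returns with the bucket mutex held, and release() unlocks a mutex it never took, which Clang's function-local -Wthread-safety analysis cannot model. A minimal sketch of the situation, with hypothetical names rather than code from this patch:

  class __attribute__((capability("mutex"))) Mu {
   public:
    void Lock() __attribute__((acquire_capability()));
    void Unlock() __attribute__((release_capability()));
  };

  Mu mu;

  // Without the attribute: "mutex 'mu' is still held at the end of function".
  void HandOut() __attribute__((no_thread_safety_analysis)) { mu.Lock(); }
  // Without the attribute: "releasing mutex 'mu' that was not held".
  void HandBack() __attribute__((no_thread_safety_analysis)) { mu.Unlock(); }
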
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.cpp b/libsanitizer/sanitizer_common/sanitizer_allocator.cpp
index 3157b35..bcb7370 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.cpp
@@ -137,14 +137,6 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
-namespace {
-const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
-
-struct BlockHeader {
- u64 magic;
-};
-} // namespace
-
static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
SetAllocatorOutOfMemory();
Report("FATAL: %s: internal allocator is out of memory trying to allocate "
@@ -153,28 +145,17 @@ static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
}
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
- uptr s = size + sizeof(BlockHeader);
- if (s < size)
- return nullptr;
- BlockHeader *p = (BlockHeader *)RawInternalAlloc(s, cache, alignment);
+ void *p = RawInternalAlloc(size, cache, alignment);
if (UNLIKELY(!p))
- ReportInternalAllocatorOutOfMemory(s);
- p->magic = kBlockMagic;
- return p + 1;
+ ReportInternalAllocatorOutOfMemory(size);
+ return p;
}
void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
- if (!addr)
- return InternalAlloc(size, cache);
- uptr s = size + sizeof(BlockHeader);
- if (s < size)
- return nullptr;
- BlockHeader *p = (BlockHeader *)addr - 1;
- CHECK_EQ(kBlockMagic, p->magic);
- p = (BlockHeader *)RawInternalRealloc(p, s, cache);
+ void *p = RawInternalRealloc(addr, size, cache);
if (UNLIKELY(!p))
- ReportInternalAllocatorOutOfMemory(s);
- return p + 1;
+ ReportInternalAllocatorOutOfMemory(size);
+ return p;
}
void *InternalReallocArray(void *addr, uptr count, uptr size,
@@ -203,12 +184,7 @@ void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
}
void InternalFree(void *addr, InternalAllocatorCache *cache) {
- if (!addr)
- return;
- BlockHeader *p = (BlockHeader *)addr - 1;
- CHECK_EQ(kBlockMagic, p->magic);
- p->magic = 0;
- RawInternalFree(p, cache);
+ RawInternalFree(addr, cache);
}
// LowLevelAllocator
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h b/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h
index eb836bc..0e81e67 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h
@@ -177,12 +177,12 @@ class CombinedAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() {
+ void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
primary_.ForceLock();
secondary_.ForceLock();
}
- void ForceUnlock() {
+ void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
secondary_.ForceUnlock();
primary_.ForceUnlock();
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h b/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h
index 108dfc2..e495c56 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -17,6 +17,7 @@
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
typedef SizeClassAllocator Allocator;
+ typedef MemoryMapper<Allocator> MemoryMapperT;
void Init(AllocatorGlobalStats *s) {
stats_.Init();
@@ -53,7 +54,7 @@ struct SizeClassAllocator64LocalCache {
PerClass *c = &per_class_[class_id];
InitCache(c);
if (UNLIKELY(c->count == c->max_count))
- Drain(c, allocator, class_id, c->max_count / 2);
+ DrainHalfMax(c, allocator, class_id);
CompactPtrT chunk = allocator->PointerToCompactPtr(
allocator->GetRegionBeginBySizeClass(class_id),
reinterpret_cast<uptr>(p));
@@ -62,10 +63,10 @@ struct SizeClassAllocator64LocalCache {
}
void Drain(SizeClassAllocator *allocator) {
+ MemoryMapperT memory_mapper(*allocator);
for (uptr i = 1; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
- while (c->count > 0)
- Drain(c, allocator, i, c->count);
+ while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);
}
}
@@ -106,12 +107,18 @@ struct SizeClassAllocator64LocalCache {
return true;
}
- NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
- uptr count) {
+ NOINLINE void DrainHalfMax(PerClass *c, SizeClassAllocator *allocator,
+ uptr class_id) {
+ MemoryMapperT memory_mapper(*allocator);
+ Drain(&memory_mapper, c, allocator, class_id, c->max_count / 2);
+ }
+
+ void Drain(MemoryMapperT *memory_mapper, PerClass *c,
+ SizeClassAllocator *allocator, uptr class_id, uptr count) {
CHECK_GE(c->count, count);
const uptr first_idx_to_drain = c->count - count;
c->count -= count;
- allocator->ReturnToAllocator(&stats_, class_id,
+ allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,
&c->chunks[first_idx_to_drain], count);
}
};
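
Note: the Drain/DrainHalfMax split above is a fast-path/slow-path separation: Deallocate() stays small enough to inline, while the rare cache-full case, including construction of a MemoryMapperT and its buffer, sits behind a NOINLINE helper. A hedged sketch of the shape, with illustrative names only:

  ALWAYS_INLINE void Push(PerClass *c, CompactPtrT chunk) {
    if (UNLIKELY(c->count == c->max_count))
      DrainHalfMaxSlow(c);            // NOINLINE: rare, owns the heavy work
    c->chunks[c->count++] = chunk;    // common case: one compare plus a store
  }
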
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h b/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h
index fb5394c..38d2a7d 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h
@@ -237,13 +237,13 @@ class SizeClassAllocator32 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() {
+ void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetSizeClassInfo(i)->mutex.Lock();
}
}
- void ForceUnlock() {
+ void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
for (int i = kNumClasses - 1; i >= 0; i--) {
GetSizeClassInfo(i)->mutex.Unlock();
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h b/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h
index db30e13..b142ee0 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h
@@ -42,6 +42,44 @@ struct SizeClassAllocator64FlagMasks { // Bit masks.
};
};
+template <typename Allocator>
+class MemoryMapper {
+ public:
+ typedef typename Allocator::CompactPtrT CompactPtrT;
+
+ explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}
+
+ bool GetAndResetStats(uptr &ranges, uptr &bytes) {
+ ranges = released_ranges_count_;
+ released_ranges_count_ = 0;
+ bytes = released_bytes_;
+ released_bytes_ = 0;
+ return ranges != 0;
+ }
+
+ u64 *MapPackedCounterArrayBuffer(uptr count) {
+ buffer_.clear();
+ buffer_.resize(count);
+ return buffer_.data();
+ }
+
+ // Releases [from, to) range of pages back to OS.
+ void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) {
+ const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);
+ const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);
+ const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);
+ ReleaseMemoryPagesToOS(from_page, to_page);
+ released_ranges_count_++;
+ released_bytes_ += to_page - from_page;
+ }
+
+ private:
+ const Allocator &allocator_;
+ uptr released_ranges_count_ = 0;
+ uptr released_bytes_ = 0;
+ InternalMmapVector<u64> buffer_;
+};
+
template <class Params>
class SizeClassAllocator64 {
public:
@@ -57,6 +95,7 @@ class SizeClassAllocator64 {
typedef SizeClassAllocator64<Params> ThisT;
typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
+ typedef MemoryMapper<ThisT> MemoryMapperT;
// When we know the size class (the region base) we can represent a pointer
// as a 4-byte integer (offset from the region start shifted right by 4).
@@ -120,9 +159,10 @@ class SizeClassAllocator64 {
}
void ForceReleaseToOS() {
+ MemoryMapperT memory_mapper(*this);
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
- MaybeReleaseToOS(class_id, true /*force*/);
+ MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
}
}
@@ -131,7 +171,8 @@ class SizeClassAllocator64 {
alignment <= SizeClassMap::kMaxSize;
}
- NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
+ NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,
+ AllocatorStats *stat, uptr class_id,
const CompactPtrT *chunks, uptr n_chunks) {
RegionInfo *region = GetRegionInfo(class_id);
uptr region_beg = GetRegionBeginBySizeClass(class_id);
@@ -154,7 +195,7 @@ class SizeClassAllocator64 {
region->num_freed_chunks = new_num_freed_chunks;
region->stats.n_freed += n_chunks;
- MaybeReleaseToOS(class_id, false /*force*/);
+ MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);
}
NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
@@ -312,13 +353,13 @@ class SizeClassAllocator64 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() {
+ void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetRegionInfo(i)->mutex.Lock();
}
}
- void ForceUnlock() {
+ void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
for (int i = (int)kNumClasses - 1; i >= 0; i--) {
GetRegionInfo(i)->mutex.Unlock();
}
@@ -362,11 +403,11 @@ class SizeClassAllocator64 {
// For the performance sake, none of the accessors check the validity of the
// arguments, it is assumed that index is always in [0, n) range and the value
// is not incremented past max_value.
- template<class MemoryMapperT>
class PackedCounterArray {
public:
- PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
- : n(num_counters), memory_mapper(mapper) {
+ template <typename MemoryMapper>
+ PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapper *mapper)
+ : n(num_counters) {
CHECK_GT(num_counters, 0);
CHECK_GT(max_value, 0);
constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
@@ -383,16 +424,8 @@ class SizeClassAllocator64 {
packing_ratio_log = Log2(packing_ratio);
bit_offset_mask = packing_ratio - 1;
- buffer_size =
- (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) *
- sizeof(*buffer);
- buffer = reinterpret_cast<u64*>(
- memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
- }
- ~PackedCounterArray() {
- if (buffer) {
- memory_mapper->UnmapPackedCounterArrayBuffer(buffer, buffer_size);
- }
+ buffer = mapper->MapPackedCounterArrayBuffer(
+ RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log);
}
bool IsAllocated() const {
@@ -429,19 +462,16 @@ class SizeClassAllocator64 {
u64 counter_mask;
u64 packing_ratio_log;
u64 bit_offset_mask;
-
- MemoryMapperT* const memory_mapper;
- u64 buffer_size;
u64* buffer;
};
- template<class MemoryMapperT>
+ template <class MemoryMapperT>
class FreePagesRangeTracker {
public:
- explicit FreePagesRangeTracker(MemoryMapperT* mapper)
+ FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id)
: memory_mapper(mapper),
- page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
- in_the_range(false), current_page(0), current_range_start_page(0) {}
+ class_id(class_id),
+ page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)) {}
void NextPage(bool freed) {
if (freed) {
@@ -463,28 +493,30 @@ class SizeClassAllocator64 {
void CloseOpenedRange() {
if (in_the_range) {
memory_mapper->ReleasePageRangeToOS(
- current_range_start_page << page_size_scaled_log,
+ class_id, current_range_start_page << page_size_scaled_log,
current_page << page_size_scaled_log);
in_the_range = false;
}
}
- MemoryMapperT* const memory_mapper;
- const uptr page_size_scaled_log;
- bool in_the_range;
- uptr current_page;
- uptr current_range_start_page;
+ MemoryMapperT *const memory_mapper = nullptr;
+ const uptr class_id = 0;
+ const uptr page_size_scaled_log = 0;
+ bool in_the_range = false;
+ uptr current_page = 0;
+ uptr current_range_start_page = 0;
};
// Iterates over the free_array to identify memory pages containing freed
// chunks only and returns these pages back to OS.
// allocated_pages_count is the total number of pages allocated for the
// current bucket.
- template<class MemoryMapperT>
+ template <typename MemoryMapper>
static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
uptr free_array_count, uptr chunk_size,
uptr allocated_pages_count,
- MemoryMapperT *memory_mapper) {
+ MemoryMapper *memory_mapper,
+ uptr class_id) {
const uptr page_size = GetPageSizeCached();
// Figure out the number of chunks per page and whether we can take a fast
@@ -520,9 +552,8 @@ class SizeClassAllocator64 {
UNREACHABLE("All chunk_size/page_size ratios must be handled.");
}
- PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
- full_pages_chunk_count_max,
- memory_mapper);
+ PackedCounterArray counters(allocated_pages_count,
+ full_pages_chunk_count_max, memory_mapper);
if (!counters.IsAllocated())
return;
@@ -547,7 +578,7 @@ class SizeClassAllocator64 {
// Iterate over pages detecting ranges of pages with chunk counters equal
// to the expected number of chunks for the particular page.
- FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
+ FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id);
if (same_chunk_count_per_page) {
// Fast path, every page has the same number of chunks affecting it.
for (uptr i = 0; i < counters.GetCount(); i++)
@@ -586,7 +617,7 @@ class SizeClassAllocator64 {
}
private:
- friend class MemoryMapper;
+ friend class MemoryMapper<ThisT>;
ReservedAddressRange address_range;
@@ -820,57 +851,13 @@ class SizeClassAllocator64 {
return true;
}
- class MemoryMapper {
- public:
- MemoryMapper(const ThisT& base_allocator, uptr class_id)
- : allocator(base_allocator),
- region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
- released_ranges_count(0),
- released_bytes(0) {
- }
-
- uptr GetReleasedRangesCount() const {
- return released_ranges_count;
- }
-
- uptr GetReleasedBytes() const {
- return released_bytes;
- }
-
- void *MapPackedCounterArrayBuffer(uptr buffer_size) {
- // TODO(alekseyshl): The idea to explore is to check if we have enough
- // space between num_freed_chunks*sizeof(CompactPtrT) and
- // mapped_free_array to fit buffer_size bytes and use that space instead
- // of mapping a temporary one.
- return MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
- }
-
- void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
- UnmapOrDie(buffer, buffer_size);
- }
-
- // Releases [from, to) range of pages back to OS.
- void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
- const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
- const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
- ReleaseMemoryPagesToOS(from_page, to_page);
- released_ranges_count++;
- released_bytes += to_page - from_page;
- }
-
- private:
- const ThisT& allocator;
- const uptr region_base;
- uptr released_ranges_count;
- uptr released_bytes;
- };
-
// Attempts to release RAM occupied by freed chunks back to OS. The region is
// expected to be locked.
//
// TODO(morehouse): Support a callback on memory release so HWASan can release
// aliases as well.
- void MaybeReleaseToOS(uptr class_id, bool force) {
+ void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id,
+ bool force) {
RegionInfo *region = GetRegionInfo(class_id);
const uptr chunk_size = ClassIdToSize(class_id);
const uptr page_size = GetPageSizeCached();
@@ -894,17 +881,16 @@ class SizeClassAllocator64 {
}
}
- MemoryMapper memory_mapper(*this, class_id);
-
- ReleaseFreeMemoryToOS<MemoryMapper>(
+ ReleaseFreeMemoryToOS(
GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
- RoundUpTo(region->allocated_user, page_size) / page_size,
- &memory_mapper);
+ RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper,
+ class_id);
- if (memory_mapper.GetReleasedRangesCount() > 0) {
+ uptr ranges, bytes;
+ if (memory_mapper->GetAndResetStats(ranges, bytes)) {
region->rtoi.n_freed_at_last_release = region->stats.n_freed;
- region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
- region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
+ region->rtoi.num_releases += ranges;
+ region->rtoi.last_released_bytes = bytes;
}
region->rtoi.last_release_at_ns = MonotonicNanoTime();
}
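
Note: hoisting MemoryMapper out of MaybeReleaseToOS lets one mapper, and therefore one reusable counter buffer, serve a whole sequence of release passes (ForceReleaseToOS above does exactly this), with statistics read out destructively at the end. A hedged usage sketch; the allocator instance and ReleaseClass helper are hypothetical:

  MemoryMapperT mapper(*allocator);   // buffer_ is recycled across classes
  for (uptr class_id = 1; class_id < kNumClasses; class_id++)
    ReleaseClass(&mapper, class_id);  // stand-in for MaybeReleaseToOS
  uptr ranges, bytes;
  if (mapper.GetAndResetStats(ranges, bytes))  // true iff anything was freed
    Printf("released %zu bytes in %zu ranges\n", bytes, ranges);
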
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h b/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h
index 61fb987..dd34fe8 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h
@@ -267,13 +267,9 @@ class LargeMmapAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() {
- mutex_.Lock();
- }
+ void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
- void ForceUnlock() {
- mutex_.Unlock();
- }
+ void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.cpp b/libsanitizer/sanitizer_common/sanitizer_common.cpp
index 33960d9..5fae8e3 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_common.cpp
@@ -37,10 +37,9 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
const char *mmap_type, error_t err,
bool raw_report) {
static int recursion_count;
- if (SANITIZER_RTEMS || raw_report || recursion_count) {
- // If we are on RTEMS or raw report is requested or we went into recursion,
- // just die. The Report() and CHECK calls below may call mmap recursively
- // and fail.
+ if (raw_report || recursion_count) {
+ // If raw report is requested or we went into recursion just die. The
+ // Report() and CHECK calls below may call mmap recursively and fail.
RawWrite("ERROR: Failed to mmap\n");
Die();
}
@@ -331,6 +330,14 @@ static int InstallMallocFreeHooks(void (*malloc_hook)(const void *, uptr),
return 0;
}
+void internal_sleep(unsigned seconds) {
+ internal_usleep((u64)seconds * 1000 * 1000);
+}
+void SleepForSeconds(unsigned seconds) {
+ internal_usleep((u64)seconds * 1000 * 1000);
+}
+void SleepForMillis(unsigned millis) { internal_usleep((u64)millis * 1000); }
+
} // namespace __sanitizer
using namespace __sanitizer;
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h
index 7b65dd7..cbdbb0c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.h
+++ b/libsanitizer/sanitizer_common/sanitizer_common.h
@@ -237,10 +237,16 @@ void SetPrintfAndReportCallback(void (*callback)(const char *));
// Lock sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
public:
- ScopedErrorReportLock();
- ~ScopedErrorReportLock();
+ ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
+ ~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }
- static void CheckLocked();
+ static void Lock() ACQUIRE(mutex_);
+ static void Unlock() RELEASE(mutex_);
+ static void CheckLocked() CHECK_LOCKED(mutex_);
+
+ private:
+ static atomic_uintptr_t reporting_thread_;
+ static StaticSpinMutex mutex_;
};
extern uptr stoptheworld_tracer_pid;
@@ -288,8 +294,8 @@ void InitTlsSize();
uptr GetTlsSize();
// Other
-void SleepForSeconds(int seconds);
-void SleepForMillis(int millis);
+void SleepForSeconds(unsigned seconds);
+void SleepForMillis(unsigned millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
@@ -1057,6 +1063,13 @@ class ArrayRef {
T *end_ = nullptr;
};
+#define PRINTF_128(v) \
+ (*((u8 *)&v + 0)), (*((u8 *)&v + 1)), (*((u8 *)&v + 2)), (*((u8 *)&v + 3)), \
+ (*((u8 *)&v + 4)), (*((u8 *)&v + 5)), (*((u8 *)&v + 6)), \
+ (*((u8 *)&v + 7)), (*((u8 *)&v + 8)), (*((u8 *)&v + 9)), \
+ (*((u8 *)&v + 10)), (*((u8 *)&v + 11)), (*((u8 *)&v + 12)), \
+ (*((u8 *)&v + 13)), (*((u8 *)&v + 14)), (*((u8 *)&v + 15))
+
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
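
Note: the new PRINTF_128 macro expands a 16-byte value into sixteen byte-sized varargs so the minimal sanitizer Printf can render it without native 128-bit support; the bytes come out in memory order, not numeric order. A hedged usage sketch, assuming the zero-padded "%02x" width that sanitizer Printf supports elsewhere:

  __int128 v = 0;  // e.g. a shadow value
  Printf("v = 0x%02x%02x%02x%02x%02x%02x%02x%02x"
         "%02x%02x%02x%02x%02x%02x%02x%02x\n",
         PRINTF_128(v));
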
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
index 7867fcc..6205d85 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
@@ -134,11 +134,11 @@ extern const short *_tolower_tab_;
// Platform-specific options.
#if SANITIZER_MAC
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
#elif SANITIZER_WINDOWS64
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
#else
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
#endif // SANITIZER_MAC
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
@@ -823,11 +823,11 @@ INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
// N.B.: If we switch this to internal_ we'll have to use internal_memmove
// due to memcpy being an alias of memmove on OS X.
void *ctx;
- if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
- } else {
+#else
COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
- }
+#endif
}
#define INIT_MEMCPY \
@@ -957,6 +957,7 @@ INTERCEPTOR(double, frexp, double x, int *exp) {
// Assuming frexp() always writes to |exp|.
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
double res = REAL(frexp)(x, exp);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(exp, sizeof(*exp));
return res;
}
@@ -969,22 +970,18 @@ INTERCEPTOR(double, frexp, double x, int *exp) {
INTERCEPTOR(float, frexpf, float x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp);
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
- float res = REAL(frexpf)(x, exp);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ float res = REAL(frexpf)(x, exp);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(exp, sizeof(*exp));
return res;
}
INTERCEPTOR(long double, frexpl, long double x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp);
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
- long double res = REAL(frexpl)(x, exp);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ long double res = REAL(frexpl)(x, exp);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(exp, sizeof(*exp));
return res;
}
@@ -5303,6 +5300,12 @@ INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
#define INIT_TIMES
#endif
+#if SANITIZER_S390 && \
+ (SANITIZER_INTERCEPT_TLS_GET_ADDR || SANITIZER_INTERCEPT_TLS_GET_OFFSET)
+extern "C" uptr __tls_get_offset_wrapper(void *arg, uptr (*fn)(void *arg));
+DEFINE_REAL(uptr, __tls_get_offset, void *arg)
+#endif
+
#if SANITIZER_INTERCEPT_TLS_GET_ADDR
#if !SANITIZER_S390
#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr)
@@ -5342,11 +5345,7 @@ void *__tls_get_addr_opt(void *arg);
// descriptor offset as an argument instead of a pointer. GOT address
// is passed in r12, so it's necessary to write it in assembly. This is
// the function used by the compiler.
-extern "C" uptr __tls_get_offset_wrapper(void *arg, uptr (*fn)(void *arg));
#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_offset)
-DEFINE_REAL(uptr, __tls_get_offset, void *arg)
-extern "C" uptr __tls_get_offset(void *arg);
-extern "C" uptr __interceptor___tls_get_offset(void *arg);
INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr_internal, arg);
@@ -5362,6 +5361,15 @@ INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
}
return res;
}
+#endif // SANITIZER_S390
+#else
+#define INIT_TLS_GET_ADDR
+#endif
+
+#if SANITIZER_S390 && \
+ (SANITIZER_INTERCEPT_TLS_GET_ADDR || SANITIZER_INTERCEPT_TLS_GET_OFFSET)
+extern "C" uptr __tls_get_offset(void *arg);
+extern "C" uptr __interceptor___tls_get_offset(void *arg);
// We need a hidden symbol aliasing the above, so that we can jump
// directly to it from the assembly below.
extern "C" __attribute__((alias("__interceptor___tls_get_addr_internal"),
@@ -5400,9 +5408,6 @@ asm(
"br %r3\n"
".size __tls_get_offset_wrapper, .-__tls_get_offset_wrapper\n"
);
-#endif // SANITIZER_S390
-#else
-#define INIT_TLS_GET_ADDR
#endif
#if SANITIZER_INTERCEPT_LISTXATTR
@@ -6099,6 +6104,40 @@ INTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode,
#define INIT_FOPEN
#endif
+#if SANITIZER_INTERCEPT_FLOPEN
+INTERCEPTOR(int, flopen, const char *path, int flags, ...) {
+ void *ctx;
+ va_list ap;
+ va_start(ap, flags);
+ u16 mode = static_cast<u16>(va_arg(ap, u32));
+ va_end(ap);
+ COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);
+ if (path) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ }
+ return REAL(flopen)(path, flags, mode);
+}
+
+INTERCEPTOR(int, flopenat, int dirfd, const char *path, int flags, ...) {
+ void *ctx;
+ va_list ap;
+ va_start(ap, flags);
+ u16 mode = static_cast<u16>(va_arg(ap, u32));
+ va_end(ap);
+ COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);
+ if (path) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ }
+ return REAL(flopenat)(dirfd, path, flags, mode);
+}
+
+#define INIT_FLOPEN \
+ COMMON_INTERCEPT_FUNCTION(flopen); \
+ COMMON_INTERCEPT_FUNCTION(flopenat);
+#else
+#define INIT_FLOPEN
+#endif
+
#if SANITIZER_INTERCEPT_FOPEN64
INTERCEPTOR(__sanitizer_FILE *, fopen64, const char *path, const char *mode) {
void *ctx;
@@ -6463,7 +6502,7 @@ INTERCEPTOR(int, sem_wait, __sanitizer_sem_t *s) {
INTERCEPTOR(int, sem_trywait, __sanitizer_sem_t *s) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sem_trywait, s);
- int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_trywait)(s);
+ int res = REAL(sem_trywait)(s);
if (res == 0) {
COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
}
@@ -10264,6 +10303,7 @@ static void InitializeCommonInterceptors() {
INIT_LIBIO_INTERNALS;
INIT_FOPEN;
INIT_FOPEN64;
+ INIT_FLOPEN;
INIT_OPEN_MEMSTREAM;
INIT_OBSTACK;
INIT_FFLUSH;
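
Note on the new flopen/flopenat interceptors: the optional mode is read as va_arg(ap, u32) and then narrowed because variadic arguments undergo default promotion; a 16-bit mode_t travels at int width, and reading it back at its narrow type would be undefined. A hedged standalone sketch of the pattern:

  #include <cstdarg>
  #include <cstdint>

  uint16_t ReadMode(int flags, ...) {
    va_list ap;
    va_start(ap, flags);
    // va_arg(ap, uint16_t) would be UB: the argument arrived promoted.
    uint16_t mode = static_cast<uint16_t>(va_arg(ap, uint32_t));
    va_end(ap);
    return mode;
  }
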
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp
index 1037938..01ccacc 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp
@@ -138,7 +138,7 @@ uptr ReservedAddressRange::InitAligned(uptr size, uptr align,
return start;
}
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#if !SANITIZER_FUCHSIA
// Reserve memory range [beg, end].
// We need to use inclusive range because end+1 may not be representable.
@@ -189,7 +189,7 @@ void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
Die();
}
-#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#endif // !SANITIZER_FUCHSIA
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_nolibc.cpp b/libsanitizer/sanitizer_common/sanitizer_common_nolibc.cpp
index 487a634..9a4e538 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_nolibc.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_common_nolibc.cpp
@@ -25,7 +25,6 @@ void LogMessageOnPrintf(const char *str) {}
#endif
void WriteToSyslog(const char *buffer) {}
void Abort() { internal__exit(1); }
-void SleepForSeconds(int seconds) { internal_sleep(seconds); }
#endif // !SANITIZER_WINDOWS
#if !SANITIZER_WINDOWS && !SANITIZER_MAC
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cpp b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cpp
index 2c924f5..ccb7065 100644
--- a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cpp
@@ -136,7 +136,7 @@ void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
DDMutex *m0 = (DDMutex*)dd.getData(from);
DDMutex *m1 = (DDMutex*)dd.getData(to);
- u32 stk_from = -1U, stk_to = -1U;
+ u32 stk_from = 0, stk_to = 0;
int unique_tid = 0;
dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
// Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cpp b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cpp
index e3f8e1b..1fbbbcc 100644
--- a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cpp
@@ -73,7 +73,7 @@ struct DDLogicalThread {
int nlocked;
};
-struct Mutex {
+struct MutexState {
StaticSpinMutex mtx;
u32 seq;
int nlink;
@@ -101,12 +101,12 @@ struct DD final : public DDetector {
void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
u32 allocateId(DDCallback *cb);
- Mutex *getMutex(u32 id);
- u32 getMutexId(Mutex *m);
+ MutexState *getMutex(u32 id);
+ u32 getMutexId(MutexState *m);
DDFlags flags;
- Mutex* mutex[kL1Size];
+ MutexState *mutex[kL1Size];
SpinMutex mtx;
InternalMmapVector<u32> free_id;
@@ -152,13 +152,11 @@ void DD::MutexInit(DDCallback *cb, DDMutex *m) {
atomic_store(&m->owner, 0, memory_order_relaxed);
}
-Mutex *DD::getMutex(u32 id) {
- return &mutex[id / kL2Size][id % kL2Size];
-}
+MutexState *DD::getMutex(u32 id) { return &mutex[id / kL2Size][id % kL2Size]; }
-u32 DD::getMutexId(Mutex *m) {
+u32 DD::getMutexId(MutexState *m) {
for (int i = 0; i < kL1Size; i++) {
- Mutex *tab = mutex[i];
+ MutexState *tab = mutex[i];
if (tab == 0)
break;
if (m >= tab && m < tab + kL2Size)
@@ -176,8 +174,8 @@ u32 DD::allocateId(DDCallback *cb) {
} else {
CHECK_LT(id_gen, kMaxMutex);
if ((id_gen % kL2Size) == 0) {
- mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
- "deadlock detector (mutex table)");
+ mutex[id_gen / kL2Size] = (MutexState *)MmapOrDie(
+ kL2Size * sizeof(MutexState), "deadlock detector (mutex table)");
}
id = id_gen++;
}
@@ -216,11 +214,11 @@ void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
}
bool added = false;
- Mutex *mtx = getMutex(m->id);
+ MutexState *mtx = getMutex(m->id);
for (int i = 0; i < lt->nlocked - 1; i++) {
u32 id1 = lt->locked[i].id;
u32 stk1 = lt->locked[i].stk;
- Mutex *mtx1 = getMutex(id1);
+ MutexState *mtx1 = getMutex(id1);
SpinMutexLock l(&mtx1->mtx);
if (mtx1->nlink == kMaxLink) {
// FIXME(dvyukov): check stale links
@@ -342,7 +340,7 @@ void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
// Clear and invalidate the mutex descriptor.
{
- Mutex *mtx = getMutex(m->id);
+ MutexState *mtx = getMutex(m->id);
SpinMutexLock l(&mtx->mtx);
mtx->seq++;
mtx->nlink = 0;
@@ -361,7 +359,7 @@ void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
int npath = 0;
int npending = 0;
{
- Mutex *mtx = getMutex(m->id);
+ MutexState *mtx = getMutex(m->id);
SpinMutexLock l(&mtx->mtx);
for (int li = 0; li < mtx->nlink; li++)
pt->pending[npending++] = mtx->link[li];
@@ -374,7 +372,7 @@ void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
}
if (pt->visited[link.id])
continue;
- Mutex *mtx1 = getMutex(link.id);
+ MutexState *mtx1 = getMutex(link.id);
SpinMutexLock l(&mtx1->mtx);
if (mtx1->seq != link.seq)
continue;
@@ -387,7 +385,7 @@ void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
return Report(pt, lt, npath); // Bingo!
for (int li = 0; li < mtx1->nlink; li++) {
Link *link1 = &mtx1->link[li];
- // Mutex *mtx2 = getMutex(link->id);
+ // MutexState *mtx2 = getMutex(link->id);
// FIXME(dvyukov): fast seq check
// FIXME(dvyukov): fast nlink != 0 check
// FIXME(dvyukov): fast pending check?
diff --git a/libsanitizer/sanitizer_common/sanitizer_errno.h b/libsanitizer/sanitizer_common/sanitizer_errno.h
index 94f16b6..70a6e88 100644
--- a/libsanitizer/sanitizer_common/sanitizer_errno.h
+++ b/libsanitizer/sanitizer_common/sanitizer_errno.h
@@ -23,8 +23,7 @@
#if SANITIZER_FREEBSD || SANITIZER_MAC
# define __errno_location __error
-#elif SANITIZER_ANDROID || SANITIZER_NETBSD || \
- SANITIZER_RTEMS
+#elif SANITIZER_ANDROID || SANITIZER_NETBSD
# define __errno_location __errno
#elif SANITIZER_SOLARIS
# define __errno_location ___errno
diff --git a/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp b/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp
index 4f692f9..65bc398 100644
--- a/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp
@@ -36,16 +36,11 @@ uptr internal_sched_yield() {
return 0; // Why doesn't this return void?
}
-static void internal_nanosleep(zx_time_t ns) {
- zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
+void internal_usleep(u64 useconds) {
+ zx_status_t status = _zx_nanosleep(_zx_deadline_after(ZX_USEC(useconds)));
CHECK_EQ(status, ZX_OK);
}
-unsigned int internal_sleep(unsigned int seconds) {
- internal_nanosleep(ZX_SEC(seconds));
- return 0;
-}
-
u64 NanoTime() {
zx_handle_t utc_clock = _zx_utc_reference_get();
CHECK_NE(utc_clock, ZX_HANDLE_INVALID);
@@ -78,10 +73,6 @@ void Abort() { abort(); }
int Atexit(void (*function)(void)) { return atexit(function); }
-void SleepForSeconds(int seconds) { internal_sleep(seconds); }
-
-void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }
-
void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
pthread_attr_t attr;
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
@@ -109,6 +100,18 @@ bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+ zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(p), cmp,
+ ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
+ if (status != ZX_ERR_BAD_STATE) // Normal race.
+ CHECK_EQ(status, ZX_OK);
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {
+ zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(p), count);
+ CHECK_EQ(status, ZX_OK);
+}
+
enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
BlockingMutex::BlockingMutex() {
@@ -145,8 +148,8 @@ void BlockingMutex::Unlock() {
}
}
-void BlockingMutex::CheckLocked() {
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+void BlockingMutex::CheckLocked() const {
+ auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}
@@ -156,8 +159,10 @@ uptr GetMmapGranularity() { return _zx_system_get_page_size(); }
sanitizer_shadow_bounds_t ShadowBounds;
+void InitShadowBounds() { ShadowBounds = __sanitizer_shadow_bounds(); }
+
uptr GetMaxUserVirtualAddress() {
- ShadowBounds = __sanitizer_shadow_bounds();
+ InitShadowBounds();
return ShadowBounds.memory_limit - 1;
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_fuchsia.h b/libsanitizer/sanitizer_common/sanitizer_fuchsia.h
index 96f9cde..26c1dea 100644
--- a/libsanitizer/sanitizer_common/sanitizer_fuchsia.h
+++ b/libsanitizer/sanitizer_common/sanitizer_fuchsia.h
@@ -30,6 +30,8 @@ struct MemoryMappingLayoutData {
size_t current; // Current index into the vector.
};
+void InitShadowBounds();
+
} // namespace __sanitizer
#endif // SANITIZER_FUCHSIA
diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.h b/libsanitizer/sanitizer_common/sanitizer_libc.h
index ec0a6de..bcb81eb 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libc.h
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.h
@@ -67,7 +67,8 @@ uptr internal_ftruncate(fd_t fd, uptr size);
// OS
void NORETURN internal__exit(int exitcode);
-unsigned int internal_sleep(unsigned int seconds);
+void internal_sleep(unsigned seconds);
+void internal_usleep(u64 useconds);
uptr internal_getpid();
uptr internal_getppid();
diff --git a/libsanitizer/sanitizer_common/sanitizer_libignore.cpp b/libsanitizer/sanitizer_common/sanitizer_libignore.cpp
index a65d3d8..431efc5 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libignore.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_libignore.cpp
@@ -84,6 +84,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
ignored_code_ranges_[idx].begin = range.beg;
ignored_code_ranges_[idx].end = range.end;
atomic_store(&ignored_ranges_count_, idx + 1, memory_order_release);
+ atomic_store(&enabled_, 1, memory_order_release);
break;
}
}
@@ -114,6 +115,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
instrumented_code_ranges_[idx].end = range.end;
atomic_store(&instrumented_ranges_count_, idx + 1,
memory_order_release);
+ atomic_store(&enabled_, 1, memory_order_release);
}
}
}
@@ -123,6 +125,29 @@ void LibIgnore::OnLibraryUnloaded() {
OnLibraryLoaded(nullptr);
}
+bool LibIgnore::IsIgnoredSlow(uptr pc, bool *pc_in_ignored_lib) const {
+ const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire);
+ for (uptr i = 0; i < n; i++) {
+ if (IsInRange(pc, ignored_code_ranges_[i])) {
+ *pc_in_ignored_lib = true;
+ return true;
+ }
+ }
+ *pc_in_ignored_lib = false;
+ if (track_instrumented_libs_ && !IsPcInstrumented(pc))
+ return true;
+ return false;
+}
+
+bool LibIgnore::IsPcInstrumented(uptr pc) const {
+ const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire);
+ for (uptr i = 0; i < n; i++) {
+ if (IsInRange(pc, instrumented_code_ranges_[i]))
+ return true;
+ }
+ return false;
+}
+
} // namespace __sanitizer
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC ||
diff --git a/libsanitizer/sanitizer_common/sanitizer_libignore.h b/libsanitizer/sanitizer_common/sanitizer_libignore.h
index 256f685..85452e5 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libignore.h
+++ b/libsanitizer/sanitizer_common/sanitizer_libignore.h
@@ -45,9 +45,6 @@ class LibIgnore {
// "pc_in_ignored_lib" if the PC is in an ignored library, false otherwise.
bool IsIgnored(uptr pc, bool *pc_in_ignored_lib) const;
- // Checks whether the provided PC belongs to an instrumented module.
- bool IsPcInstrumented(uptr pc) const;
-
private:
struct Lib {
char *templ;
@@ -61,6 +58,10 @@ class LibIgnore {
uptr end;
};
+ // Checks whether the provided PC belongs to an instrumented module.
+ bool IsPcInstrumented(uptr pc) const;
+ bool IsIgnoredSlow(uptr pc, bool *pc_in_ignored_lib) const;
+
inline bool IsInRange(uptr pc, const LibCodeRange &range) const {
return (pc >= range.begin && pc < range.end);
}
@@ -70,6 +71,8 @@ class LibIgnore {
static const uptr kMaxLibs = 1024;
// Hot part:
+ atomic_uintptr_t enabled_;
+
atomic_uintptr_t ignored_ranges_count_;
LibCodeRange ignored_code_ranges_[kMaxIgnoredRanges];
@@ -87,27 +90,11 @@ class LibIgnore {
void operator = (const LibIgnore&); // not implemented
};
-inline bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const {
- const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire);
- for (uptr i = 0; i < n; i++) {
- if (IsInRange(pc, ignored_code_ranges_[i])) {
- *pc_in_ignored_lib = true;
- return true;
- }
- }
- *pc_in_ignored_lib = false;
- if (track_instrumented_libs_ && !IsPcInstrumented(pc))
- return true;
- return false;
-}
-
-inline bool LibIgnore::IsPcInstrumented(uptr pc) const {
- const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire);
- for (uptr i = 0; i < n; i++) {
- if (IsInRange(pc, instrumented_code_ranges_[i]))
- return true;
- }
- return false;
+ALWAYS_INLINE
+bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const {
+ if (LIKELY(atomic_load(&enabled_, memory_order_acquire) == 0))
+ return false;
+ return IsIgnoredSlow(pc, pc_in_ignored_lib);
}
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_linux.cpp
index b371477..9b7d87e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.cpp
@@ -430,13 +430,11 @@ uptr internal_sched_yield() {
return internal_syscall(SYSCALL(sched_yield));
}
-unsigned int internal_sleep(unsigned int seconds) {
+void internal_usleep(u64 useconds) {
struct timespec ts;
- ts.tv_sec = seconds;
- ts.tv_nsec = 0;
- int res = internal_syscall(SYSCALL(nanosleep), &ts, &ts);
- if (res) return ts.tv_sec;
- return 0;
+ ts.tv_sec = useconds / 1000000;
+ ts.tv_nsec = (useconds % 1000000) * 1000;
+ internal_syscall(SYSCALL(nanosleep), &ts, &ts);
}
uptr internal_execve(const char *filename, char *const argv[],
@@ -641,11 +639,27 @@ char **GetEnviron() {
}
#if !SANITIZER_SOLARIS
-enum MutexState {
- MtxUnlocked = 0,
- MtxLocked = 1,
- MtxSleeping = 2
-};
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+# if SANITIZER_FREEBSD
+ _umtx_op(p, UMTX_OP_WAIT_UINT, cmp, 0, 0);
+# elif SANITIZER_NETBSD
+ sched_yield(); /* No userspace futex-like synchronization */
+# else
+ internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAIT_PRIVATE, cmp, 0, 0, 0);
+# endif
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {
+# if SANITIZER_FREEBSD
+ _umtx_op(p, UMTX_OP_WAKE, count, 0, 0);
+# elif SANITIZER_NETBSD
+ /* No userspace futex-like synchronization */
+# else
+ internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAKE_PRIVATE, count, 0, 0, 0);
+# endif
+}
+
+enum { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
BlockingMutex::BlockingMutex() {
internal_memset(this, 0, sizeof(*this));
@@ -683,11 +697,11 @@ void BlockingMutex::Unlock() {
}
}
-void BlockingMutex::CheckLocked() {
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+void BlockingMutex::CheckLocked() const {
+ auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}
-#endif // !SANITIZER_SOLARIS
+# endif // !SANITIZER_SOLARIS
// ----------------- sanitizer_linux.h
// The actual size of this structure is specified by d_reclen.
@@ -884,7 +898,7 @@ void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
__sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
- k_set->sig[idx] &= ~(1 << bit);
+ k_set->sig[idx] &= ~((uptr)1 << bit);
}
bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
@@ -894,7 +908,7 @@ bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
__sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
- return k_set->sig[idx] & (1 << bit);
+ return k_set->sig[idx] & ((uptr)1 << bit);
}
#elif SANITIZER_FREEBSD
void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
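
Note: the new FutexWait/FutexWake primitives, together with the MtxUnlocked/MtxLocked/MtxSleeping states above, support the usual three-state futex mutex. A hedged sketch of that protocol, illustrative rather than BlockingMutex verbatim:

  void LockSketch(atomic_uint32_t *m) {
    if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
      return;  // fast path: the lock was free
    // Contended: advertise a sleeper so the unlocker knows to call FutexWake.
    while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
      FutexWait(m, MtxSleeping);  // sleep while the word still reads MtxSleeping
  }

  void UnlockSketch(atomic_uint32_t *m) {
    u32 prev = atomic_exchange(m, MtxUnlocked, memory_order_release);
    if (prev == MtxSleeping)
      FutexWake(m, 1);  // at least one waiter is parked in the kernel
  }
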
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp
index 4f9577a..7ce9e25 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -203,7 +203,7 @@ void InitTlsSize() {
g_use_dlpi_tls_data =
GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;
-#if defined(__x86_64__) || defined(__powerpc64__)
+#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__)
void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
size_t tls_align;
((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);
@@ -317,21 +317,44 @@ struct TlsBlock {
};
} // namespace
+#ifdef __s390__
+extern "C" uptr __tls_get_offset(void *arg);
+
+static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
+ // The __tls_get_offset ABI requires %r12 to point to GOT and %r2 to be an
+ // offset of a struct tls_index inside GOT. We don't possess either of the
+ // two, so violate the letter of the "ELF Handling For Thread-Local
+ // Storage" document and assume that the implementation just dereferences
+ // %r2 + %r12.
+ uptr tls_index[2] = {ti_module, ti_offset};
+ register uptr r2 asm("2") = 0;
+ register void *r12 asm("12") = tls_index;
+ asm("basr %%r14, %[__tls_get_offset]"
+ : "+r"(r2)
+ : [__tls_get_offset] "r"(__tls_get_offset), "r"(r12)
+ : "memory", "cc", "0", "1", "3", "4", "5", "14");
+ return r2;
+}
+#else
extern "C" void *__tls_get_addr(size_t *);
+#endif
static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
void *data) {
if (!info->dlpi_tls_modid)
return 0;
uptr begin = (uptr)info->dlpi_tls_data;
-#ifndef __s390__
if (!g_use_dlpi_tls_data) {
// Call __tls_get_addr as a fallback. This forces TLS allocation on glibc
// and FreeBSD.
+#ifdef __s390__
+ begin = (uptr)__builtin_thread_pointer() +
+ TlsGetOffset(info->dlpi_tls_modid, 0);
+#else
size_t mod_and_off[2] = {info->dlpi_tls_modid, 0};
begin = (uptr)__tls_get_addr(mod_and_off);
- }
#endif
+ }
for (unsigned i = 0; i != info->dlpi_phnum; ++i)
if (info->dlpi_phdr[i].p_type == PT_TLS) {
static_cast<InternalMmapVector<TlsBlock> *>(data)->push_back(
@@ -427,12 +450,16 @@ static void GetTls(uptr *addr, uptr *size) {
*size = 0;
}
#elif SANITIZER_GLIBC && defined(__x86_64__)
- // For x86-64, use an O(1) approach which requires precise
- // ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
+ // For aarch64 and x86-64, use an O(1) approach which requires relatively
+ // precise ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
asm("mov %%fs:16,%0" : "=r"(*addr));
*size = g_tls_size;
*addr -= *size;
*addr += ThreadDescriptorSize();
+#elif SANITIZER_GLIBC && defined(__aarch64__)
+ *addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
+ ThreadDescriptorSize();
+ *size = g_tls_size + ThreadDescriptorSize();
#elif SANITIZER_GLIBC && defined(__powerpc64__)
// Workaround for glibc<2.25(?). 2.27 is known to not need this.
uptr tp;
@@ -732,13 +759,9 @@ u32 GetNumberOfCPUs() {
#elif SANITIZER_SOLARIS
return sysconf(_SC_NPROCESSORS_ONLN);
#else
-#if defined(CPU_COUNT)
cpu_set_t CPUs;
CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
return CPU_COUNT(&CPUs);
-#else
- return 1;
-#endif
#endif
}
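The deleted #if defined(CPU_COUNT) fallback guarded against glibc older than 2.6, where the macro did not exist; it is unconditionally available now. A standalone sketch of the same affinity-based count (g++ defines _GNU_SOURCE by default, which these glibc APIs require):

    #include <sched.h>

    #include <cstdio>

    int main() {
      cpu_set_t cpus;
      // Affinity mask of the calling process (pid 0 = self).
      if (sched_getaffinity(0, sizeof(cpu_set_t), &cpus) != 0)
        return 1;
      // CPU_COUNT popcounts the mask.
      printf("usable CPUs: %d\n", CPU_COUNT(&cpus));
      return 0;
    }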
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_mac.cpp
index 30a94fc..125ecac 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.cpp
@@ -37,7 +37,7 @@
extern char **environ;
#endif
-#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
+#if defined(__has_include) && __has_include(<os/trace.h>)
#define SANITIZER_OS_TRACE 1
#include <os/trace.h>
#else
@@ -70,15 +70,7 @@ extern "C" {
#include <mach/mach_time.h>
#include <mach/vm_statistics.h>
#include <malloc/malloc.h>
-#if defined(__has_builtin) && __has_builtin(__builtin_os_log_format)
-# include <os/log.h>
-#else
- /* Without support for __builtin_os_log_format, fall back to the older
- method. */
-# define OS_LOG_DEFAULT 0
-# define os_log_error(A,B,C) \
- asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", (C));
-#endif
+#include <os/log.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
@@ -227,9 +219,7 @@ void internal__exit(int exitcode) {
_exit(exitcode);
}
-unsigned int internal_sleep(unsigned int seconds) {
- return sleep(seconds);
-}
+void internal_usleep(u64 useconds) { usleep(useconds); }
uptr internal_getpid() {
return getpid();
@@ -519,6 +509,13 @@ void MprotectMallocZones(void *addr, int prot) {
}
}
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+ // FIXME: implement actual blocking.
+ sched_yield();
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {}
+
BlockingMutex::BlockingMutex() {
internal_memset(this, 0, sizeof(*this));
}
@@ -534,7 +531,7 @@ void BlockingMutex::Unlock() {
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
-void BlockingMutex::CheckLocked() {
+void BlockingMutex::CheckLocked() const {
CHECK_NE(*(OSSpinLock*)&opaque_storage_, 0);
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_mac.h
index 96a5986..0b6af5a 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.h
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.h
@@ -14,26 +14,6 @@
#include "sanitizer_common.h"
#include "sanitizer_platform.h"
-
-/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use
- TARGET_OS_MAC (we have no support for iOS in any form for these versions,
- so there's no ambiguity). */
-#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC
-# define TARGET_OS_OSX 1
-#endif
-
-/* Other TARGET_OS_xxx are not present on earlier versions, define them to
- 0 (we have no support for them; they are not valid targets anyway). */
-#ifndef TARGET_OS_IOS
-#define TARGET_OS_IOS 0
-#endif
-#ifndef TARGET_OS_TV
-#define TARGET_OS_TV 0
-#endif
-#ifndef TARGET_OS_WATCH
-#define TARGET_OS_WATCH 0
-#endif
-
#if SANITIZER_MAC
#include "sanitizer_posix.h"
diff --git a/libsanitizer/sanitizer_common/sanitizer_mutex.cpp b/libsanitizer/sanitizer_common/sanitizer_mutex.cpp
new file mode 100644
index 0000000..bc2d83c
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_mutex.cpp
@@ -0,0 +1,39 @@
+//===-- sanitizer_mutex.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_mutex.h"
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+void Semaphore::Wait() {
+ u32 count = atomic_load(&state_, memory_order_relaxed);
+ for (;;) {
+ if (count == 0) {
+ FutexWait(&state_, 0);
+ count = atomic_load(&state_, memory_order_relaxed);
+ continue;
+ }
+ if (atomic_compare_exchange_weak(&state_, &count, count - 1,
+ memory_order_acquire))
+ break;
+ }
+}
+
+void Semaphore::Post(u32 count) {
+ CHECK_NE(count, 0);
+ atomic_fetch_add(&state_, count, memory_order_release);
+ FutexWake(&state_, count);
+}
+
+} // namespace __sanitizer
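Semaphore::Wait() parks only when it observes a zero count (FutexWait re-checks the value before blocking), and the acquire CAS on the decrement pairs with the release increment in Post(). A rough portable analogue of the same protocol, using C++20 atomic wait/notify where the runtime uses futexes (a sketch, not the actual implementation):

    #include <atomic>
    #include <cstdint>

    class MiniSemaphore {
     public:
      void Wait() {
        uint32_t count = state_.load(std::memory_order_relaxed);
        for (;;) {
          if (count == 0) {
            // Block while the count is still zero, then re-read it.
            state_.wait(0, std::memory_order_relaxed);
            count = state_.load(std::memory_order_relaxed);
            continue;
          }
          // Acquire pairs with the release fetch_add in Post().
          if (state_.compare_exchange_weak(count, count - 1,
                                           std::memory_order_acquire))
            break;
        }
      }
      void Post(uint32_t count = 1) {
        state_.fetch_add(count, std::memory_order_release);
        if (count == 1)
          state_.notify_one();
        else
          state_.notify_all();
      }
     private:
      std::atomic<uint32_t> state_{0};
    };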
diff --git a/libsanitizer/sanitizer_common/sanitizer_mutex.h b/libsanitizer/sanitizer_common/sanitizer_mutex.h
index 40a6591..e3ff650 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mutex.h
+++ b/libsanitizer/sanitizer_common/sanitizer_mutex.h
@@ -16,30 +16,29 @@
#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
+#include "sanitizer_thread_safety.h"
namespace __sanitizer {
-class StaticSpinMutex {
+class MUTEX StaticSpinMutex {
public:
void Init() {
atomic_store(&state_, 0, memory_order_relaxed);
}
- void Lock() {
+ void Lock() ACQUIRE() {
if (TryLock())
return;
LockSlow();
}
- bool TryLock() {
+ bool TryLock() TRY_ACQUIRE(true) {
return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
}
- void Unlock() {
- atomic_store(&state_, 0, memory_order_release);
- }
+ void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
- void CheckLocked() {
+ void CheckLocked() const CHECK_LOCKED() {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
}
@@ -59,24 +58,223 @@ class StaticSpinMutex {
}
};
-class SpinMutex : public StaticSpinMutex {
+class MUTEX SpinMutex : public StaticSpinMutex {
public:
SpinMutex() {
Init();
}
private:
- SpinMutex(const SpinMutex&);
- void operator=(const SpinMutex&);
+ SpinMutex(const SpinMutex &) = delete;
+ void operator=(const SpinMutex &) = delete;
+};
+
+// Semaphore provides an OS-dependent way to park/unpark threads.
+// The last thread returned from Wait can destroy the object
+// (destruction-safety).
+class Semaphore {
+ public:
+ constexpr Semaphore() {}
+ Semaphore(const Semaphore &) = delete;
+ void operator=(const Semaphore &) = delete;
+
+ void Wait();
+ void Post(u32 count = 1);
+
+ private:
+ atomic_uint32_t state_ = {0};
+};
+
+// Reader-writer mutex.
+class MUTEX Mutex2 {
+ public:
+ constexpr Mutex2() {}
+
+ void Lock() ACQUIRE() {
+ u64 reset_mask = ~0ull;
+ u64 state = atomic_load_relaxed(&state_);
+ const uptr kMaxSpinIters = 1500;
+ for (uptr spin_iters = 0;; spin_iters++) {
+ u64 new_state;
+ bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
+ if (LIKELY(!locked)) {
+ // The mutex is not read-/write-locked, try to lock.
+ new_state = (state | kWriterLock) & reset_mask;
+ } else if (spin_iters > kMaxSpinIters) {
+ // We've spun enough, increment waiting writers count and block.
+ // The counter will be decremented by whoever wakes us.
+ new_state = (state + kWaitingWriterInc) & reset_mask;
+ } else if ((state & kWriterSpinWait) == 0) {
+      // Active spinning, but denote our presence so that the unlocking
+      // thread does not wake up other threads.
+ new_state = state | kWriterSpinWait;
+ } else {
+ // Active spinning.
+ state = atomic_load(&state_, memory_order_relaxed);
+ continue;
+ }
+ if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+ memory_order_acquire)))
+ continue;
+ if (LIKELY(!locked))
+ return; // We've locked the mutex.
+ if (spin_iters > kMaxSpinIters) {
+ // We've incremented waiting writers, so now block.
+ writers_.Wait();
+ spin_iters = 0;
+ state = atomic_load(&state_, memory_order_relaxed);
+ DCHECK_NE(state & kWriterSpinWait, 0);
+ } else {
+ // We've set kWriterSpinWait, but we are still in active spinning.
+ }
+ // We either blocked and were unblocked,
+ // or we just spun but set kWriterSpinWait.
+ // Either way we need to reset kWriterSpinWait
+ // next time we take the lock or block again.
+ reset_mask = ~kWriterSpinWait;
+ }
+ }
+
+ void Unlock() RELEASE() {
+ bool wake_writer;
+ u64 wake_readers;
+ u64 new_state;
+ u64 state = atomic_load_relaxed(&state_);
+ do {
+ DCHECK_NE(state & kWriterLock, 0);
+ DCHECK_EQ(state & kReaderLockMask, 0);
+ new_state = state & ~kWriterLock;
+ wake_writer =
+ (state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
+ if (wake_writer)
+ new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
+ wake_readers =
+ (state & (kWriterSpinWait | kWaitingWriterMask)) != 0
+ ? 0
+ : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
+ if (wake_readers)
+ new_state = (new_state & ~kWaitingReaderMask) +
+ (wake_readers << kReaderLockShift);
+ } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+ memory_order_release)));
+ if (UNLIKELY(wake_writer))
+ writers_.Post();
+ else if (UNLIKELY(wake_readers))
+ readers_.Post(wake_readers);
+ }
+
+ void ReadLock() ACQUIRE_SHARED() {
+ bool locked;
+ u64 new_state;
+ u64 state = atomic_load_relaxed(&state_);
+ do {
+ locked =
+ (state & kReaderLockMask) == 0 &&
+ (state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
+ if (LIKELY(!locked))
+ new_state = state + kReaderLockInc;
+ else
+ new_state = state + kWaitingReaderInc;
+ } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+ memory_order_acquire)));
+ if (UNLIKELY(locked))
+ readers_.Wait();
+ DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
+ DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
+ }
+
+ void ReadUnlock() RELEASE_SHARED() {
+ bool wake;
+ u64 new_state;
+ u64 state = atomic_load_relaxed(&state_);
+ do {
+ DCHECK_NE(state & kReaderLockMask, 0);
+ DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
+ new_state = state - kReaderLockInc;
+ wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
+ (new_state & kWaitingWriterMask) != 0;
+ if (wake)
+ new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
+ } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+ memory_order_release)));
+ if (UNLIKELY(wake))
+ writers_.Post();
+ }
+
+ // This function does not guarantee an explicit check that the calling thread
+ // is the thread which owns the mutex. This behavior, while more strictly
+ // correct, causes problems in cases like StopTheWorld, where a parent thread
+ // owns the mutex but a child checks that it is locked. Rather than
+  // maintaining complex state to work around those situations, the check
+  // only verifies that the mutex is owned.
+ void CheckWriteLocked() const CHECK_LOCKED() {
+ CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
+ }
+
+ void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }
+
+ void CheckReadLocked() const CHECK_LOCKED() {
+ CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
+ }
+
+ private:
+ atomic_uint64_t state_ = {0};
+ Semaphore writers_;
+ Semaphore readers_;
+
+ // The state has 3 counters:
+ // - number of readers holding the lock,
+ // if non zero, the mutex is read-locked
+ // - number of waiting readers,
+  //    if non zero, the mutex is write-locked
+ // - number of waiting writers,
+ // if non zero, the mutex is read- or write-locked
+ // And 2 flags:
+ // - writer lock
+ // if set, the mutex is write-locked
+ // - a writer is awake and spin-waiting
+ // the flag is used to prevent thundering herd problem
+ // (new writers are not woken if this flag is set)
+ //
+  // Writers support active spinning, readers do not.
+  // But readers are more aggressive and always take the mutex
+  // if there are any other readers.
+  // Writers hand off the mutex to readers: after wake-up, readers
+  // already assume ownership of the mutex (they don't need to do any
+  // state updates). But the mutex is not handed off to writers;
+  // after wake-up, writers compete to lock the mutex again.
+ // This is needed to allow repeated write locks even in presence
+ // of other blocked writers.
+ static constexpr u64 kCounterWidth = 20;
+ static constexpr u64 kReaderLockShift = 0;
+ static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
+ static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
+ << kReaderLockShift;
+ static constexpr u64 kWaitingReaderShift = kCounterWidth;
+ static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
+ static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
+ << kWaitingReaderShift;
+ static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
+ static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
+ static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
+ << kWaitingWriterShift;
+ static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
+ static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
+
+ Mutex2(const Mutex2 &) = delete;
+ void operator=(const Mutex2 &) = delete;
};
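The 64-bit state word above packs the three 20-bit counters into bits 0-59 and keeps the two flags in bits 60 and 61. A quick standalone check of that layout, with the constants reproduced from the class definition:

    #include <cstdint>

    constexpr uint64_t kCounterWidth = 20;
    constexpr uint64_t kReaderLockMask = ((1ull << kCounterWidth) - 1) << 0;
    constexpr uint64_t kWaitingReaderMask = ((1ull << kCounterWidth) - 1) << 20;
    constexpr uint64_t kWaitingWriterMask = ((1ull << kCounterWidth) - 1) << 40;
    constexpr uint64_t kWriterLock = 1ull << 60;
    constexpr uint64_t kWriterSpinWait = 1ull << 61;

    // The three counter fields and the two flag bits must not overlap.
    static_assert((kReaderLockMask & kWaitingReaderMask) == 0, "overlap");
    static_assert((kWaitingReaderMask & kWaitingWriterMask) == 0, "overlap");
    static_assert(((kReaderLockMask | kWaitingReaderMask | kWaitingWriterMask) &
                   (kWriterLock | kWriterSpinWait)) == 0, "overlap");

    int main() { return 0; }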
-class BlockingMutex {
+void FutexWait(atomic_uint32_t *p, u32 cmp);
+void FutexWake(atomic_uint32_t *p, u32 count);
+
+class MUTEX BlockingMutex {
public:
explicit constexpr BlockingMutex(LinkerInitialized)
: opaque_storage_ {0, }, owner_ {0} {}
BlockingMutex();
- void Lock();
- void Unlock();
+ void Lock() ACQUIRE();
+ void Unlock() RELEASE();
// This function does not guarantee an explicit check that the calling thread
// is the thread which owns the mutex. This behavior, while more strictly
@@ -85,7 +283,7 @@ class BlockingMutex {
// maintaining complex state to work around those situations, the check only
// checks that the mutex is owned, and assumes callers to be generally
// well-behaved.
- void CheckLocked();
+ void CheckLocked() const CHECK_LOCKED();
private:
// Solaris mutex_t has a member that requires 64-bit alignment.
@@ -94,7 +292,7 @@ class BlockingMutex {
};
// Reader-writer spin mutex.
-class RWMutex {
+class MUTEX RWMutex {
public:
RWMutex() {
atomic_store(&state_, kUnlocked, memory_order_relaxed);
@@ -104,7 +302,7 @@ class RWMutex {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
- void Lock() {
+ void Lock() ACQUIRE() {
u32 cmp = kUnlocked;
if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
memory_order_acquire))
@@ -112,27 +310,27 @@ class RWMutex {
LockSlow();
}
- void Unlock() {
+ void Unlock() RELEASE() {
u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
DCHECK_NE(prev & kWriteLock, 0);
(void)prev;
}
- void ReadLock() {
+ void ReadLock() ACQUIRE_SHARED() {
u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
if ((prev & kWriteLock) == 0)
return;
ReadLockSlow();
}
- void ReadUnlock() {
+ void ReadUnlock() RELEASE_SHARED() {
u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
DCHECK_EQ(prev & kWriteLock, 0);
DCHECK_GT(prev & ~kWriteLock, 0);
(void)prev;
}
- void CheckLocked() {
+ void CheckLocked() const CHECK_LOCKED() {
CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
@@ -171,46 +369,40 @@ class RWMutex {
}
}
- RWMutex(const RWMutex&);
- void operator = (const RWMutex&);
+ RWMutex(const RWMutex &) = delete;
+ void operator=(const RWMutex &) = delete;
};
-template<typename MutexType>
-class GenericScopedLock {
+template <typename MutexType>
+class SCOPED_LOCK GenericScopedLock {
public:
- explicit GenericScopedLock(MutexType *mu)
- : mu_(mu) {
+ explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
mu_->Lock();
}
- ~GenericScopedLock() {
- mu_->Unlock();
- }
+ ~GenericScopedLock() RELEASE() { mu_->Unlock(); }
private:
MutexType *mu_;
- GenericScopedLock(const GenericScopedLock&);
- void operator=(const GenericScopedLock&);
+ GenericScopedLock(const GenericScopedLock &) = delete;
+ void operator=(const GenericScopedLock &) = delete;
};
-template<typename MutexType>
-class GenericScopedReadLock {
+template <typename MutexType>
+class SCOPED_LOCK GenericScopedReadLock {
public:
- explicit GenericScopedReadLock(MutexType *mu)
- : mu_(mu) {
+ explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
mu_->ReadLock();
}
- ~GenericScopedReadLock() {
- mu_->ReadUnlock();
- }
+ ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
private:
MutexType *mu_;
- GenericScopedReadLock(const GenericScopedReadLock&);
- void operator=(const GenericScopedReadLock&);
+ GenericScopedReadLock(const GenericScopedReadLock &) = delete;
+ void operator=(const GenericScopedReadLock &) = delete;
};
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
diff --git a/libsanitizer/sanitizer_common/sanitizer_netbsd.cpp b/libsanitizer/sanitizer_common/sanitizer_netbsd.cpp
index ac20f91..5e601bd 100644
--- a/libsanitizer/sanitizer_common/sanitizer_netbsd.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_netbsd.cpp
@@ -215,15 +215,12 @@ void internal__exit(int exitcode) {
Die(); // Unreachable.
}
-unsigned int internal_sleep(unsigned int seconds) {
+void internal_usleep(u64 useconds) {
struct timespec ts;
- ts.tv_sec = seconds;
- ts.tv_nsec = 0;
+ ts.tv_sec = useconds / 1000000;
+ ts.tv_nsec = (useconds % 1000000) * 1000;
CHECK(&_sys___nanosleep50);
- int res = _sys___nanosleep50(&ts, &ts);
- if (res)
- return ts.tv_sec;
- return 0;
+ _sys___nanosleep50(&ts, &ts);
}
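The conversion is the usual div/mod split: for example, useconds = 1500000 yields tv_sec = 1 and tv_nsec = 500000000. A self-contained sketch of the same arithmetic on plain POSIX nanosleep (the wrapper name is illustrative):

    #include <time.h>

    // Sleep for `useconds` microseconds, ignoring early wakeup as above.
    static void usleep_via_nanosleep(unsigned long long useconds) {
      struct timespec ts;
      ts.tv_sec = useconds / 1000000;            // whole seconds
      ts.tv_nsec = (useconds % 1000000) * 1000;  // remainder, in nanoseconds
      nanosleep(&ts, nullptr);
    }

    int main() {
      usleep_via_nanosleep(1500000);  // 1.5 s
      return 0;
    }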
uptr internal_execve(const char *filename, char *const argv[],
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h
index 2f64584..4d3c088 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform.h
@@ -13,10 +13,9 @@
#define SANITIZER_PLATFORM_H
#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \
- !defined(__APPLE__) && !defined(_WIN32) && \
- !defined(__Fuchsia__) && !defined(__rtems__) && \
- !(defined(__sun__) && defined(__svr4__))
-# error "This operating system is not supported"
+ !defined(__APPLE__) && !defined(_WIN32) && !defined(__Fuchsia__) && \
+ !(defined(__sun__) && defined(__svr4__))
+# error "This operating system is not supported"
#endif
// Get __GLIBC__ on a glibc platform. Exclude Android: features.h includes C
@@ -117,12 +116,6 @@
# define SANITIZER_FUCHSIA 0
#endif
-#if defined(__rtems__)
-# define SANITIZER_RTEMS 1
-#else
-# define SANITIZER_RTEMS 0
-#endif
-
#define SANITIZER_POSIX \
(SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
SANITIZER_NETBSD || SANITIZER_SOLARIS)
@@ -226,12 +219,6 @@
# define SANITIZER_SOLARIS32 0
#endif
-#if defined(__myriad2__)
-# define SANITIZER_MYRIAD2 1
-#else
-# define SANITIZER_MYRIAD2 0
-#endif
-
#if defined(__riscv) && (__riscv_xlen == 64)
#define SANITIZER_RISCV64 1
#else
@@ -374,9 +361,9 @@
# define SANITIZER_CACHE_LINE_SIZE 64
#endif
-// Enable offline markup symbolizer for Fuchsia and RTEMS.
-#if SANITIZER_FUCHSIA || SANITIZER_RTEMS
-#define SANITIZER_SYMBOLIZER_MARKUP 1
+// Enable offline markup symbolizer for Fuchsia.
+#if SANITIZER_FUCHSIA
+# define SANITIZER_SYMBOLIZER_MARKUP 1
#else
#define SANITIZER_SYMBOLIZER_MARKUP 0
#endif
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
index 731df71..5b710c2 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
@@ -114,12 +114,6 @@
#define SI_NOT_FUCHSIA 1
#endif
-#if SANITIZER_RTEMS
-#define SI_NOT_RTEMS 0
-#else
-#define SI_NOT_RTEMS 1
-#endif
-
#if SANITIZER_SOLARIS
#define SI_SOLARIS 1
#else
@@ -482,13 +476,12 @@
#define SANITIZER_INTERCEPT_MMAP SI_POSIX
#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (SI_GLIBC || SI_ANDROID)
-#define SANITIZER_INTERCEPT_MEMALIGN \
- (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC
#define SANITIZER_INTERCEPT_PVALLOC (SI_GLIBC || SI_ANDROID)
#define SANITIZER_INTERCEPT_CFREE (SI_GLIBC && !SANITIZER_RISCV64)
#define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX
-#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC && SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC)
#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX
@@ -584,6 +577,7 @@
(SI_POSIX && !(SANITIZER_MAC && SANITIZER_I386))
#define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD)
#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD
+#define SANITIZER_INTERCEPT_FLOPEN SI_FREEBSD
// This macro gives a way for downstream users to override the above
// interceptor macros irrespective of the platform they are on. They have
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
index f22f503..c51327e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
@@ -26,12 +26,9 @@
// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
// are not defined anywhere in userspace headers. Fake them. This seems to work
-// fine with newer headers, too. Beware that with <sys/stat.h>, struct stat
-// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
-// Also, for some platforms (e.g. mips) there are additional members in the
-// <sys/stat.h> struct stat:s.
+// fine with newer headers, too.
#include <linux/posix_types.h>
-#if defined(__x86_64__)
+#if defined(__x86_64__) || defined(__mips__)
#include <sys/stat.h>
#else
#define ino_t __kernel_ino_t
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
index cba41ba..4dd2764 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -83,7 +83,7 @@ const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__mips__)
const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
? FIRST_32_SECOND_64(104, 128)
- : FIRST_32_SECOND_64(144, 216);
+ : FIRST_32_SECOND_64(160, 216);
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__s390__) && !defined(__s390x__)
const unsigned struct_kernel_stat_sz = 64;
@@ -650,14 +650,14 @@ struct __sanitizer_sigaction {
#endif // !SANITIZER_ANDROID
#if defined(__mips__)
-struct __sanitizer_kernel_sigset_t {
- uptr sig[2];
-};
+#define __SANITIZER_KERNEL_NSIG 128
#else
+#define __SANITIZER_KERNEL_NSIG 64
+#endif
+
struct __sanitizer_kernel_sigset_t {
- u8 sig[8];
+ uptr sig[__SANITIZER_KERNEL_NSIG / (sizeof(uptr) * 8)];
};
-#endif
// Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
#if SANITIZER_MIPS
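With the unified definition, the array length follows from the kernel signal count and the word size. A standalone sketch checking the resulting element counts (the helper is illustrative):

    #include <cstdint>

    template <int kNsig, typename Uptr>
    constexpr int SigWords() {
      return kNsig / (int)(sizeof(Uptr) * 8);
    }

    // mips: 128 signal bits -> 2 x 64-bit or 4 x 32-bit words.
    static_assert(SigWords<128, uint64_t>() == 2, "");
    static_assert(SigWords<128, uint32_t>() == 4, "");
    // everything else: 64 signal bits -> 1 x 64-bit or 2 x 32-bit words.
    static_assert(SigWords<64, uint64_t>() == 1, "");
    static_assert(SigWords<64, uint32_t>() == 2, "");

    int main() { return 0; }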
diff --git a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp
index d1d8e50..ddf6844 100644
--- a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp
@@ -128,14 +128,6 @@ void SetAddressSpaceUnlimited() {
CHECK(AddressSpaceIsUnlimited());
}
-void SleepForSeconds(int seconds) {
- sleep(seconds);
-}
-
-void SleepForMillis(int millis) {
- usleep(millis * 1000);
-}
-
void Abort() {
#if !SANITIZER_GO
// If we are handling SIGABRT, unhandle it first.
@@ -166,9 +158,10 @@ bool SupportsColoredOutput(fd_t fd) {
#if !SANITIZER_GO
// TODO(glider): different tools may require different altstack size.
static uptr GetAltStackSize() {
- // SIGSTKSZ is not enough.
- static const uptr kAltStackSize = SIGSTKSZ * 4;
- return kAltStackSize;
+  // Note: since GLIBC_2.31, SIGSTKSZ may be a function call, so this may be
+  // more costly than you think. However, GetAltStackSize is only called 2-3
+  // times per thread, so don't cache the evaluation.
+ return SIGSTKSZ * 4;
}
void SetAlternateSignalStack() {
diff --git a/libsanitizer/sanitizer_common/sanitizer_printf.cpp b/libsanitizer/sanitizer_common/sanitizer_printf.cpp
index 5d16dfd..b913c92 100644
--- a/libsanitizer/sanitizer_common/sanitizer_printf.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_printf.cpp
@@ -20,6 +20,10 @@
#include <stdio.h>
#include <stdarg.h>
+#if defined(__x86_64__)
+# include <emmintrin.h>
+#endif
+
#if SANITIZER_WINDOWS && defined(_MSC_VER) && _MSC_VER < 1800 && \
!defined(va_copy)
# define va_copy(dst, src) ((dst) = (src))
@@ -128,7 +132,7 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
int VSNPrintf(char *buff, int buff_length,
const char *format, va_list args) {
static const char *kPrintfFormatsHelp =
- "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
+ "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X,V}; %p; "
"%[-]([0-9]*)?(\\.\\*)?s; %c\n";
RAW_CHECK(format);
RAW_CHECK(buff_length > 0);
@@ -162,17 +166,15 @@ int VSNPrintf(char *buff, int buff_length,
cur += have_z;
bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
cur += have_ll * 2;
- s64 dval;
- u64 uval;
const bool have_length = have_z || have_ll;
const bool have_flags = have_width || have_length;
// At the moment only %s supports precision and left-justification.
CHECK(!((precision >= 0 || left_justified) && *cur != 's'));
switch (*cur) {
case 'd': {
- dval = have_ll ? va_arg(args, s64)
- : have_z ? va_arg(args, sptr)
- : va_arg(args, int);
+ s64 dval = have_ll ? va_arg(args, s64)
+ : have_z ? va_arg(args, sptr)
+ : va_arg(args, int);
result += AppendSignedDecimal(&buff, buff_end, dval, width,
pad_with_zero);
break;
@@ -180,14 +182,21 @@ int VSNPrintf(char *buff, int buff_length,
case 'u':
case 'x':
case 'X': {
- uval = have_ll ? va_arg(args, u64)
- : have_z ? va_arg(args, uptr)
- : va_arg(args, unsigned);
+ u64 uval = have_ll ? va_arg(args, u64)
+ : have_z ? va_arg(args, uptr)
+ : va_arg(args, unsigned);
bool uppercase = (*cur == 'X');
result += AppendUnsigned(&buff, buff_end, uval, (*cur == 'u') ? 10 : 16,
width, pad_with_zero, uppercase);
break;
}
+ case 'V': {
+ for (uptr i = 0; i < 16; i++) {
+ unsigned x = va_arg(args, unsigned);
+ result += AppendUnsigned(&buff, buff_end, x, 16, 2, true, false);
+ }
+ break;
+ }
case 'p': {
RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
result += AppendPointer(&buff, buff_end, va_arg(args, uptr));
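The new %V case consumes 16 separate unsigned arguments and renders each as exactly two zero-padded lowercase hex digits (base 16, width 2, pad_with_zero, no uppercase), i.e. a 128-bit value printed byte by byte. A plain-printf sketch of the same output:

    #include <cstdio>

    int main() {
      const unsigned bytes[16] = {0xde, 0xad, 0xbe, 0xef, 0, 1, 2, 3,
                                  4,    5,    6,    7,    8, 9, 10, 11};
      for (unsigned b : bytes)
        printf("%02x", b);  // two hex digits per byte, zero-padded
      printf("\n");         // deadbeef000102030405060708090a0b
      return 0;
    }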
diff --git a/libsanitizer/sanitizer_common/sanitizer_quarantine.h b/libsanitizer/sanitizer_common/sanitizer_quarantine.h
index 992f231..1a074d2 100644
--- a/libsanitizer/sanitizer_common/sanitizer_quarantine.h
+++ b/libsanitizer/sanitizer_common/sanitizer_quarantine.h
@@ -149,7 +149,8 @@ class Quarantine {
Cache cache_;
char pad2_[kCacheLineSize];
- void NOINLINE Recycle(uptr min_size, Callback cb) {
+ void NOINLINE Recycle(uptr min_size, Callback cb) REQUIRES(recycle_mutex_)
+ RELEASE(recycle_mutex_) {
Cache tmp;
{
SpinMutexLock l(&cache_mutex_);
diff --git a/libsanitizer/sanitizer_common/sanitizer_rtems.cpp b/libsanitizer/sanitizer_common/sanitizer_rtems.cpp
deleted file mode 100644
index d58bd08..0000000
--- a/libsanitizer/sanitizer_common/sanitizer_rtems.cpp
+++ /dev/null
@@ -1,281 +0,0 @@
-//===-- sanitizer_rtems.cpp -----------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is shared between various sanitizers' runtime libraries and
-// implements RTEMS-specific functions.
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_rtems.h"
-#if SANITIZER_RTEMS
-
-#define posix_memalign __real_posix_memalign
-#define free __real_free
-#define memset __real_memset
-
-#include "sanitizer_file.h"
-#include "sanitizer_symbolizer.h"
-#include <errno.h>
-#include <fcntl.h>
-#include <pthread.h>
-#include <sched.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-// There is no mmap on RTEMS. Use memalign, etc.
-#define __mmap_alloc_aligned posix_memalign
-#define __mmap_free free
-#define __mmap_memset memset
-
-namespace __sanitizer {
-
-#include "sanitizer_syscall_generic.inc"
-
-void NORETURN internal__exit(int exitcode) {
- _exit(exitcode);
-}
-
-uptr internal_sched_yield() {
- return sched_yield();
-}
-
-uptr internal_getpid() {
- return getpid();
-}
-
-int internal_dlinfo(void *handle, int request, void *p) {
- UNIMPLEMENTED();
-}
-
-bool FileExists(const char *filename) {
- struct stat st;
- if (stat(filename, &st))
- return false;
- // Sanity check: filename is a regular file.
- return S_ISREG(st.st_mode);
-}
-
-uptr GetThreadSelf() { return static_cast<uptr>(pthread_self()); }
-
-tid_t GetTid() { return GetThreadSelf(); }
-
-void Abort() { abort(); }
-
-int Atexit(void (*function)(void)) { return atexit(function); }
-
-void SleepForSeconds(int seconds) { sleep(seconds); }
-
-void SleepForMillis(int millis) { usleep(millis * 1000); }
-
-bool SupportsColoredOutput(fd_t fd) { return false; }
-
-void GetThreadStackTopAndBottom(bool at_initialization,
- uptr *stack_top, uptr *stack_bottom) {
- pthread_attr_t attr;
- pthread_attr_init(&attr);
- CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
- void *base = nullptr;
- size_t size = 0;
- CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
- CHECK_EQ(pthread_attr_destroy(&attr), 0);
-
- *stack_bottom = reinterpret_cast<uptr>(base);
- *stack_top = *stack_bottom + size;
-}
-
-void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
- uptr *tls_addr, uptr *tls_size) {
- uptr stack_top, stack_bottom;
- GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
- *stk_addr = stack_bottom;
- *stk_size = stack_top - stack_bottom;
- *tls_addr = *tls_size = 0;
-}
-
-void InitializePlatformEarly() {}
-void MaybeReexec() {}
-void CheckASLR() {}
-void CheckMPROTECT() {}
-void DisableCoreDumperIfNecessary() {}
-void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
-void SetAlternateSignalStack() {}
-void UnsetAlternateSignalStack() {}
-void InitTlsSize() {}
-
-void SignalContext::DumpAllRegisters(void *context) {}
-const char *DescribeSignalOrException(int signo) { UNIMPLEMENTED(); }
-
-enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-
-BlockingMutex::BlockingMutex() {
- internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
- CHECK_EQ(owner_, 0);
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
- return;
- while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
- internal_sched_yield();
- }
-}
-
-void BlockingMutex::Unlock() {
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
- CHECK_NE(v, MtxUnlocked);
-}
-
-void BlockingMutex::CheckLocked() {
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
-}
-
-uptr GetPageSize() { return getpagesize(); }
-
-uptr GetMmapGranularity() { return GetPageSize(); }
-
-uptr GetMaxVirtualAddress() {
- return (1ULL << 32) - 1; // 0xffffffff
-}
-
-void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
- void* ptr = 0;
- int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size);
- if (UNLIKELY(res))
- ReportMmapFailureAndDie(size, mem_type, "allocate", res, raw_report);
- __mmap_memset(ptr, 0, size);
- IncreaseTotalMmap(size);
- return ptr;
-}
-
-void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
- void* ptr = 0;
- int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size);
- if (UNLIKELY(res)) {
- if (res == ENOMEM)
- return nullptr;
- ReportMmapFailureAndDie(size, mem_type, "allocate", false);
- }
- __mmap_memset(ptr, 0, size);
- IncreaseTotalMmap(size);
- return ptr;
-}
-
-void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
- const char *mem_type) {
- CHECK(IsPowerOfTwo(size));
- CHECK(IsPowerOfTwo(alignment));
- void* ptr = 0;
- int res = __mmap_alloc_aligned(&ptr, alignment, size);
- if (res)
- ReportMmapFailureAndDie(size, mem_type, "align allocate", res, false);
- __mmap_memset(ptr, 0, size);
- IncreaseTotalMmap(size);
- return ptr;
-}
-
-void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
- return MmapOrDie(size, mem_type, false);
-}
-
-void UnmapOrDie(void *addr, uptr size) {
- if (!addr || !size) return;
- __mmap_free(addr);
- DecreaseTotalMmap(size);
-}
-
-fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
- int flags;
- switch (mode) {
- case RdOnly: flags = O_RDONLY; break;
- case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break;
- case RdWr: flags = O_RDWR | O_CREAT; break;
- }
- fd_t res = open(filename, flags, 0660);
- if (internal_iserror(res, errno_p))
- return kInvalidFd;
- return res;
-}
-
-void CloseFile(fd_t fd) {
- close(fd);
-}
-
-bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
- error_t *error_p) {
- uptr res = read(fd, buff, buff_size);
- if (internal_iserror(res, error_p))
- return false;
- if (bytes_read)
- *bytes_read = res;
- return true;
-}
-
-bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
- error_t *error_p) {
- uptr res = write(fd, buff, buff_size);
- if (internal_iserror(res, error_p))
- return false;
- if (bytes_written)
- *bytes_written = res;
- return true;
-}
-
-void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
-void DumpProcessMap() {}
-
-// There is no page protection so everything is "accessible."
-bool IsAccessibleMemoryRange(uptr beg, uptr size) {
- return true;
-}
-
-char **GetArgv() { return nullptr; }
-char **GetEnviron() { return nullptr; }
-
-const char *GetEnv(const char *name) {
- return getenv(name);
-}
-
-uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
- internal_strncpy(buf, "StubBinaryName", buf_len);
- return internal_strlen(buf);
-}
-
-uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
- internal_strncpy(buf, "StubProcessName", buf_len);
- return internal_strlen(buf);
-}
-
-bool IsPathSeparator(const char c) {
- return c == '/';
-}
-
-bool IsAbsolutePath(const char *path) {
- return path != nullptr && IsPathSeparator(path[0]);
-}
-
-void ReportFile::Write(const char *buffer, uptr length) {
- SpinMutexLock l(mu);
- static const char *kWriteError =
- "ReportFile::Write() can't output requested buffer!\n";
- ReopenIfNecessary();
- if (length != write(fd, buffer, length)) {
- write(fd, kWriteError, internal_strlen(kWriteError));
- Die();
- }
-}
-
-uptr MainThreadStackBase, MainThreadStackSize;
-uptr MainThreadTlsBase, MainThreadTlsSize;
-
-} // namespace __sanitizer
-
-#endif // SANITIZER_RTEMS
diff --git a/libsanitizer/sanitizer_common/sanitizer_rtems.h b/libsanitizer/sanitizer_common/sanitizer_rtems.h
deleted file mode 100644
index e8adfd5..0000000
--- a/libsanitizer/sanitizer_common/sanitizer_rtems.h
+++ /dev/null
@@ -1,20 +0,0 @@
-//===-- sanitizer_rtems.h ---------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is shared between various sanitizers' runtime libraries and
-// provides definitions for RTEMS-specific functions.
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_RTEMS_H
-#define SANITIZER_RTEMS_H
-
-#include "sanitizer_platform.h"
-#if SANITIZER_RTEMS
-#include "sanitizer_common.h"
-
-#endif // SANITIZER_RTEMS
-#endif // SANITIZER_RTEMS_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_solaris.cpp b/libsanitizer/sanitizer_common/sanitizer_solaris.cpp
index 8789dcd..cb53eab 100644
--- a/libsanitizer/sanitizer_common/sanitizer_solaris.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_solaris.cpp
@@ -160,6 +160,13 @@ DECLARE__REAL_AND_INTERNAL(uptr, sched_yield, void) {
return sched_yield();
}
+DECLARE__REAL_AND_INTERNAL(void, usleep, u64 useconds) {
+ struct timespec ts;
+ ts.tv_sec = useconds / 1000000;
+ ts.tv_nsec = (useconds % 1000000) * 1000;
+ nanosleep(&ts, nullptr);
+}
+
DECLARE__REAL_AND_INTERNAL(uptr, execve, const char *filename,
char *const argv[], char *const envp[]) {
return _REAL(execve)(filename, argv, envp);
@@ -211,6 +218,13 @@ uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
}
// ----------------- sanitizer_common.h
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+ // FIXME: implement actual blocking.
+ sched_yield();
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {}
+
BlockingMutex::BlockingMutex() {
CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
internal_memset(this, 0, sizeof(*this));
@@ -231,9 +245,7 @@ void BlockingMutex::Unlock() {
CHECK_EQ(mutex_unlock((mutex_t *)&opaque_storage_), 0);
}
-void BlockingMutex::CheckLocked() {
- CHECK_EQ((uptr)thr_self(), owner_);
-}
+void BlockingMutex::CheckLocked() const { CHECK_EQ((uptr)thr_self(), owner_); }
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp
index 515dedd..07e4409 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp
@@ -85,8 +85,8 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
// Nope, this does not look right either. This means the frame after next does
// not have a valid frame pointer, but we can still extract the caller PC.
// Unfortunately, there is no way to decide between GCC and LLVM frame
- // layouts. Assume GCC.
- return bp_prev - 1;
+ // layouts. Assume LLVM.
+ return bp_prev;
#else
return (uhwptr*)bp;
#endif
@@ -109,21 +109,14 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
IsAligned((uptr)frame, sizeof(*frame)) &&
size < max_depth) {
#ifdef __powerpc__
- // PowerPC ABIs specify that the return address is saved on the
- // *caller's* stack frame. Thus we must dereference the back chain
- // to find the caller frame before extracting it.
+ // PowerPC ABIs specify that the return address is saved at offset
+ // 16 of the *caller's* stack frame. Thus we must dereference the
+ // back chain to find the caller frame before extracting it.
uhwptr *caller_frame = (uhwptr*)frame[0];
if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
!IsAligned((uptr)caller_frame, sizeof(uhwptr)))
break;
- // For most ABIs the offset where the return address is saved is two
- // register sizes. The exception is the SVR4 ABI, which uses an
- // offset of only one register size.
-#ifdef _CALL_SYSV
- uhwptr pc1 = caller_frame[1];
-#else
uhwptr pc1 = caller_frame[2];
-#endif
#elif defined(__s390__)
uhwptr pc1 = frame[14];
#elif defined(__riscv)
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
index 15616f8..ea330f3 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
@@ -12,6 +12,7 @@
#ifndef SANITIZER_STACKTRACE_H
#define SANITIZER_STACKTRACE_H
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
@@ -32,8 +33,8 @@ static const u32 kStackTraceMax = 256;
// Fast unwind is the only option on Mac for now; we will need to
// revisit this macro when slow unwind works on Mac, see
// https://github.com/google/sanitizers/issues/137
-#if SANITIZER_MAC || SANITIZER_RTEMS
-# define SANITIZER_CAN_SLOW_UNWIND 0
+#if SANITIZER_MAC
+# define SANITIZER_CAN_SLOW_UNWIND 0
#else
# define SANITIZER_CAN_SLOW_UNWIND 1
#endif
@@ -56,6 +57,16 @@ struct StackTrace {
// Prints a symbolized stacktrace, followed by an empty line.
void Print() const;
+ // Prints a symbolized stacktrace to the output string, followed by an empty
+ // line.
+ void PrintTo(InternalScopedString *output) const;
+
+ // Prints a symbolized stacktrace to the output buffer, followed by an empty
+  // line. Returns the number of characters that should have been written to
+  // the buffer (not including the trailing '\0'). Thus, the string is
+  // truncated iff the return value is not less than "out_buf_size".
+ uptr PrintTo(char *out_buf, uptr out_buf_size) const;
+
static bool WillUseFastUnwind(bool request_fast_unwind) {
if (!SANITIZER_CAN_FAST_UNWIND)
return false;
@@ -185,5 +196,26 @@ static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
uptr local_stack; \
uptr sp = (uptr)&local_stack
+// GET_CURRENT_PC() is equivalent to StackTrace::GetCurrentPc().
+// The optimized x86 version is faster than GetCurrentPc because
+// it does not involve a function call; instead it reads the RIP register.
+// Reads of RIP by an instruction return RIP pointing to the next
+// instruction, which is exactly what we want here, thus the 0 offset.
+// It needs to be a macro because otherwise we will get the name
+// of this function on the top of most stacks. Attribute artificial
+// does not do what it claims to do, unfortunately. And attribute
+// __nodebug__ is clang-only. If we had an attribute that removed
+// this function from debug info, we could simply make
+// StackTrace::GetCurrentPc() faster.
+#if defined(__x86_64__)
+# define GET_CURRENT_PC() \
+ ({ \
+ uptr pc; \
+ asm("lea 0(%%rip), %0" : "=r"(pc)); \
+ pc; \
+ })
+#else
+# define GET_CURRENT_PC() StackTrace::GetCurrentPc()
+#endif
#endif // SANITIZER_STACKTRACE_H
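A standalone sketch of the same RIP read outside the runtime (x86-64 only; both the statement expression and extended asm are GCC/Clang extensions, which is why the generic branch falls back to StackTrace::GetCurrentPc()):

    #include <cstdint>
    #include <cstdio>

    // lea with a 0 displacement off %rip yields the address of the
    // next instruction -- no function call, no stack frame.
    #define GET_PC()                        \
      ({                                    \
        uintptr_t pc;                       \
        asm("lea 0(%%rip), %0" : "=r"(pc)); \
        pc;                                 \
      })

    int main() {
      printf("pc = %p\n", (void *)GET_PC());
      return 0;
    }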
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
index 7386332..f60ea77 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
@@ -18,46 +18,119 @@
namespace __sanitizer {
-void StackTrace::Print() const {
+namespace {
+
+class StackTraceTextPrinter {
+ public:
+ StackTraceTextPrinter(const char *stack_trace_fmt, char frame_delimiter,
+ InternalScopedString *output,
+ InternalScopedString *dedup_token)
+ : stack_trace_fmt_(stack_trace_fmt),
+ frame_delimiter_(frame_delimiter),
+ output_(output),
+ dedup_token_(dedup_token),
+ symbolize_(RenderNeedsSymbolization(stack_trace_fmt)) {}
+
+ bool ProcessAddressFrames(uptr pc) {
+ SymbolizedStack *frames = symbolize_
+ ? Symbolizer::GetOrInit()->SymbolizePC(pc)
+ : SymbolizedStack::New(pc);
+ if (!frames)
+ return false;
+
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ uptr prev_len = output_->length();
+ RenderFrame(output_, stack_trace_fmt_, frame_num_++, cur->info.address,
+ symbolize_ ? &cur->info : nullptr,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+
+ if (prev_len != output_->length())
+ output_->append("%c", frame_delimiter_);
+
+ ExtendDedupToken(cur);
+ }
+ frames->ClearAll();
+ return true;
+ }
+
+ private:
+ // Extend the dedup token by appending a new frame.
+ void ExtendDedupToken(SymbolizedStack *stack) {
+ if (!dedup_token_)
+ return;
+
+ if (dedup_frames_-- > 0) {
+ if (dedup_token_->length())
+ dedup_token_->append("--");
+ if (stack->info.function != nullptr)
+ dedup_token_->append(stack->info.function);
+ }
+ }
+
+ const char *stack_trace_fmt_;
+ const char frame_delimiter_;
+ int dedup_frames_ = common_flags()->dedup_token_length;
+ uptr frame_num_ = 0;
+ InternalScopedString *output_;
+ InternalScopedString *dedup_token_;
+ const bool symbolize_ = false;
+};
+
+static void CopyStringToBuffer(const InternalScopedString &str, char *out_buf,
+ uptr out_buf_size) {
+ if (!out_buf_size)
+ return;
+
+ CHECK_GT(out_buf_size, 0);
+ uptr copy_size = Min(str.length(), out_buf_size - 1);
+ internal_memcpy(out_buf, str.data(), copy_size);
+ out_buf[copy_size] = '\0';
+}
+
+} // namespace
+
+void StackTrace::PrintTo(InternalScopedString *output) const {
+ CHECK(output);
+
+ InternalScopedString dedup_token;
+ StackTraceTextPrinter printer(common_flags()->stack_trace_format, '\n',
+ output, &dedup_token);
+
if (trace == nullptr || size == 0) {
- Printf(" <empty stack>\n\n");
+ output->append(" <empty stack>\n\n");
return;
}
- InternalScopedString frame_desc;
- InternalScopedString dedup_token;
- int dedup_frames = common_flags()->dedup_token_length;
- bool symbolize = RenderNeedsSymbolization(common_flags()->stack_trace_format);
- uptr frame_num = 0;
+
for (uptr i = 0; i < size && trace[i]; i++) {
// PCs in stack traces are actually the return addresses, that is,
// addresses of the next instructions after the call.
uptr pc = GetPreviousInstructionPc(trace[i]);
- SymbolizedStack *frames;
- if (symbolize)
- frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
- else
- frames = SymbolizedStack::New(pc);
- CHECK(frames);
- for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
- frame_desc.clear();
- RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++,
- cur->info.address, symbolize ? &cur->info : nullptr,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
- Printf("%s\n", frame_desc.data());
- if (dedup_frames-- > 0) {
- if (dedup_token.length())
- dedup_token.append("--");
- if (cur->info.function != nullptr)
- dedup_token.append(cur->info.function);
- }
- }
- frames->ClearAll();
+ CHECK(printer.ProcessAddressFrames(pc));
}
- // Always print a trailing empty line after stack trace.
- Printf("\n");
+
+ // Always add a trailing empty line after stack trace.
+ output->append("\n");
+
+ // Append deduplication token, if non-empty.
if (dedup_token.length())
- Printf("DEDUP_TOKEN: %s\n", dedup_token.data());
+ output->append("DEDUP_TOKEN: %s\n", dedup_token.data());
+}
+
+uptr StackTrace::PrintTo(char *out_buf, uptr out_buf_size) const {
+ CHECK(out_buf);
+
+ InternalScopedString output;
+ PrintTo(&output);
+ CopyStringToBuffer(output, out_buf, out_buf_size);
+
+ return output.length();
+}
+
+void StackTrace::Print() const {
+ InternalScopedString output;
+ PrintTo(&output);
+ Printf("%s", output.data());
}
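The char-buffer overload follows the snprintf truncation contract: the return value is the untruncated length, so a caller can detect truncation and retry with a larger buffer. A sketch of that caller-side pattern, with snprintf standing in for PrintTo:

    #include <cstdio>
    #include <vector>

    int main() {
      const char *trace = "a long stack trace";
      char small[8];
      // Like PrintTo: returns the length that would have been written.
      int needed = snprintf(small, sizeof(small), "%s", trace);
      if (needed >= (int)sizeof(small)) {  // truncated -> retry
        std::vector<char> big(needed + 1);
        snprintf(big.data(), big.size(), "%s", trace);
        printf("%s\n", big.data());
      }
      return 0;
    }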
void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
@@ -82,12 +155,15 @@ void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
UnwindSlow(pc, context, max_depth);
else
UnwindSlow(pc, max_depth);
+    // If there are too few frames, the program may have been built with
+    // -fno-asynchronous-unwind-tables. Fall back to the fast unwinder below.
+ if (size > 2 || size >= max_depth)
+ return;
#else
UNREACHABLE("slow unwind requested but not available");
#endif
- } else {
- UnwindFast(pc, bp, stack_top, stack_bottom, max_depth);
}
+ UnwindFast(pc, bp, stack_top, stack_bottom, max_depth);
}
static int GetModuleAndOffsetForPc(uptr pc, char *module_name,
@@ -112,41 +188,18 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
uptr out_buf_size) {
- if (!out_buf_size) return;
- pc = StackTrace::GetPreviousInstructionPc(pc);
- SymbolizedStack *frame;
- bool symbolize = RenderNeedsSymbolization(fmt);
- if (symbolize)
- frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
- else
- frame = SymbolizedStack::New(pc);
- if (!frame) {
- internal_strncpy(out_buf, "<can't symbolize>", out_buf_size);
- out_buf[out_buf_size - 1] = 0;
+ if (!out_buf_size)
return;
+
+ pc = StackTrace::GetPreviousInstructionPc(pc);
+
+ InternalScopedString output;
+ StackTraceTextPrinter printer(fmt, '\0', &output, nullptr);
+ if (!printer.ProcessAddressFrames(pc)) {
+ output.clear();
+ output.append("<can't symbolize>");
}
- InternalScopedString frame_desc;
- uptr frame_num = 0;
- // Reserve one byte for the final 0.
- char *out_end = out_buf + out_buf_size - 1;
- for (SymbolizedStack *cur = frame; cur && out_buf < out_end;
- cur = cur->next) {
- frame_desc.clear();
- RenderFrame(&frame_desc, fmt, frame_num++, cur->info.address,
- symbolize ? &cur->info : nullptr,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
- if (!frame_desc.length())
- continue;
- // Reserve one byte for the terminating 0.
- uptr n = out_end - out_buf - 1;
- internal_strncpy(out_buf, frame_desc.data(), n);
- out_buf += __sanitizer::Min<uptr>(n, frame_desc.length());
- *out_buf++ = 0;
- }
- CHECK(out_buf <= out_end);
- *out_buf = 0;
- frame->ClearAll();
+ CopyStringToBuffer(output, out_buf, out_buf_size);
}
SANITIZER_INTERFACE_ATTRIBUTE
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_markup.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_markup.cpp
index 01edef9..9a5b4a8 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_markup.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_markup.cpp
@@ -16,14 +16,13 @@
#if SANITIZER_FUCHSIA
#include "sanitizer_symbolizer_fuchsia.h"
-#elif SANITIZER_RTEMS
-#include "sanitizer_symbolizer_rtems.h"
-#endif
-#include "sanitizer_stacktrace.h"
-#include "sanitizer_symbolizer.h"
-
-#include <limits.h>
-#include <unwind.h>
+# endif
+
+# include <limits.h>
+# include <unwind.h>
+
+# include "sanitizer_stacktrace.h"
+# include "sanitizer_symbolizer.h"
namespace __sanitizer {
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_report.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_report.cpp
index 9287993..f330ed3 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_report.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_report.cpp
@@ -120,7 +120,7 @@ void ReportMmapWriteExec(int prot) {
#endif
}
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_GO
+#if !SANITIZER_FUCHSIA && !SANITIZER_GO
void StartReportDeadlySignal() {
// Write the first message using fd=2, just in case.
// It may actually fail to write in case stderr is closed.
@@ -250,17 +250,17 @@ void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
#endif // !SANITIZER_FUCHSIA && !SANITIZER_GO
-static atomic_uintptr_t reporting_thread = {0};
-static StaticSpinMutex CommonSanitizerReportMutex;
+atomic_uintptr_t ScopedErrorReportLock::reporting_thread_ = {0};
+StaticSpinMutex ScopedErrorReportLock::mutex_;
-ScopedErrorReportLock::ScopedErrorReportLock() {
+void ScopedErrorReportLock::Lock() {
uptr current = GetThreadSelf();
for (;;) {
uptr expected = 0;
- if (atomic_compare_exchange_strong(&reporting_thread, &expected, current,
+ if (atomic_compare_exchange_strong(&reporting_thread_, &expected, current,
memory_order_relaxed)) {
// We've claimed reporting_thread so proceed.
- CommonSanitizerReportMutex.Lock();
+ mutex_.Lock();
return;
}
@@ -282,13 +282,11 @@ ScopedErrorReportLock::ScopedErrorReportLock() {
}
}
-ScopedErrorReportLock::~ScopedErrorReportLock() {
- CommonSanitizerReportMutex.Unlock();
- atomic_store_relaxed(&reporting_thread, 0);
+void ScopedErrorReportLock::Unlock() {
+ mutex_.Unlock();
+ atomic_store_relaxed(&reporting_thread_, 0);
}
-void ScopedErrorReportLock::CheckLocked() {
- CommonSanitizerReportMutex.CheckLocked();
-}
+void ScopedErrorReportLock::CheckLocked() { mutex_.CheckLocked(); }
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_rtems.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer_rtems.h
deleted file mode 100644
index 3371092..0000000
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_rtems.h
+++ /dev/null
@@ -1,40 +0,0 @@
-//===-- sanitizer_symbolizer_rtems.h -----------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is shared between various sanitizers' runtime libraries.
-//
-// Define RTEMS's string formats and limits for the markup symbolizer.
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_SYMBOLIZER_RTEMS_H
-#define SANITIZER_SYMBOLIZER_RTEMS_H
-
-#include "sanitizer_internal_defs.h"
-
-namespace __sanitizer {
-
-// The Myriad RTEMS symbolizer currently only parses backtrace lines,
-// so use a format that the symbolizer understands. For other
-// markups, keep them the same as the Fuchsia's.
-
-// This is used by UBSan for type names, and by ASan for global variable names.
-constexpr const char *kFormatDemangle = "{{{symbol:%s}}}";
-constexpr uptr kFormatDemangleMax = 1024; // Arbitrary.
-
-// Function name or equivalent from PC location.
-constexpr const char *kFormatFunction = "{{{pc:%p}}}";
-constexpr uptr kFormatFunctionMax = 64; // More than big enough for 64-bit hex.
-
-// Global variable name or equivalent from data memory address.
-constexpr const char *kFormatData = "{{{data:%p}}}";
-
-// One frame in a backtrace (printed on a line by itself).
-constexpr const char *kFormatFrame = " [%u] IP: %p";
-
-} // namespace __sanitizer
-
-#endif // SANITIZER_SYMBOLIZER_RTEMS_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp
index 3273da3..745fbf7 100644
--- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp
@@ -99,6 +99,9 @@ void ThreadContextBase::Reset() {
// ThreadRegistry implementation.
+ThreadRegistry::ThreadRegistry(ThreadContextFactory factory)
+ : ThreadRegistry(factory, UINT32_MAX, UINT32_MAX, 0) {}
+
ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
u32 thread_quarantine_size, u32 max_reuse)
: context_factory_(factory),
@@ -106,13 +109,10 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
thread_quarantine_size_(thread_quarantine_size),
max_reuse_(max_reuse),
mtx_(),
- n_contexts_(0),
total_threads_(0),
alive_threads_(0),
max_alive_threads_(0),
running_threads_(0) {
- threads_ = (ThreadContextBase **)MmapOrDie(max_threads_ * sizeof(threads_[0]),
- "ThreadRegistry");
dead_threads_.clear();
invalid_threads_.clear();
}
@@ -120,7 +120,8 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
uptr *alive) {
BlockingMutexLock l(&mtx_);
- if (total) *total = n_contexts_;
+ if (total)
+ *total = threads_.size();
if (running) *running = running_threads_;
if (alive) *alive = alive_threads_;
}
@@ -137,11 +138,11 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
ThreadContextBase *tctx = QuarantinePop();
if (tctx) {
tid = tctx->tid;
- } else if (n_contexts_ < max_threads_) {
+ } else if (threads_.size() < max_threads_) {
// Allocate new thread context and tid.
- tid = n_contexts_++;
+ tid = threads_.size();
tctx = context_factory_(tid);
- threads_[tid] = tctx;
+ threads_.push_back(tctx);
} else {
#if !SANITIZER_GO
Report("%s: Thread limit (%u threads) exceeded. Dying.\n",
@@ -169,7 +170,7 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,
void *arg) {
CheckLocked();
- for (u32 tid = 0; tid < n_contexts_; tid++) {
+ for (u32 tid = 0; tid < threads_.size(); tid++) {
ThreadContextBase *tctx = threads_[tid];
if (tctx == 0)
continue;
@@ -179,7 +180,7 @@ void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,
u32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) {
BlockingMutexLock l(&mtx_);
- for (u32 tid = 0; tid < n_contexts_; tid++) {
+ for (u32 tid = 0; tid < threads_.size(); tid++) {
ThreadContextBase *tctx = threads_[tid];
if (tctx != 0 && cb(tctx, arg))
return tctx->tid;
@@ -190,7 +191,7 @@ u32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) {
 ThreadContextBase *
 ThreadRegistry::FindThreadContextLocked(FindThreadCallback cb, void *arg) {
   CheckLocked();
-  for (u32 tid = 0; tid < n_contexts_; tid++) {
+  for (u32 tid = 0; tid < threads_.size(); tid++) {
     ThreadContextBase *tctx = threads_[tid];
     if (tctx != 0 && cb(tctx, arg))
       return tctx;
@@ -211,7 +212,6 @@ ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(tid_t os_id) {
 
 void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
   BlockingMutexLock l(&mtx_);
-  CHECK_LT(tid, n_contexts_);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   CHECK_EQ(SANITIZER_FUCHSIA ? ThreadStatusCreated : ThreadStatusRunning,
@@ -221,7 +221,7 @@ void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
 
 void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
   BlockingMutexLock l(&mtx_);
-  for (u32 tid = 0; tid < n_contexts_; tid++) {
+  for (u32 tid = 0; tid < threads_.size(); tid++) {
     ThreadContextBase *tctx = threads_[tid];
     if (tctx != 0 && tctx->user_id == user_id &&
         tctx->status != ThreadStatusInvalid) {
@@ -233,7 +233,6 @@ void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
 
 void ThreadRegistry::DetachThread(u32 tid, void *arg) {
   BlockingMutexLock l(&mtx_);
-  CHECK_LT(tid, n_contexts_);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   if (tctx->status == ThreadStatusInvalid) {
@@ -254,7 +253,6 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
   do {
     {
       BlockingMutexLock l(&mtx_);
-      CHECK_LT(tid, n_contexts_);
       ThreadContextBase *tctx = threads_[tid];
       CHECK_NE(tctx, 0);
       if (tctx->status == ThreadStatusInvalid) {
@@ -280,7 +278,6 @@ ThreadStatus ThreadRegistry::FinishThread(u32 tid) {
   BlockingMutexLock l(&mtx_);
   CHECK_GT(alive_threads_, 0);
   alive_threads_--;
-  CHECK_LT(tid, n_contexts_);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   bool dead = tctx->detached;
@@ -306,7 +303,6 @@ void ThreadRegistry::StartThread(u32 tid, tid_t os_id, ThreadType thread_type,
                                  void *arg) {
   BlockingMutexLock l(&mtx_);
   running_threads_++;
-  CHECK_LT(tid, n_contexts_);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   CHECK_EQ(ThreadStatusCreated, tctx->status);
@@ -339,7 +335,6 @@ ThreadContextBase *ThreadRegistry::QuarantinePop() {
 
 void ThreadRegistry::SetThreadUserId(u32 tid, uptr user_id) {
   BlockingMutexLock l(&mtx_);
-  CHECK_LT(tid, n_contexts_);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   CHECK_NE(tctx->status, ThreadStatusInvalid);
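The pattern behind the .cpp changes above, sketched with std::vector standing in for InternalMmapVector (an assumption: only the size()/push_back() semantics matter here): the container's size replaces the hand-maintained n_contexts_ counter, so the count can never drift out of sync with the storage, and the fixed MmapOrDie allocation sized by max_threads_ goes away entirely.

#include <cstdint>
#include <vector>

struct ThreadContext {
  uint32_t tid;
};

// Hypothetical registry fragment mirroring the diff: the next tid is
// simply the current container size, and growth happens on demand.
static std::vector<ThreadContext *> threads;

uint32_t CreateThreadContext() {
  uint32_t tid = static_cast<uint32_t>(threads.size());
  threads.push_back(new ThreadContext{tid});
  return tid;
}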
diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h
index dcd445c..0b28bbe 100644
--- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h
+++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h
@@ -85,22 +85,22 @@ class ThreadContextBase {
 
 typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
 
-class ThreadRegistry {
+class MUTEX ThreadRegistry {
  public:
+  ThreadRegistry(ThreadContextFactory factory);
   ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
-                 u32 thread_quarantine_size, u32 max_reuse = 0);
+                 u32 thread_quarantine_size, u32 max_reuse);
   void GetNumberOfThreads(uptr *total = nullptr, uptr *running = nullptr,
                           uptr *alive = nullptr);
   uptr GetMaxAliveThreads();
 
-  void Lock() { mtx_.Lock(); }
-  void CheckLocked() { mtx_.CheckLocked(); }
-  void Unlock() { mtx_.Unlock(); }
+  void Lock() ACQUIRE() { mtx_.Lock(); }
+  void CheckLocked() const CHECK_LOCKED() { mtx_.CheckLocked(); }
+  void Unlock() RELEASE() { mtx_.Unlock(); }
 
   // Should be guarded by ThreadRegistryLock.
   ThreadContextBase *GetThreadLocked(u32 tid) {
-    DCHECK_LT(tid, n_contexts_);
-    return threads_[tid];
+    return threads_.empty() ? nullptr : threads_[tid];
   }
 
   u32 CreateThread(uptr user_id, bool detached, u32 parent_tid, void *arg);
@@ -137,15 +137,13 @@ class ThreadRegistry {
 
   BlockingMutex mtx_;
 
-  u32 n_contexts_;      // Number of created thread contexts,
-                        // at most max_threads_.
   u64 total_threads_;   // Total number of created threads. May be greater than
                         // max_threads_ if contexts were reused.
   uptr alive_threads_;  // Created or running.
   uptr max_alive_threads_;
   uptr running_threads_;
 
-  ThreadContextBase **threads_;  // Array of thread contexts is leaked.
+  InternalMmapVector<ThreadContextBase *> threads_;
   IntrusiveList<ThreadContextBase> dead_threads_;
   IntrusiveList<ThreadContextBase> invalid_threads_;
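Two things are worth noting in the header above. First, GetThreadLocked no longer debug-checks the tid: on an empty registry it now returns nullptr, so callers should be null-tolerant. Second, Lock/CheckLocked/Unlock now carry thread-safety annotations, so Clang can verify callers hold the registry lock. A hypothetical caller combining both, assuming the ThreadRegistryLock RAII typedef that already exists in this header:

// Hypothetical caller pattern for the nullptr-returning accessor.
void PrintThreadName(ThreadRegistry *registry, u32 tid) {
  ThreadRegistryLock l(registry);  // RAII: Lock() now, Unlock() at scope exit
  if (ThreadContextBase *tctx = registry->GetThreadLocked(tid))
    Printf("thread %u: %s\n", tctx->tid, tctx->name);
}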
diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_safety.h b/libsanitizer/sanitizer_common/sanitizer_thread_safety.h
new file mode 100644
index 0000000..52b25ed
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_thread_safety.h
@@ -0,0 +1,42 @@
+//===-- sanitizer_thread_safety.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// Wrappers around thread safety annotations.
+// https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_THREAD_SAFETY_H
+#define SANITIZER_THREAD_SAFETY_H
+
+#if defined(__clang__)
+# define THREAD_ANNOTATION(x) __attribute__((x))
+#else
+# define THREAD_ANNOTATION(x)
+#endif
+
+#define MUTEX THREAD_ANNOTATION(capability("mutex"))
+#define SCOPED_LOCK THREAD_ANNOTATION(scoped_lockable)
+#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION(pt_guarded_by(x))
+#define REQUIRES(...) THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
+#define REQUIRES_SHARED(...) \
+  THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
+#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
+#define ACQUIRE_SHARED(...) \
+  THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
+#define TRY_ACQUIRE(...) THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
+#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
+#define RELEASE_SHARED(...) \
+  THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
+#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
+#define CHECK_LOCKED(...) THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
+#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)
+
+#endif
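A hedged usage sketch for these wrappers: under Clang, compiling with -Wthread-safety then statically checks lock discipline, while under other compilers the macros expand to nothing. The SimpleMutex and Counter types below are hypothetical illustrations, not part of the patch:

#include "sanitizer_thread_safety.h"

// A capability (lockable) type, mirroring how the patch annotates
// ThreadRegistry with MUTEX/ACQUIRE/RELEASE.
struct MUTEX SimpleMutex {
  void Lock() ACQUIRE() { /* acquire an OS lock here */ }
  void Unlock() RELEASE() { /* release it here */ }
};

struct Counter {
  SimpleMutex mtx;
  int value GUARDED_BY(mtx) = 0;

  void Inc() {
    mtx.Lock();
    value++;  // OK: the analysis sees mtx held here.
    mtx.Unlock();
  }
  // Reading `value` without holding mtx would trigger a
  // -Wthread-safety warning under Clang.
};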
diff --git a/libsanitizer/sanitizer_common/sanitizer_win.cpp b/libsanitizer/sanitizer_common/sanitizer_win.cpp
index f383e13..dddd885 100644
--- a/libsanitizer/sanitizer_common/sanitizer_win.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_win.cpp
@@ -44,6 +44,9 @@ TRACELOGGING_DEFINE_PROVIDER(g_asan_provider, "AddressSanitizerLoggingProvider",
 #define TraceLoggingUnregister(x)
 #endif
 
+// For WaitOnAddress
+#  pragma comment(lib, "synchronization.lib")
+
 // A macro to tell the compiler that this part of the code cannot be reached,
 // if the compiler supports this feature. Since we're using this in
 // code that is called when terminating the process, the expansion of the
@@ -541,13 +544,7 @@ bool IsAbsolutePath(const char *path) {
          IsPathSeparator(path[2]);
 }
 
-void SleepForSeconds(int seconds) {
-  Sleep(seconds * 1000);
-}
-
-void SleepForMillis(int millis) {
-  Sleep(millis);
-}
+void internal_usleep(u64 useconds) { Sleep(useconds / 1000); }
 
 u64 NanoTime() {
   static LARGE_INTEGER frequency = {};
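The Sleep wrappers above collapse into a single primitive: each platform now implements only internal_usleep, and the generic SleepForSeconds/SleepForMillis become shared, portable wrappers on top of it. A sketch of that layering (the wrapper bodies are an assumption about the common-code side of this merge, not shown in this hunk):

typedef unsigned long long u64;

// Per-platform primitive: the only sleep function each OS must provide.
void internal_usleep(u64 useconds);

// Portable wrappers shared by all platforms.
inline void SleepForSeconds(unsigned seconds) {
  internal_usleep((u64)seconds * 1000 * 1000);
}
inline void SleepForMillis(unsigned millis) {
  internal_usleep((u64)millis * 1000);
}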
@@ -819,6 +816,17 @@ uptr GetRSS() {
 void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
 void internal_join_thread(void *th) { }
 
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+  WaitOnAddress(p, &cmp, sizeof(cmp), INFINITE);
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {
+  if (count == 1)
+    WakeByAddressSingle(p);
+  else
+    WakeByAddressAll(p);
+}
+
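FutexWait/FutexWake give Windows the same wait-on-address primitive that the Linux code gets from futex(2). Note that WaitOnAddress may return spuriously or after a racing wake, so callers must re-check their predicate in a loop. A minimal consumer sketch under that assumption (OneShotEvent is illustrative, not the patch's code; the atomic helpers are the ones from sanitizer_atomic.h):

// One-shot event built on the two primitives above.
struct OneShotEvent {
  atomic_uint32_t state;  // 0 = not signaled, 1 = signaled

  void Wait() {
    // FutexWait returns when *state differed from the expected value,
    // on a spurious wake, or after a racing Signal(); always re-check.
    while (atomic_load(&state, memory_order_acquire) == 0)
      FutexWait(&state, 0);
  }

  void Signal() {
    atomic_store(&state, 1, memory_order_release);
    FutexWake(&state, UINT32_MAX);  // count != 1 wakes all waiters
  }
};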
 // ---------------------- BlockingMutex ---------------- {{{1
 
 BlockingMutex::BlockingMutex() {
@@ -838,9 +846,7 @@ void BlockingMutex::Unlock() {
   ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
 }
 
-void BlockingMutex::CheckLocked() {
-  CHECK_EQ(owner_, GetThreadSelf());
-}
+void BlockingMutex::CheckLocked() const { CHECK_EQ(owner_, GetThreadSelf()); }
 
 uptr GetTlsSize() {
   return 0;