diff options
author | Martin Liska <mliska@suse.cz> | 2020-06-01 21:15:18 +0200 |
---|---|---|
committer | Martin Liska <mliska@suse.cz> | 2020-06-02 08:02:07 +0200 |
commit | 3c6331c29f1376ed220246e7dead94bc527a9aa9 (patch) | |
tree | df4288713dd999fd2b0783cf0f82a7a127ed5964 /libsanitizer/tsan | |
parent | 2b11374cb8d864dff3792e7c84188ba7e8f136e7 (diff) | |
download | gcc-3c6331c29f1376ed220246e7dead94bc527a9aa9.zip gcc-3c6331c29f1376ed220246e7dead94bc527a9aa9.tar.gz gcc-3c6331c29f1376ed220246e7dead94bc527a9aa9.tar.bz2 |
Libsanitizer: merge from master.
Merged from revision b638b63b99d66786cb37336292604a2ae3490cfd.
The patch successfully bootstraps on x86_64-linux-gnu and
ppc64le-linux-gnu. I also tested ppc64-linux-gnu that exposed:
https://reviews.llvm.org/D80864 (which is fixed on master).
Abidiff looks happy and I made UBSAN and ASAN bootstrap on
x86_64-linux-gnu.
I'm planning to do merge from master twice a year, once now and
next time shortly before stage1 closes.
I am going to install the patches as the merge from master is obvious
and I haven't done anything special.
libsanitizer/ChangeLog:
* MERGE: Merge from master.
Diffstat (limited to 'libsanitizer/tsan')
-rw-r--r-- | libsanitizer/tsan/tsan_clock.cpp | 68 | ||||
-rw-r--r-- | libsanitizer/tsan/tsan_clock.h | 58 | ||||
-rw-r--r-- | libsanitizer/tsan/tsan_interceptors_posix.cpp | 21 | ||||
-rw-r--r-- | libsanitizer/tsan/tsan_platform.h | 1 | ||||
-rw-r--r-- | libsanitizer/tsan/tsan_platform_mac.cpp | 10 | ||||
-rw-r--r-- | libsanitizer/tsan/tsan_rtl.cpp | 12 | ||||
-rw-r--r-- | libsanitizer/tsan/tsan_rtl.h | 4 | ||||
-rw-r--r-- | libsanitizer/tsan/tsan_rtl_mutex.cpp | 25 | ||||
-rw-r--r-- | libsanitizer/tsan/tsan_rtl_ppc64.S | 1 | ||||
-rw-r--r-- | libsanitizer/tsan/tsan_rtl_thread.cpp | 34 | ||||
-rw-r--r-- | libsanitizer/tsan/tsan_stat.h | 1 |
11 files changed, 208 insertions, 27 deletions
diff --git a/libsanitizer/tsan/tsan_clock.cpp b/libsanitizer/tsan/tsan_clock.cpp index 4b7aa06..c91b29c 100644 --- a/libsanitizer/tsan/tsan_clock.cpp +++ b/libsanitizer/tsan/tsan_clock.cpp @@ -30,6 +30,14 @@ // dst->clock[i] = max(dst->clock[i], clock[i]); // } // +// void ThreadClock::releaseStoreAcquire(SyncClock *sc) const { +// for (int i = 0; i < kMaxThreads; i++) { +// tmp = clock[i]; +// clock[i] = max(clock[i], sc->clock[i]); +// sc->clock[i] = tmp; +// } +// } +// // void ThreadClock::ReleaseStore(SyncClock *dst) const { // for (int i = 0; i < kMaxThreads; i++) // dst->clock[i] = clock[i]; @@ -107,13 +115,14 @@ static void UnrefClockBlock(ClockCache *c, u32 idx, uptr blocks) { ThreadClock::ThreadClock(unsigned tid, unsigned reused) : tid_(tid) , reused_(reused + 1) // 0 has special meaning + , last_acquire_() + , global_acquire_() , cached_idx_() , cached_size_() , cached_blocks_() { CHECK_LT(tid, kMaxTidInClock); CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits); nclk_ = tid_ + 1; - last_acquire_ = 0; internal_memset(clk_, 0, sizeof(clk_)); } @@ -177,6 +186,49 @@ void ThreadClock::acquire(ClockCache *c, SyncClock *src) { } } +void ThreadClock::releaseStoreAcquire(ClockCache *c, SyncClock *sc) { + DCHECK_LE(nclk_, kMaxTid); + DCHECK_LE(sc->size_, kMaxTid); + + if (sc->size_ == 0) { + // ReleaseStore will correctly set release_store_tid_, + // which can be important for future operations. + ReleaseStore(c, sc); + return; + } + + nclk_ = max(nclk_, (uptr) sc->size_); + + // Check if we need to resize sc. + if (sc->size_ < nclk_) + sc->Resize(c, nclk_); + + bool acquired = false; + + sc->Unshare(c); + // Update sc->clk_. 
+ sc->FlushDirty(); + uptr i = 0; + for (ClockElem &ce : *sc) { + u64 tmp = clk_[i]; + if (clk_[i] < ce.epoch) { + clk_[i] = ce.epoch; + acquired = true; + } + ce.epoch = tmp; + ce.reused = 0; + i++; + } + sc->release_store_tid_ = kInvalidTid; + sc->release_store_reused_ = 0; + + if (acquired) { + CPP_STAT_INC(StatClockAcquiredSomething); + last_acquire_ = clk_[tid_]; + ResetCached(c); + } +} + void ThreadClock::release(ClockCache *c, SyncClock *dst) { DCHECK_LE(nclk_, kMaxTid); DCHECK_LE(dst->size_, kMaxTid); @@ -196,7 +248,7 @@ void ThreadClock::release(ClockCache *c, SyncClock *dst) { // Check if we had not acquired anything from other threads // since the last release on dst. If so, we need to update // only dst->elem(tid_). - if (dst->elem(tid_).epoch > last_acquire_) { + if (!HasAcquiredAfterRelease(dst)) { UpdateCurrentThread(c, dst); if (dst->release_store_tid_ != tid_ || dst->release_store_reused_ != reused_) @@ -222,8 +274,6 @@ void ThreadClock::release(ClockCache *c, SyncClock *dst) { // Clear 'acquired' flag in the remaining elements. if (nclk_ < dst->size_) CPP_STAT_INC(StatClockReleaseClearTail); - for (uptr i = nclk_; i < dst->size_; i++) - dst->elem(i).reused = 0; dst->release_store_tid_ = kInvalidTid; dst->release_store_reused_ = 0; // If we've acquired dst, remember this fact, @@ -269,7 +319,7 @@ void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) { if (dst->release_store_tid_ == tid_ && dst->release_store_reused_ == reused_ && - dst->elem(tid_).epoch > last_acquire_) { + !HasAcquiredAfterRelease(dst)) { CPP_STAT_INC(StatClockStoreFast); UpdateCurrentThread(c, dst); return; @@ -351,6 +401,14 @@ bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const { return true; } +// Checks whether the current thread has acquired anything +// from other clocks after releasing to dst (directly or indirectly). 
+bool ThreadClock::HasAcquiredAfterRelease(const SyncClock *dst) const { + const u64 my_epoch = dst->elem(tid_).epoch; + return my_epoch <= last_acquire_ || + my_epoch <= atomic_load_relaxed(&global_acquire_); +} + // Sets a single element in the vector clock. // This function is called only from weird places like AcquireGlobal. void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) { diff --git a/libsanitizer/tsan/tsan_clock.h b/libsanitizer/tsan/tsan_clock.h index 6a1d15a..736cdae 100644 --- a/libsanitizer/tsan/tsan_clock.h +++ b/libsanitizer/tsan/tsan_clock.h @@ -134,10 +134,12 @@ class ThreadClock { uptr size() const; void acquire(ClockCache *c, SyncClock *src); + void releaseStoreAcquire(ClockCache *c, SyncClock *src); void release(ClockCache *c, SyncClock *dst); void acq_rel(ClockCache *c, SyncClock *dst); void ReleaseStore(ClockCache *c, SyncClock *dst); void ResetCached(ClockCache *c); + void NoteGlobalAcquire(u64 v); void DebugReset(); void DebugDump(int(*printf)(const char *s, ...)); @@ -150,6 +152,53 @@ class ThreadClock { // Current thread time when it acquired something from other threads. u64 last_acquire_; + // Last time another thread has done a global acquire of this thread's clock. + // It helps to avoid problem described in: + // https://github.com/golang/go/issues/39186 + // See test/tsan/java_finalizer2.cpp for a regression test. + // Note the failure is _extremely_ hard to hit, so if you are trying + // to reproduce it, you may want to run something like: + // $ go get golang.org/x/tools/cmd/stress + // $ stress -p=64 ./a.out + // + // The crux of the problem is roughly as follows. + // A number of O(1) optimizations in the clocks algorithm assume proper + // transitive cumulative propagation of clock values. The AcquireGlobal + // operation may produce an inconsistent non-linearizable view of + // thread clocks. 
Namely, it may acquire a later value from a thread + // with a higher ID, but fail to acquire an earlier value from a thread + // with a lower ID. If a thread that executed AcquireGlobal then releases + // to a sync clock, it will spoil the sync clock with the inconsistent + // values. If another thread later releases to the sync clock, the optimized + // algorithm may break. + // + // The exact sequence of events that leads to the failure. + // - thread 1 executes AcquireGlobal + // - thread 1 acquires value 1 for thread 2 + // - thread 2 increments clock to 2 + // - thread 2 releases to sync object 1 + // - thread 3 at time 1 + // - thread 3 acquires from sync object 1 + // - thread 3 increments clock to 2 + // - thread 1 acquires value 2 for thread 3 + // - thread 1 releases to sync object 2 + // - sync object 2 clock has 1 for thread 2 and 2 for thread 3 + // - thread 3 releases to sync object 2 + // - thread 3 sees value 2 in the clock for itself + // and decides that it has already released to the clock + // and did not acquire anything from other threads after that + // (the last_acquire_ check in release operation) + // - thread 3 does not update the value for thread 2 in the clock from 1 to 2 + // - thread 4 acquires from sync object 2 + // - thread 4 detects a false race with thread 2 + // as it should have been synchronized with thread 2 up to time 2, + // but because of the broken clock it is now synchronized only up to time 1 + // + // The global_acquire_ value helps to prevent this scenario. + // Namely, thread 3 will not trust any own clock values up to global_acquire_ + // for the purposes of the last_acquire_ optimization. + atomic_uint64_t global_acquire_; + // Cached SyncClock (without dirty entries and release_store_tid_). // We reuse it for subsequent store-release operations without intervening // acquire operations. 
Since it is shared (and thus constant), clock value @@ -164,6 +213,7 @@ class ThreadClock { u64 clk_[kMaxTidInClock]; // Fixed size vector clock. bool IsAlreadyAcquired(const SyncClock *src) const; + bool HasAcquiredAfterRelease(const SyncClock *dst) const; void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const; }; @@ -185,6 +235,14 @@ ALWAYS_INLINE uptr ThreadClock::size() const { return nclk_; } +ALWAYS_INLINE void ThreadClock::NoteGlobalAcquire(u64 v) { + // Here we rely on the fact that AcquireGlobal is protected by + // ThreadRegistryLock, thus only one thread at a time executes it + // and values passed to this function should not go backwards. + CHECK_LE(atomic_load_relaxed(&global_acquire_), v); + atomic_store_relaxed(&global_acquire_, v); +} + ALWAYS_INLINE SyncClock::Iter SyncClock::begin() { return Iter(this); } diff --git a/libsanitizer/tsan/tsan_interceptors_posix.cpp b/libsanitizer/tsan/tsan_interceptors_posix.cpp index 8aea1e4..718957c 100644 --- a/libsanitizer/tsan/tsan_interceptors_posix.cpp +++ b/libsanitizer/tsan/tsan_interceptors_posix.cpp @@ -891,13 +891,16 @@ void DestroyThreadState() { ThreadFinish(thr); ProcUnwire(proc, thr); ProcDestroy(proc); + DTLS_Destroy(); + cur_thread_finalize(); +} + +void PlatformCleanUpThreadState(ThreadState *thr) { ThreadSignalContext *sctx = thr->signal_ctx; if (sctx) { thr->signal_ctx = 0; UnmapOrDie(sctx, sizeof(*sctx)); } - DTLS_Destroy(); - cur_thread_finalize(); } } // namespace __tsan @@ -1016,7 +1019,7 @@ TSAN_INTERCEPTOR(int, pthread_create, TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) { SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret); - int tid = ThreadTid(thr, pc, (uptr)th); + int tid = ThreadConsumeTid(thr, pc, (uptr)th); ThreadIgnoreBegin(thr, pc); int res = BLOCK_REAL(pthread_join)(th, ret); ThreadIgnoreEnd(thr, pc); @@ -1029,8 +1032,8 @@ TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) { DEFINE_REAL_PTHREAD_FUNCTIONS TSAN_INTERCEPTOR(int, pthread_detach, void *th) { - 
SCOPED_TSAN_INTERCEPTOR(pthread_detach, th); - int tid = ThreadTid(thr, pc, (uptr)th); + SCOPED_INTERCEPTOR_RAW(pthread_detach, th); + int tid = ThreadConsumeTid(thr, pc, (uptr)th); int res = REAL(pthread_detach)(th); if (res == 0) { ThreadDetach(thr, pc, tid); @@ -1050,8 +1053,8 @@ TSAN_INTERCEPTOR(void, pthread_exit, void *retval) { #if SANITIZER_LINUX TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) { - SCOPED_TSAN_INTERCEPTOR(pthread_tryjoin_np, th, ret); - int tid = ThreadTid(thr, pc, (uptr)th); + SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret); + int tid = ThreadConsumeTid(thr, pc, (uptr)th); ThreadIgnoreBegin(thr, pc); int res = REAL(pthread_tryjoin_np)(th, ret); ThreadIgnoreEnd(thr, pc); @@ -1064,8 +1067,8 @@ TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) { TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret, const struct timespec *abstime) { - SCOPED_TSAN_INTERCEPTOR(pthread_timedjoin_np, th, ret, abstime); - int tid = ThreadTid(thr, pc, (uptr)th); + SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime); + int tid = ThreadConsumeTid(thr, pc, (uptr)th); ThreadIgnoreBegin(thr, pc); int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime); ThreadIgnoreEnd(thr, pc); diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h index 63eb14f..7256d64 100644 --- a/libsanitizer/tsan/tsan_platform.h +++ b/libsanitizer/tsan/tsan_platform.h @@ -1021,6 +1021,7 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, void(*cleanup)(void *arg), void *arg); void DestroyThreadState(); +void PlatformCleanUpThreadState(ThreadState *thr); } // namespace __tsan diff --git a/libsanitizer/tsan/tsan_platform_mac.cpp b/libsanitizer/tsan/tsan_platform_mac.cpp index 326ca85..f92ecc5 100644 --- a/libsanitizer/tsan/tsan_platform_mac.cpp +++ b/libsanitizer/tsan/tsan_platform_mac.cpp @@ -19,6 +19,7 @@ #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_posix.h" 
#include "sanitizer_common/sanitizer_procmaps.h" +#include "sanitizer_common/sanitizer_ptrauth.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "tsan_platform.h" #include "tsan_rtl.h" @@ -75,9 +76,14 @@ static uptr main_thread_identity = 0; ALIGNED(64) static char main_thread_state[sizeof(ThreadState)]; static ThreadState *main_thread_state_loc = (ThreadState *)main_thread_state; +// We cannot use pthread_self() before libpthread has been initialized. Our +// current heuristic for guarding this is checking `main_thread_identity` which +// is only assigned in `__tsan::InitializePlatform`. static ThreadState **cur_thread_location() { + if (main_thread_identity == 0) + return &main_thread_state_loc; uptr thread_identity = (uptr)pthread_self(); - if (thread_identity == main_thread_identity || main_thread_identity == 0) + if (thread_identity == main_thread_identity) return &main_thread_state_loc; return (ThreadState **)MemToShadow(thread_identity); } @@ -269,6 +275,8 @@ void InitializePlatform() { uptr ExtractLongJmpSp(uptr *env) { uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT]; uptr sp = mangled_sp ^ longjmp_xor_key; + sp = (uptr)ptrauth_auth_data((void *)sp, ptrauth_key_asdb, + ptrauth_string_discriminator("sp")); return sp; } diff --git a/libsanitizer/tsan/tsan_rtl.cpp b/libsanitizer/tsan/tsan_rtl.cpp index 3f3c0cc..13c9b77 100644 --- a/libsanitizer/tsan/tsan_rtl.cpp +++ b/libsanitizer/tsan/tsan_rtl.cpp @@ -144,7 +144,7 @@ static void MemoryProfiler(Context *ctx, fd_t fd, int i) { WriteToFile(fd, buf.data(), internal_strlen(buf.data())); } -static void BackgroundThread(void *arg) { +static void *BackgroundThread(void *arg) { // This is a non-initialized non-user thread, nothing to see here. // We don't use ScopedIgnoreInterceptors, because we want ignores to be // enabled even when the thread function exits (e.g. 
during pthread thread @@ -220,6 +220,7 @@ static void BackgroundThread(void *arg) { } } } + return nullptr; } static void StartBackgroundThread() { @@ -494,14 +495,23 @@ int Finalize(ThreadState *thr) { void ForkBefore(ThreadState *thr, uptr pc) { ctx->thread_registry->Lock(); ctx->report_mtx.Lock(); + // Ignore memory accesses in the pthread_atfork callbacks. + // If any of them triggers a data race we will deadlock + // on the report_mtx. + // We could ignore interceptors and sync operations as well, + // but so far it's unclear if it will do more good or harm. + // Unnecessarily ignoring things can lead to false positives later. + ThreadIgnoreBegin(thr, pc); } void ForkParentAfter(ThreadState *thr, uptr pc) { + ThreadIgnoreEnd(thr, pc); // Begin is in ForkBefore. ctx->report_mtx.Unlock(); ctx->thread_registry->Unlock(); } void ForkChildAfter(ThreadState *thr, uptr pc) { + ThreadIgnoreEnd(thr, pc); // Begin is in ForkBefore. ctx->report_mtx.Unlock(); ctx->thread_registry->Unlock(); diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h index c38fc43..d3bb61f 100644 --- a/libsanitizer/tsan/tsan_rtl.h +++ b/libsanitizer/tsan/tsan_rtl.h @@ -775,7 +775,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached); void ThreadStart(ThreadState *thr, int tid, tid_t os_id, ThreadType thread_type); void ThreadFinish(ThreadState *thr); -int ThreadTid(ThreadState *thr, uptr pc, uptr uid); +int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid); void ThreadJoin(ThreadState *thr, uptr pc, int tid); void ThreadDetach(ThreadState *thr, uptr pc, int tid); void ThreadFinalize(ThreadState *thr); @@ -813,10 +813,12 @@ void Acquire(ThreadState *thr, uptr pc, uptr addr); // approximation of the actual required synchronization. 
void AcquireGlobal(ThreadState *thr, uptr pc); void Release(ThreadState *thr, uptr pc, uptr addr); +void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr); void ReleaseStore(ThreadState *thr, uptr pc, uptr addr); void AfterSleep(ThreadState *thr, uptr pc); void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c); void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c); +void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c); void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c); void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c); diff --git a/libsanitizer/tsan/tsan_rtl_mutex.cpp b/libsanitizer/tsan/tsan_rtl_mutex.cpp index ce6e7cb..ebd0d72 100644 --- a/libsanitizer/tsan/tsan_rtl_mutex.cpp +++ b/libsanitizer/tsan/tsan_rtl_mutex.cpp @@ -415,8 +415,10 @@ static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) { ThreadState *thr = reinterpret_cast<ThreadState*>(arg); ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base); u64 epoch = tctx->epoch1; - if (tctx->status == ThreadStatusRunning) + if (tctx->status == ThreadStatusRunning) { epoch = tctx->thr->fast_state.epoch(); + tctx->thr->clock.NoteGlobalAcquire(epoch); + } thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch); } @@ -429,6 +431,18 @@ void AcquireGlobal(ThreadState *thr, uptr pc) { UpdateClockCallback, thr); } +void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) { + DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr); + if (thr->ignore_sync) + return; + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); + thr->fast_state.IncrementEpoch(); + // Can't increment epoch w/o writing to the trace as well. 
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); + ReleaseStoreAcquireImpl(thr, pc, &s->clock); + s->mtx.Unlock(); +} + void Release(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: Release %zx\n", thr->tid, addr); if (thr->ignore_sync) @@ -482,6 +496,15 @@ void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) { StatInc(thr, StatSyncAcquire); } +void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) { + if (thr->ignore_sync) + return; + thr->clock.set(thr->fast_state.epoch()); + thr->fast_synch_epoch = thr->fast_state.epoch(); + thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c); + StatInc(thr, StatSyncReleaseStoreAcquire); +} + void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) { if (thr->ignore_sync) return; diff --git a/libsanitizer/tsan/tsan_rtl_ppc64.S b/libsanitizer/tsan/tsan_rtl_ppc64.S index 9e533a7..8285e21 100644 --- a/libsanitizer/tsan/tsan_rtl_ppc64.S +++ b/libsanitizer/tsan/tsan_rtl_ppc64.S @@ -1,6 +1,5 @@ #include "tsan_ppc_regs.h" - .machine altivec .section .text .hidden __tsan_setjmp .globl _setjmp diff --git a/libsanitizer/tsan/tsan_rtl_thread.cpp b/libsanitizer/tsan/tsan_rtl_thread.cpp index 0ac1ee9..d801467 100644 --- a/libsanitizer/tsan/tsan_rtl_thread.cpp +++ b/libsanitizer/tsan/tsan_rtl_thread.cpp @@ -145,6 +145,9 @@ void ThreadContext::OnFinished() { #if !SANITIZER_GO thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache); #endif +#if !SANITIZER_GO + PlatformCleanUpThreadState(thr); +#endif thr->~ThreadState(); #if TSAN_COLLECT_STATS StatAggregate(ctx->stat, thr->stat); @@ -285,19 +288,34 @@ void ThreadFinish(ThreadState *thr) { ctx->thread_registry->FinishThread(thr->tid); } -static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) { - uptr uid = (uptr)arg; - if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) { +struct ConsumeThreadContext { + uptr uid; + ThreadContextBase *tctx; +}; + +static bool ConsumeThreadByUid(ThreadContextBase *tctx, void *arg) { + 
ConsumeThreadContext *findCtx = (ConsumeThreadContext *)arg; + if (tctx->user_id == findCtx->uid && tctx->status != ThreadStatusInvalid) { + if (findCtx->tctx) { + // Ensure that user_id is unique. If it's not the case we are screwed. + // Something went wrong before, but now there is no way to recover. + // Returning a wrong thread is not an option, it may lead to very hard + // to debug false positives (e.g. if we join a wrong thread). + Report("ThreadSanitizer: dup thread with used id 0x%zx\n", findCtx->uid); + Die(); + } + findCtx->tctx = tctx; tctx->user_id = 0; - return true; } return false; } -int ThreadTid(ThreadState *thr, uptr pc, uptr uid) { - int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid); - DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res); - return res; +int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) { + ConsumeThreadContext findCtx = {uid, nullptr}; + ctx->thread_registry->FindThread(ConsumeThreadByUid, &findCtx); + int tid = findCtx.tctx ? findCtx.tctx->tid : ThreadRegistry::kUnknownTid; + DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, tid); + return tid; } void ThreadJoin(ThreadState *thr, uptr pc, int tid) { diff --git a/libsanitizer/tsan/tsan_stat.h b/libsanitizer/tsan/tsan_stat.h index 94e18bc..8b26a59 100644 --- a/libsanitizer/tsan/tsan_stat.h +++ b/libsanitizer/tsan/tsan_stat.h @@ -68,6 +68,7 @@ enum StatType { StatSyncDestroyed, StatSyncAcquire, StatSyncRelease, + StatSyncReleaseStoreAcquire, // Clocks - acquire. StatClockAcquire, |