author    Fangrui Song <i@maskray.me>    2024-06-04 13:06:41 -0700
committer GitHub <noreply@github.com>    2024-06-04 13:06:41 -0700
commit    a5729b71d844c1444f7d348dc2d4ea5b98de5ec5 (patch)
tree      03e07c5d3c3fdfc41acee683cf7a1c6e08fca911 /compiler-rt
parent    3b2df5b6ee81cf2685c95728ff1baf795051c926 (diff)
[atomics] Initialize pthread_mutex_t and avoid false sharing
PTHREAD_MUTEX_INITIALIZER is all zeroes for glibc and musl, but using the initializer explicitly improves conformance and might work with more libc implementations.

Follow-up to #94374

Pull Request: https://github.com/llvm/llvm-project/pull/94387
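The pattern in this patch can be illustrated with a minimal standalone sketch (a hedged example, not the compiler-rt source: the 64-byte cache-line size and the 1024-entry table size are assumptions, and the range designator is a GNU extension accepted by GCC and Clang):

/* Minimal sketch of the patch's pattern; the cache-line size and table
 * size below are assumptions, not values taken from atomic.c. */
#include <pthread.h>
#include <stdalign.h>

#define CACHE_LINE_SIZE 64       /* assumed cache-line size */
#define SPINLOCK_COUNT (1 << 10) /* assumed table size */

/* Padding each mutex out to its own cache line keeps adjacent locks from
 * sharing a line, which avoids false sharing between unrelated atomics. */
typedef struct {
  alignas(CACHE_LINE_SIZE) pthread_mutex_t m;
} Lock;

/* Initialize every slot with PTHREAD_MUTEX_INITIALIZER instead of relying
 * on zero-initialization being a valid mutex state. The "[a ... b] ="
 * range designator is a GNU extension, hence the -Wgnu-designator pragma
 * in the diff below. */
static Lock locks[SPINLOCK_COUNT] = {
    [0 ... SPINLOCK_COUNT - 1] = {PTHREAD_MUTEX_INITIALIZER}};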
Diffstat (limited to 'compiler-rt')
-rw-r--r-- compiler-rt/lib/builtins/atomic.c | 20
1 file changed, 16 insertions, 4 deletions
diff --git a/compiler-rt/lib/builtins/atomic.c b/compiler-rt/lib/builtins/atomic.c
index 159c364..c3a36a9 100644
--- a/compiler-rt/lib/builtins/atomic.c
+++ b/compiler-rt/lib/builtins/atomic.c
@@ -51,6 +51,14 @@
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
+#ifndef CACHE_LINE_SIZE
+#define CACHE_LINE_SIZE 64
+#endif
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wgnu-designator"
+#endif
+
////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation. Falls back to spinlocks if none is
// defined. Each platform should define the Lock type, and corresponding
@@ -95,13 +103,17 @@ static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0
_Static_assert(__atomic_always_lock_free(sizeof(uintptr_t), 0),
"Implementation assumes lock-free pointer-size cmpxchg");
#include <pthread.h>
-typedef pthread_mutex_t Lock;
+#include <stdalign.h>
+typedef struct {
+ alignas(CACHE_LINE_SIZE) pthread_mutex_t m;
+} Lock;
/// Unlock a lock. This is a release operation.
-__inline static void unlock(Lock *l) { pthread_mutex_unlock(l); }
+__inline static void unlock(Lock *l) { pthread_mutex_unlock(&l->m); }
/// Locks a lock.
-__inline static void lock(Lock *l) { pthread_mutex_lock(l); }
+__inline static void lock(Lock *l) { pthread_mutex_lock(&l->m); }
/// locks for atomic operations
-static Lock locks[SPINLOCK_COUNT];
+static Lock locks[SPINLOCK_COUNT] = {
+ [0 ... SPINLOCK_COUNT - 1] = {PTHREAD_MUTEX_INITIALIZER}};
#endif
/// Returns a lock to use for a given pointer.
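The trailing context line refers to the lock-selection helper, which lies outside this hunk. As a hedged sketch (the actual hashing in atomic.c may differ), a pointer is typically mapped onto the table with the power-of-two SPINLOCK_MASK shown in the first hunk:

/* Sketch only: selects one of the locks for a given address. The real
 * helper in atomic.c may mix the pointer bits differently. */
#include <stdint.h>

static Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  /* Drop low bits that are usually zero due to alignment, then pick a
   * slot with the power-of-two mask. */
  hash >>= 4;
  return locks + (hash & SPINLOCK_MASK);
}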