author	Torvald Riegel <triegel@redhat.com>	2016-12-24 00:40:46 +0100
committer	Torvald Riegel <triegel@redhat.com>	2017-01-13 23:12:32 +0100
commit	8f9450a0b7a9e78267e8ae1ab1000ebca08e473e (patch)
tree	bca2a0f01266faddbb985b5e3751b64f068fa565 /nptl/descr.h
parent	8e31cafb268938729a1314806a924d73fb1991c5 (diff)
Add compiler barriers around modifications of the robust mutex list.
Any changes to the per-thread list of robust mutexes currently acquired, as
well as to the pending-operations entry, are not simply sequential code but
effectively concurrent with any actions taken by the kernel when it tries to
clean up after a crash.  This is not quite like multi-thread concurrency but
more like signal-handler concurrency.  This patch fixes latent bugs by adding
compiler barriers where necessary, so that the kernel's crash handling is
guaranteed to see consistent data.

This is meant to be easy to backport, so we do not use C11-style signal
fences yet.

	* nptl/descr.h (ENQUEUE_MUTEX_BOTH, DEQUEUE_MUTEX): Add compiler
	barriers and comments.
	* nptl/pthread_mutex_lock.c (__pthread_mutex_lock_full): Likewise.
	* nptl/pthread_mutex_timedlock.c (pthread_mutex_timedlock):
	Likewise.
	* nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full):
	Likewise.
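For context, a minimal sketch of the two equivalent idioms involved here: the
GCC compiler barrier actually used by the patch, and the C11 signal fence the
message says is deferred for backporting reasons.  This illustration is mine,
not part of the commit; both forms only constrain the compiler and emit no
CPU fence instruction.

    #include <stdatomic.h>

    /* The idiom used in this patch: a GCC compiler barrier.  The empty
       asm with a "memory" clobber prevents the compiler from moving
       memory accesses across it.  */
    #define BARRIER_ASM() __asm ("" ::: "memory")

    /* The C11-style equivalent alluded to above, intended for code that
       synchronizes with signal-handler-like contexts.  */
    #define BARRIER_C11() atomic_signal_fence (memory_order_seq_cst)

No hardware fence is needed in either case because the kernel inspects the
robust list only after the thread has stopped running.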
Diffstat (limited to 'nptl/descr.h')
-rw-r--r--	nptl/descr.h	21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/nptl/descr.h b/nptl/descr.h
index 7a6a94f..a145860 100644
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -179,7 +179,16 @@ struct pthread
but the pointer to the next/previous element of the list points
in the middle of the object, the __next element. Whenever
casting to __pthread_list_t we need to adjust the pointer
- first. */
+ first.
+ These operations are effectively concurrent code in that the thread
+ can get killed at any point in time and the kernel takes over. Thus,
+ the __next elements are a kind of concurrent list and we need to
+ enforce using compiler barriers that the individual operations happen
+ in such a way that the kernel always sees a consistent list. The
+ backward links (i.e., the __prev elements) are not used by the kernel.
+ FIXME We should use relaxed MO atomic operations here and signal fences
+ because this kind of concurrency is similar to synchronizing with a
+ signal handler. */
# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))

# define ENQUEUE_MUTEX_BOTH(mutex, val) \
@@ -191,6 +200,8 @@ struct pthread
mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF, \
robust_head.list); \
mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head; \
+ /* Ensure that the new list entry is ready before we insert it. */ \
+ __asm ("" ::: "memory"); \
THREAD_SETMEM (THREAD_SELF, robust_head.list, \
(void *) (((uintptr_t) &mutex->__data.__list.__next) \
| val)); \
@@ -205,6 +216,9 @@ struct pthread
((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul) \
- QUEUE_PTR_ADJUST); \
prev->__next = mutex->__data.__list.__next; \
+ /* Ensure that we remove the entry from the list before we change the \
+ __next pointer of the entry, which is read by the kernel. */ \
+ __asm ("" ::: "memory"); \
mutex->__data.__list.__prev = NULL; \
mutex->__data.__list.__next = NULL; \
} while (0)
@@ -219,6 +233,8 @@ struct pthread
do { \
mutex->__data.__list.__next \
= THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
+ /* Ensure that the new list entry is ready before we insert it. */ \
+ __asm ("" ::: "memory"); \
THREAD_SETMEM (THREAD_SELF, robust_list.__next, \
(void *) (((uintptr_t) &mutex->__data.__list) | val)); \
} while (0)
@@ -239,6 +255,9 @@ struct pthread
} \
\
runp->__next = next->__next; \
+ /* Ensure that we remove the entry from the list before we change the \
+ __next pointer of the entry, which is read by the kernel. */ \
+ __asm ("" ::: "memory"); \
mutex->__data.__list.__next = NULL; \
} \
} while (0)