author     Rich Felker <dalias@aerifal.cx>    2014-08-17 00:46:26 -0400
committer  Rich Felker <dalias@aerifal.cx>    2014-08-17 00:46:26 -0400
commit     de7e99c58508ca70f0b1b8ef259a823a3766c434
tree       68fbc616cb85a0a5a2992f64080d19b25855088f  /src/thread/pthread_mutexattr_setrobust.c
parent     d338b506e39b1e2c68366b12be90704c635602ce
make pointers used in robust list volatile
when manipulating the robust list, the order of stores matters,
because the code may be asynchronously interrupted by a fatal signal
and the kernel will then access the robust list in what is essentially
an async-signal context.

previously, aliasing considerations made it seem unlikely that a
compiler could reorder the stores, but proving that they could not be
reordered incorrectly would have been extremely difficult. instead
I've opted to make all the pointers used as part of the robust list,
including those in the robust list head and in the individual mutexes,
volatile.

in addition, the format of the robust list has been changed to point
back to the head at the end, rather than ending with a null pointer.
this is to match the documented kernel robust list ABI. the null
pointer, which was previously used, only worked because faults during
access terminate the robust list processing.
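
For context, the kernel-side layout that the list format now matches is
the robust list ABI declared in the Linux uapi futex.h headers. The two
structures below are the documented kernel definitions; the comments are
explanatory additions:

struct robust_list {
	struct robust_list *next;
};

struct robust_list_head {
	/* circular list of robust mutexes held by this thread; when
	 * the list is empty, list.next points back at &list rather
	 * than being a null pointer */
	struct robust_list list;

	/* offset from each list entry to the futex word it protects */
	long futex_offset;

	/* entry whose (un)linking was in progress if the thread died
	 * in the middle of a list operation */
	struct robust_list *list_op_pending;
};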
Diffstat (limited to 'src/thread/pthread_mutexattr_setrobust.c')
-rw-r--r--  src/thread/pthread_mutexattr_setrobust.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/src/thread/pthread_mutexattr_setrobust.c b/src/thread/pthread_mutexattr_setrobust.c
index 8948cba..d062788 100644
--- a/src/thread/pthread_mutexattr_setrobust.c
+++ b/src/thread/pthread_mutexattr_setrobust.c
@@ -4,16 +4,18 @@
 void __do_private_robust_list()
 {
 	pthread_t self = __pthread_self();
-	void **p, **prev, **next;
+	volatile void *volatile *p;
+	volatile void *volatile *prev;
+	volatile void *volatile *next;
 	pthread_mutex_t *m;
 
-	for (prev=0, p=self->robust_list.head; p; p=next) {
+	prev = &self->robust_list.head;
+	for (p=self->robust_list.head; p&&p!=&self->robust_list.head; p=next) {
 		next = *p;
 		m = (void *)((char *)p - offsetof(pthread_mutex_t, _m_next));
 		if (!(m->_m_type & 128)) {
 			int waiters = m->_m_waiters;
-			if (prev) *prev = next;
-			else self->robust_list.head = next;
+			*prev = next;
 			int cont = a_swap(&m->_m_lock, self->tid|0x40000000);
 			if (cont < 0 || waiters) __wake(&m->_m_lock, 1, 1);
 		} else {
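
As an aside on why the volatile qualifiers are sufficient here: the C
standard requires volatile accesses to be ordered with respect to one
another, so the compiler cannot reorder the stores that link or unlink a
mutex, and the list remains walkable at every instant even if a fatal
signal arrives between two stores. The following is a minimal sketch
under those assumptions, with invented names; it is not musl code:

/* hypothetical circular, volatile singly linked list in the style of
 * the kernel robust list ABI */
struct node {
	struct node *volatile next;
};

static struct node head = { .next = &head }; /* empty list: self-link */

static void push(struct node *n)
{
	/* store 1: link the new node to the current first entry */
	n->next = head.next;
	/* store 2: publish it; both accesses are volatile, so the
	 * compiler may not reorder them, and an asynchronous reader
	 * never observes a published node with a garbage next pointer */
	head.next = n;
}

static void unlink_after(struct node *prev)
{
	/* a single volatile store detaches the node, so the list stays
	 * consistent at every instant */
	prev->next = prev->next->next;
}

static void walk(void (*fn)(struct node *))
{
	/* circular termination: stop upon returning to the head,
	 * matching the documented ABI rather than a null sentinel */
	for (struct node *p = head.next; p != &head; p = p->next)
		fn(p);
}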