author     Ulrich Drepper <drepper@redhat.com>    2002-08-29 10:42:30 +0000
committer  Ulrich Drepper <drepper@redhat.com>    2002-08-29 10:42:30 +0000
commit     c98d82db4c7fa8d94bcf8759f8f9ed622cc9b77f (patch)
tree       7613124ead9a2e20487390fa7b421d4421355fe2 /linuxthreads/spinlock.c
parent     69f8b5e8238659abddb2a7777eb33cbe6c5545d5 (diff)
Update.
2002-08-29  Jakub Jelinek  <jakub@redhat.com>

	* stdio-common/vfprintf.c (vfprintf): Add builtin_expect for
	string_malloced, it is unlikely to be set.
	Only call free with non-NULL workspace.
	* sysdeps/sparc/sparc32/sparcv9/Makefile (sysdep-CFLAGS): Use
	-mcpu=ultrasparc, not only tune for it.
	(ASFLAGS*): Set unconditionally.

2002-08-29  Jakub Jelinek  <jakub@redhat.com>

	* sysdeps/generic/readelflib.c (process_elf_file): Make loadaddr
	ElfW(Addr).  Don't mask upper 32-bits and lower 12 bits off from
	p_vaddr/p_offset when computing loadaddr.
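A rough illustration only, not the actual stdio-common/vfprintf.c code: the first entry above amounts to hinting the compiler that the "string was copied to the heap" case is rare, and to skipping free for a workspace that was never allocated.  The variable names below are hypothetical stand-ins for the real ones in vfprintf.

#include <stdlib.h>

/* Hypothetical stand-ins for buffers that vfprintf manages internally.  */
static char *string;      /* heap copy of a string argument, if any */
static void *workspace;   /* scratch buffer, possibly never allocated */

static void
cleanup_sketch (int string_malloced)
{
  /* The heap copy is the unlikely case; the hint keeps the free call
     off the hot path.  */
  if (__builtin_expect (string_malloced, 0))
    free (string);

  /* Only call free with a non-NULL workspace.  */
  if (workspace != NULL)
    free (workspace);
}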
Diffstat (limited to 'linuxthreads/spinlock.c')
-rw-r--r--  linuxthreads/spinlock.c | 12
1 file changed, 6 insertions, 6 deletions
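The patch below moves the masking of the status word's low bit from the writer to the readers: __pthread_lock now stores the raw oldstatus in p_nextlock, and every place that follows p_nextlock (or *maxptr) as a pointer clears bit 0 with & ~1L.  As a minimal sketch of this bit-0 tagging idiom, assuming only that thread descriptors are at least 2-byte aligned (the struct and helper names are illustrative, not linuxthreads API):

/* A status word packs the head of the wait queue together with a
   one-bit lock-state flag in bit 0; this works because descriptors
   are assumed to be at least 2-byte aligned.  */
struct descr
{
  struct descr *next;           /* plays the role of p_nextlock */
};

static inline long
status_pack (struct descr *head, int flag)
{
  return (long) head | (flag & 1);
}

/* Readers must clear bit 0 before treating the word as a pointer,
   which is what the & ~1L masking in the hunks below does.  */
static inline struct descr *
status_ptr (long status)
{
  return (struct descr *) (status & ~1L);
}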
diff --git a/linuxthreads/spinlock.c b/linuxthreads/spinlock.c
index 582a95c..47107bf 100644
--- a/linuxthreads/spinlock.c
+++ b/linuxthreads/spinlock.c
@@ -85,8 +85,6 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
spurious_wakeup_count = 0;
spin_count = 0;
-again:
-
/* On SMP, try spinning to get the lock. */
if (__pthread_smp_kernel) {
@@ -114,6 +112,8 @@ again:
lock->__spinlock += (spin_count - lock->__spinlock) / 8;
}
+again:
+
/* No luck, try once more or suspend. */
do {
@@ -130,7 +130,7 @@ again:
}
if (self != NULL) {
- THREAD_SETMEM(self, p_nextlock, (pthread_descr) (oldstatus & ~1L));
+ THREAD_SETMEM(self, p_nextlock, (pthread_descr) (oldstatus));
/* Make sure the store in p_nextlock completes before performing
the compare-and-swap */
MEMORY_BARRIER();
@@ -214,7 +214,7 @@ again:
maxprio = thr->p_priority;
}
ptr = &(thr->p_nextlock);
- thr = *ptr;
+ thr = (pthread_descr)((long)(thr->p_nextlock) & ~1L);
}
/* Remove max prio thread from waiting list. */
@@ -226,13 +226,13 @@ again:
least significant bit is clear. */
thr = (pthread_descr) (oldstatus & ~1L);
if (! __compare_and_swap_with_release_semantics
- (&lock->__status, oldstatus, (long)(thr->p_nextlock)))
+ (&lock->__status, oldstatus, (long)(thr->p_nextlock) & ~1L))
goto again;
} else {
/* No risk of concurrent access, remove max prio thread normally.
But in this case we must also flip the least significant bit
of the status to mark the lock as released. */
- thr = *maxptr;
+ thr = (pthread_descr)((long)*maxptr & ~1L);
*maxptr = thr->p_nextlock;
/* Ensure deletion from linked list completes before we