author		Ulrich Drepper <drepper@redhat.com>	2003-03-21 08:03:25 +0000
committer	Ulrich Drepper <drepper@redhat.com>	2003-03-21 08:03:25 +0000
commit		5a3ab2fc180056cb14eaeae0f571421be81e371b (patch)
tree		af48122e19c238a39db145412a11b1b551d472e6
parent		18627f615b80b51778a65cf588f2741ad5f9b0a7 (diff)
download	glibc-5a3ab2fc180056cb14eaeae0f571421be81e371b.zip
		glibc-5a3ab2fc180056cb14eaeae0f571421be81e371b.tar.gz
		glibc-5a3ab2fc180056cb14eaeae0f571421be81e371b.tar.bz2
Update.
2003-03-21  Ulrich Drepper  <drepper@redhat.com>

	* cancellation.c: Adjust for new form of compare&exchange macros.
	* cleanup_defer.c: Likewise.
	* init.c: Likewise.
	* libc-cancellation.c: Likewise.
	* old_pthread_cond_broadcast.c: Likewise.
	* old_pthread_cond_signal.c: Likewise.
	* old_pthread_cond_timedwait.c: Likewise.
	* old_pthread_cond_wait.c: Likewise.
	* pthread_cancel.c: Likewise.
	* pthread_create.c: Likewise.
	* pthread_detach.c: Likewise.
	* pthread_join.c: Likewise.
	* pthread_key_delete.c: Likewise.
	* pthread_setcancelstate.c: Likewise.
	* pthread_setcanceltype.c: Likewise.
	* pthread_timedjoin.c: Likewise.
	* pthread_tryjoin.c: Likewise.
	* sysdeps/pthread/createthread.c: Likewise.
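The call-site change is mechanical: the old atomic_compare_and_exchange_acq returned 0 on success and callers compared its result against 0, while the renamed atomic_compare_and_exchange_bool_acq is itself the failure flag (nonzero means the exchange did not happen), so it can be used directly as a condition. The sketch below is illustrative only and is not part of the patch; the flagword variable and the set_bit helper are invented names, but the (mem, newval, oldval) argument order is the one used throughout the hunks that follow.

/* Illustrative sketch only -- not code from this commit.  "flagword" and
   "set_bit" are invented names for the example.  */
#include <atomic.h>	/* glibc-internal header providing the macros */

static int flagword;

static void
set_bit (int bit)
{
  int oldval, newval;

  do
    {
      oldval = flagword;
      newval = oldval | bit;
    }
  /* Old form: atomic_compare_and_exchange_acq (&flagword, newval, oldval) != 0
     New form: the _bool_ macro is already the failure flag, so it is the
     loop condition itself.  */
  while (atomic_compare_and_exchange_bool_acq (&flagword, newval, oldval));
}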
-rw-r--r--  gmon/mcount.c | 4
-rw-r--r--  malloc/set-freeres.c | 2
-rw-r--r--  nptl/ChangeLog | 21
-rw-r--r--  nptl/cancellation.c | 12
-rw-r--r--  nptl/cleanup_defer.c | 16
-rw-r--r--  nptl/init.c | 4
-rw-r--r--  nptl/libc-cancellation.c | 29
-rw-r--r--  nptl/old_pthread_cond_broadcast.c | 2
-rw-r--r--  nptl/old_pthread_cond_signal.c | 2
-rw-r--r--  nptl/old_pthread_cond_timedwait.c | 2
-rw-r--r--  nptl/old_pthread_cond_wait.c | 2
-rw-r--r--  nptl/pthread_cancel.c | 18
-rw-r--r--  nptl/pthread_create.c | 4
-rw-r--r--  nptl/pthread_detach.c | 2
-rw-r--r--  nptl/pthread_join.c | 5
-rw-r--r--  nptl/pthread_key_delete.c | 6
-rw-r--r--  nptl/pthread_setcancelstate.c | 6
-rw-r--r--  nptl/pthread_setcanceltype.c | 6
-rw-r--r--  nptl/pthread_timedjoin.c | 4
-rw-r--r--  nptl/pthread_tryjoin.c | 4
-rw-r--r--  nptl/sysdeps/pthread/createthread.c | 4
-rw-r--r--  nscd/cache.c | 4
-rw-r--r--  stdlib/cxa_finalize.c | 4
-rw-r--r--  sysdeps/i386/i486/bits/atomic.h | 49
-rw-r--r--  sysdeps/ia64/bits/atomic.h | 18
-rw-r--r--  sysdeps/powerpc/bits/atomic.h | 62
-rw-r--r--  sysdeps/s390/bits/atomic.h | 10
-rw-r--r--  sysdeps/unix/sysv/linux/getsysstats.c | 2
-rw-r--r--  sysdeps/x86_64/bits/atomic.h | 16
29 files changed, 172 insertions, 148 deletions
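Most of the sysdeps/*/bits/atomic.h hunks below split the old __arch_compare_and_exchange_N_acq macros, which only reported success or failure, into a _bool_ family (nonzero on failure) and a _val_ family that returns the value previously found in memory (the i386 versions, for example, drop the setne and hand back the accumulator). The snippet below sketches how the two families relate; my_cas_bool_acq is an invented name and this is an assumed shape for a generic fallback, not a quote from include/atomic.h or from this commit.

/* Illustrative sketch only -- my_cas_bool_acq is an invented name.
   A boolean compare-and-exchange can be built on the value-returning form:
   the operation failed exactly when the value found in memory differed
   from the expected old value.  */
#define my_cas_bool_acq(mem, newval, oldval) \
  ({ __typeof (oldval) __cas_old = (oldval);				      \
     atomic_compare_and_exchange_val_acq (mem, newval, __cas_old)	      \
       != __cas_old; })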
diff --git a/gmon/mcount.c b/gmon/mcount.c
index c146650..e18bf86 100644
--- a/gmon/mcount.c
+++ b/gmon/mcount.c
@@ -69,8 +69,8 @@ _MCOUNT_DECL(frompc, selfpc) /* _mcount; may be static, inline, etc */
* check that we are profiling
* and that we aren't recursively invoked.
*/
- if (atomic_compare_and_exchange_acq (&p->state, GMON_PROF_BUSY,
- GMON_PROF_ON))
+ if (atomic_compare_and_exchange_bool_acq (&p->state, GMON_PROF_BUSY,
+ GMON_PROF_ON))
return;
/*
diff --git a/malloc/set-freeres.c b/malloc/set-freeres.c
index 1c0733e..6123141 100644
--- a/malloc/set-freeres.c
+++ b/malloc/set-freeres.c
@@ -36,7 +36,7 @@ __libc_freeres (void)
protect for multiple executions since these are fatal. */
static long int already_called;
- if (! atomic_compare_and_exchange_acq (&already_called, 1, 0))
+ if (! atomic_compare_and_exchange_bool_acq (&already_called, 1, 0))
{
void * const *p;
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index c68ad8d..3029aaf 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,24 @@
+2003-03-21 Ulrich Drepper <drepper@redhat.com>
+
+ * cancellation.c: Adjust for new form of compare&exchange macros.
+ * cleanup_defer.c: Likewise.
+ * init.c: Likewise.
+ * libc-cancellation.c: Likewise.
+ * old_pthread_cond_broadcast.c: Likewise.
+ * old_pthread_cond_signal.c: Likewise.
+ * old_pthread_cond_timedwait.c: Likewise.
+ * old_pthread_cond_wait.c: Likewise.
+ * pthread_cancel.c: Likewise.
+ * pthread_create.c: Likewise.
+ * pthread_detach.c: Likewise.
+ * pthread_join.c: Likewise.
+ * pthread_key_delete.c: Likewise.
+ * pthread_setcancelstate.c: Likewise.
+ * pthread_setcanceltype.c: Likewise.
+ * pthread_timedjoin.c: Likewise.
+ * pthread_tryjoin.c: Likewise.
+ * sysdeps/pthread/createthread.c: Likewise.
+
2003-03-20 Ulrich Drepper <drepper@redhat.com>
* sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Include <atomic.h>.
diff --git a/nptl/cancellation.c b/nptl/cancellation.c
index 1dfbe4b..d88cae3 100644
--- a/nptl/cancellation.c
+++ b/nptl/cancellation.c
@@ -41,8 +41,8 @@ __pthread_enable_asynccancel (void)
if (newval == oldval)
break;
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
{
if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
{
@@ -72,8 +72,8 @@ __pthread_enable_asynccancel_2 (int *oldvalp)
if (newval == oldval)
break;
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
{
if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
{
@@ -106,8 +106,8 @@ __pthread_disable_asynccancel (int oldtype)
if (newval == oldval)
break;
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
break;
}
}
diff --git a/nptl/cleanup_defer.c b/nptl/cleanup_defer.c
index 084216e..b72553b 100644
--- a/nptl/cleanup_defer.c
+++ b/nptl/cleanup_defer.c
@@ -41,10 +41,10 @@ _pthread_cleanup_push_defer (buffer, routine, arg)
/* Disable asynchronous cancellation for now. */
if (__builtin_expect (cancelhandling & CANCELTYPE_BITMASK, 0))
{
- while (atomic_compare_and_exchange_acq (&self->cancelhandling,
- cancelhandling
- & ~CANCELTYPE_BITMASK,
- cancelhandling) != 0)
+ while (atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ cancelhandling
+ & ~CANCELTYPE_BITMASK,
+ cancelhandling))
cancelhandling = self->cancelhandling;
}
@@ -70,10 +70,10 @@ _pthread_cleanup_pop_restore (buffer, execute)
&& ((cancelhandling = THREAD_GETMEM (self, cancelhandling))
& CANCELTYPE_BITMASK) == 0)
{
- while (atomic_compare_and_exchange_acq (&self->cancelhandling,
- cancelhandling
- | CANCELTYPE_BITMASK,
- cancelhandling) != 0)
+ while (atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ cancelhandling
+ | CANCELTYPE_BITMASK,
+ cancelhandling))
cancelhandling = self->cancelhandling;
CANCELLATION_P (self);
diff --git a/nptl/init.c b/nptl/init.c
index 7ad2971..4237c6e 100644
--- a/nptl/init.c
+++ b/nptl/init.c
@@ -147,8 +147,8 @@ sigcancel_handler (int sig __attribute ((unused)))
/* Already canceled or exiting. */
break;
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
{
/* Set the return value. */
THREAD_SETMEM (self, result, PTHREAD_CANCELED);
diff --git a/nptl/libc-cancellation.c b/nptl/libc-cancellation.c
index d9ad94f..0d584fb 100644
--- a/nptl/libc-cancellation.c
+++ b/nptl/libc-cancellation.c
@@ -34,11 +34,12 @@ __libc_enable_asynccancel (void)
{
struct pthread *self = THREAD_SELF;
int oldval;
+ int newval;
- while (1)
+ do
{
oldval = THREAD_GETMEM (self, cancelhandling);
- int newval = oldval | CANCELTYPE_BITMASK;
+ newval = oldval | CANCELTYPE_BITMASK;
if (__builtin_expect ((oldval & CANCELED_BITMASK) != 0, 0))
{
@@ -46,8 +47,8 @@ __libc_enable_asynccancel (void)
if ((oldval & EXITING_BITMASK) != 0)
break;
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
/* Somebody else modified the word, try again. */
continue;
@@ -60,11 +61,9 @@ __libc_enable_asynccancel (void)
/* NOTREACHED */
}
-
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
- break;
}
+ while (atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval));
return oldval;
}
@@ -80,19 +79,19 @@ __libc_disable_asynccancel (int oldtype)
return;
struct pthread *self = THREAD_SELF;
+ int oldval;
+ int newval;
- while (1)
+ do
{
- int oldval = THREAD_GETMEM (self, cancelhandling);
- int newval = oldval & ~CANCELTYPE_BITMASK;
+ oldval = THREAD_GETMEM (self, cancelhandling);
+ newval = oldval & ~CANCELTYPE_BITMASK;
if (newval == oldval)
break;
-
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
- break;
}
+ while (atomic_compare_and_exchange_bool_acq (&self->cancelhandling, newval,
+ oldval));
}
#endif
diff --git a/nptl/old_pthread_cond_broadcast.c b/nptl/old_pthread_cond_broadcast.c
index 0db0aea..3852943 100644
--- a/nptl/old_pthread_cond_broadcast.c
+++ b/nptl/old_pthread_cond_broadcast.c
@@ -46,7 +46,7 @@ __pthread_cond_broadcast_2_0 (cond)
(void) pthread_cond_init (newcond, NULL);
#endif
- if (atomic_compare_and_exchange_acq (&cond->cond, newcond, NULL) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&cond->cond, newcond, NULL))
/* Somebody else just initialized the condvar. */
free (newcond);
}
diff --git a/nptl/old_pthread_cond_signal.c b/nptl/old_pthread_cond_signal.c
index ae54209..65beb0b 100644
--- a/nptl/old_pthread_cond_signal.c
+++ b/nptl/old_pthread_cond_signal.c
@@ -46,7 +46,7 @@ __pthread_cond_signal_2_0 (cond)
(void) pthread_cond_init (newcond, NULL);
#endif
- if (atomic_compare_and_exchange_acq (&cond->cond, newcond, NULL) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&cond->cond, newcond, NULL))
/* Somebody else just initialized the condvar. */
free (newcond);
}
diff --git a/nptl/old_pthread_cond_timedwait.c b/nptl/old_pthread_cond_timedwait.c
index b30e182..27c1093 100644
--- a/nptl/old_pthread_cond_timedwait.c
+++ b/nptl/old_pthread_cond_timedwait.c
@@ -48,7 +48,7 @@ __pthread_cond_timedwait_2_0 (cond, mutex, abstime)
(void) pthread_cond_init (newcond, NULL);
#endif
- if (atomic_compare_and_exchange_acq (&cond->cond, newcond, NULL) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&cond->cond, newcond, NULL))
/* Somebody else just initialized the condvar. */
free (newcond);
}
diff --git a/nptl/old_pthread_cond_wait.c b/nptl/old_pthread_cond_wait.c
index 50505a2..0a503a1 100644
--- a/nptl/old_pthread_cond_wait.c
+++ b/nptl/old_pthread_cond_wait.c
@@ -47,7 +47,7 @@ __pthread_cond_wait_2_0 (cond, mutex)
(void) pthread_cond_init (newcond, NULL);
#endif
- if (atomic_compare_and_exchange_acq (&cond->cond, newcond, NULL) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&cond->cond, newcond, NULL))
/* Somebody else just initialized the condvar. */
free (newcond);
}
diff --git a/nptl/pthread_cancel.c b/nptl/pthread_cancel.c
index f11a924..43b65b6 100644
--- a/nptl/pthread_cancel.c
+++ b/nptl/pthread_cancel.c
@@ -36,10 +36,12 @@ pthread_cancel (th)
return ESRCH;
int result = 0;
- while (1)
+ int oldval;
+ int newval;
+ do
{
- int oldval = pd->cancelhandling;
- int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
+ oldval = pd->cancelhandling;
+ newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
/* Avoid doing unnecessary work. The atomic operation can
potentially be expensive if the bug has to be locked and
@@ -66,13 +68,11 @@ pthread_cancel (th)
break;
}
-
- /* Mark the thread as canceled. This has to be done
- atomically since other bits could be modified as well. */
- if (atomic_compare_and_exchange_acq (&pd->cancelhandling, newval,
- oldval) == 0)
- break;
}
+ /* Mark the thread as canceled. This has to be done
+ atomically since other bits could be modified as well. */
+ while (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling, newval,
+ oldval));
return result;
}
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index ea05747..4121874 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -260,8 +260,8 @@ start_thread (void *arg)
do
pd->nextevent = __nptl_last_event;
- while (atomic_compare_and_exchange_acq (&__nptl_last_event, pd,
- pd->nextevent) != 0);
+ while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
+ pd, pd->nextevent));
}
/* Now call the function to signal the event. */
diff --git a/nptl/pthread_detach.c b/nptl/pthread_detach.c
index ff58e3b..ce13a2c 100644
--- a/nptl/pthread_detach.c
+++ b/nptl/pthread_detach.c
@@ -36,7 +36,7 @@ pthread_detach (th)
int result = 0;
/* Mark the thread as detached. */
- if (atomic_compare_and_exchange_acq (&pd->joinid, pd, NULL) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&pd->joinid, pd, NULL))
{
/* There are two possibilities here. First, the thread might
already be detached. In this case we return EINVAL.
diff --git a/nptl/pthread_join.c b/nptl/pthread_join.c
index 5a0ec95..f77c2c9 100644
--- a/nptl/pthread_join.c
+++ b/nptl/pthread_join.c
@@ -66,8 +66,9 @@ pthread_join (threadid, thread_return)
/* Wait for the thread to finish. If it is already locked something
is wrong. There can only be one waiter. */
- if (__builtin_expect (atomic_compare_and_exchange_acq (&pd->joinid, self,
- NULL) != 0, 0))
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (&pd->joinid,
+ self,
+ NULL), 0))
/* There is already somebody waiting for the thread. */
return EINVAL;
diff --git a/nptl/pthread_key_delete.c b/nptl/pthread_key_delete.c
index a0145f8..ae7d7c4 100644
--- a/nptl/pthread_key_delete.c
+++ b/nptl/pthread_key_delete.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -33,8 +33,8 @@ pthread_key_delete (key)
unsigned int seq = __pthread_keys[key].seq;
if (__builtin_expect (! KEY_UNUSED (seq), 1)
- && atomic_compare_and_exchange_acq (&__pthread_keys[key].seq,
- seq + 1, seq) == 0)
+ && ! atomic_compare_and_exchange_bool_acq (&__pthread_keys[key].seq,
+ seq + 1, seq))
/* We deleted a valid key. */
result = 0;
}
diff --git a/nptl/pthread_setcancelstate.c b/nptl/pthread_setcancelstate.c
index 3f6ed86..a6af063 100644
--- a/nptl/pthread_setcancelstate.c
+++ b/nptl/pthread_setcancelstate.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -54,8 +54,8 @@ __pthread_setcancelstate (state, oldstate)
/* Update the cancel handling word. This has to be done
atomically since other bits could be modified as well. */
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
{
if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
__do_cancel ();
diff --git a/nptl/pthread_setcanceltype.c b/nptl/pthread_setcanceltype.c
index bb4b249..5a04635 100644
--- a/nptl/pthread_setcanceltype.c
+++ b/nptl/pthread_setcanceltype.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -54,8 +54,8 @@ __pthread_setcanceltype (type, oldtype)
/* Update the cancel handling word. This has to be done
atomically since other bits could be modified as well. */
- if (atomic_compare_and_exchange_acq (&self->cancelhandling, newval,
- oldval) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&self->cancelhandling,
+ newval, oldval))
{
if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
{
diff --git a/nptl/pthread_timedjoin.c b/nptl/pthread_timedjoin.c
index c83c0ef..1cc0721 100644
--- a/nptl/pthread_timedjoin.c
+++ b/nptl/pthread_timedjoin.c
@@ -63,8 +63,8 @@ pthread_timedjoin_np (threadid, thread_return, abstime)
/* Wait for the thread to finish. If it is already locked something
is wrong. There can only be one waiter. */
- if (__builtin_expect (atomic_compare_and_exchange_acq (&pd->joinid, self,
- NULL) != 0, 0))
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (&pd->joinid,
+ self, NULL), 0))
/* There is already somebody waiting for the thread. */
return EINVAL;
diff --git a/nptl/pthread_tryjoin.c b/nptl/pthread_tryjoin.c
index 88d2e8b..904cb52 100644
--- a/nptl/pthread_tryjoin.c
+++ b/nptl/pthread_tryjoin.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -59,7 +59,7 @@ pthread_tryjoin_np (threadid, thread_return)
/* Wait for the thread to finish. If it is already locked something
is wrong. There can only be one waiter. */
- if (atomic_compare_and_exchange_acq (&pd->joinid, self, NULL) != 0)
+ if (atomic_compare_and_exchange_bool_acq (&pd->joinid, self, NULL))
/* There is already somebody waiting for the thread. */
return EINVAL;
diff --git a/nptl/sysdeps/pthread/createthread.c b/nptl/sysdeps/pthread/createthread.c
index 797176d..fae744f 100644
--- a/nptl/sysdeps/pthread/createthread.c
+++ b/nptl/sysdeps/pthread/createthread.c
@@ -100,8 +100,8 @@ create_thread (struct pthread *pd, STACK_VARIABLES_PARMS)
/* Enqueue the descriptor. */
do
pd->nextevent = __nptl_last_event;
- while (atomic_compare_and_exchange_acq (&__nptl_last_event, pd,
- pd->nextevent) != 0);
+ while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event, pd,
+ pd->nextevent) != 0);
/* Now call the function which signals the event. */
__nptl_create_event ();
diff --git a/nscd/cache.c b/nscd/cache.c
index 788d47b..6492092 100644
--- a/nscd/cache.c
+++ b/nscd/cache.c
@@ -101,8 +101,8 @@ cache_add (int type, void *key, size_t len, const void *packet, size_t total,
/* Put the new entry in the first position. */
do
newp->next = table->array[hash];
- while (atomic_compare_and_exchange_acq (&table->array[hash], newp,
- newp->next));
+ while (atomic_compare_and_exchange_bool_acq (&table->array[hash], newp,
+ newp->next));
/* Update the statistics. */
if (data == (void *) -1)
diff --git a/stdlib/cxa_finalize.c b/stdlib/cxa_finalize.c
index 19d6567..792aeeb 100644
--- a/stdlib/cxa_finalize.c
+++ b/stdlib/cxa_finalize.c
@@ -36,8 +36,8 @@ __cxa_finalize (void *d)
for (f = &funcs->fns[funcs->idx - 1]; f >= &funcs->fns[0]; --f)
if ((d == NULL || d == f->func.cxa.dso_handle)
/* We don't want to run this cleanup more than once. */
- && (atomic_compare_and_exchange_acq (&f->flavor, ef_free, ef_cxa)
- == 0))
+ && ! atomic_compare_and_exchange_bool_acq (&f->flavor, ef_free,
+ ef_cxa))
(*f->func.cxa.fn) (f->func.cxa.arg, 0);
}
diff --git a/sysdeps/i386/i486/bits/atomic.h b/sysdeps/i386/i486/bits/atomic.h
index e22e560..2255e19 100644
--- a/sysdeps/i386/i486/bits/atomic.h
+++ b/sysdeps/i386/i486/bits/atomic.h
@@ -55,23 +55,23 @@ typedef uintmax_t uatomic_max_t;
#endif
-#define __arch_compare_and_exchange_8_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
- __asm __volatile (LOCK "cmpxchgb %b2, %1; setne %0" \
+#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile (LOCK "cmpxchgb %b2, %1" \
: "=a" (ret), "=m" (*mem) \
: "q" (newval), "1" (*mem), "0" (oldval)); \
ret; })
-#define __arch_compare_and_exchange_16_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
- __asm __volatile (LOCK "cmpxchgw %w2, %1; setne %0" \
+#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile (LOCK "cmpxchgw %w2, %1" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "1" (*mem), "0" (oldval)); \
ret; })
-#define __arch_compare_and_exchange_32_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
- __asm __volatile (LOCK "cmpxchgl %2, %1; setne %0" \
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile (LOCK "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "1" (*mem), "0" (oldval)); \
ret; })
@@ -83,37 +83,34 @@ typedef uintmax_t uatomic_max_t;
really going to be used the code below can be used on Intel Pentium
and later, but NOT on i486. */
#if 1
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
(abort (), 0)
#else
# ifdef __PIC__
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
- int ignore; \
- __asm __volatile ("xchgl %3, %%ebx\n\t" \
- LOCK "cmpxchg8b %2, %1\n\t" \
- "setne %0\n\t" \
- "xchgl %3, %%ebx" \
- : "=a" (ret), "=m" (*mem), "=d" (ignore) \
+# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile ("xchgl %2, %%ebx\n\t" \
+ LOCK "cmpxchg8b %1\n\t" \
+ "xchgl %2, %%ebx" \
+ : "=A" (ret), "=m" (*mem) \
: "DS" (((unsigned long long int) (newval)) \
& 0xffffffff), \
"c" (((unsigned long long int) (newval)) >> 32), \
- "1" (*mem), "0" (((unsigned long long int) (oldval)) \
+ "1" (*mem), "a" (((unsigned long long int) (oldval)) \
& 0xffffffff), \
- "2" (((unsigned long long int) (oldval)) >> 32)); \
+ "d" (((unsigned long long int) (oldval)) >> 32)); \
ret; })
# else
# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
- int ignore; \
- __asm __volatile (LOCK "cmpxchg8b %2, %1; setne %0" \
- : "=a" (ret), "=m" (*mem), "=d" (ignore) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile (LOCK "cmpxchg8b %1" \
+ : "=A" (ret), "=m" (*mem) \
: "b" (((unsigned long long int) (newval)) \
& 0xffffffff), \
"c" (((unsigned long long int) (newval)) >> 32), \
- "1" (*mem), "0" (((unsigned long long int) (oldval)) \
+ "1" (*mem), "a" (((unsigned long long int) (oldval)) \
& 0xffffffff), \
- "2" (((unsigned long long int) (oldval)) >> 32)); \
+ "d" (((unsigned long long int) (oldval)) >> 32)); \
ret; })
# endif
#endif
diff --git a/sysdeps/ia64/bits/atomic.h b/sysdeps/ia64/bits/atomic.h
index 7c6ebf1..27789c0 100644
--- a/sysdeps/ia64/bits/atomic.h
+++ b/sysdeps/ia64/bits/atomic.h
@@ -45,25 +45,31 @@ typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;
-#define __arch_compare_and_exchange_8_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
(abort (), 0)
-#define __arch_compare_and_exchange_16_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
(abort (), 0)
-#define __arch_compare_and_exchange_32_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
(!__sync_bool_compare_and_swap_si ((int *) (mem), (int) (long) (oldval), \
(int) (long) (newval)))
-#define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
(!__sync_bool_compare_and_swap_di ((long *) (mem), (long) (oldval), \
(long) (newval)))
-#define __arch_compare_and_exchange_32_val_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+ (abort (), 0)
+
+#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+ (abort (), 0)
+
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
__sync_val_compare_and_swap_si ((int *) (mem), (int) (long) (oldval), \
(int) (long) (newval))
-#define __arch_compare_and_exchange_64_val_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
__sync_val_compare_and_swap_di ((long *) (mem), (long) (oldval), \
(long) (newval))
diff --git a/sysdeps/powerpc/bits/atomic.h b/sysdeps/powerpc/bits/atomic.h
index 956272c..49f1c14 100644
--- a/sysdeps/powerpc/bits/atomic.h
+++ b/sysdeps/powerpc/bits/atomic.h
@@ -46,10 +46,10 @@ typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;
-#define __arch_compare_and_exchange_8_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
(abort (), 0)
-#define __arch_compare_and_exchange_16_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
(abort (), 0)
#ifdef UP
@@ -69,41 +69,41 @@ typedef uintmax_t uatomic_max_t;
* XXX this may not work properly on 64-bit if the register
* containing oldval has the high half non-zero for some reason.
*/
-#define __arch_compare_and_exchange_32_acq(mem, newval, oldval) \
-({ \
- unsigned int __tmp; \
- __asm __volatile (__ARCH_REL_INSTR "\n" \
- "1: lwarx %0,0,%1\n" \
- " subf. %0,%2,%0\n" \
- " bne 2f\n" \
- " stwcx. %3,0,%1\n" \
- " bne- 1b\n" \
- "2: " __ARCH_ACQ_INSTR \
- : "=&r" (__tmp) \
- : "r" (mem), "r" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp != 0; \
+#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
+({ \
+ unsigned int __tmp; \
+ __asm __volatile (__ARCH_REL_INSTR "\n" \
+ "1: lwarx %0,0,%1\n" \
+ " subf. %0,%2,%0\n" \
+ " bne 2f\n" \
+ " stwcx. %3,0,%1\n" \
+ " bne- 1b\n" \
+ "2: " __ARCH_ACQ_INSTR \
+ : "=&r" (__tmp) \
+ : "r" (mem), "r" (oldval), "r" (newval) \
+ : "cr0", "memory"); \
+ __tmp != 0; \
})
#ifdef __powerpc64__
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval)\
-({ \
- unsigned long __tmp; \
- __asm __volatile (__ARCH_REL_INSTR "\n" \
- "1: ldarx %0,0,%1\n" \
- " subf. %0,%2,%0\n" \
- " bne 2f\n" \
- " stdcx. %3,0,%1\n" \
- " bne- 1b\n" \
- "2: " __ARCH_ACQ_INSTR \
- : "=&r" (__tmp) \
- : "r" (mem), "r" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp != 0; \
+# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
+({ \
+ unsigned long __tmp; \
+ __asm __volatile (__ARCH_REL_INSTR "\n" \
+ "1: ldarx %0,0,%1\n" \
+ " subf. %0,%2,%0\n" \
+ " bne 2f\n" \
+ " stdcx. %3,0,%1\n" \
+ " bne- 1b\n" \
+ "2: " __ARCH_ACQ_INSTR \
+ : "=&r" (__tmp) \
+ : "r" (mem), "r" (oldval), "r" (newval) \
+ : "cr0", "memory"); \
+ __tmp != 0; \
})
#else /* powerpc32 */
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
(abort (), 0)
#endif
diff --git a/sysdeps/s390/bits/atomic.h b/sysdeps/s390/bits/atomic.h
index 375ae0d..74321b6 100644
--- a/sysdeps/s390/bits/atomic.h
+++ b/sysdeps/s390/bits/atomic.h
@@ -45,13 +45,13 @@ typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;
-#define __arch_compare_and_exchange_8_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
(abort (), 0)
-#define __arch_compare_and_exchange_16_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
(abort (), 0)
-#define __arch_compare_and_exchange_32_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({ unsigned int *__mem = (unsigned int *) (mem); \
unsigned int __old = (unsigned int) (oldval); \
unsigned int __cmp = __old; \
@@ -61,7 +61,7 @@ typedef uintmax_t uatomic_max_t;
__cmp != __old; })
#ifdef __s390x__
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
({ unsigned long int *__mem = (unsigned long int *) (mem); \
unsigned long int __old = (unsigned long int) (oldval); \
unsigned long int __cmp = __old; \
@@ -73,6 +73,6 @@ typedef uintmax_t uatomic_max_t;
/* For 31 bit we do not really need 64-bit compare-and-exchange. We can
implement them by use of the csd instruction. The straightforward
implementation causes warnings so we skip the definition for now. */
-# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
(abort (), 0)
#endif
diff --git a/sysdeps/unix/sysv/linux/getsysstats.c b/sysdeps/unix/sysv/linux/getsysstats.c
index c44e45c..71e90fe 100644
--- a/sysdeps/unix/sysv/linux/getsysstats.c
+++ b/sysdeps/unix/sysv/linux/getsysstats.c
@@ -85,7 +85,7 @@ get_proc_path (char *buffer, size_t bufsize)
/* Now store the copied value. But do it atomically. */
assert (sizeof (long int) == sizeof (void *__unbounded));
- if (atomic_compare_and_exchange_acq (&mount_proc, copy_result, NULL) == 0)
+ if (! atomic_compare_and_exchange_bool_acq (&mount_proc, copy_result, NULL))
/* Replacing the value failed. This means another thread was
faster and we don't need the copy anymore. */
free (copy_result);
diff --git a/sysdeps/x86_64/bits/atomic.h b/sysdeps/x86_64/bits/atomic.h
index 22d3ab0..742f8b1 100644
--- a/sysdeps/x86_64/bits/atomic.h
+++ b/sysdeps/x86_64/bits/atomic.h
@@ -55,29 +55,29 @@ typedef uintmax_t uatomic_max_t;
#endif
-#define __arch_compare_and_exchange_8_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
+#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
__asm __volatile (LOCK "cmpxchgb %b2, %1; setne %0" \
: "=a" (ret), "=m" (*mem) \
: "q" (newval), "1" (*mem), "0" (oldval)); \
ret; })
-#define __arch_compare_and_exchange_16_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
+#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
__asm __volatile (LOCK "cmpxchgw %w2, %1; setne %0" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "1" (*mem), "0" (oldval)); \
ret; })
-#define __arch_compare_and_exchange_32_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
__asm __volatile (LOCK "cmpxchgl %2, %1; setne %0" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "1" (*mem), "0" (oldval)); \
ret; })
-#define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
- ({ unsigned char ret; \
+#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
__asm __volatile (LOCK "cmpxchgq %q2, %1; setne %0" \
: "=a" (ret), "=m" (*mem) \
: "r" (newval), "1" (*mem), "0" (oldval)); \