author    Adhemerval Zanella <azanella@linux.vnet.ibm.com>  2014-11-25 14:32:54 -0500
committer Adhemerval Zanella <azanella@linux.vnet.ibm.com>  2014-11-26 07:06:28 -0500
commit    704f794714704ba430d84d10d6809acaf7ca59bf (patch)
tree      753816581f7d1f54d4af4921d776077b31a9a4e4 /sysdeps/powerpc
parent    cdcb42d7f786fe5ee1ca60065924d0b5c6649dd0 (diff)
powerpc: Fix missing barriers in atomic_exchange_and_add_{acq,rel}
On powerpc, atomic_exchange_and_add is implemented without any barriers. This patch adds the missing instruction and memory barrier for acquire and release semantics.
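
To see why both variants matter, here is a hypothetical ticket-lock sketch built on the two new macros (the type and function names below are illustrative, not part of this patch; the macros themselves are glibc-internal):

/* Illustrative ticket lock: the acquire form guards entry to the
   critical section, the release form guards the exit.  */
typedef struct
{
  unsigned int next;    /* Next ticket to hand out.  */
  unsigned int serving; /* Ticket currently being served.  */
} ticket_lock_t;

static void
ticket_lock (ticket_lock_t *lock)
{
  /* Acquire: the barrier emitted after the stwcx. loop keeps the
     critical section's memory accesses from executing before the
     ticket is actually taken.  */
  unsigned int ticket = atomic_exchange_and_add_acq (&lock->next, 1);
  /* A production lock would use a relaxed atomic load here; a plain
     volatile read keeps the sketch self-contained.  */
  while (*(volatile unsigned int *) &lock->serving != ticket)
    ; /* Spin until it is our turn.  */
}

static void
ticket_unlock (ticket_lock_t *lock)
{
  /* Release: the barrier emitted before the lwarx makes every store
     from the critical section visible before the counter advances.  */
  atomic_exchange_and_add_rel (&lock->serving, 1);
}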
Diffstat (limited to 'sysdeps/powerpc')
-rw-r--r--  sysdeps/powerpc/bits/atomic.h           | 50
-rw-r--r--  sysdeps/powerpc/powerpc32/bits/atomic.h |  6
-rw-r--r--  sysdeps/powerpc/powerpc64/bits/atomic.h | 28
3 files changed, 84 insertions(+), 0 deletions(-)
diff --git a/sysdeps/powerpc/bits/atomic.h b/sysdeps/powerpc/bits/atomic.h
index f312676..b05b0f7 100644
--- a/sysdeps/powerpc/bits/atomic.h
+++ b/sysdeps/powerpc/bits/atomic.h
@@ -152,6 +152,34 @@ typedef uintmax_t uatomic_max_t;
__val; \
})
+#define __arch_atomic_exchange_and_add_32_acq(mem, value) \
+ ({ \
+ __typeof (*mem) __val, __tmp; \
+ __asm __volatile ("1: lwarx %0,0,%3" MUTEX_HINT_ACQ "\n" \
+ " add %1,%0,%4\n" \
+ " stwcx. %1,0,%3\n" \
+ " bne- 1b\n" \
+ __ARCH_ACQ_INSTR \
+ : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+ : "b" (mem), "r" (value), "m" (*mem) \
+ : "cr0", "memory"); \
+ __val; \
+ })
+
+#define __arch_atomic_exchange_and_add_32_rel(mem, value) \
+ ({ \
+ __typeof (*mem) __val, __tmp; \
+ __asm __volatile (__ARCH_REL_INSTR "\n" \
+ "1: lwarx %0,0,%3" MUTEX_HINT_REL "\n" \
+ " add %1,%0,%4\n" \
+ " stwcx. %1,0,%3\n" \
+ " bne- 1b" \
+ : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+ : "b" (mem), "r" (value), "m" (*mem) \
+ : "cr0", "memory"); \
+ __val; \
+ })
+
#define __arch_atomic_increment_val_32(mem) \
({ \
__typeof (*(mem)) __val; \
@@ -252,6 +280,28 @@ typedef uintmax_t uatomic_max_t;
abort (); \
__result; \
})
+#define atomic_exchange_and_add_acq(mem, value) \
+ ({ \
+ __typeof (*(mem)) __result; \
+ if (sizeof (*mem) == 4) \
+ __result = __arch_atomic_exchange_and_add_32_acq (mem, value); \
+ else if (sizeof (*mem) == 8) \
+ __result = __arch_atomic_exchange_and_add_64_acq (mem, value); \
+ else \
+ abort (); \
+ __result; \
+ })
+#define atomic_exchange_and_add_rel(mem, value) \
+ ({ \
+ __typeof (*(mem)) __result; \
+ if (sizeof (*mem) == 4) \
+ __result = __arch_atomic_exchange_and_add_32_rel (mem, value); \
+ else if (sizeof (*mem) == 8) \
+ __result = __arch_atomic_exchange_and_add_64_rel (mem, value); \
+ else \
+ abort (); \
+ __result; \
+ })
#define atomic_increment_val(mem) \
({ \
diff --git a/sysdeps/powerpc/powerpc32/bits/atomic.h b/sysdeps/powerpc/powerpc32/bits/atomic.h
index 117b5a0..e2a1bf4 100644
--- a/sysdeps/powerpc/powerpc32/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc32/bits/atomic.h
@@ -98,6 +98,12 @@
#define __arch_atomic_exchange_and_add_64(mem, value) \
({ abort (); (*mem) = (value); })
+#define __arch_atomic_exchange_and_add_64_acq(mem, value) \
+ ({ abort (); (*mem) = (value); })
+
+#define __arch_atomic_exchange_and_add_64_rel(mem, value) \
+ ({ abort (); (*mem) = (value); })
+
#define __arch_atomic_increment_val_64(mem) \
({ abort (); (*mem)++; })
diff --git a/sysdeps/powerpc/powerpc64/bits/atomic.h b/sysdeps/powerpc/powerpc64/bits/atomic.h
index 83b5dfe..46117b0 100644
--- a/sysdeps/powerpc/powerpc64/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc64/bits/atomic.h
@@ -186,6 +186,34 @@
__val; \
})
+#define __arch_atomic_exchange_and_add_64_acq(mem, value) \
+ ({ \
+ __typeof (*mem) __val, __tmp; \
+ __asm __volatile ("1: ldarx %0,0,%3" MUTEX_HINT_ACQ "\n" \
+ " add %1,%0,%4\n" \
+ " stdcx. %1,0,%3\n" \
+ " bne- 1b\n" \
+ __ARCH_ACQ_INSTR \
+ : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+ : "b" (mem), "r" (value), "m" (*mem) \
+ : "cr0", "memory"); \
+ __val; \
+ })
+
+#define __arch_atomic_exchange_and_add_64_rel(mem, value) \
+ ({ \
+ __typeof (*mem) __val, __tmp; \
+ __asm __volatile (__ARCH_REL_INSTR "\n" \
+ "1: ldarx %0,0,%3" MUTEX_HINT_REL "\n" \
+ " add %1,%0,%4\n" \
+ " stdcx. %1,0,%3\n" \
+ " bne- 1b" \
+ : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+ : "b" (mem), "r" (value), "m" (*mem) \
+ : "cr0", "memory"); \
+ __val; \
+ })
+
#define __arch_atomic_increment_val_64(mem) \
({ \
__typeof (*(mem)) __val; \
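
The pattern is the same in both files: the acquire variant places the barrier (__ARCH_ACQ_INSTR) after the st[wd]cx. loop, while the release variant places the barrier (__ARCH_REL_INSTR) before the l[wd]arx. For comparison, the semantics line up roughly with GCC's __atomic builtins; the sketch below is for illustration only and is not how glibc implements these operations (and the mapping to the barrier instructions is not exactly identical):

/* Rough C11-style equivalents of the two new 64-bit operations, for
   comparison only; glibc uses the hand-coded ldarx/stdcx. loops above.  */
#include <stdint.h>

static inline uint64_t
fetch_add_acquire (uint64_t *mem, uint64_t value)
{
  return __atomic_fetch_add (mem, value, __ATOMIC_ACQUIRE);
}

static inline uint64_t
fetch_add_release (uint64_t *mem, uint64_t value)
{
  return __atomic_fetch_add (mem, value, __ATOMIC_RELEASE);
}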