author     Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>  2015-12-28 12:24:43 -0200
committer  Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>  2016-01-08 17:47:33 -0200
commit     42bf1c897170ff951c7fd0ee9da25f97ff787396 (patch)
tree       3e69cd2d5201944482407b86b4854fe75ab34f12 /sysdeps/unix
parent     bc49a7afd38c1bd00f0ad9fd6592a5959d5ba72e (diff)
powerpc: Enforce compiler barriers on hardware transactions
Work around a GCC behavior with the hardware transactional memory built-ins. GCC does not treat the PowerPC HTM built-ins (__builtin_tbegin, __builtin_tend and __builtin_tabort) as compiler barriers, so it may move memory accesses across the transaction boundaries and change the atomicity of the transaction.
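As an illustration of the problem, here is a minimal sketch of the lock-elision pattern used in the files below; the function name try_elide and the abort code 0xff are hypothetical, and the sketch assumes GCC with -mhtm on a POWER8 or newer target. The read of *lock has to happen inside the transaction, which is why the built-ins must act as compiler barriers:

    /* Hypothetical sketch, not glibc code.  If the compiler were free to
       hoist the load of *lock above __builtin_tbegin, a concurrently held
       lock could go undetected and the elided critical section would lose
       its atomicity.  */
    static int
    try_elide (int *lock)
    {
      if (__builtin_tbegin (0))        /* Transaction started.  */
        {
          if (*lock == 0)              /* Read inside the transaction.  */
            return 1;                  /* Lock is free: run elided.  */
          __builtin_tabort (0xff);     /* Lock busy: abort, fall back.  */
        }
      return 0;                        /* Failed to start or aborted.  */
    }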
Diffstat (limited to 'sysdeps/unix')
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/elision-lock.c     |  4
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/elision-trylock.c  |  6
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/elision-unlock.c   |  2
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/htm.h              | 39
4 files changed, 41 insertions(+), 10 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
index 830d2cc..dd1e4c3 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
@@ -52,12 +52,12 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
for (int i = aconf.try_tbegin; i > 0; i--)
{
- if (__builtin_tbegin (0))
+ if (__libc_tbegin (0))
{
if (*lock == 0)
return 0;
/* Lock was busy. Fall back to normal locking. */
- __builtin_tabort (_ABORT_LOCK_BUSY);
+ __libc_tabort (_ABORT_LOCK_BUSY);
}
else
{
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
index 9263f1d..0807a6a 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
@@ -31,7 +31,7 @@ int
__lll_trylock_elision (int *futex, short *adapt_count)
{
/* Implement POSIX semantics by forbiding nesting elided trylocks. */
- __builtin_tabort (_ABORT_NESTED_TRYLOCK);
+ __libc_tabort (_ABORT_NESTED_TRYLOCK);
/* Only try a transaction if it's worth it. */
if (*adapt_count > 0)
@@ -39,14 +39,14 @@ __lll_trylock_elision (int *futex, short *adapt_count)
goto use_lock;
}
- if (__builtin_tbegin (0))
+ if (__libc_tbegin (0))
{
if (*futex == 0)
return 0;
/* Lock was busy. This is never a nested transaction.
End it, and set the adapt count. */
- __builtin_tend (0);
+ __libc_tend (0);
if (aconf.skip_lock_busy > 0)
*adapt_count = aconf.skip_lock_busy;
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
index 2561b1d..43c5a67 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
@@ -25,7 +25,7 @@ __lll_unlock_elision (int *lock, short *adapt_count, int pshared)
{
/* When the lock was free we're in a transaction. */
if (*lock == 0)
- __builtin_tend (0);
+ __libc_tend (0);
else
{
lll_unlock ((*lock), pshared);
diff --git a/sysdeps/unix/sysv/linux/powerpc/htm.h b/sysdeps/unix/sysv/linux/powerpc/htm.h
index b18b47e..16b2237 100644
--- a/sysdeps/unix/sysv/linux/powerpc/htm.h
+++ b/sysdeps/unix/sysv/linux/powerpc/htm.h
@@ -118,13 +118,44 @@
__ret; \
})
-#define __builtin_tbegin(tdb) _tbegin ()
-#define __builtin_tend(nested) _tend ()
-#define __builtin_tabort(abortcode) _tabort (abortcode)
-#define __builtin_get_texasru() _texasru ()
+#define __libc_tbegin(tdb) _tbegin ()
+#define __libc_tend(nested) _tend ()
+#define __libc_tabort(abortcode) _tabort (abortcode)
+#define __builtin_get_texasru() _texasru ()
#else
# include <htmintrin.h>
+
+# ifdef __TM_FENCE__
+ /* New GCC behavior. */
+# define __libc_tbegin(R) __builtin_tbegin (R);
+# define __libc_tend(R) __builtin_tend (R);
+# define __libc_tabort(R) __builtin_tabort (R);
+# else
+ /* Workaround an old GCC behavior. Earlier releases of GCC 4.9 and 5.0,
+ didn't use to treat __builtin_tbegin, __builtin_tend and
+ __builtin_tabort as compiler barriers, moving instructions into and
+ out the transaction.
+ Remove this when glibc drops support for GCC 5.0. */
+# define __libc_tbegin(R) \
+ ({ __asm__ volatile("" ::: "memory"); \
+ unsigned int __ret = __builtin_tbegin (R); \
+ __asm__ volatile("" ::: "memory"); \
+ __ret; \
+ })
+# define __libc_tabort(R) \
+ ({ __asm__ volatile("" ::: "memory"); \
+ unsigned int __ret = __builtin_tabort (R); \
+ __asm__ volatile("" ::: "memory"); \
+ __ret; \
+ })
+# define __libc_tend(R) \
+ ({ __asm__ volatile("" ::: "memory"); \
+ unsigned int __ret = __builtin_tend (R); \
+ __asm__ volatile("" ::: "memory"); \
+ __ret; \
+ })
+# endif /* __TM_FENCE__ */
#endif /* __HTM__ */
#endif /* __ASSEMBLER__ */
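For reference, __TM_FENCE__ is the macro newer GCC predefines once __builtin_tbegin, __builtin_tend and __builtin_tabort already behave as memory fences; the htm.h hunk above keys on it to decide whether the explicit asm barriers are needed. A hypothetical stand-alone probe of the same condition (the macro name HTM_BUILTINS_ARE_BARRIERS is made up for this sketch):

    /* Hypothetical probe, not part of the patch.  When GCC treats the HTM
       built-ins as compiler barriers it defines __TM_FENCE__, so the empty
       asm statements with a "memory" clobber are only required when that
       macro is absent.  */
    #if defined (__HTM__) && defined (__TM_FENCE__)
    # define HTM_BUILTINS_ARE_BARRIERS 1
    #else
    # define HTM_BUILTINS_ARE_BARRIERS 0
    #endif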