aboutsummaryrefslogtreecommitdiff
path: root/sysdeps/unix/sysv/linux/s390/elision-trylock.c
diff options
context:
space:
mode:
authorStefan Liebler <stli@linux.vnet.ibm.com>2016-12-20 15:12:48 +0100
committerStefan Liebler <stli@linux.vnet.ibm.com>2016-12-20 15:12:48 +0100
commit8bfc4a2ab4bebdf86c151665aae8a266e2f18fb4 (patch)
treeb2b0f195981735dd34afe82f1cd524ba73d11285 /sysdeps/unix/sysv/linux/s390/elision-trylock.c
parentc813dae5d8e469262f96b1cda0191ea076f10809 (diff)
downloadglibc-8bfc4a2ab4bebdf86c151665aae8a266e2f18fb4.zip
glibc-8bfc4a2ab4bebdf86c151665aae8a266e2f18fb4.tar.gz
glibc-8bfc4a2ab4bebdf86c151665aae8a266e2f18fb4.tar.bz2
S390: Use own tbegin macro instead of __builtin_tbegin.
This patch defines __libc_tbegin, __libc_tend, __libc_tabort and __libc_tx_nesting_depth in htm.h which replaces the direct usage of equivalent gcc builtins. We have to use our own inline assembly instead of __builtin_tbegin, as tbegin has to filter program interruptions which can't be done with the builtin. Before this change, e.g. a segmentation fault within a transaction leads to a coredump where the instruction pointer points behind the tbegin instruction instead of the real failing one. Now the transaction aborts and the code should be reexecuted by the fallback path without transactions. The segmentation fault will produce a coredump with the real failing instruction. The fpc is not saved before starting the transaction. If e.g. the rounding mode is changed and the transaction aborts afterwards, the builtin will not restore the fpc. This is now done with the __libc_tbegin macro. Now the call-saved fprs have to be saved / restored in the __libc_tbegin macro. Using the gcc builtin had forced the saving / restoring of fprs at begin / end of e.g. the __lll_lock_elision function. The new macro saves these fprs before the tbegin instruction and only restores them on a transaction abort. Restoring is not needed on a successfully started transaction. The used inline assembly does not clobber the fprs / vrs! Clobbering the latter ones would force the compiler to save / restore the call-saved fprs as those overlap with the vrs, but they only need to be restored if the transaction fails. Thus the user of the tbegin macros has to compile the file / function with -msoft-float. It prevents gcc from using fprs / vrs. ChangeLog: * sysdeps/unix/sysv/linux/s390/Makefile (elision-CFLAGS): Add -msoft-float. * sysdeps/unix/sysv/linux/s390/htm.h: New File. * sysdeps/unix/sysv/linux/s390/elision-lock.c: Use __libc_t* transaction macros instead of __builtin_t*. * sysdeps/unix/sysv/linux/s390/elision-trylock.c: Likewise. * sysdeps/unix/sysv/linux/s390/elision-unlock.c: Likewise.
Diffstat (limited to 'sysdeps/unix/sysv/linux/s390/elision-trylock.c')
-rw-r--r--sysdeps/unix/sysv/linux/s390/elision-trylock.c16
1 files changed, 6 insertions, 10 deletions
diff --git a/sysdeps/unix/sysv/linux/s390/elision-trylock.c b/sysdeps/unix/sysv/linux/s390/elision-trylock.c
index a3252b8..e21fc26 100644
--- a/sysdeps/unix/sysv/linux/s390/elision-trylock.c
+++ b/sysdeps/unix/sysv/linux/s390/elision-trylock.c
@@ -19,7 +19,7 @@
#include <pthread.h>
#include <pthreadP.h>
#include <lowlevellock.h>
-#include <htmintrin.h>
+#include <htm.h>
#include <elision-conf.h>
#define aconf __elision_aconf
@@ -30,15 +30,11 @@
int
__lll_trylock_elision (int *futex, short *adapt_count)
{
- __asm__ __volatile__ (".machinemode \"zarch_nohighgprs\"\n\t"
- ".machine \"all\""
- : : : "memory");
-
/* Implement POSIX semantics by forbiding nesting elided trylocks.
Sorry. After the abort the code is re-executed
non transactional and if the lock was already locked
return an error. */
- if (__builtin_tx_nesting_depth () > 0)
+ if (__libc_tx_nesting_depth () > 0)
{
/* Note that this abort may terminate an outermost transaction that
was created outside glibc.
@@ -46,7 +42,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
them to use the default lock instead of retrying transactions
until their try_tbegin is zero.
*/
- __builtin_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
+ __libc_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
}
/* Only try a transaction if it's worth it. See __lll_lock_elision for
@@ -54,17 +50,17 @@ __lll_trylock_elision (int *futex, short *adapt_count)
just a hint. */
if (atomic_load_relaxed (adapt_count) <= 0)
{
- unsigned status;
+ int status;
if (__builtin_expect
- ((status = __builtin_tbegin ((void *)0)) == _HTM_TBEGIN_STARTED, 1))
+ ((status = __libc_tbegin ((void *) 0)) == _HTM_TBEGIN_STARTED, 1))
{
if (*futex == 0)
return 0;
/* Lock was busy. Fall back to normal locking. */
/* Since we are in a non-nested transaction there is no need to abort,
which is expensive. */
- __builtin_tend ();
+ __libc_tend ();
/* Note: Changing the adapt_count here might abort a transaction on a
different cpu, but that could happen anyway when the futex is
acquired, so there's no need to check the nesting depth here.