aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAdhemerval Zanella <adhemerval.zanella@linaro.org>2018-05-09 10:32:25 -0300
committerAdhemerval Zanella <adhemerval.zanella@linaro.org>2019-01-03 18:38:14 -0200
commit85c828a4626adda906f8844dc9c5a166c72d4f7d (patch)
tree034152296ec94a9e33e360833068b01515fea4c7
parentd0d7f85f66a19c3110d550c3c24247f7b4f2c58a (diff)
downloadglibc-85c828a4626adda906f8844dc9c5a166c72d4f7d.zip
glibc-85c828a4626adda906f8844dc9c5a166c72d4f7d.tar.gz
glibc-85c828a4626adda906f8844dc9c5a166c72d4f7d.tar.bz2
x86_64: Remove wrong THREAD_ATOMIC_* macros
The x86 defines optimized THREAD_ATOMIC_* macros that always reference the current thread instead of the one indicated by the input 'descr' argument. They work as long as the input is the self thread pointer, but they generate wrong code if the intent is to set a bit atomically from another thread. This is not an issue for current GLIBC usage; however, the new cancellation code expects some synchronization code to atomically set bits from different threads. The generic code generates an additional load to reference the TLS segment; for instance the code: THREAD_ATOMIC_BIT_SET (THREAD_SELF, cancelhandling, CANCELED_BIT); Compiles to: lock;orl $4, %fs:776 Whereas with this patch it now compiles to: mov %fs:16,%rax lock;orl $4, 776(%rax) If some usage indeed proves to be a hotspot, we can add an extra macro with a more descriptive name (THREAD_ATOMIC_BIT_SET_SELF for instance) that x86_64 might optimize. Checked on x86_64-linux-gnu. * sysdeps/x86_64/nptl/tls.h (THREAD_ATOMIC_CMPXCHG_VAL, THREAD_ATOMIC_AND, THREAD_ATOMIC_BIT_SET): Remove macros.
-rw-r--r--ChangeLog3
-rw-r--r--sysdeps/x86_64/nptl/tls.h37
2 files changed, 3 insertions, 37 deletions
diff --git a/ChangeLog b/ChangeLog
index b695780..8da62f1 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,8 @@
2019-01-03 Adhemerval Zanella <adhemerval.zanella@linaro.org>
+ * sysdeps/x86_64/nptl/tls.h (THREAD_ATOMIC_CMPXCHG_VAL,
+ THREAD_ATOMIC_AND, THREAD_ATOMIC_BIT_SET): Remove macros.
+
* debug/tst-backtrace5.c (handle_signal): Avoid cancellable wrappers
in backtrace analysis.
* nptl/tst-cancel4.c (tf_write): Handle cancelled syscall with
diff --git a/sysdeps/x86_64/nptl/tls.h b/sysdeps/x86_64/nptl/tls.h
index cb7f07a..e25430a 100644
--- a/sysdeps/x86_64/nptl/tls.h
+++ b/sysdeps/x86_64/nptl/tls.h
@@ -306,43 +306,6 @@ _Static_assert (offsetof (tcbhead_t, __glibc_unused2) == 0x80,
}})
-/* Atomic compare and exchange on TLS, returning old value. */
-# define THREAD_ATOMIC_CMPXCHG_VAL(descr, member, newval, oldval) \
- ({ __typeof (descr->member) __ret; \
- __typeof (oldval) __old = (oldval); \
- if (sizeof (descr->member) == 4) \
- asm volatile (LOCK_PREFIX "cmpxchgl %2, %%fs:%P3" \
- : "=a" (__ret) \
- : "0" (__old), "r" (newval), \
- "i" (offsetof (struct pthread, member))); \
- else \
- /* Not necessary for other sizes in the moment. */ \
- abort (); \
- __ret; })
-
-
-/* Atomic logical and. */
-# define THREAD_ATOMIC_AND(descr, member, val) \
- (void) ({ if (sizeof ((descr)->member) == 4) \
- asm volatile (LOCK_PREFIX "andl %1, %%fs:%P0" \
- :: "i" (offsetof (struct pthread, member)), \
- "ir" (val)); \
- else \
- /* Not necessary for other sizes in the moment. */ \
- abort (); })
-
-
-/* Atomic set bit. */
-# define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
- (void) ({ if (sizeof ((descr)->member) == 4) \
- asm volatile (LOCK_PREFIX "orl %1, %%fs:%P0" \
- :: "i" (offsetof (struct pthread, member)), \
- "ir" (1 << (bit))); \
- else \
- /* Not necessary for other sizes in the moment. */ \
- abort (); })
-
-
/* Set the stack guard field in TCB head. */
# define THREAD_SET_STACK_GUARD(value) \
THREAD_SETMEM (THREAD_SELF, header.stack_guard, value)