aboutsummaryrefslogtreecommitdiff
path: root/sysdeps
diff options
context:
space:
mode:
authorNoah Goldstein <goldstein.w.n@gmail.com>2022-09-30 21:13:27 -0700
committerNoah Goldstein <goldstein.w.n@gmail.com>2022-10-03 14:13:49 -0700
commit653c12c7d880340462bd963752619a7a61bcb4e3 (patch)
tree67c128963342987f4023beba5f752f20c59c8863 /sysdeps
parent10c779f44ab3e9525f2d2a3c9a0aa9dedea5f1ec (diff)
downloadglibc-653c12c7d880340462bd963752619a7a61bcb4e3.zip
glibc-653c12c7d880340462bd963752619a7a61bcb4e3.tar.gz
glibc-653c12c7d880340462bd963752619a7a61bcb4e3.tar.bz2
x86: Cleanup pthread_spin_{try}lock.S
Save a jmp on the lock path coming from an initial failure in pthread_spin_lock.S. This costs 4 bytes of code, but since the function still fits in the same number of 16-byte blocks (default function alignment) it does not affect the total binary size of libc.so (unchanged after this commit). pthread_spin_trylock was using a CAS where a simple xchg suffices; the CAS is often more expensive. Full check passes on x86-64.
Diffstat (limited to 'sysdeps')
-rw-r--r--sysdeps/x86_64/nptl/pthread_spin_lock.S23
-rw-r--r--sysdeps/x86_64/nptl/pthread_spin_trylock.S18
2 files changed, 29 insertions, 12 deletions
diff --git a/sysdeps/x86_64/nptl/pthread_spin_lock.S b/sysdeps/x86_64/nptl/pthread_spin_lock.S
index 44b837d..1e09e59 100644
--- a/sysdeps/x86_64/nptl/pthread_spin_lock.S
+++ b/sysdeps/x86_64/nptl/pthread_spin_lock.S
@@ -19,18 +19,27 @@
#include <shlib-compat.h>
ENTRY(__pthread_spin_lock)
-1: LOCK
- decl 0(%rdi)
- jne 2f
+ /* Always return zero. */
xor %eax, %eax
+ LOCK
+ decl 0(%rdi)
+ jne 1f
ret
.align 16
-2: rep
+1:
+ /* `rep nop` == `pause`. */
+ rep
nop
- cmpl $0, 0(%rdi)
- jg 1b
- jmp 2b
+ cmpl %eax, 0(%rdi)
+ jle 1b
+ /* Just repeat the `lock decl` logic here. The code-size saving
+ from jumping back to entry doesn't change how many 16-byte
+ chunks (default function alignment) the code fits in. */
+ LOCK
+ decl 0(%rdi)
+ jne 1b
+ ret
END(__pthread_spin_lock)
versioned_symbol (libc, __pthread_spin_lock, pthread_spin_lock, GLIBC_2_34)
diff --git a/sysdeps/x86_64/nptl/pthread_spin_trylock.S b/sysdeps/x86_64/nptl/pthread_spin_trylock.S
index fffdb27..a1f97cb 100644
--- a/sysdeps/x86_64/nptl/pthread_spin_trylock.S
+++ b/sysdeps/x86_64/nptl/pthread_spin_trylock.S
@@ -20,13 +20,21 @@
#include <shlib-compat.h>
ENTRY(__pthread_spin_trylock)
- movl $1, %eax
xorl %ecx, %ecx
- lock
- cmpxchgl %ecx, (%rdi)
+ /* xchg has implicit LOCK prefix. */
+ xchgl %ecx, (%rdi)
+
+ /* Branch on result. The expectation is that users of trylock will
+ branch on success/failure, so this branch can be used to
+ predict the coming branch. It has the benefit of
+ breaking the likely expensive memory dependency on (%rdi). */
+ cmpl $1, %ecx
+ jnz 1f
+ xorl %eax, %eax
+ ret
+1:
movl $EBUSY, %eax
- cmovel %ecx, %eax
- retq
+ ret
END(__pthread_spin_trylock)
versioned_symbol (libc, __pthread_spin_trylock, pthread_spin_trylock,
GLIBC_2_34)