author     Fangrui Song <maskray@google.com>  2022-04-27 14:01:19 -0700
committer  Fangrui Song <maskray@google.com>  2022-04-27 14:01:19 -0700
commit     15c4b8cbcc596700c64deb4047a3ba7f4df7da1b (patch)
tree       5b2d89c563f9167a1cd84dc7cea767925fe2b6b6
parent     da683b1f1037bf63b0c42f79a8a46f58670b2fed (diff)
Remove x86_64 specific lowlevellock/cancellation
The x86_64-specific implementation has CFI directives like
`.cfi_adjust_cfa_offset 128` which are incorrect when RBP is used as the
canonical frame address.  This follows the spirit of the following two
commits by removing the x86_64-specific implementation; the generic
implementation will be used.

* eb76e5b465a4b7b569cde4b4f57d1fcb4695c1c6 ("nptl: Reinstate
  pthread_timedjoin_np as a cancellation point (BZ#24215)")
* c50e1c263ec15e98da3235e663049156fd1afcfa ("x86: Remove arch-specific low
  level lock implementation")
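
For illustration only (this sketch is not part of the commit, and the
function is made up): once a compiler builds an RBP-based frame it defines
the CFA relative to %rbp, so a hard-coded `.cfi_adjust_cfa_offset 128`, as
emitted by the removed inline-asm lock macros, describes a CFA change that
never happened:

	.text
	.globl	cfa_example
	.type	cfa_example, @function
cfa_example:
	.cfi_startproc
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp	# CFA = %rbp + 16 from here on; %rsp no
					# longer appears in the CFA rule.
	subq	$128, %rsp		# e.g. skipping the red zone before a call
	# Emitting ".cfi_adjust_cfa_offset 128" here, as the removed macros did,
	# would change the rule to CFA = %rbp + 144 although %rbp never moved,
	# so the unwinder would compute a bogus CFA.  With an %rbp-based frame
	# the %rsp adjustment needs no CFI directive at all; in the generic C
	# implementation the compiler emits whatever CFI is actually required.
	addq	$128, %rsp
	popq	%rbp
	.cfi_def_cfa %rsp, 8
	.cfi_restore %rbp
	ret
	.cfi_endproc
	.size	cfa_example, .-cfa_example
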
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/cancellation.S       | 115
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S  |  21
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S  |  19
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S |  21
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lll_timedlock_wait.c |   1
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lll_timedwait_tid.c  |   1
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lowlevellock.S       | 463
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lowlevellock.h       | 273
-rw-r--r--  sysdeps/x86_64/nptl/pthread_spin_lock.S             |   1
9 files changed, 0 insertions, 915 deletions
diff --git a/sysdeps/unix/sysv/linux/x86_64/cancellation.S b/sysdeps/unix/sysv/linux/x86_64/cancellation.S
deleted file mode 100644
index f3fb19d..0000000
--- a/sysdeps/unix/sysv/linux/x86_64/cancellation.S
+++ /dev/null
@@ -1,115 +0,0 @@
-/* Copyright (C) 2009-2018 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-#include <tcb-offsets.h>
-#include <kernel-features.h>
-#include "lowlevellock.h"
-
-#define PTHREAD_UNWIND JUMPTARGET(__pthread_unwind)
-#if IS_IN (libpthread)
-# if defined SHARED && !defined NO_HIDDEN
-# undef PTHREAD_UNWIND
-# define PTHREAD_UNWIND __GI___pthread_unwind
-# endif
-#else
-# ifndef SHARED
- .weak __pthread_unwind
-# endif
-#endif
-
-
-#ifdef __ASSUME_PRIVATE_FUTEX
-# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
- movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
-#else
-# if FUTEX_WAIT == 0
-# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
- movl %fs:PRIVATE_FUTEX, reg
-# else
-# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
- movl %fs:PRIVATE_FUTEX, reg ; \
- orl $FUTEX_WAIT, reg
-# endif
-#endif
-
-/* It is crucial that the functions in this file don't modify registers
- other than %rax and %r11. The syscall wrapper code depends on this
- because it doesn't explicitly save the other registers which hold
- relevant values. */
- .text
-
- .hidden __pthread_enable_asynccancel
-ENTRY(__pthread_enable_asynccancel)
- movl %fs:CANCELHANDLING, %eax
-2: movl %eax, %r11d
- orl $TCB_CANCELTYPE_BITMASK, %r11d
- cmpl %eax, %r11d
- je 1f
-
- lock
- cmpxchgl %r11d, %fs:CANCELHANDLING
- jnz 2b
-
- andl $(TCB_CANCELSTATE_BITMASK|TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK|TCB_EXITING_BITMASK|TCB_CANCEL_RESTMASK|TCB_TERMINATED_BITMASK), %r11d
- cmpl $(TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK), %r11d
- je 3f
-
-1: ret
-
-3: subq $8, %rsp
- cfi_adjust_cfa_offset(8)
- LP_OP(mov) $TCB_PTHREAD_CANCELED, %fs:RESULT
- lock
- orl $TCB_EXITING_BITMASK, %fs:CANCELHANDLING
- mov %fs:CLEANUP_JMP_BUF, %RDI_LP
- call PTHREAD_UNWIND
- hlt
-END(__pthread_enable_asynccancel)
-
-
- .hidden __pthread_disable_asynccancel
-ENTRY(__pthread_disable_asynccancel)
- testl $TCB_CANCELTYPE_BITMASK, %edi
- jnz 1f
-
- movl %fs:CANCELHANDLING, %eax
-2: movl %eax, %r11d
- andl $~TCB_CANCELTYPE_BITMASK, %r11d
- lock
- cmpxchgl %r11d, %fs:CANCELHANDLING
- jnz 2b
-
- movl %r11d, %eax
-3: andl $(TCB_CANCELING_BITMASK|TCB_CANCELED_BITMASK), %eax
- cmpl $TCB_CANCELING_BITMASK, %eax
- je 4f
-1: ret
-
- /* Performance doesn't matter in this loop. We will
- delay until the thread is canceled. And we will unlikely
- enter the loop twice. */
-4: mov %fs:0, %RDI_LP
- movl $__NR_futex, %eax
- xorq %r10, %r10
- addq $CANCELHANDLING, %rdi
- LOAD_PRIVATE_FUTEX_WAIT (%esi)
- syscall
- movl %fs:CANCELHANDLING, %eax
- jmp 3b
-END(__pthread_disable_asynccancel)
diff --git a/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S b/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S
deleted file mode 100644
index ed7af0d..0000000
--- a/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (C) 2009-2018 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#define __pthread_enable_asynccancel __libc_enable_asynccancel
-#define __pthread_disable_asynccancel __libc_disable_asynccancel
-#include "cancellation.S"
diff --git a/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S b/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S
deleted file mode 100644
index a1ddaaf..0000000
--- a/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include "lowlevellock.S"
diff --git a/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S b/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S
deleted file mode 100644
index d0f0ee4..0000000
--- a/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (C) 2009-2018 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#define __pthread_enable_asynccancel __librt_enable_asynccancel
-#define __pthread_disable_asynccancel __librt_disable_asynccancel
-#include "cancellation.S"
diff --git a/sysdeps/unix/sysv/linux/x86_64/lll_timedlock_wait.c b/sysdeps/unix/sysv/linux/x86_64/lll_timedlock_wait.c
deleted file mode 100644
index f6875b8..0000000
--- a/sysdeps/unix/sysv/linux/x86_64/lll_timedlock_wait.c
+++ /dev/null
@@ -1 +0,0 @@
-/* __lll_timedlock_wait is in lowlevellock.S. */
diff --git a/sysdeps/unix/sysv/linux/x86_64/lll_timedwait_tid.c b/sysdeps/unix/sysv/linux/x86_64/lll_timedwait_tid.c
deleted file mode 100644
index 43900c6..0000000
--- a/sysdeps/unix/sysv/linux/x86_64/lll_timedwait_tid.c
+++ /dev/null
@@ -1 +0,0 @@
-/* __lll_timedwait_tid is in lowlevellock.S. */
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
deleted file mode 100644
index 92561e1..0000000
--- a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
+++ /dev/null
@@ -1,463 +0,0 @@
-/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-#include <pthread-errnos.h>
-#include <kernel-features.h>
-#include <lowlevellock.h>
-
-#include <stap-probe.h>
-
- .text
-
-#ifdef __ASSUME_PRIVATE_FUTEX
-# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
- movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
-# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
- movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
-# define LOAD_FUTEX_WAIT(reg) \
- xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
-# define LOAD_FUTEX_WAIT_ABS(reg) \
- xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
-# define LOAD_FUTEX_WAKE(reg) \
- xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
-#else
-# if FUTEX_WAIT == 0
-# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
- movl %fs:PRIVATE_FUTEX, reg
-# else
-# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
- movl %fs:PRIVATE_FUTEX, reg ; \
- orl $FUTEX_WAIT, reg
-# endif
-# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
- movl %fs:PRIVATE_FUTEX, reg ; \
- orl $FUTEX_WAKE, reg
-# if FUTEX_WAIT == 0
-# define LOAD_FUTEX_WAIT(reg) \
- xorl $FUTEX_PRIVATE_FLAG, reg ; \
- andl %fs:PRIVATE_FUTEX, reg
-# else
-# define LOAD_FUTEX_WAIT(reg) \
- xorl $FUTEX_PRIVATE_FLAG, reg ; \
- andl %fs:PRIVATE_FUTEX, reg ; \
- orl $FUTEX_WAIT, reg
-# endif
-# define LOAD_FUTEX_WAIT_ABS(reg) \
- xorl $FUTEX_PRIVATE_FLAG, reg ; \
- andl %fs:PRIVATE_FUTEX, reg ; \
- orl $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
-# define LOAD_FUTEX_WAKE(reg) \
- xorl $FUTEX_PRIVATE_FLAG, reg ; \
- andl %fs:PRIVATE_FUTEX, reg ; \
- orl $FUTEX_WAKE, reg
-#endif
-
-
- .globl __lll_lock_wait_private
- .type __lll_lock_wait_private,@function
- .hidden __lll_lock_wait_private
- .align 16
-__lll_lock_wait_private:
- cfi_startproc
- pushq %r10
- cfi_adjust_cfa_offset(8)
- pushq %rdx
- cfi_adjust_cfa_offset(8)
- cfi_offset(%r10, -16)
- cfi_offset(%rdx, -24)
- xorq %r10, %r10 /* No timeout. */
- movl $2, %edx
- LOAD_PRIVATE_FUTEX_WAIT (%esi)
-
- cmpl %edx, %eax /* NB: %edx == 2 */
- jne 2f
-
-1: LIBC_PROBE (lll_lock_wait_private, 1, %rdi)
- movl $SYS_futex, %eax
- syscall
-
-2: movl %edx, %eax
- xchgl %eax, (%rdi) /* NB: lock is implied */
-
- testl %eax, %eax
- jnz 1b
-
- popq %rdx
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%rdx)
- popq %r10
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r10)
- retq
- cfi_endproc
- .size __lll_lock_wait_private,.-__lll_lock_wait_private
-
-#if !IS_IN (libc)
- .globl __lll_lock_wait
- .type __lll_lock_wait,@function
- .hidden __lll_lock_wait
- .align 16
-__lll_lock_wait:
- cfi_startproc
- pushq %r10
- cfi_adjust_cfa_offset(8)
- pushq %rdx
- cfi_adjust_cfa_offset(8)
- cfi_offset(%r10, -16)
- cfi_offset(%rdx, -24)
- xorq %r10, %r10 /* No timeout. */
- movl $2, %edx
- LOAD_FUTEX_WAIT (%esi)
-
- cmpl %edx, %eax /* NB: %edx == 2 */
- jne 2f
-
-1: LIBC_PROBE (lll_lock_wait, 2, %rdi, %rsi)
- movl $SYS_futex, %eax
- syscall
-
-2: movl %edx, %eax
- xchgl %eax, (%rdi) /* NB: lock is implied */
-
- testl %eax, %eax
- jnz 1b
-
- popq %rdx
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%rdx)
- popq %r10
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r10)
- retq
- cfi_endproc
- .size __lll_lock_wait,.-__lll_lock_wait
-
- /* %rdi: futex
- %rsi: flags
- %rdx: timeout
- %eax: futex value
- */
- .globl __lll_timedlock_wait
- .type __lll_timedlock_wait,@function
- .hidden __lll_timedlock_wait
- .align 16
-__lll_timedlock_wait:
- cfi_startproc
-# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
-# ifdef PIC
- cmpl $0, __have_futex_clock_realtime(%rip)
-# else
- cmpl $0, __have_futex_clock_realtime
-# endif
- je .Lreltmo
-# endif
-
- cmpq $0, (%rdx)
- js 5f
-
- pushq %r9
- cfi_adjust_cfa_offset(8)
- cfi_rel_offset(%r9, 0)
-
- movq %rdx, %r10
- movl $0xffffffff, %r9d
- LOAD_FUTEX_WAIT_ABS (%esi)
-
- movl $2, %edx
- cmpl %edx, %eax
- jne 2f
-
-1: movl $SYS_futex, %eax
- movl $2, %edx
- syscall
-
-2: xchgl %edx, (%rdi) /* NB: lock is implied */
-
- testl %edx, %edx
- jz 3f
-
- cmpl $-ETIMEDOUT, %eax
- je 4f
- cmpl $-EINVAL, %eax
- jne 1b
-4: movl %eax, %edx
- negl %edx
-
-3: movl %edx, %eax
- popq %r9
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r9)
- retq
-
-5: movl $ETIMEDOUT, %eax
- retq
-
-# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
-.Lreltmo:
- /* Check for a valid timeout value. */
- cmpq $1000000000, 8(%rdx)
- jae 3f
-
- pushq %r8
- cfi_adjust_cfa_offset(8)
- pushq %r9
- cfi_adjust_cfa_offset(8)
- pushq %r12
- cfi_adjust_cfa_offset(8)
- pushq %r13
- cfi_adjust_cfa_offset(8)
- pushq %r14
- cfi_adjust_cfa_offset(8)
- cfi_offset(%r8, -16)
- cfi_offset(%r9, -24)
- cfi_offset(%r12, -32)
- cfi_offset(%r13, -40)
- cfi_offset(%r14, -48)
- pushq %rsi
- cfi_adjust_cfa_offset(8)
-
- /* Stack frame for the timespec and timeval structs. */
- subq $24, %rsp
- cfi_adjust_cfa_offset(24)
-
- movq %rdi, %r12
- movq %rdx, %r13
-
- movl $2, %edx
- xchgl %edx, (%r12)
-
- testl %edx, %edx
- je 6f
-
-1:
- /* Get current time. */
- movq %rsp, %rdi
- xorl %esi, %esi
- /* This call works because we directly jump to a system call entry
- which preserves all the registers. */
- call JUMPTARGET(__gettimeofday)
-
- /* Compute relative timeout. */
- movq 8(%rsp), %rax
- movl $1000, %edi
- mul %rdi /* Milli seconds to nano seconds. */
- movq (%r13), %rdi
- movq 8(%r13), %rsi
- subq (%rsp), %rdi
- subq %rax, %rsi
- jns 4f
- addq $1000000000, %rsi
- decq %rdi
-4: testq %rdi, %rdi
- js 2f /* Time is already up. */
-
- /* Store relative timeout. */
- movq %rdi, (%rsp)
- movq %rsi, 8(%rsp)
-
- /* Futex call. */
- movl $2, %edx
- movl $1, %eax
- movq %rsp, %r10
- movl 24(%rsp), %esi
- LOAD_FUTEX_WAIT (%esi)
- movq %r12, %rdi
- movl $SYS_futex, %eax
- syscall
-
- /* NB: %edx == 2 */
- xchgl %edx, (%r12)
-
- testl %edx, %edx
- je 6f
-
- cmpl $-ETIMEDOUT, %eax
- jne 1b
-2: movl $ETIMEDOUT, %edx
-
-6: addq $32, %rsp
- cfi_adjust_cfa_offset(-32)
- popq %r14
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r14)
- popq %r13
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r13)
- popq %r12
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r12)
- popq %r9
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r9)
- popq %r8
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r8)
- movl %edx, %eax
- retq
-
-3: movl $EINVAL, %eax
- retq
-# endif
- cfi_endproc
- .size __lll_timedlock_wait,.-__lll_timedlock_wait
-#endif
-
-
- .globl __lll_unlock_wake_private
- .type __lll_unlock_wake_private,@function
- .hidden __lll_unlock_wake_private
- .align 16
-__lll_unlock_wake_private:
- cfi_startproc
- pushq %rsi
- cfi_adjust_cfa_offset(8)
- pushq %rdx
- cfi_adjust_cfa_offset(8)
- cfi_offset(%rsi, -16)
- cfi_offset(%rdx, -24)
-
- movl $0, (%rdi)
- LOAD_PRIVATE_FUTEX_WAKE (%esi)
- movl $1, %edx /* Wake one thread. */
- movl $SYS_futex, %eax
- syscall
-
- popq %rdx
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%rdx)
- popq %rsi
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%rsi)
- retq
- cfi_endproc
- .size __lll_unlock_wake_private,.-__lll_unlock_wake_private
-
-#if !IS_IN (libc)
- .globl __lll_unlock_wake
- .type __lll_unlock_wake,@function
- .hidden __lll_unlock_wake
- .align 16
-__lll_unlock_wake:
- cfi_startproc
- pushq %rsi
- cfi_adjust_cfa_offset(8)
- pushq %rdx
- cfi_adjust_cfa_offset(8)
- cfi_offset(%rsi, -16)
- cfi_offset(%rdx, -24)
-
- movl $0, (%rdi)
- LOAD_FUTEX_WAKE (%esi)
- movl $1, %edx /* Wake one thread. */
- movl $SYS_futex, %eax
- syscall
-
- popq %rdx
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%rdx)
- popq %rsi
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%rsi)
- retq
- cfi_endproc
- .size __lll_unlock_wake,.-__lll_unlock_wake
-
- .globl __lll_timedwait_tid
- .type __lll_timedwait_tid,@function
- .hidden __lll_timedwait_tid
- .align 16
-__lll_timedwait_tid:
- cfi_startproc
- pushq %r12
- cfi_adjust_cfa_offset(8)
- pushq %r13
- cfi_adjust_cfa_offset(8)
- cfi_offset(%r12, -16)
- cfi_offset(%r13, -24)
-
- movq %rdi, %r12
- movq %rsi, %r13
-
- /* Align stack to 16 bytes when calling __gettimeofday. */
- subq $24, %rsp
- cfi_adjust_cfa_offset(24)
-
- /* Get current time. */
-2: movq %rsp, %rdi
- xorl %esi, %esi
- /* This call works because we directly jump to a system call entry
- which preserves all the registers. */
- call JUMPTARGET(__gettimeofday)
-
- /* Compute relative timeout. */
- movq 8(%rsp), %rax
- movl $1000, %edi
- mul %rdi /* Milli seconds to nano seconds. */
- movq (%r13), %rdi
- movq 8(%r13), %rsi
- subq (%rsp), %rdi
- subq %rax, %rsi
- jns 5f
- addq $1000000000, %rsi
- decq %rdi
-5: testq %rdi, %rdi
- js 6f /* Time is already up. */
-
- movq %rdi, (%rsp) /* Store relative timeout. */
- movq %rsi, 8(%rsp)
-
- movl (%r12), %edx
- testl %edx, %edx
- jz 4f
-
- movq %rsp, %r10
- /* XXX The kernel so far uses global futex for the wakeup at
- all times. */
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
-#else
- movl $FUTEX_WAIT, %esi
-#endif
- movq %r12, %rdi
- movl $SYS_futex, %eax
- syscall
-
- cmpl $0, (%rdi)
- jne 1f
-4: xorl %eax, %eax
-
-8: addq $24, %rsp
- cfi_adjust_cfa_offset(-24)
- popq %r13
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r13)
- popq %r12
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r12)
- retq
-
- cfi_adjust_cfa_offset(32)
-1: cmpq $-ETIMEDOUT, %rax
- jne 2b
-
-6: movl $ETIMEDOUT, %eax
- jmp 8b
- cfi_endproc
- .size __lll_timedwait_tid,.-__lll_timedwait_tid
-#endif
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
deleted file mode 100644
index a8bcfbe..0000000
--- a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ /dev/null
@@ -1,273 +0,0 @@
-/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#ifndef _LOWLEVELLOCK_H
-#define _LOWLEVELLOCK_H 1
-
-#include <stap-probe.h>
-
-#ifndef __ASSEMBLER__
-# include <time.h>
-# include <sys/param.h>
-# include <bits/pthreadtypes.h>
-# include <kernel-features.h>
-# include <tcb-offsets.h>
-
-# ifndef LOCK_INSTR
-# ifdef UP
-# define LOCK_INSTR /* nothing */
-# else
-# define LOCK_INSTR "lock;"
-# endif
-# endif
-#else
-# ifndef LOCK
-# ifdef UP
-# define LOCK
-# else
-# define LOCK lock
-# endif
-# endif
-#endif
-
-#include <lowlevellock-futex.h>
-
-/* XXX Remove when no assembler code uses futexes anymore. */
-#define SYS_futex __NR_futex
-
-#ifndef __ASSEMBLER__
-
-/* Initializer for lock. */
-#define LLL_LOCK_INITIALIZER (0)
-#define LLL_LOCK_INITIALIZER_LOCKED (1)
-#define LLL_LOCK_INITIALIZER_WAITERS (2)
-
-
-/* NB: in the lll_trylock macro we simply return the value in %eax
- after the cmpxchg instruction. In case the operation succeeded this
- value is zero. In case the operation failed, the cmpxchg instruction
- has loaded the current value of the memory word which is guaranteed
- to be nonzero. */
-#if !IS_IN (libc) || defined UP
-# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
-#else
-# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
- "je 0f\n\t" \
- "lock; cmpxchgl %2, %1\n\t" \
- "jmp 1f\n\t" \
- "0:\tcmpxchgl %2, %1\n\t" \
- "1:"
-#endif
-
-#define lll_trylock(futex) \
- ({ int ret; \
- __asm __volatile (__lll_trylock_asm \
- : "=a" (ret), "=m" (futex) \
- : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
- "0" (LLL_LOCK_INITIALIZER) \
- : "memory"); \
- ret; })
-
-#define lll_cond_trylock(futex) \
- ({ int ret; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
- : "=a" (ret), "=m" (futex) \
- : "r" (LLL_LOCK_INITIALIZER_WAITERS), \
- "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
- : "memory"); \
- ret; })
-
-#if !IS_IN (libc) || defined UP
-# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t" \
- "jz 24f\n\t"
-#else
-# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
- "je 0f\n\t" \
- "lock; cmpxchgl %4, %2\n\t" \
- "jnz 1f\n\t" \
- "jmp 24f\n" \
- "0:\tcmpxchgl %4, %2\n\t" \
- "jz 24f\n\t"
-#endif
-
-#define lll_lock(futex, private) \
- (void) \
- ({ int ignore1, ignore2, ignore3; \
- if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
- __asm __volatile (__lll_lock_asm_start \
- "1:\tlea %2, %%" RDI_LP "\n" \
- "2:\tsub $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset 128\n" \
- "3:\tcallq __lll_lock_wait_private\n" \
- "4:\tadd $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset -128\n" \
- "24:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
- "=a" (ignore3) \
- : "0" (1), "m" (futex), "3" (0) \
- : "cx", "r11", "cc", "memory"); \
- else \
- __asm __volatile (__lll_lock_asm_start \
- "1:\tlea %2, %%" RDI_LP "\n" \
- "2:\tsub $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset 128\n" \
- "3:\tcallq __lll_lock_wait\n" \
- "4:\tadd $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset -128\n" \
- "24:" \
- : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
- "=a" (ignore3) \
- : "1" (1), "m" (futex), "3" (0), "0" (private) \
- : "cx", "r11", "cc", "memory"); \
- }) \
-
-#define lll_cond_lock(futex, private) \
- (void) \
- ({ int ignore1, ignore2, ignore3; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
- "jz 24f\n" \
- "1:\tlea %2, %%" RDI_LP "\n" \
- "2:\tsub $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset 128\n" \
- "3:\tcallq __lll_lock_wait\n" \
- "4:\tadd $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset -128\n" \
- "24:" \
- : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
- "=a" (ignore3) \
- : "1" (2), "m" (futex), "3" (0), "0" (private) \
- : "cx", "r11", "cc", "memory"); \
- })
-
-#define lll_timedlock(futex, timeout, private) \
- ({ int result, ignore1, ignore2, ignore3; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
- "jz 24f\n" \
- "1:\tlea %4, %%" RDI_LP "\n" \
- "0:\tmov %8, %%" RDX_LP "\n" \
- "2:\tsub $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset 128\n" \
- "3:\tcallq __lll_timedlock_wait\n" \
- "4:\tadd $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset -128\n" \
- "24:" \
- : "=a" (result), "=D" (ignore1), "=S" (ignore2), \
- "=&d" (ignore3), "=m" (futex) \
- : "0" (0), "1" (1), "m" (futex), "m" (timeout), \
- "2" (private) \
- : "memory", "cx", "cc", "r10", "r11"); \
- result; })
-
-extern int __lll_timedlock_elision (int *futex, short *adapt_count,
- const struct timespec *timeout,
- int private) attribute_hidden;
-
-#define lll_timedlock_elision(futex, adapt_count, timeout, private) \
- __lll_timedlock_elision(&(futex), &(adapt_count), timeout, private)
-
-#if !IS_IN (libc) || defined UP
-# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t" \
- "je 24f\n\t"
-#else
-# define __lll_unlock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
- "je 0f\n\t" \
- "lock; decl %0\n\t" \
- "jne 1f\n\t" \
- "jmp 24f\n\t" \
- "0:\tdecl %0\n\t" \
- "je 24f\n\t"
-#endif
-
-#define lll_unlock(futex, private) \
- (void) \
- ({ int ignore; \
- if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
- __asm __volatile (__lll_unlock_asm_start \
- "1:\tlea %0, %%" RDI_LP "\n" \
- "2:\tsub $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset 128\n" \
- "3:\tcallq __lll_unlock_wake_private\n" \
- "4:\tadd $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset -128\n" \
- "24:" \
- : "=m" (futex), "=&D" (ignore) \
- : "m" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); \
- else \
- __asm __volatile (__lll_unlock_asm_start \
- "1:\tlea %0, %%" RDI_LP "\n" \
- "2:\tsub $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset 128\n" \
- "3:\tcallq __lll_unlock_wake\n" \
- "4:\tadd $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset -128\n" \
- "24:" \
- : "=m" (futex), "=&D" (ignore) \
- : "m" (futex), "S" (private) \
- : "ax", "cx", "r11", "cc", "memory"); \
- })
-
-#define lll_islocked(futex) \
- (futex != LLL_LOCK_INITIALIZER)
-
-
-/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
- wake-up when the clone terminates. The memory location contains the
- thread ID while the clone is running and is reset to zero by the kernel
- afterwards. The kernel up to version 3.16.3 does not use the private futex
- operations for futex wake-up when the clone terminates. */
-#define lll_wait_tid(tid) \
- do { \
- __typeof (tid) __tid; \
- while ((__tid = (tid)) != 0) \
- lll_futex_wait (&(tid), __tid, LLL_SHARED);\
- } while (0)
-
-extern int __lll_timedwait_tid (int *, const struct timespec *)
- attribute_hidden;
-
-/* As lll_wait_tid, but with a timeout. If the timeout occurs then return
- ETIMEDOUT. If ABSTIME is invalid, return EINVAL.
- XXX Note that this differs from the generic version in that we do the
- error checking here and not in __lll_timedwait_tid. */
-#define lll_timedwait_tid(tid, abstime) \
- ({ \
- int __result = 0; \
- if ((tid) != 0) \
- __result = __lll_timedwait_tid (&(tid), (abstime)); \
- __result; })
-
-extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
- attribute_hidden;
-
-extern int __lll_unlock_elision (int *lock, int private)
- attribute_hidden;
-
-extern int __lll_trylock_elision (int *lock, short *adapt_count)
- attribute_hidden;
-
-#define lll_lock_elision(futex, adapt_count, private) \
- __lll_lock_elision (&(futex), &(adapt_count), private)
-#define lll_unlock_elision(futex, adapt_count, private) \
- __lll_unlock_elision (&(futex), private)
-#define lll_trylock_elision(futex, adapt_count) \
- __lll_trylock_elision (&(futex), &(adapt_count))
-
-#endif /* !__ASSEMBLER__ */
-
-#endif /* lowlevellock.h */
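
(Aside, not part of the patch: the lll_lock/lll_unlock macros above are where
the hard-coded `.cfi_adjust_cfa_offset 128` came from.  Their generic
replacement is ordinary C, so the compiler emits correct CFI for every stack
adjustment it performs.  A much-simplified, self-contained sketch of that
style of futex-based lock follows; the names are invented and this is not
glibc's actual generic code.)

#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

static void
futex_wait_private (int *futex_word, int expected)
{
  /* Returns when *futex_word != expected or after a wake-up or signal.  */
  syscall (SYS_futex, futex_word, FUTEX_WAIT_PRIVATE, expected, NULL, NULL, 0);
}

static void
futex_wake_one_private (int *futex_word)
{
  syscall (SYS_futex, futex_word, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
}

/* 0 = unlocked, 1 = locked, no waiters, 2 = locked, possibly with waiters.  */
static void
lll_lock_sketch (_Atomic int *futex_word)
{
  int expected = 0;
  if (atomic_compare_exchange_strong (futex_word, &expected, 1))
    return;                        /* Fast path: the lock was free.  */
  /* Slow path: mark the lock contended and sleep until it is released.  */
  while (atomic_exchange (futex_word, 2) != 0)
    futex_wait_private ((int *) futex_word, 2);
}

static void
lll_unlock_sketch (_Atomic int *futex_word)
{
  if (atomic_exchange (futex_word, 0) == 2)
    futex_wake_one_private ((int *) futex_word);  /* A waiter may exist.  */
}
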
diff --git a/sysdeps/x86_64/nptl/pthread_spin_lock.S b/sysdeps/x86_64/nptl/pthread_spin_lock.S
index 730fd65..1468944 100644
--- a/sysdeps/x86_64/nptl/pthread_spin_lock.S
+++ b/sysdeps/x86_64/nptl/pthread_spin_lock.S
@@ -15,7 +15,6 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <lowlevellock.h>
#include <sysdep.h>
ENTRY(pthread_spin_lock)