author    Adhemerval Zanella <adhemerval.zanella@linaro.org>  2024-01-05 10:36:40 -0300
committer Adhemerval Zanella <adhemerval.zanella@linaro.org>  2024-01-09 13:55:51 -0300
commit    25f1e16ef03a6a8fb1701c4647d46c564480d88c (patch)
tree      c4f2b8148ab0fe004bae19105d8f8169ab9922a7 /sysdeps/i386
parent    b7fc4a07f206a640e6d807d72f5c1ee3ea7a25b6 (diff)
i386: Remove CET support
CET is only supported for x86_64, so this patch reverts:

- faaee1f07ed x86: Support shadow stack pointer in setjmp/longjmp.
- be9ccd27c09 i386: Add _CET_ENDBR to indirect jump targets in add_n.S/sub_n.S
- c02695d7764 x86/CET: Update vfork to prevent child return
- 5d844e1b725 i386: Enable CET support in ucontext functions
- 124bcde683 x86: Add _CET_ENDBR to functions in crti.S
- 562837c002 x86: Add _CET_ENDBR to functions in dl-tlsdesc.S
- f753fa7dea x86: Support IBT and SHSTK in Intel CET [BZ #21598]
- 825b58f3fb i386-mcount.S: Add _CET_ENDBR to _mcount and __fentry__
- 7e119cd582 i386: Use _CET_NOTRACK in i686/memcmp.S
- 177824e232 i386: Use _CET_NOTRACK in memcmp-sse4.S
- 0a899af097 i386: Use _CET_NOTRACK in memcpy-ssse3-rep.S
- 7fb613361c i386: Use _CET_NOTRACK in memcpy-ssse3.S
- 77a8ae0948 i386: Use _CET_NOTRACK in memset-sse2-rep.S
- 00e7b76a8f i386: Use _CET_NOTRACK in memset-sse2.S
- 90d15dc577 i386: Use _CET_NOTRACK in strcat-sse2.S
- f1574581c7 i386: Use _CET_NOTRACK in strcpy-sse2.S
- 4031d7484a i386/sub_n.S: Add a missing _CET_ENDBR to indirect jump target

Checked on i686-linux-gnu.
Diffstat (limited to 'sysdeps/i386')
-rw-r--r--  sysdeps/i386/__longjmp.S                        | 73
-rw-r--r--  sysdeps/i386/add_n.S                            | 25
-rw-r--r--  sysdeps/i386/bsd-_setjmp.S                      | 21
-rw-r--r--  sysdeps/i386/bsd-setjmp.S                       | 21
-rw-r--r--  sysdeps/i386/crti.S                             |  2
-rw-r--r--  sysdeps/i386/dl-tlsdesc.S                       |  3
-rw-r--r--  sysdeps/i386/dl-trampoline.S                    |  4
-rw-r--r--  sysdeps/i386/i386-mcount.S                      |  2
-rw-r--r--  sysdeps/i386/i686/add_n.S                       | 25
-rw-r--r--  sysdeps/i386/i686/memcmp.S                      |  4
-rw-r--r--  sysdeps/i386/i686/multiarch/memcmp-sse4.S       |  4
-rw-r--r--  sysdeps/i386/i686/multiarch/memcpy-ssse3-rep.S  |  8
-rw-r--r--  sysdeps/i386/i686/multiarch/memcpy-ssse3.S      |  4
-rw-r--r--  sysdeps/i386/i686/multiarch/memset-sse2-rep.S   |  4
-rw-r--r--  sysdeps/i386/i686/multiarch/memset-sse2.S       |  4
-rw-r--r--  sysdeps/i386/i686/multiarch/strcat-sse2.S       |  4
-rw-r--r--  sysdeps/i386/i686/multiarch/strcpy-sse2.S       |  4
-rw-r--r--  sysdeps/i386/setjmp.S                           | 21
-rw-r--r--  sysdeps/i386/start.S                            |  1
-rw-r--r--  sysdeps/i386/sub_n.S                            | 25
-rw-r--r--  sysdeps/i386/sysdep.h                           | 11
21 files changed, 29 insertions(+), 241 deletions(-)
diff --git a/sysdeps/i386/__longjmp.S b/sysdeps/i386/__longjmp.S
index 302c00a..bb83609 100644
--- a/sysdeps/i386/__longjmp.S
+++ b/sysdeps/i386/__longjmp.S
@@ -19,55 +19,14 @@
#include <sysdep.h>
#include <pointer_guard.h>
#include <jmpbuf-offsets.h>
-#include <jmp_buf-ssp.h>
#include <asm-syntax.h>
#include <stap-probe.h>
-/* Don't restore shadow stack register if
- 1. Shadow stack isn't enabled. Or
- 2. __longjmp is defined for __longjmp_cancel.
- */
-#if !SHSTK_ENABLED || defined __longjmp
-# undef SHADOW_STACK_POINTER_OFFSET
-#endif
-
.text
ENTRY (__longjmp)
#ifdef PTR_DEMANGLE
movl 4(%esp), %eax /* User's jmp_buf in %eax. */
-# ifdef SHADOW_STACK_POINTER_OFFSET
-# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET
- /* Check if Shadow Stack is enabled. */
- testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET
- jz L(skip_ssp)
-# else
- xorl %edx, %edx
-# endif
- /* Check and adjust the Shadow-Stack-Pointer. */
- rdsspd %edx
- /* And compare it with the saved ssp value. */
- subl SHADOW_STACK_POINTER_OFFSET(%eax), %edx
- je L(skip_ssp)
- /* Count the number of frames to adjust and adjust it
- with incssp instruction. The instruction can adjust
- the ssp by [0..255] value only thus use a loop if
- the number of frames is bigger than 255. */
- negl %edx
- shrl $2, %edx
- /* NB: We saved Shadow-Stack-Pointer of setjmp. Since we are
- restoring Shadow-Stack-Pointer of setjmp's caller, we
- need to unwind shadow stack by one more frame. */
- addl $1, %edx
- movl $255, %ebx
-L(loop):
- cmpl %ebx, %edx
- cmovb %edx, %ebx
- incsspd %ebx
- subl %ebx, %edx
- ja L(loop)
-L(skip_ssp):
-# endif
/* Save the return address now. */
movl (JB_PC*4)(%eax), %edx
/* Get the stack pointer. */
@@ -98,38 +57,6 @@ L(skip_ssp):
#else
movl 4(%esp), %ecx /* User's jmp_buf in %ecx. */
movl 8(%esp), %eax /* Second argument is return value. */
-# ifdef SHADOW_STACK_POINTER_OFFSET
-# if IS_IN (libc) && defined SHARED
- /* Check if Shadow Stack is enabled. */
- testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET
- jz L(skip_ssp)
-# endif
- /* Check and adjust the Shadow-Stack-Pointer. */
- xorl %edx, %edx
- /* Get the current ssp. */
- rdsspd %edx
- /* And compare it with the saved ssp value. */
- subl SHADOW_STACK_POINTER_OFFSET(%ecx), %edx
- je L(skip_ssp)
- /* Count the number of frames to adjust and adjust it
- with incssp instruction. The instruction can adjust
- the ssp by [0..255] value only thus use a loop if
- the number of frames is bigger than 255. */
- negl %edx
- shrl $2, %edx
- /* NB: We saved Shadow-Stack-Pointer of setjmp. Since we are
- restoring Shadow-Stack-Pointer of setjmp's caller, we
- need to unwind shadow stack by one more frame. */
- addl $1, %edx
- movl $255, %ebx
-L(loop):
- cmpl %ebx, %edx
- cmovb %edx, %ebx
- incsspd %ebx
- subl %ebx, %edx
- ja L(loop)
-L(skip_ssp):
-# endif
/* Save the return address now. */
movl (JB_PC*4)(%ecx), %edx
LIBC_PROBE (longjmp, 3, 4@%ecx, -4@%eax, 4@%edx)
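
The block removed above implemented the shadow-stack unwind for longjmp: read the current shadow-stack pointer with RDSSPD, compare it with the value setjmp saved in the jmp_buf, then pop entries with INCSSPD, which accepts a count of at most 255 per instruction. Below is a rough C sketch of that logic, illustrative only and not glibc code; read_ssp/incssp are hypothetical wrappers and the snippet needs an assembler that knows the CET instructions.

static inline unsigned int
read_ssp (void)
{
  unsigned int ssp = 0;        /* RDSSPD leaves this 0 when SHSTK is off.  */
  __asm__ volatile ("rdsspd %0" : "+r" (ssp));
  return ssp;
}

static inline void
incssp (unsigned int n)
{
  __asm__ volatile ("incsspd %0" : : "r" (n));
}

static void
unwind_shadow_stack (unsigned int saved_ssp)
{
  unsigned int cur = read_ssp ();
  /* When SHSTK is disabled, both values are 0 and nothing happens.  */
  if (cur == saved_ssp)
    return;
  /* Entries are 4 bytes on i386; pop one extra frame because we return
     to setjmp's caller, not to setjmp itself.  */
  unsigned int frames = (saved_ssp - cur) / 4 + 1;
  while (frames > 0)
    {
      unsigned int chunk = frames < 255 ? frames : 255;
      incssp (chunk);
      frames -= chunk;
    }
}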
diff --git a/sysdeps/i386/add_n.S b/sysdeps/i386/add_n.S
index d4af6d9..c1b7098b1 100644
--- a/sysdeps/i386/add_n.S
+++ b/sysdeps/i386/add_n.S
@@ -40,13 +40,6 @@ ENTRY (__mpn_add_n)
cfi_rel_offset (esi, 0)
movl S2(%esp),%edx
movl SIZE(%esp),%ecx
-
-#if IBT_ENABLED
- pushl %ebx
- cfi_adjust_cfa_offset (4)
- cfi_rel_offset (ebx, 0)
-#endif
-
movl %ecx,%eax
shrl $3,%ecx /* compute count for unrolled loop */
negl %eax
@@ -58,9 +51,6 @@ ENTRY (__mpn_add_n)
subl %eax,%esi /* ... by a constant when we ... */
subl %eax,%edx /* ... enter the loop */
shrl $2,%eax /* restore previous value */
-#if IBT_ENABLED
- leal -4(,%eax,4),%ebx /* Count for 4-byte endbr32 */
-#endif
#ifdef PIC
/* Calculate start address in loop for PIC. Due to limitations in some
assemblers, Loop-L0-3 cannot be put into the leal */
@@ -75,39 +65,29 @@ L(0): leal (%eax,%eax,8),%eax
/* Calculate start address in loop for non-PIC. */
leal (L(oop) - 3)(%eax,%eax,8),%eax
#endif
-#if IBT_ENABLED
- addl %ebx,%eax /* Adjust for endbr32 */
-#endif
jmp *%eax /* jump into loop */
ALIGN (3)
L(oop): movl (%esi),%eax
adcl (%edx),%eax
movl %eax,(%edi)
- _CET_ENDBR
movl 4(%esi),%eax
adcl 4(%edx),%eax
movl %eax,4(%edi)
- _CET_ENDBR
movl 8(%esi),%eax
adcl 8(%edx),%eax
movl %eax,8(%edi)
- _CET_ENDBR
movl 12(%esi),%eax
adcl 12(%edx),%eax
movl %eax,12(%edi)
- _CET_ENDBR
movl 16(%esi),%eax
adcl 16(%edx),%eax
movl %eax,16(%edi)
- _CET_ENDBR
movl 20(%esi),%eax
adcl 20(%edx),%eax
movl %eax,20(%edi)
- _CET_ENDBR
movl 24(%esi),%eax
adcl 24(%edx),%eax
movl %eax,24(%edi)
- _CET_ENDBR
movl 28(%esi),%eax
adcl 28(%edx),%eax
movl %eax,28(%edi)
@@ -120,11 +100,6 @@ L(oop): movl (%esi),%eax
sbbl %eax,%eax
negl %eax
-#if IBT_ENABLED
- popl %ebx
- cfi_adjust_cfa_offset (-4)
- cfi_restore (ebx)
-#endif
popl %esi
cfi_adjust_cfa_offset (-4)
cfi_restore (esi)
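
The IBT-related lines removed above existed because __mpn_add_n jumps into the middle of its 8-way unrolled loop ("jmp *%eax") so the size % 8 leftover limbs are handled on the first pass; with IBT, each of those entry points carried a 4-byte endbr32, so the computed entry address had to be rescaled and %ebx was pressed into service for the correction. The same mid-loop-entry trick can be written with GNU C computed gotos; the sketch below is illustrative only, using a plain copy loop rather than glibc's add-with-carry code.

void
copy_unrolled (unsigned int *dst, const unsigned int *src, unsigned long n)
{
  if (n == 0)
    return;
  unsigned long rem = n % 4;                /* elements for the partial first pass */
  unsigned long passes = (n + 3) / 4;
  static const int entry[] = {              /* rem -> entry-point offset */
    &&do4 - &&do4,                          /* rem == 0: full pass */
    &&do1 - &&do4,                          /* rem == 1: last statement only */
    &&do2 - &&do4,
    &&do3 - &&do4,
  };
  /* Bias the pointers so indices line up on the partial first pass,
     like the "subl %eax,%esi / subl %eax,%edx" above.  */
  unsigned long bias = (4 - rem) % 4;
  dst -= bias;
  src -= bias;
  goto *(&&do4 + entry[rem]);               /* the C analogue of "jmp *%eax" */
  do
    {
    do4: dst[0] = src[0];
    do3: dst[1] = src[1];
    do2: dst[2] = src[2];
    do1: dst[3] = src[3];
      dst += 4;
      src += 4;
    }
  while (--passes != 0);
}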
diff --git a/sysdeps/i386/bsd-_setjmp.S b/sysdeps/i386/bsd-_setjmp.S
index eb3ac9c..5fd671a 100644
--- a/sysdeps/i386/bsd-_setjmp.S
+++ b/sysdeps/i386/bsd-_setjmp.S
@@ -23,18 +23,12 @@
#include <sysdep.h>
#include <pointer_guard.h>
#include <jmpbuf-offsets.h>
-#include <jmp_buf-ssp.h>
#include <stap-probe.h>
#define PARMS 4 /* no space for saved regs */
#define JMPBUF PARMS
#define SIGMSK JMPBUF+4
-/* Don't save shadow stack register if shadow stack isn't enabled. */
-#if !SHSTK_ENABLED
-# undef SHADOW_STACK_POINTER_OFFSET
-#endif
-
ENTRY (_setjmp)
xorl %eax, %eax
@@ -58,21 +52,6 @@ ENTRY (_setjmp)
movl %ebp, (JB_BP*4)(%edx) /* Save caller's frame pointer. */
movl %eax, JB_SIZE(%edx) /* No signal mask set. */
-#ifdef SHADOW_STACK_POINTER_OFFSET
-# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET
- /* Check if Shadow Stack is enabled. */
- testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET
- jz L(skip_ssp)
-# else
- xorl %ecx, %ecx
-# endif
- /* Get the current Shadow-Stack-Pointer and save it. */
- rdsspd %ecx
- movl %ecx, SHADOW_STACK_POINTER_OFFSET(%edx)
-# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET
-L(skip_ssp):
-# endif
-#endif
ret
END (_setjmp)
libc_hidden_def (_setjmp)
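
The counterpart removed from the setjmp family is much simpler: capture the current shadow-stack pointer with RDSSPD and store it in the jmp_buf slot addressed by SHADOW_STACK_POINTER_OFFSET, so __longjmp can later unwind back to it. A minimal sketch follows; the struct layout and the "ssp" field name are hypothetical stand-ins for the real offsets, not glibc definitions.

struct jmp_buf_sketch
{
  unsigned int regs[6];     /* bx, si, di, bp, sp, pc -- simplified layout */
  unsigned int ssp;         /* shadow-stack pointer slot */
};

static void
save_shadow_stack (struct jmp_buf_sketch *env)
{
  unsigned int ssp = 0;     /* stays 0 when SHSTK is disabled */
  __asm__ volatile ("rdsspd %0" : "+r" (ssp));
  env->ssp = ssp;
}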
diff --git a/sysdeps/i386/bsd-setjmp.S b/sysdeps/i386/bsd-setjmp.S
index c03f235..13338a6 100644
--- a/sysdeps/i386/bsd-setjmp.S
+++ b/sysdeps/i386/bsd-setjmp.S
@@ -23,18 +23,12 @@
#include <sysdep.h>
#include <pointer_guard.h>
#include <jmpbuf-offsets.h>
-#include <jmp_buf-ssp.h>
#include <stap-probe.h>
#define PARMS 4 /* no space for saved regs */
#define JMPBUF PARMS
#define SIGMSK JMPBUF+4
-/* Don't save shadow stack register if shadow stack isn't enabled. */
-#if !SHSTK_ENABLED
-# undef SHADOW_STACK_POINTER_OFFSET
-#endif
-
ENTRY (setjmp)
/* Note that we have to use a non-exported symbol in the next
jump since otherwise gas will emit it as a jump through the
@@ -58,21 +52,6 @@ ENTRY (setjmp)
#endif
movl %ecx, (JB_PC*4)(%eax)
movl %ebp, (JB_BP*4)(%eax) /* Save caller's frame pointer. */
-#ifdef SHADOW_STACK_POINTER_OFFSET
-# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET
- /* Check if Shadow Stack is enabled. */
- testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET
- jz L(skip_ssp)
-# else
- xorl %ecx, %ecx
-# endif
- /* Get the current Shadow-Stack-Pointer and save it. */
- rdsspd %ecx
- movl %ecx, SHADOW_STACK_POINTER_OFFSET(%eax)
-# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET
-L(skip_ssp):
-# endif
-#endif
/* Call __sigjmp_save. */
pushl $1
diff --git a/sysdeps/i386/crti.S b/sysdeps/i386/crti.S
index 71d19b6..f9662ee 100644
--- a/sysdeps/i386/crti.S
+++ b/sysdeps/i386/crti.S
@@ -61,7 +61,6 @@
.hidden _init
.type _init, @function
_init:
- _CET_ENDBR
pushl %ebx
/* Maintain 16-byte stack alignment for called functions. */
subl $8, %esp
@@ -82,7 +81,6 @@ _init:
.hidden _fini
.type _fini, @function
_fini:
- _CET_ENDBR
pushl %ebx
subl $8, %esp
LOAD_PIC_REG (bx)
diff --git a/sysdeps/i386/dl-tlsdesc.S b/sysdeps/i386/dl-tlsdesc.S
index 318b82a..90d93ca 100644
--- a/sysdeps/i386/dl-tlsdesc.S
+++ b/sysdeps/i386/dl-tlsdesc.S
@@ -37,7 +37,6 @@
cfi_startproc
.align 16
_dl_tlsdesc_return:
- _CET_ENDBR
movl 4(%eax), %eax
ret
cfi_endproc
@@ -59,7 +58,6 @@ _dl_tlsdesc_return:
cfi_startproc
.align 16
_dl_tlsdesc_undefweak:
- _CET_ENDBR
movl 4(%eax), %eax
subl %gs:0, %eax
ret
@@ -101,7 +99,6 @@ _dl_tlsdesc_dynamic (struct tlsdesc *tdp)
cfi_startproc
.align 16
_dl_tlsdesc_dynamic:
- _CET_ENDBR
/* Like all TLS resolvers, preserve call-clobbered registers.
We need two scratch regs anyway. */
subl $28, %esp
diff --git a/sysdeps/i386/dl-trampoline.S b/sysdeps/i386/dl-trampoline.S
index ecba034..2632020 100644
--- a/sysdeps/i386/dl-trampoline.S
+++ b/sysdeps/i386/dl-trampoline.S
@@ -26,7 +26,6 @@
.align 16
_dl_runtime_resolve:
cfi_adjust_cfa_offset (8)
- _CET_ENDBR
pushl %eax # Preserve registers otherwise clobbered.
cfi_adjust_cfa_offset (4)
pushl %ecx
@@ -53,7 +52,6 @@ _dl_runtime_resolve:
.align 16
_dl_runtime_resolve_shstk:
cfi_adjust_cfa_offset (8)
- _CET_ENDBR
pushl %eax # Preserve registers otherwise clobbered.
cfi_adjust_cfa_offset (4)
pushl %edx
@@ -78,7 +76,6 @@ _dl_runtime_resolve_shstk:
.align 16
_dl_runtime_profile_shstk:
cfi_adjust_cfa_offset (8)
- _CET_ENDBR
pushl %esp
cfi_adjust_cfa_offset (4)
addl $8, (%esp) # Account for the pushed PLT data
@@ -123,7 +120,6 @@ _dl_runtime_profile_shstk:
.align 16
_dl_runtime_profile:
cfi_adjust_cfa_offset (8)
- _CET_ENDBR
pushl %esp
cfi_adjust_cfa_offset (4)
addl $8, (%esp) # Account for the pushed PLT data
diff --git a/sysdeps/i386/i386-mcount.S b/sysdeps/i386/i386-mcount.S
index 8066649..6082e1a 100644
--- a/sysdeps/i386/i386-mcount.S
+++ b/sysdeps/i386/i386-mcount.S
@@ -29,7 +29,6 @@
.type C_SYMBOL_NAME(_mcount), @function
.align ALIGNARG(4)
C_LABEL(_mcount)
- _CET_ENDBR
/* Save the caller-clobbered registers. */
pushl %eax
pushl %ecx
@@ -58,7 +57,6 @@ weak_alias (_mcount, mcount)
.type C_SYMBOL_NAME(__fentry__), @function
.align ALIGNARG(4)
C_LABEL(__fentry__)
- _CET_ENDBR
/* Save the caller-clobbered registers. */
pushl %eax
pushl %ecx
diff --git a/sysdeps/i386/i686/add_n.S b/sysdeps/i386/i686/add_n.S
index c2bc798..bbc0cc7 100644
--- a/sysdeps/i386/i686/add_n.S
+++ b/sysdeps/i386/i686/add_n.S
@@ -44,13 +44,6 @@ ENTRY (__mpn_add_n)
cfi_rel_offset (esi, 0)
movl S2(%esp),%edx
movl SIZE(%esp),%ecx
-
-#if IBT_ENABLED
- pushl %ebx
- cfi_adjust_cfa_offset (4)
- cfi_rel_offset (ebx, 0)
-#endif
-
movl %ecx,%eax
shrl $3,%ecx /* compute count for unrolled loop */
negl %eax
@@ -62,9 +55,6 @@ ENTRY (__mpn_add_n)
subl %eax,%esi /* ... by a constant when we ... */
subl %eax,%edx /* ... enter the loop */
shrl $2,%eax /* restore previous value */
-#if IBT_ENABLED
- leal -4(,%eax,4),%ebx /* Count for 4-byte endbr32 */
-#endif
#ifdef PIC
/* Calculate start address in loop for PIC. */
leal (L(oop)-L(0)-3)(%eax,%eax,8),%eax
@@ -74,39 +64,29 @@ L(0):
/* Calculate start address in loop for non-PIC. */
leal (L(oop) - 3)(%eax,%eax,8),%eax
#endif
-#if IBT_ENABLED
- addl %ebx,%eax /* Adjust for endbr32 */
-#endif
jmp *%eax /* jump into loop */
ALIGN (3)
L(oop): movl (%esi),%eax
adcl (%edx),%eax
movl %eax,(%edi)
- _CET_ENDBR
movl 4(%esi),%eax
adcl 4(%edx),%eax
movl %eax,4(%edi)
- _CET_ENDBR
movl 8(%esi),%eax
adcl 8(%edx),%eax
movl %eax,8(%edi)
- _CET_ENDBR
movl 12(%esi),%eax
adcl 12(%edx),%eax
movl %eax,12(%edi)
- _CET_ENDBR
movl 16(%esi),%eax
adcl 16(%edx),%eax
movl %eax,16(%edi)
- _CET_ENDBR
movl 20(%esi),%eax
adcl 20(%edx),%eax
movl %eax,20(%edi)
- _CET_ENDBR
movl 24(%esi),%eax
adcl 24(%edx),%eax
movl %eax,24(%edi)
- _CET_ENDBR
movl 28(%esi),%eax
adcl 28(%edx),%eax
movl %eax,28(%edi)
@@ -119,11 +99,6 @@ L(oop): movl (%esi),%eax
sbbl %eax,%eax
negl %eax
-#if IBT_ENABLED
- popl %ebx
- cfi_adjust_cfa_offset (-4)
- cfi_restore (ebx)
-#endif
popl %esi
cfi_adjust_cfa_offset (-4)
cfi_restore (esi)
diff --git a/sysdeps/i386/i686/memcmp.S b/sysdeps/i386/i686/memcmp.S
index 94600f5..0738ee9 100644
--- a/sysdeps/i386/i686/memcmp.S
+++ b/sysdeps/i386/i686/memcmp.S
@@ -80,7 +80,7 @@ L(not_1):
LOAD_JUMP_TABLE_ENTRY (L(table_32bytes), %ecx)
addl %ecx, %edx
addl %ecx, %esi
- _CET_NOTRACK jmp *%ebx
+ jmp *%ebx
ALIGN (4)
L(28bytes):
@@ -326,7 +326,7 @@ L(32bytesormore):
LOAD_JUMP_TABLE_ENTRY (L(table_32bytes), %ecx)
addl %ecx, %edx
addl %ecx, %esi
- _CET_NOTRACK jmp *%ebx
+ jmp *%ebx
L(load_ecx_28):
addl $0x4, %edx
diff --git a/sysdeps/i386/i686/multiarch/memcmp-sse4.S b/sysdeps/i386/i686/multiarch/memcmp-sse4.S
index f0b2d84..2cdda5f 100644
--- a/sysdeps/i386/i686/multiarch/memcmp-sse4.S
+++ b/sysdeps/i386/i686/multiarch/memcmp-sse4.S
@@ -58,7 +58,7 @@
absolute address. */ \
addl (%ebx,INDEX,SCALE), %ebx; \
/* We loaded the jump table and adjusted EDX/ESI. Go. */ \
- _CET_NOTRACK jmp *%ebx
+ jmp *%ebx
# else
# define JMPTBL(I, B) I
@@ -66,7 +66,7 @@
jump table with relative offsets. INDEX is a register contains the
index into the jump table. SCALE is the scale of INDEX. */
# define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
- _CET_NOTRACK jmp *TABLE(,INDEX,SCALE)
+ jmp *TABLE(,INDEX,SCALE)
# endif
diff --git a/sysdeps/i386/i686/multiarch/memcpy-ssse3-rep.S b/sysdeps/i386/i686/multiarch/memcpy-ssse3-rep.S
index abdde55..a7e80dc 100644
--- a/sysdeps/i386/i686/multiarch/memcpy-ssse3-rep.S
+++ b/sysdeps/i386/i686/multiarch/memcpy-ssse3-rep.S
@@ -64,7 +64,7 @@
absolute address. */ \
addl (%ebx,INDEX,SCALE), %ebx; \
/* We loaded the jump table. Go. */ \
- _CET_NOTRACK jmp *%ebx
+ jmp *%ebx
# define BRANCH_TO_JMPTBL_ENTRY_VALUE(TABLE) \
addl $(TABLE - .), %ebx
@@ -72,7 +72,7 @@
# define BRANCH_TO_JMPTBL_ENTRY_TAIL(TABLE, INDEX, SCALE) \
addl (%ebx,INDEX,SCALE), %ebx; \
/* We loaded the jump table. Go. */ \
- _CET_NOTRACK jmp *%ebx
+ jmp *%ebx
#else
# define PARMS 4
# define ENTRANCE
@@ -84,12 +84,12 @@
absolute offsets. INDEX is a register contains the index into the
jump table. SCALE is the scale of INDEX. */
# define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
- _CET_NOTRACK jmp *TABLE(,INDEX,SCALE)
+ jmp *TABLE(,INDEX,SCALE)
# define BRANCH_TO_JMPTBL_ENTRY_VALUE(TABLE)
# define BRANCH_TO_JMPTBL_ENTRY_TAIL(TABLE, INDEX, SCALE) \
- _CET_NOTRACK jmp *TABLE(,INDEX,SCALE)
+ jmp *TABLE(,INDEX,SCALE)
#endif
.section .text.ssse3,"ax",@progbits
diff --git a/sysdeps/i386/i686/multiarch/memcpy-ssse3.S b/sysdeps/i386/i686/multiarch/memcpy-ssse3.S
index 60cc5f1..713c5bd 100644
--- a/sysdeps/i386/i686/multiarch/memcpy-ssse3.S
+++ b/sysdeps/i386/i686/multiarch/memcpy-ssse3.S
@@ -64,7 +64,7 @@
absolute address. */ \
addl (%ebx, INDEX, SCALE), %ebx; \
/* We loaded the jump table. Go. */ \
- _CET_NOTRACK jmp *%ebx
+ jmp *%ebx
# else
# define PARMS 4
@@ -78,7 +78,7 @@
jump table. SCALE is the scale of INDEX. */
# define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
- _CET_NOTRACK jmp *TABLE(, INDEX, SCALE)
+ jmp *TABLE(, INDEX, SCALE)
# endif
.section .text.ssse3,"ax",@progbits
diff --git a/sysdeps/i386/i686/multiarch/memset-sse2-rep.S b/sysdeps/i386/i686/multiarch/memset-sse2-rep.S
index 52d046d..d1a0473 100644
--- a/sysdeps/i386/i686/multiarch/memset-sse2-rep.S
+++ b/sysdeps/i386/i686/multiarch/memset-sse2-rep.S
@@ -56,7 +56,7 @@
add (%ebx,%ecx,4), %ebx; \
add %ecx, %edx; \
/* We loaded the jump table and adjusted EDX. Go. */ \
- _CET_NOTRACK jmp *%ebx
+ jmp *%ebx
#else
# define ENTRANCE
# define RETURN_END ret
@@ -68,7 +68,7 @@
absolute offsets. */
# define BRANCH_TO_JMPTBL_ENTRY(TABLE) \
add %ecx, %edx; \
- _CET_NOTRACK jmp *TABLE(,%ecx,4)
+ jmp *TABLE(,%ecx,4)
#endif
.section .text.sse2,"ax",@progbits
diff --git a/sysdeps/i386/i686/multiarch/memset-sse2.S b/sysdeps/i386/i686/multiarch/memset-sse2.S
index ac21fcb..2e00743 100644
--- a/sysdeps/i386/i686/multiarch/memset-sse2.S
+++ b/sysdeps/i386/i686/multiarch/memset-sse2.S
@@ -56,7 +56,7 @@
add (%ebx,%ecx,4), %ebx; \
add %ecx, %edx; \
/* We loaded the jump table and adjusted EDX. Go. */ \
- _CET_NOTRACK jmp *%ebx
+ jmp *%ebx
#else
# define ENTRANCE
# define RETURN_END ret
@@ -68,7 +68,7 @@
absolute offsets. */
# define BRANCH_TO_JMPTBL_ENTRY(TABLE) \
add %ecx, %edx; \
- _CET_NOTRACK jmp *TABLE(,%ecx,4)
+ jmp *TABLE(,%ecx,4)
#endif
.section .text.sse2,"ax",@progbits
diff --git a/sysdeps/i386/i686/multiarch/strcat-sse2.S b/sysdeps/i386/i686/multiarch/strcat-sse2.S
index 7ac4827..682f43f 100644
--- a/sysdeps/i386/i686/multiarch/strcat-sse2.S
+++ b/sysdeps/i386/i686/multiarch/strcat-sse2.S
@@ -49,7 +49,7 @@
absolute address. */ \
addl (%ecx,INDEX,SCALE), %ecx; \
/* We loaded the jump table and adjusted ECX. Go. */ \
- _CET_NOTRACK jmp *%ecx
+ jmp *%ecx
# else
# define JMPTBL(I, B) I
@@ -58,7 +58,7 @@
jump table. SCALE is the scale of INDEX. */
# define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
- _CET_NOTRACK jmp *TABLE(,INDEX,SCALE)
+ jmp *TABLE(,INDEX,SCALE)
# endif
# ifndef STRCAT
diff --git a/sysdeps/i386/i686/multiarch/strcpy-sse2.S b/sysdeps/i386/i686/multiarch/strcpy-sse2.S
index 5c358e5..a18a8ca 100644
--- a/sysdeps/i386/i686/multiarch/strcpy-sse2.S
+++ b/sysdeps/i386/i686/multiarch/strcpy-sse2.S
@@ -64,7 +64,7 @@
absolute address. */ \
addl (%ecx,INDEX,SCALE), %ecx; \
/* We loaded the jump table and adjusted ECX. Go. */ \
- _CET_NOTRACK jmp *%ecx
+ jmp *%ecx
# else
# define JMPTBL(I, B) I
@@ -73,7 +73,7 @@
jump table. SCALE is the scale of INDEX. */
# define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
- _CET_NOTRACK jmp *TABLE(,INDEX,SCALE)
+ jmp *TABLE(,INDEX,SCALE)
# endif
.text
diff --git a/sysdeps/i386/setjmp.S b/sysdeps/i386/setjmp.S
index ab1a809..08a98f2 100644
--- a/sysdeps/i386/setjmp.S
+++ b/sysdeps/i386/setjmp.S
@@ -19,7 +19,6 @@
#include <sysdep.h>
#include <pointer_guard.h>
#include <jmpbuf-offsets.h>
-#include <jmp_buf-ssp.h>
#include <asm-syntax.h>
#include <stap-probe.h>
@@ -27,11 +26,6 @@
#define JMPBUF PARMS
#define SIGMSK JMPBUF+4
-/* Don't save shadow stack register if shadow stack isn't enabled. */
-#if !SHSTK_ENABLED
-# undef SHADOW_STACK_POINTER_OFFSET
-#endif
-
ENTRY (__sigsetjmp)
movl JMPBUF(%esp), %eax
@@ -53,21 +47,6 @@ ENTRY (__sigsetjmp)
movl %ecx, (JB_PC*4)(%eax)
movl %ebp, (JB_BP*4)(%eax) /* Save caller's frame pointer. */
-#ifdef SHADOW_STACK_POINTER_OFFSET
-# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET
- /* Check if Shadow Stack is enabled. */
- testl $X86_FEATURE_1_SHSTK, %gs:FEATURE_1_OFFSET
- jz L(skip_ssp)
-# else
- xorl %ecx, %ecx
-# endif
- /* Get the current Shadow-Stack-Pointer and save it. */
- rdsspd %ecx
- movl %ecx, SHADOW_STACK_POINTER_OFFSET(%eax)
-# if IS_IN (libc) && defined SHARED && defined FEATURE_1_OFFSET
-L(skip_ssp):
-# endif
-#endif
#if IS_IN (rtld)
/* In ld.so we never save the signal mask. */
xorl %eax, %eax
diff --git a/sysdeps/i386/start.S b/sysdeps/i386/start.S
index e042ed1..3057717 100644
--- a/sysdeps/i386/start.S
+++ b/sysdeps/i386/start.S
@@ -132,7 +132,6 @@ ENTRY (_start)
#if defined PIC && !defined SHARED
__wrap_main:
- _CET_ENDBR
jmp main@PLT
#endif
END (_start)
diff --git a/sysdeps/i386/sub_n.S b/sysdeps/i386/sub_n.S
index 3ebe984..c111bf3 100644
--- a/sysdeps/i386/sub_n.S
+++ b/sysdeps/i386/sub_n.S
@@ -40,13 +40,6 @@ ENTRY (__mpn_sub_n)
cfi_rel_offset (esi, 0)
movl S2(%esp),%edx
movl SIZE(%esp),%ecx
-
-#if IBT_ENABLED
- pushl %ebx
- cfi_adjust_cfa_offset (4)
- cfi_rel_offset (ebx, 0)
-#endif
-
movl %ecx,%eax
shrl $3,%ecx /* compute count for unrolled loop */
negl %eax
@@ -58,9 +51,6 @@ ENTRY (__mpn_sub_n)
subl %eax,%esi /* ... by a constant when we ... */
subl %eax,%edx /* ... enter the loop */
shrl $2,%eax /* restore previous value */
-#if defined __CET__ && (__CET__ & 1) != 0
- leal -4(,%eax,4),%ebx /* Count for 4-byte endbr32 */
-#endif
#ifdef PIC
/* Calculate start address in loop for PIC. Due to limitations in some
assemblers, Loop-L0-3 cannot be put into the leal */
@@ -75,39 +65,29 @@ L(0): leal (%eax,%eax,8),%eax
/* Calculate start address in loop for non-PIC. */
leal (L(oop) - 3)(%eax,%eax,8),%eax
#endif
-#if defined __CET__ && (__CET__ & 1) != 0
- addl %ebx,%eax /* Adjust for endbr32 */
-#endif
jmp *%eax /* jump into loop */
ALIGN (3)
L(oop): movl (%esi),%eax
sbbl (%edx),%eax
movl %eax,(%edi)
- _CET_ENDBR
movl 4(%esi),%eax
sbbl 4(%edx),%eax
movl %eax,4(%edi)
- _CET_ENDBR
movl 8(%esi),%eax
sbbl 8(%edx),%eax
movl %eax,8(%edi)
- _CET_ENDBR
movl 12(%esi),%eax
sbbl 12(%edx),%eax
movl %eax,12(%edi)
- _CET_ENDBR
movl 16(%esi),%eax
sbbl 16(%edx),%eax
movl %eax,16(%edi)
- _CET_ENDBR
movl 20(%esi),%eax
sbbl 20(%edx),%eax
movl %eax,20(%edi)
- _CET_ENDBR
movl 24(%esi),%eax
sbbl 24(%edx),%eax
movl %eax,24(%edi)
- _CET_ENDBR
movl 28(%esi),%eax
sbbl 28(%edx),%eax
movl %eax,28(%edi)
@@ -120,11 +100,6 @@ L(oop): movl (%esi),%eax
sbbl %eax,%eax
negl %eax
-#if defined __CET__ && (__CET__ & 1) != 0
- popl %ebx
- cfi_adjust_cfa_offset (-4)
- cfi_restore (ebx)
-#endif
popl %esi
cfi_adjust_cfa_offset (-4)
cfi_restore (esi)
diff --git a/sysdeps/i386/sysdep.h b/sysdeps/i386/sysdep.h
index 69c8b51..86b5fdd 100644
--- a/sysdeps/i386/sysdep.h
+++ b/sysdeps/i386/sysdep.h
@@ -18,6 +18,8 @@
#include <sysdeps/x86/sysdep.h>
+#define CET_ENABLED 0
+
/* It is desirable that the names of PIC thunks match those used by
GCC so that multiple copies are eliminated by the linker. Because
GCC 4.6 and earlier use __i686 in the names, it is necessary to
@@ -37,6 +39,15 @@
/* Syntactic details of assembler. */
+/* Define an entry point visible from C. */
+#define ENTRY_P2ALIGN(name, alignment) \
+ .globl C_SYMBOL_NAME(name); \
+ .type C_SYMBOL_NAME(name),@function; \
+ .align ALIGNARG(alignment); \
+ C_LABEL(name) \
+ cfi_startproc; \
+ CALL_MCOUNT
+
/* If compiled for profiling, call `mcount' at the start of each function. */
#ifdef PROF
/* The mcount code relies on a normal frame pointer being on the stack