aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFlorian Weimer <fweimer@redhat.com>2024-12-17 09:20:20 +0100
committerFlorian Weimer <fweimer@redhat.com>2024-12-27 07:29:56 +0100
commit7c22dcda27743658b6b8ea479283b384ad56bd5a (patch)
tree67b23611a152c62634458ccb96786d42e5522cba
parent5e249192cac7354af02a7347a0d8c984e0c88ed3 (diff)
downloadglibc-7c22dcda27743658b6b8ea479283b384ad56bd5a.zip
glibc-7c22dcda27743658b6b8ea479283b384ad56bd5a.tar.gz
glibc-7c22dcda27743658b6b8ea479283b384ad56bd5a.tar.bz2
nptl: More useful padding in struct pthread
The previous use of padding within a union made it impossible to re-use the padding for GLIBC_PRIVATE ABI preservation because tcbhead_t could use up all of the padding (as was historically the case on x86-64). Allocating padding unconditionally addresses this issue. Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
-rw-r--r--nptl/descr.h56
1 file changed, 25 insertions(+), 31 deletions(-)
diff --git a/nptl/descr.h b/nptl/descr.h
index a69f5c9..4067f2f 100644
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -129,41 +129,32 @@ struct priority_protection_data
/* Thread descriptor data structure. */
struct pthread
{
- union
- {
#if !TLS_DTV_AT_TP
- /* This overlaps the TCB as used for TLS without threads (see tls.h). */
- tcbhead_t header;
+ /* This overlaps the TCB as used for TLS without threads (see tls.h). */
+ tcbhead_t header;
#else
- struct
- {
- /* multiple_threads is enabled either when the process has spawned at
- least one thread or when a single-threaded process cancels itself.
- This enables additional code to introduce locking before doing some
- compare_and_exchange operations and also enable cancellation points.
- The concepts of multiple threads and cancellation points ideally
- should be separate, since it is not necessary for multiple threads to
- have been created for cancellation points to be enabled, as is the
- case is when single-threaded process cancels itself.
-
- Since enabling multiple_threads enables additional code in
- cancellation points and compare_and_exchange operations, there is a
- potential for an unneeded performance hit when it is enabled in a
- single-threaded, self-canceling process. This is OK though, since a
- single-threaded process will enable async cancellation only when it
- looks to cancel itself and is hence going to end anyway. */
- int multiple_threads;
- int gscope_flag;
- } header;
+ struct
+ {
+ /* multiple_threads is enabled either when the process has spawned at
+ least one thread or when a single-threaded process cancels itself.
+ This enables additional code to introduce locking before doing some
+ compare_and_exchange operations and also enable cancellation points.
+ The concepts of multiple threads and cancellation points ideally
+ should be separate, since it is not necessary for multiple threads to
+ have been created for cancellation points to be enabled, as is the
+ case is when single-threaded process cancels itself.
+
+ Since enabling multiple_threads enables additional code in
+ cancellation points and compare_and_exchange operations, there is a
+ potential for an unneeded performance hit when it is enabled in a
+ single-threaded, self-canceling process. This is OK though, since a
+ single-threaded process will enable async cancellation only when it
+ looks to cancel itself and is hence going to end anyway. */
+ int multiple_threads;
+ int gscope_flag;
+ } header;
#endif
- /* This extra padding has no special purpose, and this structure layout
- is private and subject to change without affecting the official ABI.
- We just have it here in case it might be convenient for some
- implementation-specific instrumentation hack or suchlike. */
- void *__padding[24];
- };
-
/* This descriptor's link on the GL (dl_stack_used) or
GL (dl_stack_user) list. */
list_t list;
@@ -407,6 +398,9 @@ struct pthread
/* getrandom vDSO per-thread opaque state. */
void *getrandom_buf;
+ /* Can be used for backports preserving internal TCB layout. */
+ void *padding[8];
+
/* rseq area registered with the kernel. Use a custom definition
here to isolate from kernel struct rseq changes. The
implementation of sched_getcpu needs acccess to the cpu_id field;