aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorH.J. Lu <hjl.tools@gmail.com>2025-01-11 22:57:02 +0800
committerH.J. Lu <hjl.tools@gmail.com>2025-01-12 07:08:27 +0800
commit0b6ad02b33448c0b8b6fdd781dffad329d1f0f7d (patch)
tree165d8350aa79c7c30bc1a1355e4e8a0407a87698
parent53a71b9f66dbc7f0ce44ec95bff7caa31fa0374b (diff)
downloadglibc-0b6ad02b33448c0b8b6fdd781dffad329d1f0f7d.zip
glibc-0b6ad02b33448c0b8b6fdd781dffad329d1f0f7d.tar.gz
glibc-0b6ad02b33448c0b8b6fdd781dffad329d1f0f7d.tar.bz2
x86-64: Cast __rseq_offset to long long int [BZ #32543]
commit 494d65129ed5ae1154b75cc189bbdde5e9ecf1df
Author: Michael Jeanson <mjeanson@efficios.com>
Date:   Thu Aug 1 10:35:34 2024 -0400

    nptl: Introduce <rseq-access.h> for RSEQ_* accessors

added things like

  asm volatile ("movl %%fs:%P1(%q2),%0" \
                : "=r" (__value) \
                : "i" (offsetof (struct rseq_area, member)), \
                  "r" (__rseq_offset)); \

But this doesn't work for x32 when __rseq_offset is negative since
the address is computed as

  FS + 32-bit to 64-bit zero extension of __rseq_offset
  + offsetof (struct rseq_area, member)

Cast __rseq_offset to long long int

  "r" ((long long int) __rseq_offset)); \

to sign-extend 32-bit __rseq_offset to 64-bit.  This is a no-op for
x86-64 since x86-64 __rseq_offset is 64-bit.  This fixes BZ #32543.

Signed-off-by: H.J. Lu <hjl.tools@gmail.com>
Reviewed-by: Florian Weimer <fweimer@redhat.com>
-rw-r--r--sysdeps/x86_64/nptl/rseq-access.h12
1 file changed, 6 insertions, 6 deletions
diff --git a/sysdeps/x86_64/nptl/rseq-access.h b/sysdeps/x86_64/nptl/rseq-access.h
index 535e362..bc966b2 100644
--- a/sysdeps/x86_64/nptl/rseq-access.h
+++ b/sysdeps/x86_64/nptl/rseq-access.h
@@ -27,18 +27,18 @@
asm volatile ("movb %%fs:%P2(%q3),%b0" \
: "=q" (__value) \
: "0" (0), "i" (offsetof (struct rseq_area, member)), \
- "r" (__rseq_offset)); \
+ "r" ((long long int) __rseq_offset)); \
else if (sizeof (__value) == 4) \
asm volatile ("movl %%fs:%P1(%q2),%0" \
: "=r" (__value) \
: "i" (offsetof (struct rseq_area, member)), \
- "r" (__rseq_offset)); \
+ "r" ((long long int) __rseq_offset)); \
else /* 8 */ \
{ \
asm volatile ("movq %%fs:%P1(%q2),%q0" \
: "=r" (__value) \
: "i" (offsetof (struct rseq_area, member)), \
- "r" (__rseq_offset)); \
+ "r" ((long long int) __rseq_offset)); \
} \
__value; })
@@ -56,12 +56,12 @@
asm volatile ("movb %b0,%%fs:%P1(%q2)" : \
: "iq" (value), \
"i" (offsetof (struct rseq_area, member)), \
- "r" (__rseq_offset)); \
+ "r" ((long long int) __rseq_offset)); \
else if (sizeof (RSEQ_SELF()->member) == 4) \
asm volatile ("movl %0,%%fs:%P1(%q2)" : \
: IMM_MODE (value), \
"i" (offsetof (struct rseq_area, member)), \
- "r" (__rseq_offset)); \
+ "r" ((long long int) __rseq_offset)); \
else /* 8 */ \
{ \
/* Since movq takes a signed 32-bit immediate or a register source \
@@ -70,7 +70,7 @@
asm volatile ("movq %q0,%%fs:%P1(%q2)" : \
: "er" ((uint64_t) cast_to_integer (value)), \
"i" (offsetof (struct rseq_area, member)), \
- "r" (__rseq_offset)); \
+ "r" ((long long int) __rseq_offset)); \
}})
/* Set member of the RSEQ area directly. */