author    Uros Bizjak <ubizjak@gmail.com>    2025-08-29 09:05:23 +0200
committer Florian Weimer <fweimer@redhat.com>    2025-08-29 09:05:23 +0200
commit    119d658ac2aad88e306b4a66c1717e5ebf86c73f (patch)
tree      981d711d8d3598e569977b2e16bcb5c30f76fecf
parent    9a1cb8f783cb98d4c5fd180c43855fdbb74fbe71 (diff)
x86: Use flag output operands for inline asm in atomic-machine.h
Use the flag output constraints feature available in gcc 6+
("=@cc<cond>") instead of explicitly setting a boolean variable with a
SETcc instruction.  This approach decouples the instruction that sets
the flags from the code that consumes them, allowing the compiler to
generate better code for the consumers of the flags.

Instead of e.g.:

	lock add %esi,(%rdi)
	sets   %sil
	test   %sil,%sil
	jne    <...>

the compiler now generates:

	lock add %esi,(%rdi)
	js     <...>

No functional changes intended.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: H.J. Lu <hjl.tools@gmail.com>
Cc: Florian Weimer <fweimer@redhat.com>
Cc: Carlos O'Donell <carlos@redhat.com>
Reviewed-by: Florian Weimer <fweimer@redhat.com>
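As a minimal sketch of the two styles (not part of this patch; the function
names are hypothetical and the code assumes gcc 6+ on x86), a 32-bit
"add and test sign" helper could be written either way:

	#include <stdbool.h>

	/* Old style: materialize the sign flag into a byte with SETS;
	   the caller then has to re-test that byte before branching.  */
	static inline bool
	add_negative_setcc (int *mem, int val)
	{
	  unsigned char result;
	  __asm__ __volatile__ ("lock addl %2, %0; sets %1"
	                        : "+m" (*mem), "=qm" (result)
	                        : "ir" (val));
	  return result;
	}

	/* New style: the "=@ccs" flag output exposes the sign flag
	   directly, so a following branch can consume the flags without
	   a SETcc/TEST pair.  */
	static inline bool
	add_negative_flags (int *mem, int val)
	{
	  bool result;
	  __asm__ __volatile__ ("lock addl %2, %0"
	                        : "+m" (*mem), "=@ccs" (result)
	                        : "ir" (val));
	  return result;
	}

With the flag output form, a call such as "if (add_negative_flags (&x, v))"
can compile down to a lock add followed directly by js, matching the
example in the commit message above.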
-rw-r--r--  sysdeps/x86/atomic-machine.h  96
1 file changed, 48 insertions(+), 48 deletions(-)
diff --git a/sysdeps/x86/atomic-machine.h b/sysdeps/x86/atomic-machine.h
index 5452716..ac59f77 100644
--- a/sysdeps/x86/atomic-machine.h
+++ b/sysdeps/x86/atomic-machine.h
@@ -256,22 +256,22 @@
#define atomic_add_negative(mem, value) \
- ({ unsigned char __result; \
+ ({ _Bool __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "addb %b2, %0" \
+ : "=m" (*mem), "=@ccs" (__result) \
: IBR_CONSTRAINT (value), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "addw %w2, %0" \
+ : "=m" (*mem), "=@ccs" (__result) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "addl %2, %0" \
+ : "=m" (*mem), "=@ccs" (__result) \
: "ir" (value), "m" (*mem)); \
else if (__HAVE_64B_ATOMICS) \
- __asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "addq %q2, %0" \
+ : "=m" (*mem), "=@ccs" (__result) \
: "ir" ((int64_t) cast_to_integer (value)), \
"m" (*mem)); \
else \
@@ -280,26 +280,26 @@
#define atomic_add_zero(mem, value) \
- ({ unsigned char __result; \
+ ({ _Bool __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "addb %b2, %0" \
+ : "=m" (*mem), "=@ccz" (__result) \
: IBR_CONSTRAINT (value), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "addw %w2, %0" \
+ : "=m" (*mem), "=@ccz" (__result) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "addl %2, %0" \
+ : "=m" (*mem), "=@ccz" (__result) \
: "ir" (value), "m" (*mem)); \
else if (__HAVE_64B_ATOMICS) \
- __asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "addq %q2, %0" \
+ : "=m" (*mem), "=@ccz" (__result) \
: "ir" ((int64_t) cast_to_integer (value)), \
"m" (*mem)); \
else \
- __atomic_link_error (); \
+ __atomic_link_error (); \
__result; })
@@ -339,25 +339,25 @@
#define atomic_increment_and_test(mem) \
- ({ unsigned char __result; \
+ ({ _Bool __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK_PREFIX "incb %b0; sete %b1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "incb %b0" \
+ : "=m" (*mem), "=@cce" (__result) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK_PREFIX "incw %w0; sete %w1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "incw %w0" \
+ : "=m" (*mem), "=@cce" (__result) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "incl %0" \
+ : "=m" (*mem), "=@cce" (__result) \
: "m" (*mem)); \
else if (__HAVE_64B_ATOMICS) \
- __asm __volatile (LOCK_PREFIX "incq %q0; sete %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "incq %q0" \
+ : "=m" (*mem), "=@cce" (__result) \
: "m" (*mem)); \
else \
- __atomic_link_error (); \
+ __atomic_link_error (); \
__result; })
@@ -397,22 +397,22 @@
#define atomic_decrement_and_test(mem) \
- ({ unsigned char __result; \
+ ({ _Bool __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "decb %b0" \
+ : "=m" (*mem), "=@cce" (__result) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "decw %w0" \
+ : "=m" (*mem), "=@cce" (__result) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "decl %0" \
+ : "=m" (*mem), "=@cce" (__result) \
: "m" (*mem)); \
else \
- __asm __volatile (LOCK_PREFIX "decq %q0; sete %1" \
- : "=m" (*mem), "=qm" (__result) \
+ __asm __volatile (LOCK_PREFIX "decq %q0" \
+ : "=m" (*mem), "=@cce" (__result) \
: "m" (*mem)); \
__result; })
@@ -445,25 +445,25 @@
#define atomic_bit_test_set(mem, bit) \
- ({ unsigned char __result; \
+ ({ _Bool __result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \
- : "=q" (__result), "=m" (*mem) \
+ __asm __volatile (LOCK_PREFIX "btsb %3, %1" \
+ : "=@ccc" (__result), "=m" (*mem) \
: "m" (*mem), IBR_CONSTRAINT (bit)); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \
- : "=q" (__result), "=m" (*mem) \
+ __asm __volatile (LOCK_PREFIX "btsw %3, %1" \
+ : "=@ccc" (__result), "=m" (*mem) \
: "m" (*mem), "ir" (bit)); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \
- : "=q" (__result), "=m" (*mem) \
+ __asm __volatile (LOCK_PREFIX "btsl %3, %1" \
+ : "=@ccc" (__result), "=m" (*mem) \
: "m" (*mem), "ir" (bit)); \
else if (__HAVE_64B_ATOMICS) \
- __asm __volatile (LOCK_PREFIX "btsq %3, %1; setc %0" \
- : "=q" (__result), "=m" (*mem) \
+ __asm __volatile (LOCK_PREFIX "btsq %3, %1" \
+ : "=@ccc" (__result), "=m" (*mem) \
: "m" (*mem), "ir" (bit)); \
else \
- __atomic_link_error (); \
+ __atomic_link_error (); \
__result; })