author     Uros Bizjak <ubizjak@gmail.com>  2024-07-18 16:58:09 +0200
committer  Uros Bizjak <ubizjak@gmail.com>  2024-07-18 16:59:09 +0200
commit     f7d01e080a54ea94586c8847857e5aef17906519 (patch)
tree       8e598807caacc0be233ddd47b652bb93fc781221
parent     1e60a6abfece40c7bf55d6ca0a439078d3f5159a (diff)
libatomic: Improve cpuid usage in __libat_feat1_init
Check the result of __get_cpuid and process FEAT1_REGISTER only when
__get_cpuid returns success.  Use __cpuid instead of nested __get_cpuid.

libatomic/ChangeLog:

        * config/x86/init.c (__libat_feat1_init): Check the result of
        __get_cpuid and process FEAT1_REGISTER only when __get_cpuid
        returns success.  Use __cpuid instead of nested __get_cpuid.
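For readers unfamiliar with GCC's <cpuid.h>, the distinction matters:
__get_cpuid validates the requested leaf against the CPU's maximum
supported leaf and returns zero on failure, while the __cpuid macro
issues the instruction unconditionally and takes lvalues rather than
pointers.  Once __get_cpuid (1, ...) has succeeded, leaf 0 is known to
be supported, so the raw macro suffices for the vendor check.  A
minimal standalone sketch of the corrected pattern (illustrative only,
not the libatomic source; the main wrapper and puts call are added
here purely for demonstration):

#include <cpuid.h>
#include <stdio.h>

int
main (void)
{
  unsigned int eax, ebx, ecx, edx;

  /* __get_cpuid returns nonzero only when leaf 1 is supported;
     the old libatomic code called it without checking this.  */
  if (__get_cpuid (1, &eax, &ebx, &ecx, &edx))
    {
      unsigned int ecx2;

      /* Raw cpuid, leaf 0: the vendor signature is returned in
         ebx/edx/ecx; signature_*_ecx constants come from <cpuid.h>.  */
      __cpuid (0, eax, ebx, ecx2, edx);
      if (ecx2 == signature_INTEL_ecx || ecx2 == signature_AMD_ecx)
        puts ("Intel or AMD CPU detected");
    }
  return 0;
}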
 libatomic/config/x86/init.c | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/libatomic/config/x86/init.c b/libatomic/config/x86/init.c
index a75be3f..26168d4 100644
--- a/libatomic/config/x86/init.c
+++ b/libatomic/config/x86/init.c
@@ -33,21 +33,23 @@ __libat_feat1_init (void)
 {
   unsigned int eax, ebx, ecx, edx;
   FEAT1_REGISTER = 0;
-  __get_cpuid (1, &eax, &ebx, &ecx, &edx);
-#ifdef __x86_64__
-  if ((FEAT1_REGISTER & (bit_AVX | bit_CMPXCHG16B))
-      == (bit_AVX | bit_CMPXCHG16B))
+  if (__get_cpuid (1, &eax, &ebx, &ecx, &edx))
     {
-      /* Intel SDM guarantees that 16-byte VMOVDQA on 16-byte aligned address
-         is atomic, and AMD is going to do something similar soon.
-         We don't have a guarantee from vendors of other CPUs with AVX,
-         like Zhaoxin and VIA.  */
-      unsigned int ecx2 = 0;
-      __get_cpuid (0, &eax, &ebx, &ecx2, &edx);
-      if (ecx2 != signature_INTEL_ecx && ecx2 != signature_AMD_ecx)
-        FEAT1_REGISTER &= ~bit_AVX;
-    }
+#ifdef __x86_64__
+      if ((FEAT1_REGISTER & (bit_AVX | bit_CMPXCHG16B))
+          == (bit_AVX | bit_CMPXCHG16B))
+        {
+          /* Intel SDM guarantees that 16-byte VMOVDQA on 16-byte aligned
+             address is atomic, and AMD is going to do something similar soon.
+             We don't have a guarantee from vendors of other CPUs with AVX,
+             like Zhaoxin and VIA.  */
+          unsigned int ecx2;
+          __cpuid (0, eax, ebx, ecx2, edx);
+          if (ecx2 != signature_INTEL_ecx && ecx2 != signature_AMD_ecx)
+            FEAT1_REGISTER &= ~bit_AVX;
+        }
 #endif
+    }
   /* See the load in load_feat1.  */
   __atomic_store_n (&__libat_feat1, FEAT1_REGISTER, __ATOMIC_RELAXED);
   return FEAT1_REGISTER;
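For context, the retained comment above points at the other half of
the synchronization: load_feat1 reads __libat_feat1 with a matching
relaxed load and falls back to the initializer when it observes zero.
A hedged sketch of that pattern follows, as a reading aid only; the
exact definition lives elsewhere in libatomic's x86 host configuration
and may differ in detail:

/* Illustrative sketch, not verbatim libatomic code.  Relaxed ordering
   is enough here because __libat_feat1 only ever transitions from 0 to
   its final value, and a thread that races and still reads 0 simply
   redoes the idempotent cpuid-based initialization.  */
static inline unsigned int
load_feat1 (void)
{
  /* See the store in __libat_feat1_init.  */
  unsigned int feat1 = __atomic_load_n (&__libat_feat1, __ATOMIC_RELAXED);
  if (feat1 == 0)
    feat1 = __libat_feat1_init ();
  return feat1;
}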