author | H.J. Lu <hjl.tools@gmail.com> | 2016-03-23 10:33:19 -0700 |
---|---|---|
committer | H.J. Lu <hjl.tools@gmail.com> | 2016-03-23 10:56:38 -0700 |
commit | 327aadf6348bd41d1fae46ee7780e214c0a493c1 (patch) | |
tree | 3a1f3550ee36ea010e53e1ad8f4e1ffc450b5c18 /sysdeps/x86/cpu-features.c | |
parent | 7a25d6a84df9fea56963569ceccaaf7c2a88f161 (diff) | |
[x86] Add a feature bit: Fast_Unaligned_Copy (hjl/pr19583)
On AMD processors, memcpy optimized with unaligned SSE load is
slower than memcpy optimized with aligned SSSE3 load, while other
string functions are faster with unaligned SSE load. A feature bit,
Fast_Unaligned_Copy, is added to select memcpy optimized with
unaligned SSE load.
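For illustration, a minimal C sketch of the selection this bit enables. The feature word, bit position, variant functions, and selector below are hypothetical stand-ins (the real dispatch is done in assembly in sysdeps/x86_64/multiarch/memcpy.S, and the real definitions live in sysdeps/x86/cpu-features.h):

```c
#include <stddef.h>
#include <string.h>

/* Hypothetical feature word and bit; the position is assumed.  */
#define bit_arch_Fast_Unaligned_Copy (1u << 18)
static unsigned int arch_features;          /* filled in at startup */

typedef void *(*memcpy_fn) (void *, const void *, size_t);

/* Stand-ins for the assembly variants (the unaligned-SSE copy and
   __memcpy_ssse3); plain memcpy just keeps the sketch runnable.  */
static void *
copy_unaligned_sse (void *dst, const void *src, size_t n)
{ return memcpy (dst, src, n); }

static void *
copy_aligned_ssse3 (void *dst, const void *src, size_t n)
{ return memcpy (dst, src, n); }

/* Pick the unaligned-SSE copy only when Fast_Unaligned_Copy is set.
   AMD processors thus keep the aligned-SSSE3 path for memcpy while
   their other string functions still honor the separate
   Fast_Unaligned_Load bit.  */
static memcpy_fn
select_memcpy (void)
{
  if (arch_features & bit_arch_Fast_Unaligned_Copy)
    return copy_unaligned_sse;
  return copy_aligned_ssse3;
}
```

Splitting the decision into two bits is the point of the patch: one bit per behavior, so memcpy and the other string functions can be tuned independently per vendor.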
[BZ #19583]
* sysdeps/x86/cpu-features.c (init_cpu_features): Set
Fast_Unaligned_Copy with Fast_Unaligned_Load for Intel
processors. Set Fast_Copy_Backward for AMD Excavator
processors.
* sysdeps/x86/cpu-features.h (bit_arch_Fast_Unaligned_Copy):
New.
(index_arch_Fast_Unaligned_Copy): Likewise.
* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Check
Fast_Unaligned_Copy instead of Fast_Unaligned_Load.
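A hedged sketch of the macro pair the ChangeLog adds to sysdeps/x86/cpu-features.h; only the two names come from the ChangeLog, while the bit position and the index value here are assumptions:

```c
/* Each architecture feature is addressed by a (word index, bit mask)
   pair: index_arch_* says which unsigned int in
   cpu_features->feature[] holds the flag, and bit_arch_* is the mask
   within that word.  */
enum { FEATURE_INDEX_1 = 0 };                     /* index assumed */

#define bit_arch_Fast_Unaligned_Copy   (1u << 18) /* position assumed */
#define index_arch_Fast_Unaligned_Copy FEATURE_INDEX_1
```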
Diffstat (limited to 'sysdeps/x86/cpu-features.c')
-rw-r--r-- | sysdeps/x86/cpu-features.c | 14 |
1 file changed, 13 insertions, 1 deletion
```diff
diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
index c8f81ef..de75c79 100644
--- a/sysdeps/x86/cpu-features.c
+++ b/sysdeps/x86/cpu-features.c
@@ -153,8 +153,12 @@ init_cpu_features (struct cpu_features *cpu_features)
 #if index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
 # error index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
 #endif
+#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
+# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
+#endif
 	      cpu_features->feature[index_arch_Fast_Unaligned_Load]
 		|= (bit_arch_Fast_Unaligned_Load
+		    | bit_arch_Fast_Unaligned_Copy
 		    | bit_arch_Prefer_PMINUB_for_stringop
 		    | bit_arch_Slow_SSE4_2);
 	      break;
@@ -183,10 +187,14 @@ init_cpu_features (struct cpu_features *cpu_features)
 #if index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
 # error index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
 #endif
+#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
+# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
+#endif
 	      cpu_features->feature[index_arch_Fast_Rep_String]
 		|= (bit_arch_Fast_Rep_String
 		    | bit_arch_Fast_Copy_Backward
 		    | bit_arch_Fast_Unaligned_Load
+		    | bit_arch_Fast_Unaligned_Copy
 		    | bit_arch_Prefer_PMINUB_for_stringop);
 	      break;
 	    }
@@ -220,10 +228,14 @@ init_cpu_features (struct cpu_features *cpu_features)
 
       if (family == 0x15)
 	{
+#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
+# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
+#endif
 	  /* "Excavator"   */
 	  if (model >= 0x60 && model <= 0x7f)
 	    cpu_features->feature[index_arch_Fast_Unaligned_Load]
-	      |= bit_arch_Fast_Unaligned_Load;
+	      |= (bit_arch_Fast_Unaligned_Load
+		  | bit_arch_Fast_Copy_Backward);
 	}
     }
   else
```
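The repeated #if/#error blocks in the hunks above are a compile-time guard: a combined |= of several bit_arch_* masks is only valid if every one of them lives in the same feature[] word. A standalone demo of the pattern, with illustrative index and bit values (glibc's real values differ):

```c
/* Illustrative indexes and bit positions, not glibc's.  */
#define index_arch_Fast_Unaligned_Load 0
#define index_arch_Fast_Unaligned_Copy 0
#define bit_arch_Fast_Unaligned_Load   (1u << 17)
#define bit_arch_Fast_Unaligned_Copy   (1u << 18)

/* If the two bits ever drift into different feature words, the build
   fails here instead of silently setting bits in the wrong word.  */
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
#endif

static unsigned int feature[1];

static void
set_intel_bits (void)
{
  /* The single combined store is safe only because the #if above
     proved both masks target the same word.  */
  feature[index_arch_Fast_Unaligned_Load]
    |= (bit_arch_Fast_Unaligned_Load | bit_arch_Fast_Unaligned_Copy);
}
```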