diff options
author | Paul Pluzhnikov <ppluzhnikov@google.com> | 2023-05-22 03:40:33 +0000 |
---|---|---|
committer | Paul Pluzhnikov <ppluzhnikov@google.com> | 2023-05-23 03:28:58 +0000 |
commit | 1d2971b525396e9935f3d90616a1668ceca425e5 (patch) | |
tree | 088800042453157b8a27584240f937371929672c /sysdeps/x86_64 | |
parent | 196358ae26aa38a70fb6f19a77311c8a58bff929 (diff) | |
download | glibc-1d2971b525396e9935f3d90616a1668ceca425e5.zip glibc-1d2971b525396e9935f3d90616a1668ceca425e5.tar.gz glibc-1d2971b525396e9935f3d90616a1668ceca425e5.tar.bz2 |
Fix misspellings in sysdeps/x86_64/fpu/multiarch -- BZ 25337.
Applying this commit results in a bit-identical rebuild of
mathvec/libmvec.so.1 (which is the only binary that gets rebuilt).
Reviewed-by: Noah Goldstein <goldstein.w.n@gmail.com>
Diffstat (limited to 'sysdeps/x86_64')
112 files changed, 169 insertions, 169 deletions
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S index 840c3d6..a46ddc1 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S @@ -222,7 +222,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S index 3c75200..808ea2f 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S @@ -204,7 +204,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S index 0647a2e..878d145 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S @@ -226,7 +226,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S index 8a56813..b69e5ce 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S @@ -321,7 +321,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 
0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S index f16f539..825b231 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S @@ -366,7 +366,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S index 1a3211b..32ed85e 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S @@ -311,7 +311,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S index 9fb9ddc..7bba3b5 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S @@ -211,7 +211,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S index af6fa77..c7dbb72 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S 
@@ -196,7 +196,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S index 2a0f6d4..c23665b 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S @@ -218,7 +218,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S index a3630b1..f4da4b2 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S @@ -474,7 +474,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S index d97a5f8..3ecec43 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S @@ -423,7 +423,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git 
a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S index b4d8884..82bd524 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S @@ -337,7 +337,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S index 7d14cb8..39d8648 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S @@ -229,11 +229,11 @@ ENTRY(_ZGVbN2vv_atan2_sse4) /* Special branch for fast (vector) processing of zero arguments */ testb $3, %cl - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx rbp r12 r13 r14 r15 eax edx xmm0 xmm1 xmm2 xmm3 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -316,7 +316,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -96) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -336,7 +336,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx rbp r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S index 35b635d..a4bcf9c 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S @@ -170,11 +170,11 @@ ENTRY(_ZGVdN4vv_atan2_avx2) /* Special branch for fast (vector) processing of zero arguments */ testl 
%eax, %eax - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx r12 r13 r14 r15 edx xmm3 ymm0 ymm1 ymm2 ymm4 ymm5 ymm6 ymm7 ymm8 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -271,7 +271,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -291,7 +291,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S index 49662bc..def7af3 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S @@ -188,11 +188,11 @@ ENTRY(_ZGVeN8vv_atan2_skx) vmovups 64(%rsp), %zmm9 testl %eax, %eax - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx r12 r13 r14 r15 edx ymm6 zmm0 zmm2 zmm3 zmm4 zmm5 zmm7 zmm9 zmm11 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -289,7 +289,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm11 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -309,7 +309,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S index 50345f0..0a87c8c 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S +++ 
b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S @@ -367,7 +367,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S index 0e2f6ca..44517be 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S @@ -333,7 +333,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S index 7ba45c0..99141c1 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S @@ -268,7 +268,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S index aa90322..98b276f 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S @@ -241,7 +241,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm6 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S 
b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S index d0de65f..45f395d 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S @@ -256,7 +256,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S index c2a1324..dd89de0 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S @@ -260,7 +260,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S index c152307..8330968 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S @@ -276,7 +276,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S index b4b2284..3e2aa62 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S @@ -225,7 +225,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion 
call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S index 5934986..a5f2f11 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S @@ -251,7 +251,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S index 2948e6b..376be17 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S @@ -252,7 +252,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S index 5c92653..debba0c 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S @@ -255,7 +255,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S index 65abd70..db25e5b 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S @@ -68,7 +68,7 @@ ENTRY(_ZGVbN2v_exp10_sse4) /* R */ movaps %xmm0, %xmm12 - /* 
Load arument */ + /* Load argument */ movups _dbLg2_10+__svml_dexp10_data_internal(%rip), %xmm13 lea __svml_dexp10_data_internal(%rip), %rsi mulpd %xmm0, %xmm13 @@ -214,7 +214,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S index 1c7c8e2..c5cec28 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S @@ -73,7 +73,7 @@ ENTRY(_ZGVdN4v_exp10_avx2) vmovapd %ymm0, %ymm2 vmovupd _dbShifter+__svml_dexp10_data_internal(%rip), %ymm3 - /* Load arument */ + /* Load argument */ vmovupd _dbLg2_10+__svml_dexp10_data_internal(%rip), %ymm0 vfmadd213pd %ymm3, %ymm2, %ymm0 vsubpd %ymm3, %ymm0, %ymm1 @@ -225,7 +225,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S index 2f45c92..9ea6a3d 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S @@ -23,7 +23,7 @@ * - all arguments processed in the main path * - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses) * - a VAND is used to ensure the reduced argument |R|<2, even for large inputs - * - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling + * - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling * - SAE used to avoid spurious flag settings * */ @@ -185,7 +185,7 @@ 
L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S index 0ffb56d..4c24aa8 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S @@ -67,7 +67,7 @@ ENTRY(_ZGVbN2v_exp2_sse4) /* out, basePtr, iIndex, iBaseOfs, iSize, iGran, iOfs */ lea __svml_dexp2_data_internal(%rip), %rsi - /* Load arument */ + /* Load argument */ movaps %xmm1, %xmm10 addpd %xmm0, %xmm10 movaps %xmm10, %xmm6 @@ -201,7 +201,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S index 9337921..1e55f3d 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S @@ -71,7 +71,7 @@ ENTRY(_ZGVdN4v_exp2_avx2) vmovupd _lIndexMask+__svml_dexp2_data_internal(%rip), %ymm3 vmovapd %ymm0, %ymm1 - /* Load arument */ + /* Load argument */ vaddpd %ymm4, %ymm1, %ymm2 vsubpd %ymm4, %ymm2, %ymm0 @@ -217,7 +217,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S index ab3db00..7e759c4 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S +++ 
b/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S @@ -221,7 +221,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S index 7e1df11..05be907 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S @@ -206,7 +206,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm6 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S index 815ef34..ad0b499 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S @@ -199,7 +199,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S index f38c694..968801a 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S @@ -24,7 +24,7 @@ * - all arguments processed in the main path * - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses) * - a VAND is used to ensure the reduced argument |R|<2, even for large inputs - * - RZ mode used 
to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling + * - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling * * */ @@ -205,7 +205,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S index 136f5eb..07c3156 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S @@ -47,7 +47,7 @@ * No multiprecision branch for _LA_ and _EP_ * _z = _VARG1 * _VARG1 + _VARG2 * _VARG2 * - * Check _z exponent to be withing borders [3BC ; 441] else goto Callout + * Check _z exponent to be within borders [3BC ; 441] else goto Callout * * _s ~ 1.0/sqrt(_z) * _s2 ~ 1.0/(sqrt(_z)*sqrt(_z)) ~ 1.0/_z = (1.0/_z + O) @@ -127,7 +127,7 @@ ENTRY(_ZGVbN2vv_hypot_sse4) mulpd %xmm10, %xmm11 mulpd %xmm10, %xmm2 - /* Check _z exponent to be withing borders [3BC ; 441] else goto Callout */ + /* Check _z exponent to be within borders [3BC ; 441] else goto Callout */ movq _LowBoundary+__svml_dhypot_data_internal(%rip), %xmm5 movq _HighBoundary+__svml_dhypot_data_internal(%rip), %xmm3 pshufd $221, %xmm10, %xmm4 @@ -215,7 +215,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -96) # LOE rbx rbp r12 r13 r14 r15 xmm2 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S index 61d12c9..d8c6a3a 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S @@ -47,7 +47,7 @@ * No multiprecision branch for _LA_ and _EP_ * _z = _VARG1 
* _VARG1 + _VARG2 * _VARG2 * - * Check _z exponent to be withing borders [3BC ; 441] else goto Callout + * Check _z exponent to be within borders [3BC ; 441] else goto Callout * * _s ~ 1.0/sqrt(_z) * _s2 ~ 1.0/(sqrt(_z)*sqrt(_z)) ~ 1.0/_z = (1.0/_z + O) @@ -111,7 +111,7 @@ ENTRY(_ZGVdN4vv_hypot_avx2) */ vcvtpd2ps %ymm0, %xmm12 - /* Check _z exponent to be withing borders [3BC ; 441] else goto Callout */ + /* Check _z exponent to be within borders [3BC ; 441] else goto Callout */ vextractf128 $1, %ymm0, %xmm3 vrsqrtps %xmm12, %xmm13 vshufps $221, %xmm3, %xmm0, %xmm5 @@ -225,7 +225,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S index fb53d5d..24ab764 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S @@ -47,7 +47,7 @@ * No multiprecision branch for _LA_ and _EP_ * _z = _VARG1 * _VARG1 + _VARG2 * _VARG2 * - * Check _z exponent to be withing borders [3BC ; 441] else goto Callout + * Check _z exponent to be within borders [3BC ; 441] else goto Callout * * _s ~ 1.0/sqrt(_z) * _s2 ~ 1.0/(sqrt(_z)*sqrt(_z)) ~ 1.0/_z = (1.0/_z + O) @@ -188,7 +188,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm2 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S index b2e75c1..de1583b 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S +++ 
b/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S @@ -227,7 +227,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm3 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S index 2e6ebac..8a9b8a8 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S @@ -219,7 +219,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S index b759306..b4e5a9c 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S @@ -201,7 +201,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S index d0372e8..618b7e1 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S @@ -265,7 +265,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process 
special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S index d114653..dc2ccb3 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S @@ -257,7 +257,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S index 283c40b..f5ec27d 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S @@ -219,7 +219,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S index 93bf270..2946564 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S @@ -225,7 +225,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm3 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S index 83d8d4c..30fa3e4 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S @@ -217,7 +217,7 @@ 
L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S index bc9db38..351e00d 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S @@ -199,7 +199,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S index 03a703f..3b01840 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S @@ -260,7 +260,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S index 2607518..585e2e5 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S @@ -274,7 +274,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S index ce08de9..8158d14 100644 --- 
a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S @@ -265,7 +265,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S index 9fac5fa..9c20876 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S @@ -181,11 +181,11 @@ ENTRY(_ZGVbN2v_tan_sse4) movmskpd %xmm4, %edx testl %edx, %edx - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm1 xmm4 xmm5 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -264,7 +264,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -283,7 +283,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx rbp r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S index 8586565..82d2cef 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S @@ -166,11 +166,11 @@ ENTRY(_ZGVdN4v_tan_avx2) vxorpd %ymm0, %ymm8, %ymm0 testl %eax, %eax - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx r12 r13 r14 r15 r9d ymm0 ymm1 ymm14 ymm15 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -261,7 +261,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 
0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -280,7 +280,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S index 79deb21..c5738ce 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S @@ -96,11 +96,11 @@ ENTRY(_ZGVeN8v_tan_skx) vfnmadd231pd {rn-sae}, %zmm8, %zmm3, %zmm5 vfnmadd213pd {rn-sae}, %zmm5, %zmm4, %zmm8 - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx r12 r13 r14 r15 edx zmm0 zmm1 zmm8 zmm11 k1 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -233,7 +233,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -252,7 +252,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S index 6fef5f0..cbcb0d6 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S @@ -259,7 +259,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S 
b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S index c05f4c2..cf0182b 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S @@ -266,7 +266,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S index 70f0880..b3477a3 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S @@ -280,7 +280,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xfe, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S index 1c68130..5bdc356 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S @@ -199,7 +199,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S index 372beff..ac099d3 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S @@ -198,7 +198,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm7 - /* 
Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S index 9e2f3b0..76296d9 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S @@ -192,7 +192,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S index 9ba8150..ff70634 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S @@ -284,7 +284,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S index 6c3cbf0..6a213dc 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S @@ -299,7 +299,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm9 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S index 45aede2..17f6a19 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S @@ -280,7 +280,7 @@ 
L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S index daa5cfa..2ffe24e 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S @@ -192,7 +192,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S index 0718fa0..bc3e2f8 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S @@ -184,7 +184,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S index 2199ed3..41e015c 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S @@ -181,7 +181,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S index 
720b58f..592caa8 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S @@ -307,7 +307,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S index c78550e..e5996b3 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S @@ -403,7 +403,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S index f9aeea6..1e8fc22 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S @@ -355,7 +355,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S index e031dad..08c193e 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S @@ -150,11 +150,11 @@ ENTRY(_ZGVeN16vv_atan2f_skx) vaddps {rn-sae}, %zmm11, %zmm9, %zmm9{%k4} vorps %zmm6, %zmm9, %zmm10 - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx r12 r13 r14 r15 edx zmm0 zmm1 
zmm2 zmm3 zmm4 zmm5 zmm6 zmm7 zmm8 zmm10 zmm11 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -251,7 +251,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm10 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -271,7 +271,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S index 6042610..0ec9b19 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S @@ -157,11 +157,11 @@ ENTRY(_ZGVbN4vv_atan2f_sse4) /* Special branch for fast (vector) processing of zero arguments */ testl %ecx, %ecx - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx rbp r12 r13 r14 r15 eax edx xmm0 xmm1 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11 xmm12 xmm13 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -244,7 +244,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -96) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -264,7 +264,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx rbp r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S index bf632c8..69619cb 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S @@ -131,11 +131,11 @@ ENTRY(_ZGVdN8vv_atan2f_avx2) /* Special branch for fast 
(vector) processing of zero arguments */ testl %eax, %eax - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx r12 r13 r14 r15 edx ymm0 ymm1 ymm2 ymm3 ymm4 ymm5 ymm6 ymm7 ymm9 ymm10 ymm12 ymm13 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -232,7 +232,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm9 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -252,7 +252,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S index f733c7a..6c3d40d 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S @@ -221,7 +221,7 @@ L(SPECIAL_VALUES_LOOP): xorl %ebp, %ebp tzcntl %ebx, %ebp - /* Scalar math fucntion call to process special input. */ + /* Scalar math function call to process special input. */ vmovss 64(%rsp, %rbp, 4), %xmm0 call atanhf@PLT diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S index 055484b..ab2ef46 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S @@ -242,7 +242,7 @@ L(SPECIAL_VALUES_LOOP): xorl %ebp, %ebp bsfl %ebx, %ebp - /* Scalar math fucntion call to process special input. */ + /* Scalar math function call to process special input. 
*/ movss 40(%rsp, %rbp, 4), %xmm0 call atanhf@PLT /* No good way to avoid the store-forwarding fault this will cause on diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S index 8ffe98c..e70085b 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S @@ -230,7 +230,7 @@ L(SPECIAL_VALUES_LOOP): xorl %ebp, %ebp tzcntl %ebx, %ebp - /* Scalar math fucntion call to process special input. */ + /* Scalar math function call to process special input. */ vmovss 32(%rsp, %rbp, 4), %xmm0 call atanhf@PLT diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S index f5331db..270e620 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S @@ -273,7 +273,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm12 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S index 76db762..292eb5a 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S @@ -298,7 +298,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S index 14696ee..773594d 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S @@ -222,7 +222,7 @@ 
L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm6 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S index 654ac65..ee987dd 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S @@ -233,7 +233,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm2 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S index 474cb05..2469272 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S @@ -236,7 +236,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S index 03b7e4a..3d19dbd 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S @@ -212,7 +212,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S index 
02aa2b4..e7cae80 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S @@ -219,7 +219,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S index c3e8e39..958b46d 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S @@ -237,7 +237,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S index e70e8c5..f2d8130 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S @@ -23,7 +23,7 @@ * - all arguments processed in the main path * - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses) * - a VAND is used to ensure the reduced argument |R|<2, even for large inputs - * - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling + * - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling * - SAE used to avoid spurious flag settings * */ @@ -180,7 +180,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git 
a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S index 9de39a6..9eb215a 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S @@ -63,7 +63,7 @@ ENTRY(_ZGVbN4v_exp10f_sse4) cfi_def_cfa_offset(80) movaps %xmm0, %xmm4 - /* Load arument */ + /* Load argument */ movups _sLg2_10+__svml_sexp10_data_internal(%rip), %xmm2 lea __svml_sexp10_data_internal(%rip), %r8 mulps %xmm4, %xmm2 @@ -212,7 +212,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S index e3087a7..79563cc 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S @@ -69,7 +69,7 @@ ENTRY(_ZGVdN8v_exp10f_avx2) lea __svml_sexp10_data_internal(%rip), %rax vmovups _sShifter+__svml_sexp10_data_internal(%rip), %ymm4 - /* Load arument */ + /* Load argument */ vmovups _sLg2_10+__svml_sexp10_data_internal(%rip), %ymm1 vmovups _iIndexMask+__svml_sexp10_data_internal(%rip), %ymm2 vmovaps %ymm0, %ymm3 @@ -232,7 +232,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S index 1911c06..ce983b2 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S @@ -203,7 +203,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 
0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S index f4ddfbe..512ea5c 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S @@ -175,7 +175,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S index 277508b..4759298 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S @@ -182,7 +182,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S index 7aa1e3c..4683e54 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S @@ -24,7 +24,7 @@ * - all arguments processed in the main path * - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses) * - a VAND is used to ensure the reduced argument |R|<2, even for large inputs - * - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling + * - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling * * */ @@ -188,7 +188,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 
0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S index 6a3a9d2..5159b07 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S @@ -207,7 +207,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm10 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S index ee442d8..aae9068 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S @@ -206,7 +206,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S index 06c6903..749deb0 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S @@ -45,7 +45,7 @@ * No multiprecision branch for _LA_ and _EP_ * _z = _VARG1 * _VARG1 + _VARG2 * _VARG2 * - * Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout + * Check _z exponent to be within borders [1E3 ; 60A] else goto Callout * * Compute resciplicle sqrt s0 ~ 1.0/sqrt(_z), * that multiplied by _z, is final result for _EP_ version. 
@@ -196,7 +196,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm2 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S index c5a94d7..38ab12b 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S @@ -45,7 +45,7 @@ * No multiprecision branch for _LA_ and _EP_ * _z = _VARG1 * _VARG1 + _VARG2 * _VARG2 * - * Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout + * Check _z exponent to be within borders [1E3 ; 60A] else goto Callout * * Compute reciprocal sqrt s0 ~ 1.0/sqrt(_z), * that multiplied by _z, is final result for _EP_ version. @@ -117,7 +117,7 @@ ENTRY(_ZGVbN4vv_hypotf_sse4) movaps %xmm2, %xmm6 mulps %xmm10, %xmm6 - /* Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout */ + /* Check _z exponent to be within borders [1E3 ; 60A] else goto Callout */ movdqu _LowBoundary+__svml_shypot_data_internal(%rip), %xmm4 subps %xmm6, %xmm5 @@ -216,7 +216,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -96) # LOE rbx rbp r12 r13 r14 r15 xmm2 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S index fe87678..80f1081 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S @@ -45,7 +45,7 @@ * No multiprecision branch for _LA_ and _EP_ * _z = _VARG1 * _VARG1 + _VARG2 * _VARG2 * - * Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout + * Check _z exponent to be within borders [1E3 ; 60A] else goto Callout * * Compute reciprocal sqrt 
s0 ~ 1.0/sqrt(_z), * that multiplied by _z, is final result for _EP_ version. @@ -107,7 +107,7 @@ ENTRY(_ZGVdN8vv_hypotf_avx2) */ vmovups _sHalf+__svml_shypot_data_internal(%rip), %ymm7 - /* Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout */ + /* Check _z exponent to be within borders [1E3 ; 60A] else goto Callout */ vmovups _LowBoundary+__svml_shypot_data_internal(%rip), %ymm2 vfmadd231ps %ymm1, %ymm1, %ymm8 @@ -220,7 +220,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm2 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S index 87a1694..0deb969 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S @@ -155,7 +155,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S index 80ded85..6baff56 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S @@ -168,7 +168,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S index 4804950..54ff0b1 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S +++ 
b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S @@ -168,7 +168,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S index d629dc4..e4f8a60 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S @@ -201,7 +201,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S index 511e064..4a10457 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S @@ -182,7 +182,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S index ea39f66..672c91e 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S @@ -184,7 +184,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git 
a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S index c14fd3d..0428895 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S @@ -152,7 +152,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S index f4aa948..93ed642 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S @@ -160,7 +160,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S index d2441c3..02360e5 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S @@ -163,7 +163,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S index dda1a05..03e7f34 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S @@ -246,7 +246,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 
0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S index 34ec276..59d6329 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S @@ -236,7 +236,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm14 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S index abf8d65..81e1f19 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S @@ -237,7 +237,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S index 3d4dba3..ae95fba 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S @@ -94,11 +94,11 @@ ENTRY(_ZGVeN16v_tanf_skx) vfnmadd231ps {rn-sae}, %zmm5, %zmm2, %zmm4 vfnmadd213ps {rn-sae}, %zmm4, %zmm3, %zmm5 - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx r12 r13 r14 r15 edx zmm0 zmm5 zmm10 zmm11 k6 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -229,7 +229,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 
r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -248,7 +248,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S index 1292e88..fab8664 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S @@ -175,11 +175,11 @@ ENTRY(_ZGVbN4v_tanf_sse4) testl %edx, %edx - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm4 xmm11 xmm12 xmm13 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -258,7 +258,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -240) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -277,7 +277,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx rbp r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S index ab52321..30585a7 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S @@ -161,13 +161,13 @@ ENTRY(_ZGVdN8v_tanf_avx2) testl %edx, %edx - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) /* DW_CFA_expression: r3 (rbx) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -8; DW_OP_plus) */ .cfi_escape 0x10, 0x03, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xf8, 0xff, 0xff, 0xff, 0x22 # LOE r12 r13 r14 r15 eax ymm0 ymm1 ymm10 ymm11 ymm12 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path 
inputs */ @@ -255,7 +255,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -273,7 +273,7 @@ L(SCALAR_MATH_CALL): cfi_restore(13) # LOE r14 r15 ebx r12d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S index d72a889..e639c48 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S @@ -220,7 +220,7 @@ L(SPECIAL_VALUES_LOOP): xorl %ebp, %ebp tzcntl %ebx, %ebp - /* Scalar math fucntion call to process special input. */ + /* Scalar math function call to process special input. */ vmovss 64(%rsp, %rbp, 4), %xmm0 call tanhf@PLT diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S index dcbb188..357ad37 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S @@ -73,7 +73,7 @@ #include <sysdep.h> -/* tanhf data tables for avx2 and sse4 implementatins defined here. +/* tanhf data tables for avx2 and sse4 implementations defined here. */ #define ONLY_DECL_OFFSET #include "svml_s_tanhf_rodata.S" @@ -217,7 +217,7 @@ L(SPECIAL_VALUES_LOOP): xorl %ebp, %ebp bsfl %ebx, %ebp - /* Scalar math fucntion call to process special input. */ + /* Scalar math function call to process special input. 
*/ movss 40(%rsp, %rbp, 4), %xmm0 call tanhf@PLT /* No good way to avoid the store-forwarding fault this will cause on diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S index b8d828e..ea19903 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S @@ -72,7 +72,7 @@ #include <sysdep.h> -/* tanhf data tables for avx2 and sse4 implementatins defined here. +/* tanhf data tables for avx2 and sse4 implementations defined here. */ #include "svml_s_tanhf_rodata.S" |