-rw-r--r--  libio/Makefile                             |   2
-rw-r--r--  libio/fileops.c                            |  10
-rw-r--r--  libio/tst-fflush-NULL.c                    |  24
-rw-r--r--  libio/tst-fflush-skeleton.c                | 158
-rw-r--r--  libio/tst-fflush.c                         |  24
-rw-r--r--  malloc/malloc-check.c                      |   2
-rw-r--r--  malloc/malloc.c                            | 177
-rw-r--r--  manual/stdio.texi                          |  26
-rw-r--r--  manual/time.texi                           |   3
-rw-r--r--  manual/tunables.texi                       |   2
-rw-r--r--  math/test-fesetexcept-traps.c              |   2
-rw-r--r--  math/test-fexcept-traps.c                  |   2
-rw-r--r--  posix/tst-truncate-common.c                |   2
-rw-r--r--  stdio-common/Makefile                      |   1
-rw-r--r--  stdio-common/tst-fclose-devzero.c          |  50
-rw-r--r--  sysdeps/aarch64/multiarch/memcpy_oryon1.S  |  40
-rw-r--r--  sysdeps/aarch64/multiarch/memset_oryon1.S  |  26
-rw-r--r--  sysdeps/pthread/tst-stdio2.c               |   2
-rw-r--r--  sysdeps/x86/cpu-features.c                 |  12
19 files changed, 451 insertions, 114 deletions
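
Note: the new libio tests added below verify that fflush (stream) and fflush (NULL) are semantically equivalent, i.e. that flushing every open stream at once leaves files in the same state as flushing each one explicitly. A minimal sketch of the two call styles the test skeleton compares (the file name and payload here are illustrative only, not taken from the patch):

#include <stdio.h>

int
main (void)
{
  FILE *fp = fopen ("example.txt", "w"); /* Illustrative file name.  */
  if (fp == NULL)
    return 1;
  fprintf (fp, "0:0");   /* Short write; stays in the stdio buffer.  */
  fflush (fp);           /* Flush this one stream explicitly...  */
  fflush (NULL);         /* ...or flush every open stream at once.  */
  fclose (fp);
  return 0;
}
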
diff --git a/libio/Makefile b/libio/Makefile index e143ccd..f020f8e 100644 --- a/libio/Makefile +++ b/libio/Makefile @@ -101,6 +101,8 @@ tests = \ tst-fclose-unopened \ tst-fclose-unopened2 \ tst-fdopen-seek-failure \ + tst-fflush \ + tst-fflush-NULL \ tst-fgetc-after-eof \ tst-fgetwc \ tst-fgetws \ diff --git a/libio/fileops.c b/libio/fileops.c index a59e248..0cce828 100644 --- a/libio/fileops.c +++ b/libio/fileops.c @@ -928,6 +928,16 @@ do_ftell (FILE *fp) if (result == EOF) return result; + if (result == 0 && offset < 0) + { + /* This happens for some character devices that always report + file offset 0 even after some data has been read (instead of + failing with ESPIPE). The fclose path ignores this + error. */ + __set_errno (ESPIPE); + return EOF; + } + result += offset; if (result < 0) diff --git a/libio/tst-fflush-NULL.c b/libio/tst-fflush-NULL.c new file mode 100644 index 0000000..4c8fe7c --- /dev/null +++ b/libio/tst-fflush-NULL.c @@ -0,0 +1,24 @@ +/* Test that fflush (FILE) and fflush (NULL) are semantically equivalent. + This is the `fflush (NULL)` part. + + Copyright (C) 2024 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <https://www.gnu.org/licenses/>. */ + +#define FILE_FLUSH_TYPE 0 +#define S_FLUSH_TYPE "NULL" + +#include "tst-fflush-skeleton.c" diff --git a/libio/tst-fflush-skeleton.c b/libio/tst-fflush-skeleton.c new file mode 100644 index 0000000..6341d3d --- /dev/null +++ b/libio/tst-fflush-skeleton.c @@ -0,0 +1,158 @@ +/* Test that fflush (FILE) and fflush (NULL) are semantically equivalent. + + Copyright (C) 2024 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <https://www.gnu.org/licenses/>. */ + +/* A success on this test doesn't imply the effectiveness of fflush as + we can't ensure that the file wasn't already in the expected state + before the call of the function. It only ensures that, if the test + fails, fflush is broken. 
*/ + +#include <assert.h> +#include <fcntl.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +#include <sys/stat.h> +#include <sys/mman.h> + +#include <support/check.h> +#include <support/support.h> +#include <support/temp_file.h> +#include <support/test-driver.h> +#include <support/xstdio.h> +#include <support/xunistd.h> + +#define CONTENT_SZ_MAX 32 +#define TEST_FILE_COUNT 10 + +struct file_tracking +{ + FILE *file; + char *name; + int fd; + char *mfile; +} files[TEST_FILE_COUNT]; + +static void +file_init (int file) +{ + int fd = -1; + + assert (file < TEST_FILE_COUNT); + + files[file] = (struct file_tracking) { .fd = -1, }; + + xclose (create_temp_file ("tst-fflush", &files[file].name)); + + fd = xopen (files[file].name, O_RDONLY, 0); + files[file].mfile = xmmap (NULL, CONTENT_SZ_MAX, PROT_READ, MAP_SHARED, fd); + xclose (fd); +} + +static void +file_cleanup (int file) +{ + free (files[file].name); + xmunmap (files[file].mfile, CONTENT_SZ_MAX); + files[file] = (struct file_tracking) { .fd = -1, }; +} + +static void +file_changed (int to_check, const char *mode) +{ + struct stat stats = { }; + char expected[CONTENT_SZ_MAX] = { }; + + verbose_printf ("Check that %s (%d) exactly contains the data we put in\n", + files[to_check].name, to_check); + + /* File should contain "N:M" where both N and M are one digit exactly. */ + snprintf (expected, sizeof (expected), "%d:%d", FILE_FLUSH_TYPE, to_check); + TEST_COMPARE_BLOB (files[to_check].mfile, sizeof (expected), + expected, sizeof (expected)); + + TEST_VERIFY (fstat (files[to_check].fd, &stats) >= 0); + TEST_VERIFY (stats.st_size == 3); + /* In read mode we expect to be at position 1, in write mode at position 3 */ + TEST_COMPARE (lseek (files[to_check].fd, 0, SEEK_CUR), + mode[0] == 'r' ? 1 : 3); + + if (support_record_failure_is_failed ()) + FAIL_EXIT1 ("exiting due to previous failure"); + + /* Not reached if the data doesn't match. */ +} + +static void +file_flush (const char *mode) +{ + for (int i = 0; i < TEST_FILE_COUNT; i++) + { + files[i].file = xfopen (files[i].name, mode); + files[i].fd = fileno (files[i].file); + } + + /* Print a unique identifier in each file, that is not too long nor contain + new line to not trigger _IO_OVERFLOW/_IO_SYNC. */ + for (int i = 0; i < TEST_FILE_COUNT; i++) + { + if (mode[0] == 'r') + fgetc (files[i].file); + else + fprintf (files[i].file, "%d:%d", FILE_FLUSH_TYPE, i); + } + + if (!FILE_FLUSH_TYPE) + TEST_VERIFY (fflush (NULL) == 0); + else + for (int i = 0; i < TEST_FILE_COUNT; i++) + TEST_VERIFY (fflush (files[i].file) == 0); + + for (int i = 0; i < TEST_FILE_COUNT; i++) + { + verbose_printf ("Check that file %s has been modified after fflush\n", + files[i].name); + file_changed (i, mode); + } + + for (int i = 0; i < TEST_FILE_COUNT; i++) + xfclose (files[i].file); +} + +static int +do_test (void) +{ + for (int i = 0; i < TEST_FILE_COUNT; i++) + file_init (i); + + verbose_printf ("Checking fflush(" S_FLUSH_TYPE "), WRITE mode\n"); + file_flush ("w"); + + verbose_printf ("Checking fflush(" S_FLUSH_TYPE "), READWRITE mode\n"); + file_flush ("r+"); + + for (int i = 0; i < TEST_FILE_COUNT; i++) + file_cleanup (i); + + return 0; +} + +#include <support/test-driver.c> diff --git a/libio/tst-fflush.c b/libio/tst-fflush.c new file mode 100644 index 0000000..d3a4b78 --- /dev/null +++ b/libio/tst-fflush.c @@ -0,0 +1,24 @@ +/* Test that fflush (FILE) and fflush (NULL) are semantically equivalent. + This is the `fflush (FILE)` part. 
+ + Copyright (C) 2024 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <https://www.gnu.org/licenses/>. */ + +#define FILE_FLUSH_TYPE 1 +#define S_FLUSH_TYPE "FILE" + +#include "tst-fflush-skeleton.c" diff --git a/malloc/malloc-check.c b/malloc/malloc-check.c index 814a916..c5265ec 100644 --- a/malloc/malloc-check.c +++ b/malloc/malloc-check.c @@ -235,7 +235,7 @@ free_check (void *mem) { /* Mark the chunk as belonging to the library again. */ (void)tag_region (chunk2mem (p), memsize (p)); - _int_free (&main_arena, p, 1); + _int_free_chunk (&main_arena, p, chunksize (p), 1); __libc_lock_unlock (main_arena.mutex); } __set_errno (err); diff --git a/malloc/malloc.c b/malloc/malloc.c index a0bc733..f30fb4b 100644 --- a/malloc/malloc.c +++ b/malloc/malloc.c @@ -1086,8 +1086,6 @@ typedef struct malloc_chunk* mchunkptr; /* Internal routines. */ static void* _int_malloc(mstate, size_t); -static void _int_free (mstate, mchunkptr, int); -static void _int_free_check (mstate, mchunkptr, INTERNAL_SIZE_T); static void _int_free_chunk (mstate, mchunkptr, INTERNAL_SIZE_T, int); static void _int_free_merge_chunk (mstate, mchunkptr, INTERNAL_SIZE_T); static INTERNAL_SIZE_T _int_free_create_chunk (mstate, @@ -1101,6 +1099,9 @@ static void* _int_memalign(mstate, size_t, size_t); static void* _mid_memalign(size_t, size_t, void *); #endif +#if USE_TCACHE +static void malloc_printerr_tail(const char *str); +#endif static void malloc_printerr(const char *str) __attribute__ ((noreturn)); static void munmap_chunk(mchunkptr p); @@ -1273,7 +1274,6 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ sysmalloc: Returns untagged memory. _int_malloc: Returns untagged memory. - _int_free: Takes untagged memory. _int_memalign: Returns untagged memory. _int_memalign: Returns untagged memory. _mid_memalign: Returns tagged memory. @@ -3163,7 +3163,7 @@ tcache_put (mchunkptr chunk, size_t tc_idx) { tcache_entry *e = (tcache_entry *) chunk2mem (chunk); - /* Mark this chunk as "in the tcache" so the test in _int_free will + /* Mark this chunk as "in the tcache" so the test in __libc_free will detect a double free. */ e->key = tcache_key; @@ -3241,37 +3241,12 @@ tcache_double_free_verify (tcache_entry *e, size_t tc_idx) malloc_printerr ("free(): unaligned chunk detected in tcache 2"); if (tmp == e) malloc_printerr ("free(): double free detected in tcache 2"); - /* If we get here, it was a coincidence. We've wasted a - few cycles, but don't abort. */ } -} - -/* Try to free chunk to the tcache, if success return true. - Caller must ensure that chunk and size are valid. */ -static __always_inline bool -tcache_free (mchunkptr p, INTERNAL_SIZE_T size) -{ - bool done = false; - size_t tc_idx = csize2tidx (size); - if (tcache != NULL && tc_idx < mp_.tcache_bins) - { - /* Check to see if it's already in the tcache. 
*/ - tcache_entry *e = (tcache_entry *) chunk2mem (p); - - /* This test succeeds on double free. However, we don't 100% - trust it (it also matches random payload data at a 1 in - 2^<size_t> chance), so verify it's not an unlikely - coincidence before aborting. */ - if (__glibc_unlikely (e->key == tcache_key)) - tcache_double_free_verify (e, tc_idx); - - if (tcache->counts[tc_idx] < mp_.tcache_count) - { - tcache_put (p, tc_idx); - done = true; - } - } - return done; + /* No double free detected - it might be in a tcache of another thread, + or user data that happens to match the key. Since we are not sure, + clear the key and retry freeing it. */ + e->key = 0; + __libc_free (e); } static void @@ -3316,6 +3291,14 @@ tcache_init(void) if (tcache_shutting_down) return; + /* Check minimum mmap chunk is larger than max tcache size. This means + mmap chunks with their different layout are never added to tcache. */ + if (MAX_TCACHE_SIZE >= GLRO (dl_pagesize) / 2) + malloc_printerr ("max tcache size too large"); + + /* Preserve errno when called from free() - _int_malloc may corrupt it. */ + int err = errno; + arena_get (ar_ptr, bytes); victim = _int_malloc (ar_ptr, bytes); if (!victim && ar_ptr != NULL) @@ -3328,6 +3311,8 @@ tcache_init(void) if (ar_ptr != NULL) __libc_lock_unlock (ar_ptr->mutex); + __set_errno (err); + /* In a low memory situation, we may not be able to allocate memory - in which case, we just keep trying later. However, we typically do this very early, so either there is sufficient @@ -3442,7 +3427,6 @@ libc_hidden_def (__libc_malloc) void __libc_free (void *mem) { - mstate ar_ptr; mchunkptr p; /* chunk corresponding to mem */ if (mem == NULL) /* free(0) has no effect */ @@ -3453,37 +3437,41 @@ __libc_free (void *mem) if (__glibc_unlikely (mtag_enabled)) *(volatile char *)mem; - int err = errno; - p = mem2chunk (mem); - if (chunk_is_mmapped (p)) /* release mmapped memory. */ - { - /* See if the dynamic brk/mmap threshold needs adjusting. - Dumped fake mmapped chunks do not affect the threshold. */ - if (!mp_.no_dyn_threshold - && chunksize_nomask (p) > mp_.mmap_threshold - && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX) - { - mp_.mmap_threshold = chunksize (p); - mp_.trim_threshold = 2 * mp_.mmap_threshold; - LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2, - mp_.mmap_threshold, mp_.trim_threshold); - } - munmap_chunk (p); - } - else + /* Mark the chunk as belonging to the library again. */ + tag_region (chunk2mem (p), memsize (p)); + + INTERNAL_SIZE_T size = chunksize (p); + + if (__glibc_unlikely (misaligned_chunk (p))) + return malloc_printerr_tail ("free(): invalid pointer"); + + check_inuse_chunk (arena_for_chunk (p), p); + +#if USE_TCACHE + size_t tc_idx = csize2tidx (size); + + if (__glibc_likely (tcache != NULL && tc_idx < mp_.tcache_bins)) { - MAYBE_INIT_TCACHE (); + /* Check to see if it's already in the tcache. */ + tcache_entry *e = (tcache_entry *) chunk2mem (p); - /* Mark the chunk as belonging to the library again. */ - (void)tag_region (chunk2mem (p), memsize (p)); + /* Check for double free - verify if the key matches. */ + if (__glibc_unlikely (e->key == tcache_key)) + return tcache_double_free_verify (e, tc_idx); - ar_ptr = arena_for_chunk (p); - _int_free (ar_ptr, p, 0); + if (__glibc_likely (tcache->counts[tc_idx] < mp_.tcache_count)) + return tcache_put (p, tc_idx); } +#endif - __set_errno (err); + /* Check size >= MINSIZE and p + size does not overflow. 
*/ + if (__glibc_unlikely (__builtin_add_overflow_p ((uintptr_t) p, size - MINSIZE, + (uintptr_t) 0))) + return malloc_printerr_tail ("free(): invalid size"); + + _int_free_chunk (arena_for_chunk (p), p, size, 0); } libc_hidden_def (__libc_free) @@ -4560,24 +4548,6 @@ _int_malloc (mstate av, size_t bytes) ------------------------------ free ------------------------------ */ -static __always_inline void -_int_free_check (mstate av, mchunkptr p, INTERNAL_SIZE_T size) -{ - /* Little security check which won't hurt performance: the - allocator never wraps around at the end of the address space. - Therefore we can exclude some size values which might appear - here by accident or by "design" from some intruder. */ - if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0) - || __builtin_expect (misaligned_chunk (p), 0)) - malloc_printerr ("free(): invalid pointer"); - /* We know that each chunk is at least MINSIZE bytes in size or a - multiple of MALLOC_ALIGNMENT. */ - if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size))) - malloc_printerr ("free(): invalid size"); - - check_inuse_chunk (av, p); -} - /* Free chunk P of SIZE bytes to the arena. HAVE_LOCK indicates where the arena for P has already been locked. Caller must ensure chunk and size are valid. */ @@ -4586,6 +4556,8 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock) { mfastbinptr *fb; /* associated fastbin */ + MAYBE_INIT_TCACHE (); + /* If eligible, place chunk on a fastbin so it can be found and used quickly in malloc. @@ -4669,6 +4641,9 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock) else if (!chunk_is_mmapped(p)) { + /* Preserve errno in case block merging results in munmap. */ + int err = errno; + /* If we're single-threaded, don't lock the arena. */ if (SINGLE_THREAD_P) have_lock = true; @@ -4680,35 +4655,34 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock) if (!have_lock) __libc_lock_unlock (av->mutex); + + __set_errno (err); } /* If the chunk was allocated via mmap, release via munmap(). */ else { - munmap_chunk (p); - } -} -/* Free chunk P to its arena AV. HAVE_LOCK indicates where the arena for - P has already been locked. It will perform sanity check, then try the - fast path to free into tcache. If the attempt not success, free the - chunk to arena. */ -static __always_inline void -_int_free (mstate av, mchunkptr p, int have_lock) -{ - INTERNAL_SIZE_T size; /* its size */ - - size = chunksize (p); + /* Preserve errno in case munmap sets it. */ + int err = errno; - _int_free_check (av, p, size); + /* See if the dynamic brk/mmap threshold needs adjusting. + Dumped fake mmapped chunks do not affect the threshold. */ + if (!mp_.no_dyn_threshold + && chunksize_nomask (p) > mp_.mmap_threshold + && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX) + { + mp_.mmap_threshold = chunksize (p); + mp_.trim_threshold = 2 * mp_.mmap_threshold; + LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2, + mp_.mmap_threshold, mp_.trim_threshold); + } -#if USE_TCACHE - if (tcache_free (p, size)) - return; -#endif + munmap_chunk (p); - _int_free_chunk (av, p, size, have_lock); + __set_errno (err); + } } /* Try to merge chunk P of SIZE bytes with its neighbors. Put the @@ -5845,6 +5819,17 @@ malloc_printerr (const char *str) __builtin_unreachable (); } +#if USE_TCACHE +static __attribute_noinline__ void +malloc_printerr_tail (const char *str) +{ + /* Ensure this cannot be a no-return function. 
*/ + if (!__malloc_initialized) + return; + malloc_printerr (str); +} +#endif + #if IS_IN (libc) /* We need a wrapper function for one of the additions of POSIX. */ int diff --git a/manual/stdio.texi b/manual/stdio.texi index 01b9f47..29ed720 100644 --- a/manual/stdio.texi +++ b/manual/stdio.texi @@ -1231,17 +1231,18 @@ convenient to have functions to read a line of text from a stream. Standard C has functions to do this, but they aren't very safe: null characters and even (for @code{gets}) long lines can confuse them. So -@theglibc{} provides the nonstandard @code{getline} function that -makes it easy to read lines reliably. +@theglibc{} provides the @code{getline} function that makes it easy to +read lines reliably. -Another GNU extension, @code{getdelim}, generalizes @code{getline}. It -reads a delimited record, defined as everything through the next -occurrence of a specified delimiter character. +The @code{getdelim} function is a generalized version of @code{getline}. +It reads a delimited record, defined as everything through the next +occurrence of a specified delimiter character. These functions were +both GNU extensions until standardized by POSIX.1-2008. All these functions are declared in @file{stdio.h}. -@deftypefun ssize_t getline (char **@var{lineptr}, size_t *@var{n}, FILE *@var{stream}) -@standards{GNU, stdio.h} +@deftypefun ssize_t getline (char **restrict @var{lineptr}, size_t *restrict @var{n}, FILE *restrict @var{stream}) +@standards{POSIX.1-2008, stdio.h} @safety{@prelim{}@mtsafe{}@asunsafe{@asucorrupt{} @ascuheap{}}@acunsafe{@aculock{} @acucorrupt{} @acsmem{}}} @c Besides the usual possibility of getting an inconsistent stream in a @c signal handler or leaving it inconsistent in case of cancellation, @@ -1274,15 +1275,15 @@ read (including the newline, but not including the terminating null). This value enables you to distinguish null characters that are part of the line from the null character inserted as a terminator. -This function is a GNU extension, but it is the recommended way to read -lines from a stream. The alternative standard functions are unreliable. +This function was originally a GNU extension, but was added in +POSIX.1-2008. If an error occurs or end of file is reached without any bytes read, @code{getline} returns @code{-1}. @end deftypefun -@deftypefun ssize_t getdelim (char **@var{lineptr}, size_t *@var{n}, int @var{delimiter}, FILE *@var{stream}) -@standards{GNU, stdio.h} +@deftypefun ssize_t getdelim (char **restrict @var{lineptr}, size_t *restrict @var{n}, int @var{delimiter}, FILE *restrict @var{stream}) +@standards{POSIX.1-2008, stdio.h} @safety{@prelim{}@mtsafe{}@asunsafe{@asucorrupt{} @ascuheap{}}@acunsafe{@aculock{} @acucorrupt{} @acsmem{}}} @c See the getline @acucorrupt note. This function is like @code{getline} except that the character which @@ -1294,6 +1295,9 @@ The text is stored in @var{lineptr}, including the delimiter character and a terminating null. Like @code{getline}, @code{getdelim} makes @var{lineptr} bigger if it isn't big enough. +This function was originally a GNU extension, but was added in +POSIX.1-2008. 
+ @code{getline} is in fact implemented in terms of @code{getdelim}, just like this: diff --git a/manual/time.texi b/manual/time.texi index 04c97f5..6b89b85 100644 --- a/manual/time.texi +++ b/manual/time.texi @@ -148,7 +148,8 @@ and pass them to the functions that convert them to broken-down time On POSIX-conformant systems, @code{time_t} is an integer type and its values represent the number of seconds elapsed since the @dfn{POSIX Epoch}, which is January 1, 1970, at 00:00:00 Coordinated Universal Time (UTC)@. -The count of seconds ignores leap seconds. +The count of seconds ignores leap seconds. Additionally, POSIX.1-2024 +added the requirement that @code{time_t} be at least 64 bits wide. @Theglibc{} additionally guarantees that @code{time_t} is a signed type, and that all of its functions operate correctly on negative diff --git a/manual/tunables.texi b/manual/tunables.texi index 67064f5..d11ca7e 100644 --- a/manual/tunables.texi +++ b/manual/tunables.texi @@ -367,7 +367,7 @@ stack is allowed from the main program. Setting the value to @code{0} disables the ABI auto-negotiation (meaning no executable stacks even if the ABI or ELF header requires it), @code{1} enables auto-negotiation (although the program might not need an executable stack), while @code{2} forces an executable -stack at process start. Tthis is provided for compatibility reasons, when +stack at process start. This is provided for compatibility reasons, when the program dynamically loads modules with @code{dlopen} which require an executable stack. diff --git a/math/test-fesetexcept-traps.c b/math/test-fesetexcept-traps.c index bfd5517..441cb9d 100644 --- a/math/test-fesetexcept-traps.c +++ b/math/test-fesetexcept-traps.c @@ -48,7 +48,7 @@ do_test (void) _Static_assert (!(EXCEPTION_SET_FORCES_TRAP && !EXCEPTION_TESTS(float)), "EXCEPTION_SET_FORCES_TRAP only makes sense if the " - "architecture suports exceptions"); + "architecture supports exceptions"); { int exc_before = fegetexcept (); ret = fesetexcept (FE_ALL_EXCEPT); diff --git a/math/test-fexcept-traps.c b/math/test-fexcept-traps.c index 67e8fc1..f832705 100644 --- a/math/test-fexcept-traps.c +++ b/math/test-fexcept-traps.c @@ -72,7 +72,7 @@ do_test (void) _Static_assert (!(EXCEPTION_SET_FORCES_TRAP && !EXCEPTION_TESTS(float)), "EXCEPTION_SET_FORCES_TRAP only makes sense if the " - "architecture suports exceptions"); + "architecture supports exceptions"); { int exc_before = fegetexcept (); ret = fesetexceptflag (&saved, FE_ALL_EXCEPT); diff --git a/posix/tst-truncate-common.c b/posix/tst-truncate-common.c index 19c96b7..20ce168 100644 --- a/posix/tst-truncate-common.c +++ b/posix/tst-truncate-common.c @@ -36,7 +36,7 @@ static int temp_fd; static void do_prepare (void) { - temp_fd = create_temp_file ("tst-trucate.", &temp_filename); + temp_fd = create_temp_file ("tst-truncate.", &temp_filename); if (temp_fd == -1) { printf ("cannot create temporary file: %m\n"); diff --git a/stdio-common/Makefile b/stdio-common/Makefile index 3709222..64b3575 100644 --- a/stdio-common/Makefile +++ b/stdio-common/Makefile @@ -262,6 +262,7 @@ tests := \ tst-bz11319-fortify2 \ tst-cookie \ tst-dprintf-length \ + tst-fclose-devzero \ tst-fclose-offset \ tst-fdopen \ tst-fdopen2 \ diff --git a/stdio-common/tst-fclose-devzero.c b/stdio-common/tst-fclose-devzero.c new file mode 100644 index 0000000..1c7b39a --- /dev/null +++ b/stdio-common/tst-fclose-devzero.c @@ -0,0 +1,50 @@ +/* Test that always-zero lseek does not cause fclose failure after fread. 
+ Copyright (C) 2025 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <https://www.gnu.org/licenses/>. */ + +#include <errno.h> +#include <stdio.h> +#include <string.h> + +#include <support/check.h> +#include <support/xstdio.h> + +int +do_test (void) +{ + for (int do_ftello = 0; do_ftello < 2; ++do_ftello) + { + FILE *fp = xfopen ("/dev/zero", "r"); + char buf[17]; + memset (buf, 0xcc, sizeof (buf)); + xfread (buf, 1, sizeof (buf), fp); + static const char zeros[sizeof (buf)] = { 0 }; + TEST_COMPARE_BLOB (buf, sizeof (buf), zeros, sizeof (zeros)); + if (do_ftello) + { + errno = 0; + TEST_COMPARE (ftello (fp), -1); + TEST_COMPARE (errno, ESPIPE); + } + /* Do not use xfclose because it flushes first. */ + TEST_COMPARE (fclose (fp), 0); + } + + return 0; +} + +#include <support/test-driver.c> diff --git a/sysdeps/aarch64/multiarch/memcpy_oryon1.S b/sysdeps/aarch64/multiarch/memcpy_oryon1.S index e86d8b0..cc267db 100644 --- a/sysdeps/aarch64/multiarch/memcpy_oryon1.S +++ b/sysdeps/aarch64/multiarch/memcpy_oryon1.S @@ -152,6 +152,46 @@ L(copy96): .p2align 6 L(copy_long): + /* On oryon1 cores, large memcpy's are helped by using ldnp/stnp. + This loop is identical to the one below it but using ldnp/stnp + instructions. For loops that are less than 32768 bytes, + the ldnp/stnp instructions will not help and will cause a slow + down so only use the ldnp/stnp loop for the largest sizes. */ + + cmp count, #32768 + b.lo L(copy_long_without_nontemp) + and tmp1, dstin, 15 + bic dst, dstin, 15 + ldnp D_l, D_h, [src] + sub src, src, tmp1 + add count, count, tmp1 /* Count is now 16 too large. */ + ldnp A_l, A_h, [src, 16] + stnp D_l, D_h, [dstin] + ldnp B_l, B_h, [src, 32] + ldnp C_l, C_h, [src, 48] + ldnp D_l, D_h, [src, 64] + add src, src, #64 + subs count, count, 128 + 16 /* Test and readjust count. */ + +L(nontemp_loop64): + tbz src, #6, 1f +1: + stnp A_l, A_h, [dst, 16] + ldnp A_l, A_h, [src, 16] + stnp B_l, B_h, [dst, 32] + ldnp B_l, B_h, [src, 32] + stnp C_l, C_h, [dst, 48] + ldnp C_l, C_h, [src, 48] + stnp D_l, D_h, [dst, 64] + ldnp D_l, D_h, [src, 64] + add src, src, #64 + add dst, dst, #64 + subs count, count, 64 + b.hi L(nontemp_loop64) + b L(last64) + +L(copy_long_without_nontemp): + and tmp1, dstin, 15 bic dst, dstin, 15 ldp D_l, D_h, [src] diff --git a/sysdeps/aarch64/multiarch/memset_oryon1.S b/sysdeps/aarch64/multiarch/memset_oryon1.S index 0f9b718..88f4ef4 100644 --- a/sysdeps/aarch64/multiarch/memset_oryon1.S +++ b/sysdeps/aarch64/multiarch/memset_oryon1.S @@ -90,6 +90,8 @@ L(set_long): cmp count, 256 ccmp valw, 0, 0, cs b.eq L(try_zva) + cmp count, #32768 + b.hi L(set_long_with_nontemp) /* Small-size or non-zero memset does not use DC ZVA. */ sub count, dstend, dst @@ -112,6 +114,30 @@ L(set_long): stp val, val, [dstend, -16] ret +L(set_long_with_nontemp): + /* Small-size or non-zero memset does not use DC ZVA. 
*/ + sub count, dstend, dst + + /* Adjust count and bias for loop. By subtracting extra 1 from count, + it is easy to use tbz instruction to check whether loop tailing + count is less than 33 bytes, so as to bypass 2 unnecessary stps. */ + sub count, count, 64+16+1 + +1: stnp val, val, [dst, 16] + stnp val, val, [dst, 32] + stnp val, val, [dst, 48] + stnp val, val, [dst, 64] + add dst, dst, #64 + subs count, count, 64 + b.hs 1b + + tbz count, 5, 1f /* Remaining count is less than 33 bytes? */ + stnp val, val, [dst, 16] + stnp val, val, [dst, 32] +1: stnp val, val, [dstend, -32] + stnp val, val, [dstend, -16] + ret + L(try_zva): /* Write the first and last 64 byte aligned block using stp rather than using DC ZVA as it is faster. */ diff --git a/sysdeps/pthread/tst-stdio2.c b/sysdeps/pthread/tst-stdio2.c index 08948cb..0876ed6 100644 --- a/sysdeps/pthread/tst-stdio2.c +++ b/sysdeps/pthread/tst-stdio2.c @@ -75,7 +75,7 @@ do_test (void) exit (1); } - puts ("join returned succsefully"); + puts ("join returned successfully"); return 0; } diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c index b13b7b7..e50f1d6 100644 --- a/sysdeps/x86/cpu-features.c +++ b/sysdeps/x86/cpu-features.c @@ -542,6 +542,7 @@ enum intel_microarch INTEL_BIGCORE_ARROWLAKE, INTEL_BIGCORE_PANTHERLAKE, INTEL_BIGCORE_GRANITERAPIDS, + INTEL_BIGCORE_DIAMONDRAPIDS, /* Mixed (bigcore + atom SOC). */ INTEL_MIXED_LAKEFIELD, @@ -817,6 +818,16 @@ disable_tsx: break; } } + else if (family == 19) + switch (model) + { + case 0x01: + microarch = INTEL_BIGCORE_DIAMONDRAPIDS; + break; + + default: + break; + } switch (microarch) { @@ -926,6 +937,7 @@ disable_tsx: case INTEL_BIGCORE_SAPPHIRERAPIDS: case INTEL_BIGCORE_EMERALDRAPIDS: case INTEL_BIGCORE_GRANITERAPIDS: + case INTEL_BIGCORE_DIAMONDRAPIDS: /* Default tuned Mixed (bigcore + atom SOC). */ case INTEL_MIXED_LAKEFIELD: case INTEL_MIXED_ALDERLAKE: |
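
Note: several of the malloc.c hunks above add explicit errno save/restore around paths that may call mmap, munmap, or brk, so that free () does not clobber a caller's errno. A minimal sketch of the caller-visible pattern this protects, assuming a buffer large enough that glibc typically services it with mmap (the allocation size and path below are illustrative only):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  void *buf = malloc (1 << 20);                 /* Likely an mmapped chunk.  */
  int fd = open ("/does-not-exist", O_RDONLY);  /* Fails and sets errno.  */
  free (buf);           /* May munmap internally; should leave errno alone.  */
  if (fd < 0)
    perror ("open");    /* Still reports the original open () failure.  */
  return 0;
}
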