diff options
Diffstat (limited to 'nptl')
-rw-r--r-- | nptl/Makefile | 4
-rw-r--r-- | nptl/TODO-testing | 4
-rw-r--r-- | nptl/allocatestack.c | 267
-rw-r--r-- | nptl/descr.h | 8
-rw-r--r-- | nptl/nptl-stack.c | 2
-rw-r--r-- | nptl/pthread_cond_wait.c | 5
-rw-r--r-- | nptl/pthread_create.c | 2
-rw-r--r-- | nptl/pthread_getattr_np.c | 4
-rw-r--r-- | nptl/tst-guard1.c | 369
9 files changed, 561 insertions, 104 deletions
diff --git a/nptl/Makefile b/nptl/Makefile index 82621c7..f70d1e5 100644 --- a/nptl/Makefile +++ b/nptl/Makefile @@ -289,6 +289,7 @@ tests = \ tst-dlsym1 \ tst-exec4 \ tst-exec5 \ + tst-guard1 \ tst-initializers1 \ tst-initializers1-c11 \ tst-initializers1-c89 \ @@ -701,6 +702,9 @@ $(objpfx)tst-execstack-threads.out: $(objpfx)tst-execstack-threads-mod.so LDFLAGS-tst-execstack-threads = -Wl,-z,noexecstack LDFLAGS-tst-execstack-threads-mod.so = -Wl,-z,execstack CFLAGS-tst-execstack-threads-mod.c += -Wno-trampolines +ifeq ($(have-no-error-execstack),yes) +LDFLAGS-tst-execstack-threads-mod.so += -Wl,--no-error-execstack +endif tst-stackguard1-ARGS = --command "$(host-test-program-cmd) --child" tst-stackguard1-static-ARGS = --command "$(objpfx)tst-stackguard1-static --child" diff --git a/nptl/TODO-testing b/nptl/TODO-testing index f50d2ce..46ebf3b 100644 --- a/nptl/TODO-testing +++ b/nptl/TODO-testing @@ -1,7 +1,3 @@ -pthread_attr_setguardsize - - test effectiveness - pthread_attr_[sg]etschedparam what to test? diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c index 9c1a72b..800ca89 100644 --- a/nptl/allocatestack.c +++ b/nptl/allocatestack.c @@ -146,10 +146,37 @@ get_cached_stack (size_t *sizep, void **memp) return result; } +/* Assume support for MADV_GUARD_INSTALL, setup_stack_prot will disable it + and fall back to ALLOCATE_GUARD_PROT_NONE if the madvise call fails. */ +static int allocate_stack_mode = ALLOCATE_GUARD_MADV_GUARD; + +static inline int stack_prot (void) +{ + return (PROT_READ | PROT_WRITE + | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0)); +} + +static void * +allocate_thread_stack (size_t size, size_t guardsize) +{ + /* MADV_GUARD_INSTALL does not require an additional PROT_NONE mapping. 
*/ + int prot = stack_prot (); + + if (atomic_load_relaxed (&allocate_stack_mode) == ALLOCATE_GUARD_PROT_NONE) + /* If a guard page is required, avoid committing memory by first allocate + with PROT_NONE and then reserve with required permission excluding the + guard page. */ + prot = guardsize == 0 ? prot : PROT_NONE; + + return __mmap (NULL, size, prot, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, + 0); +} + + /* Return the guard page position on allocated stack. */ static inline char * __attribute ((always_inline)) -guard_position (void *mem, size_t size, size_t guardsize, struct pthread *pd, +guard_position (void *mem, size_t size, size_t guardsize, const struct pthread *pd, size_t pagesize_m1) { #if _STACK_GROWS_DOWN @@ -159,27 +186,131 @@ guard_position (void *mem, size_t size, size_t guardsize, struct pthread *pd, #endif } -/* Based on stack allocated with PROT_NONE, setup the required portions with - 'prot' flags based on the guard page position. */ -static inline int -setup_stack_prot (char *mem, size_t size, char *guard, size_t guardsize, - const int prot) +/* Setup the MEM thread stack of SIZE bytes with the required protection flags + along with a guard area of GUARDSIZE size. It first tries with + MADV_GUARD_INSTALL, and then fallback to setup the guard area using the + extra PROT_NONE mapping. Update PD with the type of guard area setup. 
*/ +static inline bool +setup_stack_prot (char *mem, size_t size, struct pthread *pd, + size_t guardsize, size_t pagesize_m1) { - char *guardend = guard + guardsize; + if (__glibc_unlikely (guardsize == 0)) + return true; + + char *guard = guard_position (mem, size, guardsize, pd, pagesize_m1); + if (atomic_load_relaxed (&allocate_stack_mode) == ALLOCATE_GUARD_MADV_GUARD) + { + if (__madvise (guard, guardsize, MADV_GUARD_INSTALL) == 0) + { + pd->stack_mode = ALLOCATE_GUARD_MADV_GUARD; + return true; + } + + /* If madvise fails it means the kernel does not support the guard + advise (we assume that the syscall is available, guard is page-aligned + and length is non negative). The stack has already the expected + protection flags, so it just need to PROT_NONE the guard area. */ + atomic_store_relaxed (&allocate_stack_mode, ALLOCATE_GUARD_PROT_NONE); + if (__mprotect (guard, guardsize, PROT_NONE) != 0) + return false; + } + else + { + const int prot = stack_prot (); + char *guardend = guard + guardsize; #if _STACK_GROWS_DOWN - /* As defined at guard_position, for architectures with downward stack - the guard page is always at start of the allocated area. */ - if (__mprotect (guardend, size - guardsize, prot) != 0) - return errno; + /* As defined at guard_position, for architectures with downward stack + the guard page is always at start of the allocated area. 
*/ + if (__mprotect (guardend, size - guardsize, prot) != 0) + return false; #else - size_t mprots1 = (uintptr_t) guard - (uintptr_t) mem; - if (__mprotect (mem, mprots1, prot) != 0) - return errno; - size_t mprots2 = ((uintptr_t) mem + size) - (uintptr_t) guardend; - if (__mprotect (guardend, mprots2, prot) != 0) - return errno; + size_t mprots1 = (uintptr_t) guard - (uintptr_t) mem; + if (__mprotect (mem, mprots1, prot) != 0) + return false; + size_t mprots2 = ((uintptr_t) mem + size) - (uintptr_t) guardend; + if (__mprotect (guardend, mprots2, prot) != 0) + return false; #endif - return 0; + } + + pd->stack_mode = ALLOCATE_GUARD_PROT_NONE; + return true; +} + +/* Update the guard area of the thread stack MEM of size SIZE with the new + GUARDSIZE. It uses the method defined by PD stack_mode. */ +static inline bool +adjust_stack_prot (char *mem, size_t size, const struct pthread *pd, + size_t guardsize, size_t pagesize_m1) +{ + /* The required guard area is larger than the current one. For + _STACK_GROWS_DOWN it means the guard should increase as: + + |guard|---------------------------------stack| + |new guard--|---------------------------stack| + + while for _STACK_GROWS_UP: + + |stack---------------------------|guard|-----| + |stack--------------------|new guard---|-----| + + Both madvise and mprotect allow overlapping the required region, + so use the new guard placement with the new size. */ + if (guardsize > pd->guardsize) + { + char *guard = guard_position (mem, size, guardsize, pd, pagesize_m1); + if (pd->stack_mode == ALLOCATE_GUARD_MADV_GUARD) + return __madvise (guard, guardsize, MADV_GUARD_INSTALL) == 0; + else if (pd->stack_mode == ALLOCATE_GUARD_PROT_NONE) + return __mprotect (guard, guardsize, PROT_NONE) == 0; + } + /* The current guard area is larger than the required one. 
For + _STACK_GROWS_DOWN is means change the guard as: + + |guard-------|-------------------------stack| + |new guard|----------------------------stack| + + And for _STACK_GROWS_UP: + + |stack---------------------|guard-------|---| + |stack------------------------|new guard|---| + + For ALLOCATE_GUARD_MADV_GUARD it means remove the slack area + (disjointed region of guard and new guard), while for + ALLOCATE_GUARD_PROT_NONE it requires to mprotect it with the stack + protection flags. */ + else if (pd->guardsize > guardsize) + { + size_t slacksize = pd->guardsize - guardsize; + if (pd->stack_mode == ALLOCATE_GUARD_MADV_GUARD) + { + void *slack = +#if _STACK_GROWS_DOWN + mem + guardsize; +#else + guard_position (mem, size, pd->guardsize, pd, pagesize_m1); +#endif + return __madvise (slack, slacksize, MADV_GUARD_REMOVE) == 0; + } + else if (pd->stack_mode == ALLOCATE_GUARD_PROT_NONE) + { + const int prot = stack_prot (); +#if _STACK_GROWS_DOWN + return __mprotect (mem + guardsize, slacksize, prot) == 0; +#else + char *new_guard = (char *)(((uintptr_t) pd - guardsize) + & ~pagesize_m1); + char *old_guard = (char *)(((uintptr_t) pd - pd->guardsize) + & ~pagesize_m1); + /* The guard size difference might be > 0, but once rounded + to the nearest page the size difference might be zero. */ + if (new_guard > old_guard + && __mprotect (old_guard, new_guard - old_guard, prot) != 0) + return false; +#endif + } + } + return true; } /* Mark the memory of the stack as usable to the kernel. It frees everything @@ -291,7 +422,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp, /* This is a user-provided stack. It will not be queued in the stack cache nor will the memory (except the TLS memory) be freed. */ - pd->user_stack = true; + pd->stack_mode = ALLOCATE_GUARD_USER; /* This is at least the second thread. 
*/ pd->header.multiple_threads = 1; @@ -325,10 +456,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp, /* Allocate some anonymous memory. If possible use the cache. */ size_t guardsize; size_t reported_guardsize; - size_t reqsize; void *mem; - const int prot = (PROT_READ | PROT_WRITE - | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0)); /* Adjust the stack size for alignment. */ size &= ~tls_static_align_m1; @@ -358,16 +486,10 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp, return EINVAL; /* Try to get a stack from the cache. */ - reqsize = size; pd = get_cached_stack (&size, &mem); if (pd == NULL) { - /* If a guard page is required, avoid committing memory by first - allocate with PROT_NONE and then reserve with required permission - excluding the guard page. */ - mem = __mmap (NULL, size, (guardsize == 0) ? prot : PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); - + mem = allocate_thread_stack (size, guardsize); if (__glibc_unlikely (mem == MAP_FAILED)) return errno; @@ -394,15 +516,10 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp, #endif /* Now mprotect the required region excluding the guard area. */ - if (__glibc_likely (guardsize > 0)) + if (!setup_stack_prot (mem, size, pd, guardsize, pagesize_m1)) { - char *guard = guard_position (mem, size, guardsize, pd, - pagesize_m1); - if (setup_stack_prot (mem, size, guard, guardsize, prot) != 0) - { - __munmap (mem, size); - return errno; - } + __munmap (mem, size); + return errno; } /* Remember the stack-related values. */ @@ -456,59 +573,31 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp, which will be read next. */ } - /* Create or resize the guard area if necessary. */ - if (__glibc_unlikely (guardsize > pd->guardsize)) + /* Create or resize the guard area if necessary on an already + allocated stack. 
*/ + if (!adjust_stack_prot (mem, size, pd, guardsize, pagesize_m1)) { - char *guard = guard_position (mem, size, guardsize, pd, - pagesize_m1); - if (__mprotect (guard, guardsize, PROT_NONE) != 0) - { - mprot_error: - lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE); - - /* Remove the thread from the list. */ - __nptl_stack_list_del (&pd->list); + lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE); - lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE); + /* Remove the thread from the list. */ + __nptl_stack_list_del (&pd->list); - /* Get rid of the TLS block we allocated. */ - _dl_deallocate_tls (TLS_TPADJ (pd), false); + lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE); - /* Free the stack memory regardless of whether the size - of the cache is over the limit or not. If this piece - of memory caused problems we better do not use it - anymore. Uh, and we ignore possible errors. There - is nothing we could do. */ - (void) __munmap (mem, size); + /* Get rid of the TLS block we allocated. */ + _dl_deallocate_tls (TLS_TPADJ (pd), false); - return errno; - } + /* Free the stack memory regardless of whether the size + of the cache is over the limit or not. If this piece + of memory caused problems we better do not use it + anymore. Uh, and we ignore possible errors. There + is nothing we could do. */ + (void) __munmap (mem, size); - pd->guardsize = guardsize; + return errno; } - else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize, - 0)) - { - /* The old guard area is too large. */ - -#if _STACK_GROWS_DOWN - if (__mprotect ((char *) mem + guardsize, pd->guardsize - guardsize, - prot) != 0) - goto mprot_error; -#elif _STACK_GROWS_UP - char *new_guard = (char *)(((uintptr_t) pd - guardsize) - & ~pagesize_m1); - char *old_guard = (char *)(((uintptr_t) pd - pd->guardsize) - & ~pagesize_m1); - /* The guard size difference might be > 0, but once rounded - to the nearest page the size difference might be zero. 
*/ - if (new_guard > old_guard - && __mprotect (old_guard, new_guard - old_guard, prot) != 0) - goto mprot_error; -#endif - pd->guardsize = guardsize; - } + pd->guardsize = guardsize; /* The pthread_getattr_np() calls need to get passed the size requested in the attribute, regardless of how large the actually used guardsize is. */ @@ -549,10 +638,6 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp, return 0; } -/* Maximum supported name from initial kernel support, not exported - by user API. */ -#define ANON_VMA_NAME_MAX_LEN 80 - #define SET_STACK_NAME(__prefix, __stack, __stacksize, __tid) \ ({ \ char __stack_name[sizeof (__prefix) + \ @@ -568,19 +653,21 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp, static void name_stack_maps (struct pthread *pd, bool set) { + size_t adjust = pd->stack_mode == ALLOCATE_GUARD_PROT_NONE ? + pd->guardsize : 0; #if _STACK_GROWS_DOWN - void *stack = pd->stackblock + pd->guardsize; + void *stack = pd->stackblock + adjust; #else void *stack = pd->stackblock; #endif - size_t stacksize = pd->stackblock_size - pd->guardsize; + size_t stacksize = pd->stackblock_size - adjust; if (!set) - __set_vma_name (stack, stacksize, NULL); + __set_vma_name (stack, stacksize, " glibc: unused stack"); else { unsigned int tid = pd->tid; - if (pd->user_stack) + if (pd->stack_mode == ALLOCATE_GUARD_USER) SET_STACK_NAME (" glibc: pthread user stack: ", stack, stacksize, tid); else SET_STACK_NAME (" glibc: pthread stack: ", stack, stacksize, tid); diff --git a/nptl/descr.h b/nptl/descr.h index c60ca13..ada6867 100644 --- a/nptl/descr.h +++ b/nptl/descr.h @@ -125,6 +125,12 @@ struct priority_protection_data unsigned int priomap[]; }; +enum allocate_stack_mode_t +{ + ALLOCATE_GUARD_MADV_GUARD = 0, + ALLOCATE_GUARD_PROT_NONE = 1, + ALLOCATE_GUARD_USER = 2, +}; /* Thread descriptor data structure. */ struct pthread @@ -324,7 +330,7 @@ struct pthread bool report_events; /* True if the user provided the stack. 
*/ - bool user_stack; + enum allocate_stack_mode_t stack_mode; /* True if thread must stop at startup time. */ bool stopped_start; diff --git a/nptl/nptl-stack.c b/nptl/nptl-stack.c index 503357f..c049c51 100644 --- a/nptl/nptl-stack.c +++ b/nptl/nptl-stack.c @@ -120,7 +120,7 @@ __nptl_deallocate_stack (struct pthread *pd) not reset the 'used' flag in the 'tid' field. This is done by the kernel. If no thread has been created yet this field is still zero. */ - if (__glibc_likely (! pd->user_stack)) + if (__glibc_likely (pd->stack_mode != ALLOCATE_GUARD_USER)) (void) queue_stack (pd); else /* Free the memory associated with the ELF TLS. */ diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c index f915873..c6461bd 100644 --- a/nptl/pthread_cond_wait.c +++ b/nptl/pthread_cond_wait.c @@ -273,11 +273,6 @@ __condvar_cleanup_waiting (void *arg) (If the format of __wrefs is changed, update nptl_lock_constants.pysym and the pretty printers.) For each of the two groups, we have: - __g_refs: Futex waiter reference count. - * LSB is true if waiters should run futex_wake when they remove the - last reference. - * Reference count used by waiters concurrently with signalers that have - acquired the condvar-internal lock. __g_signals: The number of signals that can still be consumed, relative to the current g1_start. (i.e. g1_start with the signal count added) * Used as a futex word by waiters. Used concurrently by waiters and diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c index 9ae5423..e1033d4 100644 --- a/nptl/pthread_create.c +++ b/nptl/pthread_create.c @@ -554,7 +554,7 @@ start_thread (void *arg) to avoid creating a new free-state block during thread release. 
*/ __getrandom_vdso_release (pd); - if (!pd->user_stack) + if (pd->stack_mode != ALLOCATE_GUARD_USER) advise_stack_range (pd->stackblock, pd->stackblock_size, (uintptr_t) pd, pd->guardsize); diff --git a/nptl/pthread_getattr_np.c b/nptl/pthread_getattr_np.c index e98e2df..43dd16d 100644 --- a/nptl/pthread_getattr_np.c +++ b/nptl/pthread_getattr_np.c @@ -145,9 +145,9 @@ __pthread_getattr_np (pthread_t thread_id, pthread_attr_t *attr) > (size_t) iattr->stackaddr - last_to) iattr->stacksize = (size_t) iattr->stackaddr - last_to; #else - /* The limit might be too high. */ + /* The limit might be too low. */ if ((size_t) iattr->stacksize - > to - (size_t) iattr->stackaddr) + < to - (size_t) iattr->stackaddr) iattr->stacksize = to - (size_t) iattr->stackaddr; #endif /* We succeed and no need to look further. */ diff --git a/nptl/tst-guard1.c b/nptl/tst-guard1.c new file mode 100644 index 0000000..e3e06df --- /dev/null +++ b/nptl/tst-guard1.c @@ -0,0 +1,369 @@ +/* Basic tests for pthread guard area. + Copyright (C) 2025 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <https://www.gnu.org/licenses/>. 
*/ + +#include <array_length.h> +#include <pthreaddef.h> +#include <setjmp.h> +#include <stackinfo.h> +#include <stdio.h> +#include <support/check.h> +#include <support/test-driver.h> +#include <support/xsignal.h> +#include <support/xthread.h> +#include <support/xunistd.h> +#include <sys/mman.h> +#include <stdlib.h> + +static long int pagesz; + +/* To check if the guard region is inaccessible, the thread tries read/writes + on it and checks if a SIGSEGV is generated. */ + +static volatile sig_atomic_t signal_jump_set; +static sigjmp_buf signal_jmp_buf; + +static void +sigsegv_handler (int sig) +{ + if (signal_jump_set == 0) + return; + + siglongjmp (signal_jmp_buf, sig); +} + +static bool +try_access_buf (char *ptr, bool write) +{ + signal_jump_set = true; + + bool failed = sigsetjmp (signal_jmp_buf, 0) != 0; + if (!failed) + { + if (write) + *(volatile char *)(ptr) = 'x'; + else + *(volatile char *)(ptr); + } + + signal_jump_set = false; + return !failed; +} + +static bool +try_read_buf (char *ptr) +{ + return try_access_buf (ptr, false); +} + +static bool +try_write_buf (char *ptr) +{ + return try_access_buf (ptr, true); +} + +static bool +try_read_write_buf (char *ptr) +{ + return try_read_buf (ptr) && try_write_buf(ptr); +} + + +/* Return the guard region of the current thread (it only makes sense on + a thread created by pthread_created). */ + +struct stack_t +{ + char *stack; + size_t stacksize; + char *guard; + size_t guardsize; +}; + +static inline size_t +adjust_stacksize (size_t stacksize) +{ + /* For some ABIs, The guard page depends of the thread descriptor, which in + turn rely on the require static TLS. The only supported _STACK_GROWS_UP + ABI, hppa, defines TLS_DTV_AT_TP and it is not straightforward to + calculate the guard region with current pthread APIs. So to get a + correct stack size assumes an extra page after the guard area. 
*/ +#if _STACK_GROWS_DOWN + return stacksize; +#elif _STACK_GROWS_UP + return stacksize - pagesz; +#endif +} + +struct stack_t +get_current_stack_info (void) +{ + pthread_attr_t attr; + TEST_VERIFY_EXIT (pthread_getattr_np (pthread_self (), &attr) == 0); + void *stack; + size_t stacksize; + TEST_VERIFY_EXIT (pthread_attr_getstack (&attr, &stack, &stacksize) == 0); + size_t guardsize; + TEST_VERIFY_EXIT (pthread_attr_getguardsize (&attr, &guardsize) == 0); + /* The guardsize is reported as the current page size, although it might + be adjusted to a larger value (aarch64 for instance). */ + if (guardsize != 0 && guardsize < ARCH_MIN_GUARD_SIZE) + guardsize = ARCH_MIN_GUARD_SIZE; + +#if _STACK_GROWS_DOWN + void *guard = guardsize ? stack - guardsize : 0; +#elif _STACK_GROWS_UP + stacksize = adjust_stacksize (stacksize); + void *guard = guardsize ? stack + stacksize : 0; +#endif + + pthread_attr_destroy (&attr); + + return (struct stack_t) { stack, stacksize, guard, guardsize }; +} + +struct thread_args_t +{ + size_t stacksize; + size_t guardsize; +}; + +struct thread_args_t +get_thread_args (const pthread_attr_t *attr) +{ + size_t stacksize; + size_t guardsize; + + TEST_COMPARE (pthread_attr_getstacksize (attr, &stacksize), 0); + TEST_COMPARE (pthread_attr_getguardsize (attr, &guardsize), 0); + if (guardsize < ARCH_MIN_GUARD_SIZE) + guardsize = ARCH_MIN_GUARD_SIZE; + + return (struct thread_args_t) { stacksize, guardsize }; +} + +static void +set_thread_args (pthread_attr_t *attr, const struct thread_args_t *args) +{ + xpthread_attr_setstacksize (attr, args->stacksize); + xpthread_attr_setguardsize (attr, args->guardsize); +} + +static void * +tf (void *closure) +{ + struct thread_args_t *args = closure; + + struct stack_t s = get_current_stack_info (); + if (test_verbose) + printf ("debug: [tid=%jd] stack = { .stack=%p, stacksize=%#zx, guard=%p, " + "guardsize=%#zx }\n", + (intmax_t) gettid (), + s.stack, + s.stacksize, + s.guard, + s.guardsize); + + if (args != 
NULL) + { + TEST_COMPARE (adjust_stacksize (args->stacksize), s.stacksize); + TEST_COMPARE (args->guardsize, s.guardsize); + } + + /* Ensure we can access the stack area. */ + TEST_COMPARE (try_read_buf (s.stack), true); + TEST_COMPARE (try_read_buf (&s.stack[s.stacksize / 2]), true); + TEST_COMPARE (try_read_buf (&s.stack[s.stacksize - 1]), true); + + /* Check if accessing the guard area results in SIGSEGV. */ + if (s.guardsize > 0) + { + TEST_COMPARE (try_read_write_buf (s.guard), false); + TEST_COMPARE (try_read_write_buf (&s.guard[s.guardsize / 2]), false); + TEST_COMPARE (try_read_write_buf (&s.guard[s.guardsize] - 1), false); + } + + return NULL; +} + +/* Test 1: caller provided stack without guard. */ +static void +do_test1 (void) +{ + pthread_attr_t attr; + xpthread_attr_init (&attr); + + size_t stacksize = support_small_thread_stack_size (); + void *stack = xmmap (0, + stacksize, + PROT_READ | PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS|MAP_STACK, + -1); + xpthread_attr_setstack (&attr, stack, stacksize); + xpthread_attr_setguardsize (&attr, 0); + + struct thread_args_t args = { stacksize, 0 }; + pthread_t t = xpthread_create (&attr, tf, &args); + void *status = xpthread_join (t); + TEST_VERIFY (status == 0); + + xpthread_attr_destroy (&attr); + xmunmap (stack, stacksize); +} + +/* Test 2: same as 1., but with a guard area. */ +static void +do_test2 (void) +{ + pthread_attr_t attr; + xpthread_attr_init (&attr); + + size_t stacksize = support_small_thread_stack_size (); + void *stack = xmmap (0, + stacksize, + PROT_READ | PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS|MAP_STACK, + -1); + xpthread_attr_setstack (&attr, stack, stacksize); + xpthread_attr_setguardsize (&attr, pagesz); + + struct thread_args_t args = { stacksize, 0 }; + pthread_t t = xpthread_create (&attr, tf, &args); + void *status = xpthread_join (t); + TEST_VERIFY (status == 0); + + xpthread_attr_destroy (&attr); + xmunmap (stack, stacksize); +} + +/* Test 3: pthread_create with default values. 
*/ +static void +do_test3 (void) +{ + pthread_t t = xpthread_create (NULL, tf, NULL); + void *status = xpthread_join (t); + TEST_VERIFY (status == 0); +} + +/* Test 4: pthread_create without a guard area. */ +static void +do_test4 (void) +{ + pthread_attr_t attr; + xpthread_attr_init (&attr); + struct thread_args_t args = get_thread_args (&attr); + args.stacksize += args.guardsize; + args.guardsize = 0; + set_thread_args (&attr, &args); + + pthread_t t = xpthread_create (&attr, tf, &args); + void *status = xpthread_join (t); + TEST_VERIFY (status == 0); + + xpthread_attr_destroy (&attr); +} + +/* Test 5: pthread_create with non default stack and guard size value. */ +static void +do_test5 (void) +{ + pthread_attr_t attr; + xpthread_attr_init (&attr); + struct thread_args_t args = get_thread_args (&attr); + args.guardsize += pagesz; + args.stacksize += pagesz; + set_thread_args (&attr, &args); + + pthread_t t = xpthread_create (&attr, tf, &args); + void *status = xpthread_join (t); + TEST_VERIFY (status == 0); + + xpthread_attr_destroy (&attr); +} + +/* Test 6: thread with the required size (stack + guard) that matches the + test 3, but with a larger guard area. The pthread_create will need to + increase the guard area. */ +static void +do_test6 (void) +{ + pthread_attr_t attr; + xpthread_attr_init (&attr); + struct thread_args_t args = get_thread_args (&attr); + args.guardsize += pagesz; + args.stacksize -= pagesz; + set_thread_args (&attr, &args); + + pthread_t t = xpthread_create (&attr, tf, &args); + void *status = xpthread_join (t); + TEST_VERIFY (status == 0); + + xpthread_attr_destroy (&attr); +} + +/* Test 7: pthread_create with default values, the required size matches the + one from test 3 and 6 (but with a reduced guard area). The + pthread_create should use the cached stack from previous tests, but it + would require to reduce the guard area. 
*/ +static void +do_test7 (void) +{ + pthread_t t = xpthread_create (NULL, tf, NULL); + void *status = xpthread_join (t); + TEST_VERIFY (status == 0); +} + +static int +do_test (void) +{ + pagesz = sysconf (_SC_PAGESIZE); + + { + struct sigaction sa = { + .sa_handler = sigsegv_handler, + .sa_flags = SA_NODEFER, + }; + sigemptyset (&sa.sa_mask); + xsigaction (SIGSEGV, &sa, NULL); + /* Some system generates SIGBUS accessing the guard area when it is + setup with madvise. */ + xsigaction (SIGBUS, &sa, NULL); + } + + static const struct { + const char *descr; + void (*test)(void); + } tests[] = { + { "user provided stack without guard", do_test1 }, + { "user provided stack with guard", do_test2 }, + { "default attribute", do_test3 }, + { "default attribute without guard", do_test4 }, + { "non default stack and guard sizes", do_test5 }, + { "reused stack with larger guard", do_test6 }, + { "reused stack with smaller guard", do_test7 }, + }; + + for (int i = 0; i < array_length (tests); i++) + { + printf ("debug: test%01d: %s\n", i, tests[i].descr); + tests[i].test(); + } + + return 0; +} + +#include <support/test-driver.c> |