Diffstat (limited to 'nptl/allocatestack.c')
-rw-r--r--  nptl/allocatestack.c  |  34
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 800ca89..99f56b9 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -150,17 +150,11 @@ get_cached_stack (size_t *sizep, void **memp)
   and fall back to ALLOCATE_GUARD_PROT_NONE if the madvise call fails.  */
static int allocate_stack_mode = ALLOCATE_GUARD_MADV_GUARD;
-static inline int stack_prot (void)
-{
- return (PROT_READ | PROT_WRITE
- | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));
-}
-
static void *
allocate_thread_stack (size_t size, size_t guardsize)
{
  /* MADV_GUARD_INSTALL does not require an additional PROT_NONE mapping.  */
- int prot = stack_prot ();
+ int prot = GL(dl_stack_prot_flags);
if (atomic_load_relaxed (&allocate_stack_mode) == ALLOCATE_GUARD_PROT_NONE)
  /* If a guard page is required, avoid committing memory by first allocating
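
The first hunk drops the stack_prot () helper in favor of the precomputed
GL(dl_stack_prot_flags). A minimal sketch of the mapping the removed helper
performed (compute_stack_prot is a hypothetical name used only for
illustration; in glibc the cached value is filled in by the dynamic loader):

#include <elf.h>        /* PF_X, the PT_GNU_STACK execute flag.  */
#include <sys/mman.h>   /* PROT_READ, PROT_WRITE, PROT_EXEC.  */

/* Hypothetical stand-in for the removed helper: thread stacks are always
   readable and writable, and executable only when the PT_GNU_STACK
   program header requests PF_X.  */
static int
compute_stack_prot (int dl_stack_flags)
{
  return PROT_READ | PROT_WRITE
         | ((dl_stack_flags & PF_X) ? PROT_EXEC : 0);
}
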
@@ -216,7 +210,7 @@ setup_stack_prot (char *mem, size_t size, struct pthread *pd,
}
else
{
- const int prot = stack_prot ();
+ const int prot = GL(dl_stack_prot_flags);
char *guardend = guard + guardsize;
#if _STACK_GROWS_DOWN
/* As defined at guard_position, for architectures with downward stack
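
For downward-growing stacks the guard page sits at the start of the
allocated area, so only the region above guardend needs the stack
protection. A sketch of that step, under the assumption that the truncated
branch above calls __mprotect on the non-guard remainder
(protect_above_guard is illustrative, not a glibc function):

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/* Illustrative only: with a GUARDSIZE-byte guard at the bottom of a
   SIZE-byte mapping, grant the stack protection to everything above it;
   the guard keeps the PROT_NONE from the initial allocation.  */
static bool
protect_above_guard (char *guard, size_t guardsize, size_t size, int prot)
{
  char *guardend = guard + guardsize;
  return mprotect (guardend, size - guardsize, prot) == 0;
}
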
@@ -240,7 +234,7 @@ setup_stack_prot (char *mem, size_t size, struct pthread *pd,
/* Update the guard area of the thread stack MEM of size SIZE with the new
   GUARDSIZE.  It uses the method defined by PD's stack_mode.  */
static inline bool
-adjust_stack_prot (char *mem, size_t size, const struct pthread *pd,
+adjust_stack_prot (char *mem, size_t size, struct pthread *pd,
size_t guardsize, size_t pagesize_m1)
{
/* The required guard area is larger than the current one. For
@@ -258,11 +252,23 @@ adjust_stack_prot (char *mem, size_t size, const struct pthread *pd,
so use the new guard placement with the new size. */
if (guardsize > pd->guardsize)
{
+      /* There was previously no need to set up a guard page, so we need
+         to check whether the kernel supports the madvise guard.  */
char *guard = guard_position (mem, size, guardsize, pd, pagesize_m1);
- if (pd->stack_mode == ALLOCATE_GUARD_MADV_GUARD)
- return __madvise (guard, guardsize, MADV_GUARD_INSTALL) == 0;
- else if (pd->stack_mode == ALLOCATE_GUARD_PROT_NONE)
- return __mprotect (guard, guardsize, PROT_NONE) == 0;
+ if (atomic_load_relaxed (&allocate_stack_mode)
+ == ALLOCATE_GUARD_MADV_GUARD)
+ {
+ if (__madvise (guard, guardsize, MADV_GUARD_INSTALL) == 0)
+ {
+ pd->stack_mode = ALLOCATE_GUARD_MADV_GUARD;
+ return true;
+ }
+ atomic_store_relaxed (&allocate_stack_mode,
+ ALLOCATE_GUARD_PROT_NONE);
+ }
+
+ pd->stack_mode = ALLOCATE_GUARD_PROT_NONE;
+ return __mprotect (guard, guardsize, PROT_NONE) == 0;
}
/* The current guard area is larger than the required one. For
     _STACK_GROWS_DOWN this means changing the guard as:
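
The rewritten branch turns the first guard enlargement into a runtime probe:
MADV_GUARD_INSTALL is attempted while the process-wide mode still permits
it, a failure downgrades allocate_stack_mode so later threads skip the
madvise, and pd->stack_mode records which method actually guards this stack.
A standalone sketch of the same detect-once pattern (not the glibc
internals; MADV_GUARD_INSTALL needs Linux 6.13+, and the UAPI value 102 is
an assumption for older headers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

#ifndef MADV_GUARD_INSTALL
# define MADV_GUARD_INSTALL 102   /* Assumed Linux UAPI value.  */
#endif

enum { GUARD_MADV, GUARD_PROT_NONE };
static _Atomic int guard_mode = GUARD_MADV;

/* Install a guard area, probing MADV_GUARD_INSTALL once per process and
   latching the PROT_NONE fallback if the kernel rejects the advice.  */
static bool
install_guard (char *guard, size_t guardsize)
{
  if (atomic_load_explicit (&guard_mode, memory_order_relaxed) == GUARD_MADV)
    {
      if (madvise (guard, guardsize, MADV_GUARD_INSTALL) == 0)
        return true;
      /* Older kernel: never try the madvise again.  */
      atomic_store_explicit (&guard_mode, GUARD_PROT_NONE,
                             memory_order_relaxed);
    }
  return mprotect (guard, guardsize, PROT_NONE) == 0;
}

A relaxed load suffices here: the worst a stale value causes is one extra
madvise attempt, which matches the atomic_load_relaxed used in the patch.
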
@@ -294,7 +300,7 @@ adjust_stack_prot (char *mem, size_t size, const struct pthread *pd,
}
else if (pd->stack_mode == ALLOCATE_GUARD_PROT_NONE)
{
- const int prot = stack_prot ();
+ const int prot = GL(dl_stack_prot_flags);
#if _STACK_GROWS_DOWN
return __mprotect (mem + guardsize, slacksize, prot) == 0;
#else
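
The last hunk is the shrink path: pages that used to be guard must become
usable stack again, which for ALLOCATE_GUARD_PROT_NONE is an mprotect back
to the stack protection flags. The elided ALLOCATE_GUARD_MADV_GUARD branch
presumably lifts the guard instead; continuing the sketch above, with
MADV_GUARD_REMOVE's UAPI value 103 likewise an assumption for older headers:

#ifndef MADV_GUARD_REMOVE
# define MADV_GUARD_REMOVE 103   /* Assumed Linux UAPI value.  */
#endif

/* Reopen SLACKSIZE bytes of former guard at ADDR: either lift the madvise
   guard or restore the stack protection over the PROT_NONE window.  */
static bool
remove_guard (char *addr, size_t slacksize, bool used_madvise, int prot)
{
  if (used_madvise)
    return madvise (addr, slacksize, MADV_GUARD_REMOVE) == 0;
  return mprotect (addr, slacksize, prot) == 0;
}
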