Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Revert "Don't expose guard pages to malloc_stack API consumers" #55555

Merged
merged 3 commits on Aug 29, 2024
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 5 additions & 42 deletions src/gc-stacks.c
Original file line number Diff line number Diff line change
Expand Up @@ -23,22 +23,13 @@
// number of stacks to always keep available per pool
#define MIN_STACK_MAPPINGS_PER_POOL 5

#if defined(_OS_WINDOWS_) || (!defined(_OS_OPENBSD_) && !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK))
#define JL_USE_GUARD_PAGE 1
const size_t jl_guard_size = (4096 * 8);
#else
const size_t jl_guard_size = 0;
#endif

static _Atomic(uint32_t) num_stack_mappings = 0;

#ifdef _OS_WINDOWS_
#define MAP_FAILED NULL
static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
{
size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
bufsz += guard_size;

void *stk = VirtualAlloc(NULL, bufsz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (stk == NULL)
return MAP_FAILED;
Expand All @@ -49,7 +40,6 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
VirtualFree(stk, 0, MEM_RELEASE);
return MAP_FAILED;
}
stk = (char *)stk + guard_size;

jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
return stk;
Expand All @@ -58,68 +48,41 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT

// Release a task stack previously returned by malloc_stack (Windows variant)
// and decrement the global mapping counter.
static void free_stack(void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT
{
#ifdef JL_USE_GUARD_PAGE
// malloc_stack advanced the returned pointer past a leading guard page;
// undo that offset so we release the mapping from its true base address.
// NOTE(review): bufsz is adjusted here but VirtualFree(MEM_RELEASE) takes
// size 0, so the adjusted bufsz is unused on this path.
size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
bufsz += guard_size;
stkbuf = (char *)stkbuf - guard_size;
#endif

VirtualFree(stkbuf, 0, MEM_RELEASE);
jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
}

#else

# ifdef _OS_OPENBSD_
// Allocate a task stack of bufsz bytes via mmap (OpenBSD variant).
// Returns MAP_FAILED on allocation failure.
static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
{
// MAP_STACK is required on OpenBSD for memory used as a thread/task stack.
void* stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
if (stk == MAP_FAILED)
return MAP_FAILED;

// NOTE(review): this #ifdef appears unmatched within the function body in
// this diff view — likely an artifact of the rendered diff overlaying old
// and new lines; confirm against the actual file.
# ifdef _OS_OPENBSD_
// we don't set up a guard page to detect stack overflow: on OpenBSD, any
// mmap-ed region has guard page managed by the kernel, so there is no
// need for it. Additionally, a memory region used as stack (memory
// allocated with MAP_STACK option) has strict permission, and you can't
// "create" a guard page on such memory by using `mprotect` on it

jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
return stk;
}
void* stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
if (stk == MAP_FAILED)
return MAP_FAILED;
# else
static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
{
#ifdef JL_USE_GUARD_PAGE
size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
bufsz += guard_size;
#endif

void* stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (stk == MAP_FAILED)
return MAP_FAILED;

#ifdef JL_USE_GUARD_PAGE
Copy link
Sponsor Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This used to be #if !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK) before #54591

Copy link
Sponsor Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, those changed in a subsequent PR that I didn't want to revert

// set up a guard page to detect stack overflow
if (mprotect(stk, jl_guard_size, PROT_NONE) == -1) {
munmap(stk, bufsz);
return MAP_FAILED;
}
stk = (char *)stk + guard_size;
#endif
# endif

jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
return stk;
}
# endif

// Release a task stack previously returned by malloc_stack (POSIX variant)
// and decrement the global mapping counter.
static void free_stack(void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT
{
#ifdef JL_USE_GUARD_PAGE
// malloc_stack advanced the returned pointer past a leading guard page;
// step back to the mapping's base and widen the length so munmap releases
// the guard page together with the usable stack region.
size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
bufsz += guard_size;
stkbuf = (char *)stkbuf - guard_size;
#endif

munmap(stkbuf, bufsz);
jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
}
Expand Down