stacker/
lib.rs

//! A library to help grow the stack when it runs out of space.
//!
//! This is an implementation of manually instrumented segmented stacks, where points in a
//! program's control flow are annotated with "maybe grow the stack here". Each annotated point
//! indicates how close to the end of the stack it is allowed to get, plus the amount of stack to
//! allocate if that limit is reached.
//!
//! Once a program has reached the end of its stack, a temporary stack is allocated on the heap
//! and switched to for the duration of a closure.
//!
//! For a set of lower-level primitives, consider the `psm` crate.
//!
//! # Examples
//!
//! ```
//! // Grow the stack if we are within the "red zone" of 32K, and if we allocate
//! // a new stack allocate 1MB of stack space.
//! //
//! // If we're already in bounds, just run the provided closure on the current stack.
//! stacker::maybe_grow(32 * 1024, 1024 * 1024, || {
//!     // guaranteed to have at least 32K of stack
//! });
//! ```

#![allow(improper_ctypes)]

#[macro_use]
extern crate cfg_if;
extern crate libc;
#[cfg(windows)]
extern crate windows_sys;
#[macro_use]
extern crate psm;

mod backends;

use std::cell::Cell;

/// Grows the call stack if necessary.
///
/// This function is intended to be called at manually instrumented points in a program where
/// recursion is known to happen quite a bit. This function will check to see if we're within
/// `red_zone` bytes of the end of the stack, and if so it will allocate a new stack of at least
/// `stack_size` bytes.
///
/// The closure `callback` is guaranteed to run on a stack with at least `red_zone` bytes, and it
/// will be run on the current stack if there's space available.
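///
/// # Examples
///
/// A sketch of instrumenting a recursive function (`factorial` is an
/// illustrative stand-in; only `maybe_grow` comes from this crate):
///
/// ```
/// fn factorial(n: u64) -> u64 {
///     // Grow by 1MB whenever fewer than 32K of stack remain.
///     stacker::maybe_grow(32 * 1024, 1024 * 1024, || {
///         if n <= 1 { 1 } else { n * factorial(n - 1) }
///     })
/// }
/// assert_eq!(factorial(10), 3_628_800);
/// ```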
#[inline(always)]
pub fn maybe_grow<R, F: FnOnce() -> R>(red_zone: usize, stack_size: usize, callback: F) -> R {
    // If we can't guess the remaining stack (unsupported on some platforms), we immediately grow
    // the stack and then cache the new stack limit (which we do know now, because we allocated
    // the stack ourselves).
    let enough_space = match remaining_stack() {
        Some(remaining) => remaining >= red_zone,
        None => false,
    };
    if enough_space {
        callback()
    } else {
        grow(stack_size, callback)
    }
}

/// Always creates a new stack for the passed closure to run on.
/// The closure will still be on the same thread as the caller of `grow`.
/// This will allocate a new stack with at least `stack_size` bytes.
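///
/// # Examples
///
/// A minimal sketch (the 4MB stack size and 1MB buffer are illustrative
/// values, not recommendations):
///
/// ```
/// let len = stacker::grow(4 * 1024 * 1024, || {
///     // This local lives on the freshly allocated stack.
///     let buf = [0u8; 1024 * 1024];
///     buf.len()
/// });
/// assert_eq!(len, 1024 * 1024);
/// ```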
pub fn grow<R, F: FnOnce() -> R>(stack_size: usize, callback: F) -> R {
    // To avoid monomorphizing `_grow()` and everything it calls,
    // we convert the generic callback to a dynamic one.
    let mut opt_callback = Some(callback);
    let mut ret = None;
    let ret_ref = &mut ret;

    // This wrapper around `callback` achieves two things:
    // * It converts the `impl FnOnce` to a `dyn FnMut`.
    //   `dyn` because we want it to not be generic, and
    //   `FnMut` because we can't pass a `dyn FnOnce` around without boxing it.
    // * It eliminates the generic return value by writing it to the stack of this function.
    //   Otherwise the closure would have to return an unsized value, which isn't possible.
    let dyn_callback: &mut dyn FnMut() = &mut || {
        let taken_callback = opt_callback.take().unwrap();
        *ret_ref = Some(taken_callback());
    };

    _grow(stack_size, dyn_callback);
    ret.unwrap()
}

/// Queries the amount of remaining stack as interpreted by this library.
///
/// This function returns the amount of stack space left, which is used to
/// determine whether a stack switch should be made.
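///
/// # Examples
///
/// ```
/// // `None` means the stack limit could not be determined on this platform.
/// match stacker::remaining_stack() {
///     Some(remaining) => println!("{} bytes of stack remaining", remaining),
///     None => println!("remaining stack space unknown"),
/// }
/// ```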
pub fn remaining_stack() -> Option<usize> {
    let current_ptr = current_stack_ptr();
    get_stack_limit().map(|limit| current_ptr - limit)
}

psm_stack_information!(
    yes {
        fn current_stack_ptr() -> usize {
            psm::stack_pointer() as usize
        }
    }
    no {
        #[inline(always)]
        fn current_stack_ptr() -> usize {
            unsafe {
                let mut x = std::mem::MaybeUninit::<u8>::uninit();
                // Unlikely to ever be exercised. As a fallback we execute a volatile write to a
                // local (to hopefully defeat the optimisations that would turn this local into a
                // static global) and take its address. This way we get a very approximate address
                // of the current frame.
                x.as_mut_ptr().write_volatile(42);
                x.as_ptr() as usize
            }
        }
    }
);

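// The cached stack limit (the lowest usable stack address) for the current
// thread: guessed from the OS on first use, and updated via `set_stack_limit`
// while running on an allocated stack.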
thread_local! {
    static STACK_LIMIT: Cell<Option<usize>> = Cell::new(unsafe {
        backends::guess_os_stack_limit()
    })
}

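/// Returns the cached stack limit for the current thread, if one is known.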
#[inline(always)]
fn get_stack_limit() -> Option<usize> {
    STACK_LIMIT.with(|s| s.get())
}

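/// Replaces the cached stack limit for the current thread, e.g. when
/// switching to a newly allocated stack in `_grow`.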
#[inline(always)]
#[allow(unused)]
fn set_stack_limit(l: Option<usize>) {
    STACK_LIMIT.with(|s| s.set(l))
}

psm_stack_manipulation! {
    yes {
        #[cfg(not(any(target_arch = "wasm32", target_os = "hermit")))]
        #[path = "mmap_stack_restore_guard.rs"]
        mod stack_restore_guard;

        #[cfg(any(target_arch = "wasm32", target_os = "hermit"))]
        #[path = "alloc_stack_restore_guard.rs"]
        mod stack_restore_guard;

        use stack_restore_guard::StackRestoreGuard;

        fn _grow(requested_stack_size: usize, callback: &mut dyn FnMut()) {
            // Aside from the invariants documented below, this code has no meaningful gotchas.
            unsafe {
                // We use a guard pattern to ensure we deallocate the allocated stack when we leave
                // this function, and also try to uphold various safety invariants required by
                // `psm` (such as not unwinding from the callback we pass to it).
                // `StackRestoreGuard` allocates a memory area with suitable size and alignment.
                // It also sets up stack guards, if supported on the target.
                let guard = StackRestoreGuard::new(requested_stack_size);
                let (stack_base, allocated_stack_size) = guard.stack_area();
                debug_assert!(allocated_stack_size >= requested_stack_size);
                set_stack_limit(Some(stack_base as usize));
                // TODO should we not pass `allocated_stack_size` here?
                let panic = psm::on_stack(stack_base, requested_stack_size, move || {
                    std::panic::catch_unwind(std::panic::AssertUnwindSafe(callback)).err()
                });
                drop(guard);
                if let Some(p) = panic {
                    std::panic::resume_unwind(p);
                }
            }
        }
    }

    no {
        #[cfg(not(windows))]
        fn _grow(stack_size: usize, callback: &mut dyn FnMut()) {
            let _ = stack_size;
            callback();
        }
        #[cfg(windows)]
        use backends::windows::_grow;
    }
}
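
// An illustrative sanity check (a sketch, not an exhaustive test): recursion
// deep enough to exhaust a typical default thread stack completes safely when
// every level is instrumented with `maybe_grow`.
#[cfg(test)]
mod tests {
    // Recursively sums 1..=n, growing the stack in 1MB chunks whenever fewer
    // than 32K remain.
    fn sum_to(n: u64) -> u64 {
        super::maybe_grow(32 * 1024, 1024 * 1024, || {
            if n == 0 {
                0
            } else {
                n + sum_to(n - 1)
            }
        })
    }

    #[test]
    fn deep_recursion_does_not_overflow() {
        assert_eq!(sum_to(100_000), 100_000 * 100_001 / 2);
    }
}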