//! # generator stack
//!
//! Stack memory management for generators: raw stack allocation ([`SysStack`],
//! [`Stack`]) and a stack-allocating pointer type ([`StackBox`]).
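//!
//! A minimal usage sketch (crate-internal API; the word count is only for
//! illustration):
//!
//! ```ignore
//! // allocate a stack of 0x1000 usize words (rounded up to the platform minimum)
//! let mut stack = Stack::new(0x1000);
//! // place a closure on that stack behind a type-erased `Func` handle
//! let func = StackBox::new_fn_once(&mut stack, || println!("on the generator stack"));
//! // consume the functor exactly once
//! func.call_once();
//! ```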

use std::error::Error;
use std::fmt::{self, Display};
use std::io;
use std::mem::MaybeUninit;
use std::os::raw::c_void;
use std::ptr;

#[cfg(all(unix, target_arch = "x86_64"))]
#[path = "unix.rs"]
pub mod sys;

#[cfg(all(unix, target_arch = "aarch64"))]
#[path = "unix.rs"]
pub mod sys;

#[cfg(all(windows, target_arch = "x86_64"))]
#[path = "windows.rs"]
pub mod sys;

// must align with StackBoxHeader
const ALIGN: usize = std::mem::size_of::<StackBoxHeader>();
// the header size in usize words
const HEADER_SIZE: usize = std::mem::size_of::<StackBoxHeader>() / std::mem::size_of::<usize>();

struct StackBoxHeader {
    // track the stack
    stack: Stack,
    // size of the boxed data (in usize words), including alignment padding
    data_size: usize,
    // non-zero means dropping the box should also dealloc the stack
    need_drop: usize,
}
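
// Layout near the top of a `Stack` (addresses increase upward; sizes in usize words):
//
//   buf.top
//     [top - 1]          the offset word: number of words used from the top
//     ...                previously placed data / headers
//     [data]             the boxed `T`, padded so that it stays `ALIGN`-aligned
//     [StackBoxHeader]   written directly below its data
//     ...                free space, growing down toward buf.bottom
//
// `Stack::end()` returns `top - offset`, the lowest word of the reserved region.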

/// A pointer type for stack allocation.
pub struct StackBox<T> {
    // the stack memory
    ptr: ptr::NonNull<T>,
}

impl<T> StackBox<T> {
    /// create an uninitialized stack box
    fn new_uninit(stack: &mut Stack, need_drop: usize) -> MaybeUninit<Self> {
        let offset = unsafe { &mut *stack.get_offset() };
        // alloc the data
        let layout = std::alloc::Layout::new::<T>();
        let align = std::cmp::max(layout.align(), ALIGN);
        let size = ((layout.size() + align - 1) & !(align - 1)) / std::mem::size_of::<usize>();
        let u_align = align / std::mem::size_of::<usize>();
        let pad_size = u_align - (*offset + size) % u_align;
        let data_size = size + pad_size;
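        // Illustrative arithmetic (an assumption: a 64-bit target, where ALIGN is
        // 32 bytes and HEADER_SIZE is 4 words): for a 40-byte, 8-byte-aligned `T`
        // on a fresh stack (`*offset == 1`), `size` rounds up to 64 bytes = 8 words,
        // `u_align` is 4 and `pad_size` is 3, so the data starts 12 words (96 bytes)
        // below the page-aligned top and therefore stays 32-byte aligned.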
        *offset += data_size;
        let ptr = unsafe { ptr::NonNull::new_unchecked(stack.end() as *mut T) };

        // init the header
        *offset += HEADER_SIZE;
        unsafe {
            let mut header = ptr::NonNull::new_unchecked(stack.end() as *mut StackBoxHeader);
            let header = header.as_mut();
            header.data_size = data_size;
            header.need_drop = need_drop;
            header.stack = stack.shadow_clone();
            std::mem::MaybeUninit::new(StackBox { ptr })
        }
    }

    fn get_header(&self) -> &StackBoxHeader {
        unsafe {
            let header = (self.ptr.as_ptr() as *mut usize).offset(-(HEADER_SIZE as isize));
            &*(header as *const StackBoxHeader)
        }
    }

    /// move data into the box
    pub(crate) unsafe fn init(&mut self, data: T) {
        ptr::write(self.ptr.as_ptr(), data);
    }

    // get the stack ptr
    pub(crate) fn as_ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }

    /// Constructs a StackBox from a raw pointer.
    ///
    /// # Safety
    ///
    /// This function is unsafe because improper use may lead to
    /// memory problems. For example, a double-free may occur if the
    /// function is called twice on the same raw pointer.
    #[inline]
    pub(crate) unsafe fn from_raw(raw: *mut T) -> Self {
        StackBox {
            ptr: ptr::NonNull::new_unchecked(raw),
        }
    }

    // Consumes the `StackBox`, returning a wrapped raw pointer.
    // #[inline]
    // pub(crate) fn into_raw(b: StackBox<T>) -> *mut T {
    //     let ret = b.ptr.as_ptr();
    //     std::mem::forget(b);
    //     ret
    // }
}

/// A type-erased `FnOnce` closure allocated on a generator `Stack`.
pub struct Func {
    // pointer to the closure data on the stack
    data: *mut (),
    // total allocation size (data + header) in usize words
    size: usize,
    // pointer to the stack's offset word, used to roll the allocation back on drop
    offset: *mut usize,
    // monomorphized entry point that reads the closure out and calls it
    func: fn(*mut ()),
    // monomorphized destructor for the closure
    drop: fn(*mut ()),
}
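
// How `Func` is used: `new_fn_once` places the closure `F` on the stack and
// erases its type behind the `data`/`func`/`drop` pointers. `call_once` reads
// the closure out and runs it exactly once, nulling `data` so that `Drop` does
// not run the closure's destructor a second time; `Drop` always rolls the stack
// offset back by `size`, whether or not the closure was called.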

impl Func {
    pub fn call_once(mut self) {
        let data = self.data;
        self.data = ptr::null_mut();
        (self.func)(data);
    }
}

impl Drop for Func {
    fn drop(&mut self) {
        if !self.data.is_null() {
            (self.drop)(self.data);
        }
        unsafe { *self.offset -= self.size };
    }
}

impl<F: FnOnce()> StackBox<F> {
    fn call_once(data: *mut ()) {
        unsafe {
            let data = data as *mut F;
            let f = data.read();
            f();
        }
    }

    fn drop_inner(data: *mut ()) {
        unsafe {
            let data = data as *mut F;
            ptr::drop_in_place(data);
        }
    }

    /// create a functor on the stack
    pub(crate) fn new_fn_once(stack: &mut Stack, data: F) -> Func {
        unsafe {
            let mut d = Self::new_uninit(stack, 0).assume_init();
            d.init(data);
            let header = d.get_header();
            let f = Func {
                data: d.ptr.as_ptr() as *mut (),
                size: header.data_size + HEADER_SIZE,
                offset: stack.get_offset(),
                func: Self::call_once,
                drop: Self::drop_inner,
            };
            std::mem::forget(d);
            f
        }
    }
}

impl<T> std::ops::Deref for StackBox<T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &*self.ptr.as_ref() }
    }
}

impl<T> std::ops::DerefMut for StackBox<T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.ptr.as_mut() }
    }
}

impl<T> Drop for StackBox<T> {
    fn drop(&mut self) {
        let header = self.get_header();
        unsafe {
            // roll back the stack offset, then run T's destructor in place
            *header.stack.get_offset() -= header.data_size + HEADER_SIZE;
            ptr::drop_in_place(self.ptr.as_ptr());
            // the first box allocated on a stack carries need_drop and owns the
            // stack memory, so dropping it releases the whole stack
            if header.need_drop != 0 {
                header.stack.drop_stack();
            }
        }
    }
}

/// Error type returned by stack allocation methods.
#[derive(Debug)]
pub enum StackError {
    /// Contains the maximum amount of memory allowed to be allocated as stack space.
    ExceedsMaximumSize(usize),

    /// Returned if some kind of I/O error happens during allocation.
    IoError(io::Error),
}

impl Display for StackError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            StackError::ExceedsMaximumSize(size) => write!(
                fmt,
                "Requested more than max size of {} bytes for a stack",
                size
            ),
            StackError::IoError(ref e) => e.fmt(fmt),
        }
    }
}

impl Error for StackError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match *self {
            StackError::ExceedsMaximumSize(_) => None,
            StackError::IoError(ref e) => Some(e),
        }
    }
}

/// Represents any kind of stack memory.
///
/// `FixedSizeStack` as well as `ProtectedFixedSizeStack`
/// can be used to allocate actual stack space.
#[derive(Debug)]
pub struct SysStack {
    top: *mut c_void,
    bottom: *mut c_void,
}

impl SysStack {
    /// Creates a (non-owning) representation of some stack memory.
    ///
    /// It is unsafe because it is your responsibility to make sure that
    /// `top` and `bottom` are valid addresses.
    #[inline]
    pub unsafe fn new(top: *mut c_void, bottom: *mut c_void) -> SysStack {
        debug_assert!(top >= bottom);

        SysStack { top, bottom }
    }

    /// Returns the top of the stack, from which it grows downwards towards `bottom()`.
    #[inline]
    pub fn top(&self) -> *mut c_void {
        self.top
    }

    /// Returns the bottom of the stack and thus its end.
    #[inline]
    pub fn bottom(&self) -> *mut c_void {
        self.bottom
    }

    /// Returns the size of the stack between top() and bottom().
    #[inline]
    pub fn len(&self) -> usize {
        self.top as usize - self.bottom as usize
    }

    /// Returns the minimal stack size allowed by the current platform.
    #[inline]
    pub fn min_size() -> usize {
        sys::min_stack_size()
    }

    /// Allocates a new stack of `size`.
    fn allocate(mut size: usize, protected: bool) -> Result<SysStack, StackError> {
        let page_size = sys::page_size();
        let min_stack_size = sys::min_stack_size();
        let max_stack_size = sys::max_stack_size();
        let add_shift = if protected { 1 } else { 0 };
        let add = page_size << add_shift;

        if size < min_stack_size {
            size = min_stack_size;
        }

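        // Page-granularity bookkeeping: page_size is a power of two, so
        // `!(page_size - 1)` clears the sub-page bits of `size - 1`; `add`
        // (computed above) reserves one extra page, or two when a protected
        // guard page is requested.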
        size = (size - 1) & !(page_size.overflowing_sub(1).0);

        if let Some(size) = size.checked_add(add) {
            if size <= max_stack_size {
                let mut ret = unsafe { sys::allocate_stack(size) };

                if protected {
                    if let Ok(stack) = ret {
                        ret = unsafe { sys::protect_stack(&stack) };
                    }
                }

                return ret.map_err(StackError::IoError);
            }
        }

        Err(StackError::ExceedsMaximumSize(max_stack_size - add))
    }
}

unsafe impl Send for SysStack {}

/// generator stack
/// this struct will not dealloc the memory by itself;
/// instead `StackBox` tracks its usage and deallocates it
pub struct Stack {
    buf: SysStack,
}

impl Stack {
    /// Allocate a new stack of `size` (in usize words). If `size == 0`, this is a `dummy_stack`
    pub fn new(size: usize) -> Stack {
        let track = (size & 1) != 0;
        let mut bytes = size * std::mem::size_of::<usize>();
        // the minimal size
        let min_size = SysStack::min_size();

        if bytes < min_size {
            bytes = min_size;
        }

        let buf = SysStack::allocate(bytes, true).expect("failed to alloc sys stack");

        let stk = Stack { buf };

        // if the requested size is odd we do the full footprint test
        let count = if track {
            stk.size()
        } else {
            // otherwise we only check the last few words
            8
        };

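        // Watermark `count` words from the bottom with the 0xEE byte pattern;
        // `get_used_size` later scans from the bottom and treats every word that
        // still holds the pattern as never having been touched.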
        unsafe {
            let buf = stk.buf.bottom as *mut usize;
            ptr::write_bytes(buf, 0xEE, count);
        }

        // init the stack box usage
        let offset = stk.get_offset();
        unsafe { *offset = 1 };

        stk
    }

    /// get used stack size
    pub fn get_used_size(&self) -> usize {
        let mut offset: usize = 0;
        unsafe {
            // build the full-word magic value (every byte set to 0xEE)
            let mut magic: usize = 0xEE;
            ptr::write_bytes(&mut magic, 0xEE, 1);
            let mut ptr = self.buf.bottom as *mut usize;
            while *ptr == magic {
                offset += 1;
                ptr = ptr.offset(1);
            }
        }
        let cap = self.size();
        cap - offset
    }

    /// get the stack capacity (in usize words)
    #[inline]
    pub fn size(&self) -> usize {
        self.buf.len() / std::mem::size_of::<usize>()
    }

    /// Point to the high end of the usable stack (`top` minus the reserved offset)
    pub fn end(&self) -> *mut usize {
        let offset = self.get_offset();
        unsafe { (self.buf.top as *mut usize).offset(-(*offset as isize)) }
    }

    /// Point to the low end of the allocated stack
    #[allow(dead_code)]
    pub fn begin(&self) -> *mut usize {
        self.buf.bottom as *mut _
    }

    /// alloc buffer on this stack
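    ///
    /// # Example
    ///
    /// An illustrative sketch of the crate-internal flow (not compiled here):
    ///
    /// ```ignore
    /// let mut stack = Stack::new(0x800);
    /// let mut b = unsafe { stack.alloc_uninit_box::<usize>().assume_init() };
    /// unsafe { b.init(7) };
    /// assert_eq!(*b, 7);
    /// // dropping `b` rolls the offset back and, because `need_drop` was set,
    /// // releases the whole stack
    /// ```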
    pub fn alloc_uninit_box<T>(&mut self) -> MaybeUninit<StackBox<T>> {
        // the first obj should set need_drop to non zero
        StackBox::<T>::new_uninit(self, 1)
    }

    // the offset word lives in the top-most usize of the stack; it counts the
    // words used from the top (including itself, hence it is initialized to 1)
    fn get_offset(&self) -> *mut usize {
        unsafe { (self.buf.top as *mut usize).offset(-1) }
    }

    // dealloc the stack (including its guard page)
    fn drop_stack(&self) {
        if self.buf.len() == 0 {
            return;
        }
        let page_size = sys::page_size();
        let guard = (self.buf.bottom as usize - page_size) as *mut c_void;
        let size_with_guard = self.buf.len() + page_size;
        unsafe {
            sys::deallocate_stack(guard, size_with_guard);
        }
    }

    fn shadow_clone(&self) -> Self {
        Stack {
            buf: SysStack {
                top: self.buf.top,
                bottom: self.buf.bottom,
            },
        }
    }
}

impl fmt::Debug for Stack {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let offset = self.get_offset();
        write!(f, "Stack<{:?}, Offset={}>", self.buf, unsafe { *offset })
    }
}