1 /*!
2 
3 **A fast bump allocation arena for Rust.**
4 
5 [![](https://docs.rs/bumpalo/badge.svg)](https://docs.rs/bumpalo/)
6 [![](https://img.shields.io/crates/v/bumpalo.svg)](https://crates.io/crates/bumpalo)
7 [![](https://img.shields.io/crates/d/bumpalo.svg)](https://crates.io/crates/bumpalo)
8 [![Build Status](https://dev.azure.com/fitzgen/bumpalo/_apis/build/status/fitzgen.bumpalo?branchName=master)](https://dev.azure.com/fitzgen/bumpalo/_build/latest?definitionId=2&branchName=master)
9 
10 ![](https://github.com/fitzgen/bumpalo/raw/master/bumpalo.png)
11 
12 ## Bump Allocation
13 
Bump allocation is a fast, but limited, approach to allocation. We have a chunk
of memory, and we maintain a pointer into that chunk. Whenever we allocate an
object, we do a quick check that there is enough capacity left in the chunk,
and then bump the pointer by the object's size. *That's it!*
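
As a rough sketch (ignoring alignment, and bumping upwards for simplicity,
whereas bumpalo's real implementation bumps downwards), the hot path looks
something like this:

```
// A toy model of a bump allocator; not bumpalo's actual implementation.
struct ToyBump {
    chunk: Vec<u8>,
    next: usize,
}

impl ToyBump {
    fn alloc(&mut self, size: usize) -> Option<usize> {
        // Quick capacity check...
        if self.chunk.len() - self.next < size {
            return None;
        }
        // ...then bump the pointer. That is the entire hot path.
        let offset = self.next;
        self.next += size;
        Some(offset)
    }
}

let mut toy = ToyBump { chunk: vec![0; 64], next: 0 };
assert_eq!(toy.alloc(16), Some(0));
assert_eq!(toy.alloc(16), Some(16));
assert_eq!(toy.alloc(64), None);
```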
19 
20 The disadvantage of bump allocation is that there is no general way to
21 deallocate individual objects or reclaim the memory region for a
22 no-longer-in-use object.
23 
These trade-offs make bump allocation well-suited for *phase-oriented*
allocations: groups of objects that are all allocated during the same program
phase, used together, and then deallocated together as a group.
27 
28 ## Deallocation en Masse, but No `Drop`
29 
30 To deallocate all the objects in the arena at once, we can simply reset the bump
31 pointer back to the start of the arena's memory chunk. This makes mass
32 deallocation *extremely* fast, but allocated objects' `Drop` implementations are
33 not invoked.
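
For example (just an illustration of the point above, not a new API): a value
with a `Drop` implementation can be allocated in the arena, but its destructor
will not run when the arena is dropped.

```
use bumpalo::Bump;

struct Noisy;

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping a Noisy!");
    }
}

let bump = Bump::new();
bump.alloc(Noisy);

// The arena's memory is returned to the global allocator here in one step,
// but "dropping a Noisy!" is never printed: `Drop` is not invoked for
// bump-allocated values.
drop(bump);
```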
34 
35 ## What happens when the memory chunk is full?
36 
37 This implementation will allocate a new memory chunk from the global allocator
38 and then start bump allocating into this new memory chunk.
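
For instance, even an arena created with a tiny initial capacity keeps working
as it fills up; it transparently requests additional chunks from the global
allocator (the sizes chosen below are arbitrary):

```
use bumpalo::Bump;

let bump = Bump::with_capacity(16);

// Far more than 16 bytes worth of allocations.
for i in 0..10_000u64 {
    bump.alloc(i);
}

assert!(bump.allocated_bytes() >= 10_000 * core::mem::size_of::<u64>());
```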
39 
40 ## Example
41 
42 ```
43 use bumpalo::Bump;
44 use std::u64;
45 
46 struct Doggo {
47     cuteness: u64,
48     age: u8,
49     scritches_required: bool,
50 }
51 
52 // Create a new arena to bump allocate into.
53 let bump = Bump::new();
54 
55 // Allocate values into the arena.
56 let scooter = bump.alloc(Doggo {
57     cuteness: u64::max_value(),
58     age: 8,
59     scritches_required: true,
60 });
61 
62 assert!(scooter.scritches_required);
63 ```
64 
65 ## Collections
66 
When the `"collections"` cargo feature is enabled, forks of some of the `std`
library's collection types are available in the `collections` module. These
collection types are modified to allocate their space inside `bumpalo::Bump`
arenas.
71 
72 ```rust
73 # #[cfg(feature = "collections")]
74 # {
75 use bumpalo::{Bump, collections::Vec};
76 
77 // Create a new bump arena.
78 let bump = Bump::new();
79 
80 // Create a vector of integers whose storage is backed by the bump arena. The
81 // vector cannot outlive its backing arena, and this property is enforced with
82 // Rust's lifetime rules.
83 let mut v = Vec::new_in(&bump);
84 
85 // Push a bunch of integers onto `v`!
86 for i in 0..100 {
87     v.push(i);
88 }
89 # }
90 ```
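
The `collections` module also provides a bump-allocating `String` (this
example assumes your version of the crate includes it, as recent versions do):

```rust
# #[cfg(feature = "collections")]
# {
use bumpalo::{Bump, collections::String};

let bump = Bump::new();

// Build a string whose storage lives in the bump arena.
let mut s = String::from_str_in("Hello, ", &bump);
s.push_str("bump arena!");
assert_eq!(&*s, "Hello, bump arena!");
# }
```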
91 
92 Eventually [all `std` collection types will be parameterized by an
93 allocator](https://github.com/rust-lang/rust/issues/42774) and we can remove
94 this `collections` module and use the `std` versions.
95 
96 ## `#![no_std]` Support
97 
98 Bumpalo is a `no_std` crate. It depends only on the `alloc` and `core` crates.
99 
100  */
101 
102 #![deny(missing_debug_implementations)]
103 #![deny(missing_docs)]
104 #![no_std]
105 
106 extern crate alloc as core_alloc;
107 
108 #[cfg(feature = "collections")]
109 pub mod collections;
110 
111 mod alloc;
112 
113 use core::cell::Cell;
114 use core::iter;
115 use core::marker::PhantomData;
116 use core::mem;
117 use core::ptr::{self, NonNull};
118 use core::slice;
119 use core::str;
120 use core_alloc::alloc::{alloc, dealloc, Layout};
121 
122 /// An arena to bump allocate into.
123 ///
124 /// ## No `Drop`s
125 ///
126 /// Objects that are bump-allocated will never have their `Drop` implementation
127 /// called — unless you do it manually yourself. This makes it relatively
128 /// easy to leak memory or other resources.
129 ///
130 /// If you have a type which internally manages
131 ///
132 /// * an allocation from the global heap (e.g. `Vec<T>`),
133 /// * open file descriptors (e.g. `std::fs::File`), or
134 /// * any other resource that must be cleaned up (e.g. an `mmap`)
135 ///
136 /// and relies on its `Drop` implementation to clean up the internal resource,
137 /// then if you allocate that type with a `Bump`, you need to find a new way to
138 /// clean up after it yourself.
139 ///
140 /// Potential solutions are
141 ///
142 /// * calling [`drop_in_place`][drop_in_place] or using
143 ///   [`std::mem::ManuallyDrop`][manuallydrop] to manually drop these types,
144 /// * using `bumpalo::collections::Vec` instead of `std::vec::Vec`, or
145 /// * simply avoiding allocating these problematic types within a `Bump`.
146 ///
/// Note that not calling `Drop` is memory safe! Destructors are never
/// guaranteed to run in Rust, so you cannot rely on them to enforce memory
/// safety.
150 ///
151 /// [drop_in_place]: https://doc.rust-lang.org/stable/std/ptr/fn.drop_in_place.html
152 /// [manuallydrop]: https://doc.rust-lang.org/stable/std/mem/struct.ManuallyDrop.html
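///
/// For instance, one way (shown here only as a sketch) to run a destructor
/// for a bump-allocated value is to drop it in place manually:
///
/// ```
/// use bumpalo::Bump;
///
/// let bump = Bump::new();
/// let v = bump.alloc(String::from("hello"));
///
/// unsafe {
///     // SAFETY: `v` points to a valid, initialized `String`, and we never
///     // touch the dropped value again afterwards.
///     core::ptr::drop_in_place(v);
/// }
/// ```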
153 ///
154 /// ## Example
155 ///
156 /// ```
157 /// use bumpalo::Bump;
158 ///
159 /// // Create a new bump arena.
160 /// let bump = Bump::new();
161 ///
162 /// // Allocate values into the arena.
163 /// let forty_two = bump.alloc(42);
164 /// assert_eq!(*forty_two, 42);
165 ///
166 /// // Mutable references are returned from allocation.
167 /// let mut s = bump.alloc("bumpalo");
168 /// *s = "the bump allocator; and also is a buffalo";
169 /// ```
170 #[derive(Debug)]
171 pub struct Bump {
172     // The current chunk we are bump allocating within.
173     current_chunk_footer: Cell<NonNull<ChunkFooter>>,
174 }
175 
176 #[repr(C)]
177 #[derive(Debug)]
178 struct ChunkFooter {
179     // Pointer to the start of this chunk allocation. This footer is always at
180     // the end of the chunk.
181     data: NonNull<u8>,
182 
183     // The layout of this chunk's allocation.
184     layout: Layout,
185 
186     // Link to the previous chunk, if any.
187     prev: Cell<Option<NonNull<ChunkFooter>>>,
188 
189     // Bump allocation finger that is always in the range `self.data..=self`.
190     ptr: Cell<NonNull<u8>>,
191 }
192 
193 impl Default for Bump {
    fn default() -> Bump {
195         Bump::new()
196     }
197 }
198 
199 impl Drop for Bump {
    fn drop(&mut self) {
201         unsafe {
202             dealloc_chunk_list(Some(self.current_chunk_footer.get()));
203         }
204     }
205 }
206 
207 #[inline]
unsafe fn dealloc_chunk_list(mut footer: Option<NonNull<ChunkFooter>>) {
209     while let Some(f) = footer {
210         footer = f.as_ref().prev.get();
211         dealloc(f.as_ref().data.as_ptr(), f.as_ref().layout);
212     }
213 }
214 
// A `Bump` is safe to send between threads because nothing aliases its owned
// chunks until you start allocating from it. And once you do allocate from it,
// the returned references to allocations borrow the `Bump` and therefore
// prevent sending the `Bump` across threads until those borrows end.
219 unsafe impl Send for Bump {}
220 
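// Rounds `n` up to the nearest multiple of `divisor`, which must be a nonzero
// power of two. Returns `None` if the rounded value would overflow `usize`.
// For example, `round_up_to(13, 8) == Some(16)`.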
221 #[inline]
pub(crate) fn round_up_to(n: usize, divisor: usize) -> Option<usize> {
223     debug_assert!(divisor > 0);
224     debug_assert!(divisor.is_power_of_two());
225     Some(n.checked_add(divisor - 1)? & !(divisor - 1))
226 }
227 
228 // After this point, we try to hit page boundaries instead of powers of 2
229 const PAGE_STRATEGY_CUTOFF: usize = 0x1000;
230 
231 // We only support alignments of up to 16 bytes for iter_allocated_chunks.
232 const SUPPORTED_ITER_ALIGNMENT: usize = 16;
233 const CHUNK_ALIGN: usize = SUPPORTED_ITER_ALIGNMENT;
234 const FOOTER_SIZE: usize = mem::size_of::<ChunkFooter>();
235 
// Assert that the alignment of `ChunkFooter` is at most the supported chunk
// alignment. This gives a compile-time error if that is not the case.
237 const _FOOTER_ALIGN_ASSERTION: bool = mem::align_of::<ChunkFooter>() <= CHUNK_ALIGN;
238 const _: [(); _FOOTER_ALIGN_ASSERTION as usize] = [()];
239 
240 // Maximum typical overhead per allocation imposed by allocators.
241 const MALLOC_OVERHEAD: usize = 16;
242 
// This is the overhead from malloc, the footer and alignment. For instance, if
// we want to request a chunk of memory that has at least X bytes usable for
// allocations (where X is aligned to CHUNK_ALIGN), then we expect that, after
// adding the footer, malloc overhead and alignment, the chunk of memory the
// allocator actually sets aside for us is X+OVERHEAD rounded up to the nearest
// suitable size boundary.
249 const OVERHEAD: usize = (MALLOC_OVERHEAD + FOOTER_SIZE + (CHUNK_ALIGN - 1)) & !(CHUNK_ALIGN - 1);
250 
251 // Choose a relatively small default initial chunk size, since we double chunk
252 // sizes as we grow bump arenas to amortize costs of hitting the global
253 // allocator.
254 const FIRST_ALLOCATION_GOAL: usize = 1 << 9;
255 
// The actual size of the first allocation is going to be a bit smaller
// than the goal. We need to make room for the footer, and we also need
// to take the alignment into account.
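// For example, on a typical 64-bit target the footer is five words (40 bytes),
// so OVERHEAD = (16 + 40 + 15) & !15 = 64 and the first chunk therefore has
// 512 - 64 = 448 bytes usable for allocations.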
259 const DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER: usize = FIRST_ALLOCATION_GOAL - OVERHEAD;
260 
261 #[inline]
fn layout_for_array<T>(len: usize) -> Option<Layout> {
    // TODO: use Layout::array once the rust feature `alloc_layout_extra`
    // gets stabilized
    //
    // According to https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment
    // the size of a value is always a multiple of its alignment. But that does not seem to
    // match https://doc.rust-lang.org/std/alloc/struct.Layout.html#method.from_size_align
    //
    // Let's be on the safe side and round each element's size up to its
    // alignment in any case.
    //
    // An interesting question is whether there needs to be padding at the end of
    // the last object in the array. Again, we take the safe approach and include it.
274 
275     let layout = Layout::new::<T>();
276     let size_rounded_up = round_up_to(layout.size(), layout.align())?;
277     let total_size = len.checked_mul(size_rounded_up)?;
278 
279     Layout::from_size_align(total_size, layout.align()).ok()
280 }
281 
282 /// Wrapper around `Layout::from_size_align` that adds debug assertions.
283 #[inline]
unsafe fn layout_from_size_align(size: usize, align: usize) -> Layout {
285     if cfg!(debug_assertions) {
286         Layout::from_size_align(size, align).unwrap()
287     } else {
288         Layout::from_size_align_unchecked(size, align)
289     }
290 }
291 
292 #[inline(never)]
fn allocation_size_overflow<T>() -> T {
294     panic!("requested allocation size overflowed")
295 }
296 
297 impl Bump {
298     /// Construct a new arena to bump allocate into.
299     ///
300     /// ## Example
301     ///
302     /// ```
303     /// let bump = bumpalo::Bump::new();
304     /// # let _ = bump;
305     /// ```
    pub fn new() -> Bump {
307         Self::with_capacity(0)
308     }
309 
310     /// Construct a new arena with the specified capacity to bump allocate into.
311     ///
312     /// ## Example
313     ///
314     /// ```
315     /// let bump = bumpalo::Bump::with_capacity(100);
316     /// # let _ = bump;
317     /// ```
    pub fn with_capacity(capacity: usize) -> Bump {
319         let chunk_footer = Self::new_chunk(
320             None,
321             Some(unsafe { layout_from_size_align(capacity, 1) }),
322             None,
323         );
324         Bump {
325             current_chunk_footer: Cell::new(chunk_footer),
326         }
327     }
328 
    /// Allocate a new chunk and return its initialized footer.
    ///
    /// If given, `old_size_with_footer` is the size (including the footer) of
    /// the current chunk, and `requested_layout` is the layout of the
    /// allocation request that triggered us to fall back to allocating a new
    /// chunk of memory.
    fn new_chunk(
335         old_size_with_footer: Option<usize>,
336         requested_layout: Option<Layout>,
337         prev: Option<NonNull<ChunkFooter>>,
338     ) -> NonNull<ChunkFooter> {
339         unsafe {
340             // As a sane default, we want our new allocation to be about twice as
341             // big as the previous allocation
342             let mut new_size_without_footer =
343                 if let Some(old_size_with_footer) = old_size_with_footer {
344                     let old_size_without_footer = old_size_with_footer - FOOTER_SIZE;
345                     old_size_without_footer
346                         .checked_mul(2)
347                         .unwrap_or_else(|| oom())
348                 } else {
349                     DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER
350                 };
351 
352             // We want to have CHUNK_ALIGN or better alignment
353             let mut align = CHUNK_ALIGN;
354 
355             // If we already know we need to fulfill some request,
356             // make sure we allocate at least enough to satisfy it
357             if let Some(requested_layout) = requested_layout {
358                 align = align.max(requested_layout.align());
359                 let requested_size = round_up_to(requested_layout.size(), align)
360                     .unwrap_or_else(allocation_size_overflow);
361                 new_size_without_footer = new_size_without_footer.max(requested_size);
362             }
363 
            // We want our allocations to play nice with the memory allocator,
            // and waste as little memory as possible.
            // For small allocations, this means that the entire allocation,
            // including the chunk footer and malloc's internal overhead, is
            // as close to a power of two as we can get without going over.
            // For larger allocations, we only need to get close to a page
            // boundary without going over.
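            //
            // For example, on a typical 64-bit target (OVERHEAD = 64), doubling
            // the default 448-byte chunk asks for 896 usable bytes, which the
            // branch below rounds up to 1024 - 64 = 960 so that the whole
            // allocation, including malloc overhead, lands on a power of two.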
371             if new_size_without_footer < PAGE_STRATEGY_CUTOFF {
372                 new_size_without_footer =
373                     (new_size_without_footer + OVERHEAD).next_power_of_two() - OVERHEAD;
374             } else {
375                 new_size_without_footer = round_up_to(new_size_without_footer + OVERHEAD, 0x1000)
376                     .unwrap_or_else(|| oom())
377                     - OVERHEAD;
378             }
379 
380             debug_assert_eq!(align % CHUNK_ALIGN, 0);
381             debug_assert_eq!(new_size_without_footer % CHUNK_ALIGN, 0);
382             let size = new_size_without_footer
383                 .checked_add(FOOTER_SIZE)
384                 .unwrap_or_else(allocation_size_overflow);
385             let layout = layout_from_size_align(size, align);
386 
387             debug_assert!(size >= old_size_with_footer.unwrap_or(0) * 2);
388 
389             let data = alloc(layout);
390             let data = NonNull::new(data).unwrap_or_else(|| oom());
391 
392             // The `ChunkFooter` is at the end of the chunk.
393             let footer_ptr = data.as_ptr() as usize + new_size_without_footer;
394             debug_assert_eq!((data.as_ptr() as usize) % align, 0);
395             debug_assert_eq!(footer_ptr % CHUNK_ALIGN, 0);
396             let footer_ptr = footer_ptr as *mut ChunkFooter;
397 
398             // The bump pointer is initialized to the end of the range we will
399             // bump out of.
400             let ptr = Cell::new(NonNull::new_unchecked(footer_ptr as *mut u8));
401 
402             ptr::write(
403                 footer_ptr,
404                 ChunkFooter {
405                     data,
406                     layout,
407                     prev: Cell::new(prev),
408                     ptr,
409                 },
410             );
411 
412             NonNull::new_unchecked(footer_ptr)
413         }
414     }
415 
416     /// Reset this bump allocator.
417     ///
418     /// Performs mass deallocation on everything allocated in this arena by
419     /// resetting the pointer into the underlying chunk of memory to the start
420     /// of the chunk. Does not run any `Drop` implementations on deallocated
421     /// objects; see [the `Bump` type's top-level
422     /// documentation](./struct.Bump.html) for details.
423     ///
424     /// If this arena has allocated multiple chunks to bump allocate into, then
425     /// the excess chunks are returned to the global allocator.
426     ///
427     /// ## Example
428     ///
429     /// ```
430     /// let mut bump = bumpalo::Bump::new();
431     ///
432     /// // Allocate a bunch of things.
433     /// {
434     ///     for i in 0..100 {
435     ///         bump.alloc(i);
436     ///     }
437     /// }
438     ///
439     /// // Reset the arena.
440     /// bump.reset();
441     ///
442     /// // Allocate some new things in the space previously occupied by the
443     /// // original things.
444     /// for j in 200..400 {
445     ///     bump.alloc(j);
446     /// }
447     ///```
    pub fn reset(&mut self) {
449         // Takes `&mut self` so `self` must be unique and there can't be any
450         // borrows active that would get invalidated by resetting.
451         unsafe {
452             let cur_chunk = self.current_chunk_footer.get();
453 
454             // Deallocate all chunks except the current one
455             let prev_chunk = cur_chunk.as_ref().prev.replace(None);
456             dealloc_chunk_list(prev_chunk);
457 
458             // Reset the bump finger to the end of the chunk.
459             cur_chunk.as_ref().ptr.set(cur_chunk.cast());
460 
461             debug_assert!(
462                 self.current_chunk_footer
463                     .get()
464                     .as_ref()
465                     .prev
466                     .get()
467                     .is_none(),
468                 "We should only have a single chunk"
469             );
470             debug_assert_eq!(
471                 self.current_chunk_footer.get().as_ref().ptr.get(),
472                 self.current_chunk_footer.get().cast(),
473                 "Our chunk's bump finger should be reset to the start of its allocation"
474             );
475         }
476     }
477 
478     /// Allocate an object in this `Bump` and return an exclusive reference to
479     /// it.
480     ///
481     /// ## Panics
482     ///
483     /// Panics if reserving space for `T` would cause an overflow.
484     ///
485     /// ## Example
486     ///
487     /// ```
488     /// let bump = bumpalo::Bump::new();
489     /// let x = bump.alloc("hello");
490     /// assert_eq!(*x, "hello");
491     /// ```
492     #[inline(always)]
493     #[allow(clippy::mut_from_ref)]
    pub fn alloc<T>(&self, val: T) -> &mut T {
495         self.alloc_with(|| val)
496     }
497 
    /// Pre-allocates space for an object in this `Bump`, initializes it using
    /// the closure, and then returns an exclusive reference to it.
500     ///
501     /// Calling `bump.alloc(x)` is essentially equivalent to calling
502     /// `bump.alloc_with(|| x)`. However if you use `alloc_with`, then the
503     /// closure will not be invoked until after allocating space for storing
504     /// `x` on the heap.
505     ///
506     /// This can be useful in certain edge-cases related to compiler
507     /// optimizations. When evaluating `bump.alloc(x)`, semantically `x` is
508     /// first put on the stack and then moved onto the heap. In some cases,
509     /// the compiler is able to optimize this into constructing `x` directly
510     /// on the heap, however in many cases it does not.
511     ///
    /// The function `alloc_with` tries to help the compiler be smarter. In
    /// most cases doing `bump.alloc_with(|| x)` in release mode will be
    /// enough to help the compiler realize that this optimization is valid
    /// and to construct `x` directly on the heap.
516     ///
517     /// ## Warning
518     ///
519     /// This function critically depends on compiler optimizations to achieve
520     /// its desired effect. This means that it is not an effective tool when
    /// compiling without optimizations.
522     ///
523     /// Even when optimizations are on, this function does not **guarantee**
524     /// that the value is constructed on the heap. To the best of our
525     /// knowledge no such guarantee can be made in stable Rust as of 1.33.
526     ///
527     /// ## Panics
528     ///
529     /// Panics if reserving space for `T` would cause an overflow.
530     ///
531     /// ## Example
532     ///
533     /// ```
534     /// let bump = bumpalo::Bump::new();
535     /// let x = bump.alloc_with(|| "hello");
536     /// assert_eq!(*x, "hello");
537     /// ```
538     #[inline(always)]
539     #[allow(clippy::mut_from_ref)]
    pub fn alloc_with<F, T>(&self, f: F) -> &mut T
541     where
542         F: FnOnce() -> T,
543     {
544         #[inline(always)]
545         unsafe fn inner_writer<T, F>(ptr: *mut T, f: F)
546         where
547             F: FnOnce() -> T,
548         {
549             // This function is translated as:
550             // - allocate space for a T on the stack
551             // - call f() with the return value being put onto this stack space
552             // - memcpy from the stack to the heap
553             //
554             // Ideally we want LLVM to always realize that doing a stack
555             // allocation is unnecessary and optimize the code so it writes
556             // directly into the heap instead. It seems we get it to realize
            // this most consistently if we put this critical line into its
            // own function instead of inlining it into the surrounding code.
559             ptr::write(ptr, f())
560         }
561 
562         let layout = Layout::new::<T>();
563 
564         unsafe {
565             let p = self.alloc_layout(layout);
566             let p = p.as_ptr() as *mut T;
567             inner_writer(p, f);
568             &mut *p
569         }
570     }
571 
572     /// `Copy` a slice into this `Bump` and return an exclusive reference to
573     /// the copy.
574     ///
575     /// ## Panics
576     ///
577     /// Panics if reserving space for the slice would cause an overflow.
578     ///
579     /// ## Example
580     ///
581     /// ```
582     /// let bump = bumpalo::Bump::new();
583     /// let x = bump.alloc_slice_copy(&[1, 2, 3]);
584     /// assert_eq!(x, &[1, 2, 3]);
585     /// ```
586     #[inline(always)]
587     #[allow(clippy::mut_from_ref)]
    pub fn alloc_slice_copy<T>(&self, src: &[T]) -> &mut [T]
589     where
590         T: Copy,
591     {
592         let layout = Layout::for_value(src);
593         let dst = self.alloc_layout(layout).cast::<T>();
594 
595         unsafe {
596             ptr::copy_nonoverlapping(src.as_ptr(), dst.as_ptr(), src.len());
597             slice::from_raw_parts_mut(dst.as_ptr(), src.len())
598         }
599     }
600 
601     /// `Clone` a slice into this `Bump` and return an exclusive reference to
602     /// the clone. Prefer `alloc_slice_copy` if `T` is `Copy`.
603     ///
604     /// ## Panics
605     ///
606     /// Panics if reserving space for the slice would cause an overflow.
607     ///
608     /// ## Example
609     ///
610     /// ```
611     /// #[derive(Clone, Debug, Eq, PartialEq)]
612     /// struct Sheep {
613     ///     name: String,
614     /// }
615     ///
616     /// let originals = vec![
617     ///     Sheep { name: "Alice".into() },
618     ///     Sheep { name: "Bob".into() },
619     ///     Sheep { name: "Cathy".into() },
620     /// ];
621     ///
622     /// let bump = bumpalo::Bump::new();
623     /// let clones = bump.alloc_slice_clone(&originals);
624     /// assert_eq!(originals, clones);
625     /// ```
626     #[inline(always)]
627     #[allow(clippy::mut_from_ref)]
    pub fn alloc_slice_clone<T>(&self, src: &[T]) -> &mut [T]
629     where
630         T: Clone,
631     {
632         let layout = Layout::for_value(src);
633         let dst = self.alloc_layout(layout).cast::<T>();
634 
635         unsafe {
636             for (i, val) in src.iter().cloned().enumerate() {
637                 ptr::write(dst.as_ptr().add(i), val);
638             }
639 
640             slice::from_raw_parts_mut(dst.as_ptr(), src.len())
641         }
642     }
643 
644     /// `Copy` a string slice into this `Bump` and return an exclusive reference to it.
645     ///
646     /// ## Panics
647     ///
648     /// Panics if reserving space for the string would cause an overflow.
649     ///
650     /// ## Example
651     ///
652     /// ```
653     /// let bump = bumpalo::Bump::new();
654     /// let hello = bump.alloc_str("hello world");
655     /// assert_eq!("hello world", hello);
656     /// ```
657     #[inline(always)]
658     #[allow(clippy::mut_from_ref)]
    pub fn alloc_str(&self, src: &str) -> &mut str {
660         let buffer = self.alloc_slice_copy(src.as_bytes());
661         unsafe {
662             // This is OK, because it already came in as str, so it is guaranteed to be utf8
663             str::from_utf8_unchecked_mut(buffer)
664         }
665     }
666 
    /// Allocates a new slice of size `len` into this `Bump` and returns an
    /// exclusive reference to it.
    ///
    /// The elements of the slice are initialized using the supplied closure.
    /// The closure is called with each element's index in the slice.
672     ///
673     /// ## Panics
674     ///
675     /// Panics if reserving space for the slice would cause an overflow.
676     ///
677     /// ## Example
678     ///
679     /// ```
680     /// let bump = bumpalo::Bump::new();
681     /// let x = bump.alloc_slice_fill_with(5, |i| 5*(i+1));
682     /// assert_eq!(x, &[5, 10, 15, 20, 25]);
683     /// ```
684     #[inline(always)]
685     #[allow(clippy::mut_from_ref)]
    pub fn alloc_slice_fill_with<T, F>(&self, len: usize, mut f: F) -> &mut [T]
687     where
688         F: FnMut(usize) -> T,
689     {
690         let layout = layout_for_array::<T>(len).unwrap_or_else(|| oom());
691         let dst = self.alloc_layout(layout).cast::<T>();
692 
693         unsafe {
694             for i in 0..len {
695                 ptr::write(dst.as_ptr().add(i), f(i));
696             }
697 
698             let result = slice::from_raw_parts_mut(dst.as_ptr(), len);
699             debug_assert_eq!(Layout::for_value(result), layout);
700             result
701         }
702     }
703 
    /// Allocates a new slice of size `len` into this `Bump` and returns an
    /// exclusive reference to it.
706     ///
707     /// All elements of the slice are initialized to `value`.
708     ///
709     /// ## Panics
710     ///
711     /// Panics if reserving space for the slice would cause an overflow.
712     ///
713     /// ## Example
714     ///
715     /// ```
716     /// let bump = bumpalo::Bump::new();
717     /// let x = bump.alloc_slice_fill_copy(5, 42);
718     /// assert_eq!(x, &[42, 42, 42, 42, 42]);
719     /// ```
720     #[inline(always)]
721     #[allow(clippy::mut_from_ref)]
    pub fn alloc_slice_fill_copy<T: Copy>(&self, len: usize, value: T) -> &mut [T] {
723         self.alloc_slice_fill_with(len, |_| value)
724     }
725 
    /// Allocates a new slice of size `len` into this `Bump` and returns an
    /// exclusive reference to it.
728     ///
729     /// All elements of the slice are initialized to `value.clone()`.
730     ///
731     /// ## Panics
732     ///
733     /// Panics if reserving space for the slice would cause an overflow.
734     ///
735     /// ## Example
736     ///
737     /// ```
738     /// let bump = bumpalo::Bump::new();
739     /// let s: String = "Hello Bump!".to_string();
740     /// let x: &[String] = bump.alloc_slice_fill_clone(2, &s);
741     /// assert_eq!(x.len(), 2);
742     /// assert_eq!(&x[0], &s);
743     /// assert_eq!(&x[1], &s);
744     /// ```
745     #[inline(always)]
746     #[allow(clippy::mut_from_ref)]
    pub fn alloc_slice_fill_clone<T: Clone>(&self, len: usize, value: &T) -> &mut [T] {
748         self.alloc_slice_fill_with(len, |_| value.clone())
749     }
750 
    /// Allocates a new slice of size `len` into this `Bump` and returns an
    /// exclusive reference to it.
753     ///
754     /// The elements are initialized using the supplied iterator.
755     ///
756     /// ## Panics
757     ///
758     /// Panics if reserving space for the slice would cause an overflow, or if the supplied
759     /// iterator returns fewer elements than it promised.
760     ///
761     /// ## Example
762     ///
763     /// ```
764     /// let bump = bumpalo::Bump::new();
765     /// let x: &[i32] = bump.alloc_slice_fill_iter([2, 3, 5].iter().cloned().map(|i| i * i));
766     /// assert_eq!(x, [4, 9, 25]);
767     /// ```
768     #[inline(always)]
769     #[allow(clippy::mut_from_ref)]
    pub fn alloc_slice_fill_iter<T, I>(&self, iter: I) -> &mut [T]
771     where
772         I: IntoIterator<Item = T>,
773         I::IntoIter: ExactSizeIterator,
774     {
775         let mut iter = iter.into_iter();
776         self.alloc_slice_fill_with(iter.len(), |_| {
777             iter.next().expect("Iterator supplied too few elements")
778         })
779     }
780 
    /// Allocates a new slice of size `len` into this `Bump` and returns an
    /// exclusive reference to it.
783     ///
784     /// All elements of the slice are initialized to `T::default()`.
785     ///
786     /// ## Panics
787     ///
788     /// Panics if reserving space for the slice would cause an overflow.
789     ///
790     /// ## Example
791     ///
792     /// ```
793     /// let bump = bumpalo::Bump::new();
794     /// let x = bump.alloc_slice_fill_default::<u32>(5);
795     /// assert_eq!(x, &[0, 0, 0, 0, 0]);
796     /// ```
797     #[inline(always)]
798     #[allow(clippy::mut_from_ref)]
    pub fn alloc_slice_fill_default<T: Default>(&self, len: usize) -> &mut [T] {
800         self.alloc_slice_fill_with(len, |_| T::default())
801     }
802 
803     /// Allocate space for an object with the given `Layout`.
804     ///
805     /// The returned pointer points at uninitialized memory, and should be
806     /// initialized with
807     /// [`std::ptr::write`](https://doc.rust-lang.org/stable/std/ptr/fn.write.html).
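    ///
    /// ## Example
    ///
    /// A minimal sketch of writing a value through the returned pointer:
    ///
    /// ```
    /// use std::alloc::Layout;
    ///
    /// let bump = bumpalo::Bump::new();
    /// let p = bump.alloc_layout(Layout::new::<u64>());
    ///
    /// unsafe {
    ///     // The memory is uninitialized until we write to it.
    ///     std::ptr::write(p.as_ptr() as *mut u64, 42);
    ///     assert_eq!(*(p.as_ptr() as *const u64), 42);
    /// }
    /// ```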
808     #[inline(always)]
    pub fn alloc_layout(&self, layout: Layout) -> NonNull<u8> {
810         if let Some(p) = self.try_alloc_layout_fast(layout) {
811             p
812         } else {
813             self.alloc_layout_slow(layout)
814         }
815     }
816 
817     #[inline(always)]
    fn try_alloc_layout_fast(&self, layout: Layout) -> Option<NonNull<u8>> {
819         unsafe {
820             if layout.size() == 0 {
821                 // We want to use NonNull::dangling here, but that function uses mem::align_of::<T>
822                 // internally. For our use-case we cannot call dangling::<T>, since we are not generic
823                 // over T; we only have access to the Layout of T. Instead we re-implement the
824                 // functionality here.
825                 //
826                 // See https://github.com/rust-lang/rust/blob/9966af3/src/libcore/ptr/non_null.rs#L70
827                 // for the reference implementation.
828                 let ptr = layout.align() as *mut u8;
829                 return Some(NonNull::new_unchecked(ptr));
830             }
831 
832             let footer = self.current_chunk_footer.get();
833             let footer = footer.as_ref();
834             let ptr = footer.ptr.get().as_ptr() as usize;
835             let start = footer.data.as_ptr() as usize;
836             debug_assert!(start <= ptr);
837             debug_assert!(ptr <= footer as *const _ as usize);
838 
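            // Bump downwards: move the finger down by `size`, then round it
            // down to `align` (a power of two, so masking with `!(align - 1)`
            // rounds down). If `checked_sub` returns `None`, the request cannot
            // fit and we fall back to the slow path.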
839             let ptr = ptr.checked_sub(layout.size())?;
840             let aligned_ptr = ptr & !(layout.align() - 1);
841 
842             if aligned_ptr >= start {
843                 let aligned_ptr = NonNull::new_unchecked(aligned_ptr as *mut u8);
844                 footer.ptr.set(aligned_ptr);
845                 Some(aligned_ptr)
846             } else {
847                 None
848             }
849         }
850     }
851 
    // Slow path allocation for when we need to allocate a new chunk from the
    // global allocator because there isn't enough room in our current chunk.
854     #[inline(never)]
    fn alloc_layout_slow(&self, layout: Layout) -> NonNull<u8> {
856         unsafe {
857             let size = layout.size();
858 
859             // Get a new chunk from the global allocator.
860             let current_footer = self.current_chunk_footer.get();
861             let current_layout = current_footer.as_ref().layout;
862             let new_footer = Bump::new_chunk(
863                 Some(current_layout.size()),
864                 Some(layout),
865                 Some(current_footer),
866             );
867             debug_assert_eq!(
868                 new_footer.as_ref().data.as_ptr() as usize % layout.align(),
869                 0
870             );
871 
872             // Set the new chunk as our new current chunk.
873             self.current_chunk_footer.set(new_footer);
874 
875             let new_footer = new_footer.as_ref();
876 
            // Move the bump finger down to allocate room for the requested
            // layout. We know this can't overflow because we successfully
            // allocated a chunk of at least the requested size.
880             let ptr = new_footer.ptr.get().as_ptr() as usize - size;
881             // Round the pointer down to the requested alignment.
882             let ptr = ptr & !(layout.align() - 1);
883             debug_assert!(
884                 ptr <= new_footer as *const _ as usize,
885                 "{:#x} <= {:#x}",
886                 ptr,
887                 new_footer as *const _ as usize
888             );
889             let ptr = NonNull::new_unchecked(ptr as *mut u8);
890             new_footer.ptr.set(ptr);
891 
892             // Return a pointer to the freshly allocated region in this chunk.
893             ptr
894         }
895     }
896 
897     /// Returns an iterator over each chunk of allocated memory that
898     /// this arena has bump allocated into.
899     ///
900     /// The chunks are returned ordered by allocation time, with the most
901     /// recently allocated chunk being returned first, and the least recently
902     /// allocated chunk being returned last.
903     ///
904     /// The values inside each chunk are also ordered by allocation time, with
905     /// the most recent allocation being earlier in the slice, and the least
906     /// recent allocation being towards the end of the slice.
907     ///
908     /// ## Safety
909     ///
910     /// Because this method takes `&mut self`, we know that the bump arena
911     /// reference is unique and therefore there aren't any active references to
912     /// any of the objects we've allocated in it either. This potential aliasing
913     /// of exclusive references is one common footgun for unsafe code that we
914     /// don't need to worry about here.
915     ///
916     /// However, there could be regions of uninitialized memory used as padding
917     /// between allocations, which is why this iterator has items of type
918     /// `[MaybeUninit<u8>]`, instead of simply `[u8]`.
919     ///
920     /// The only way to guarantee that there is no padding between allocations
921     /// or within allocated objects is if all of these properties hold:
922     ///
923     /// 1. Every object allocated in this arena has the same alignment,
924     ///    and that alignment is at most 16.
925     /// 2. Every object's size is a multiple of its alignment.
926     /// 3. None of the objects allocated in this arena contain any internal
927     ///    padding.
928     ///
929     /// If you want to use this `iter_allocated_chunks` method, it is *your*
930     /// responsibility to ensure that these properties hold before calling
931     /// `MaybeUninit::assume_init` or otherwise reading the returned values.
932     ///
933     /// ## Example
934     ///
935     /// ```
936     /// let mut bump = bumpalo::Bump::new();
937     ///
938     /// // Allocate a bunch of `i32`s in this bump arena, potentially causing
939     /// // additional memory chunks to be reserved.
940     /// for i in 0..10000 {
941     ///     bump.alloc(i);
942     /// }
943     ///
944     /// // Iterate over each chunk we've bump allocated into. This is safe
945     /// // because we have only allocated `i32`s in this arena, which fulfills
946     /// // the above requirements.
947     /// for ch in bump.iter_allocated_chunks() {
948     ///     println!("Used a chunk that is {} bytes long", ch.len());
949     ///     println!("The first byte is {:?}", unsafe {
950     ///         ch.get(0).unwrap().assume_init()
951     ///     });
952     /// }
953     ///
954     /// // Within a chunk, allocations are ordered from most recent to least
955     /// // recent. If we allocated 'a', then 'b', then 'c', when we iterate
956     /// // through the chunk's data, we get them in the order 'c', then 'b',
957     /// // then 'a'.
958     ///
959     /// bump.reset();
960     /// bump.alloc(b'a');
961     /// bump.alloc(b'b');
962     /// bump.alloc(b'c');
963     ///
964     /// assert_eq!(bump.iter_allocated_chunks().count(), 1);
965     /// let chunk = bump.iter_allocated_chunks().nth(0).unwrap();
966     /// assert_eq!(chunk.len(), 3);
967     ///
968     /// // Safe because we've only allocated `u8`s in this arena, which
969     /// // fulfills the above requirements.
970     /// unsafe {
971     ///     assert_eq!(chunk[0].assume_init(), b'c');
972     ///     assert_eq!(chunk[1].assume_init(), b'b');
973     ///     assert_eq!(chunk[2].assume_init(), b'a');
974     /// }
975     /// ```
    pub fn iter_allocated_chunks(&mut self) -> ChunkIter<'_> {
977         ChunkIter {
978             footer: Some(self.current_chunk_footer.get()),
979             bump: PhantomData,
980         }
981     }
982 
983     /// Calculates the number of bytes currently allocated across all chunks.
984     ///
985     /// If you allocate types of different alignments or types with
986     /// larger-than-typical alignment in the same arena, some padding
987     /// bytes might get allocated in the bump arena. Note that those padding
988     /// bytes will add to this method's resulting sum, so you cannot rely
989     /// on it only counting the sum of the sizes of the things
990     /// you've allocated in the arena.
991     ///
992     /// ## Example
993     ///
994     /// ```
995     /// let bump = bumpalo::Bump::new();
996     /// let _x = bump.alloc_slice_fill_default::<u32>(5);
997     /// let bytes = bump.allocated_bytes();
998     /// assert!(bytes >= core::mem::size_of::<u32>() * 5);
999     /// ```
    pub fn allocated_bytes(&self) -> usize {
1001         let mut footer = Some(self.current_chunk_footer.get());
1002 
1003         let mut bytes = 0;
1004 
1005         while let Some(f) = footer {
1006             let foot = unsafe { f.as_ref() };
1007 
1008             let ptr = foot.ptr.get().as_ptr() as usize;
1009             debug_assert!(ptr <= foot as *const _ as usize);
1010 
1011             bytes += foot as *const _ as usize - ptr;
1012 
1013             footer = foot.prev.get();
1014         }
1015 
1016         bytes
1017     }
1018 
1019     #[inline]
    unsafe fn is_last_allocation(&self, ptr: NonNull<u8>) -> bool {
1021         let footer = self.current_chunk_footer.get();
1022         let footer = footer.as_ref();
1023         footer.ptr.get() == ptr
1024     }
1025 }
1026 
1027 /// An iterator over each chunk of allocated memory that
1028 /// an arena has bump allocated into.
1029 ///
1030 /// The chunks are returned ordered by allocation time, with the most recently
1031 /// allocated chunk being returned first.
1032 ///
/// The values inside each chunk are also ordered by allocation time, with the
/// most recent allocation being earlier in the slice.
1035 ///
1036 /// This struct is created by the [`iter_allocated_chunks`] method on
1037 /// [`Bump`]. See that function for a safety description regarding reading from the returned items.
1038 ///
1039 /// [`Bump`]: ./struct.Bump.html
1040 /// [`iter_allocated_chunks`]: ./struct.Bump.html#method.iter_allocated_chunks
1041 #[derive(Debug)]
1042 pub struct ChunkIter<'a> {
1043     footer: Option<NonNull<ChunkFooter>>,
1044     bump: PhantomData<&'a mut Bump>,
1045 }
1046 
1047 impl<'a> Iterator for ChunkIter<'a> {
1048     type Item = &'a [mem::MaybeUninit<u8>];
    fn next(&mut self) -> Option<&'a [mem::MaybeUninit<u8>]> {
1050         unsafe {
1051             let foot = self.footer?;
1052             let foot = foot.as_ref();
1053             let data = foot.data.as_ptr() as usize;
1054             let ptr = foot.ptr.get().as_ptr() as usize;
1055             debug_assert!(data <= ptr);
1056             debug_assert!(ptr <= foot as *const _ as usize);
1057 
1058             let len = foot as *const _ as usize - ptr;
1059             let slice = slice::from_raw_parts(ptr as *const mem::MaybeUninit<u8>, len);
1060             self.footer = foot.prev.get();
1061             Some(slice)
1062         }
1063     }
1064 }
1065 
1066 impl<'a> iter::FusedIterator for ChunkIter<'a> {}
1067 
1068 #[inline(never)]
1069 #[cold]
fn oom() -> ! {
1071     panic!("out of memory")
1072 }
1073 
1074 unsafe impl<'a> alloc::Alloc for &'a Bump {
1075     #[inline(always)]
    unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, alloc::AllocErr> {
1077         Ok(self.alloc_layout(layout))
1078     }
1079 
1080     #[inline]
    unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
1082         // If the pointer is the last allocation we made, we can reuse the bytes,
1083         // otherwise they are simply leaked -- at least until somebody calls reset().
1084         if layout.size() != 0 && self.is_last_allocation(ptr) {
1085             let ptr = NonNull::new_unchecked(ptr.as_ptr().add(layout.size()));
1086             self.current_chunk_footer.get().as_ref().ptr.set(ptr);
1087         }
1088     }
1089 
1090     #[inline]
    unsafe fn realloc(
1092         &mut self,
1093         ptr: NonNull<u8>,
1094         layout: Layout,
1095         new_size: usize,
1096     ) -> Result<NonNull<u8>, alloc::AllocErr> {
1097         let old_size = layout.size();
1098 
1099         if old_size == 0 {
1100             return self.alloc(layout);
1101         }
1102 
1103         if new_size <= old_size {
1104             if self.is_last_allocation(ptr)
1105                 // Only reclaim the excess space (which requires a copy) if it
1106                 // is worth it: we are actually going to recover "enough" space
1107                 // and we can do a non-overlapping copy.
1108                 && new_size <= old_size / 2
1109             {
1110                 let delta = old_size - new_size;
1111                 let footer = self.current_chunk_footer.get();
1112                 let footer = footer.as_ref();
1113                 footer
1114                     .ptr
1115                     .set(NonNull::new_unchecked(footer.ptr.get().as_ptr().add(delta)));
1116                 let new_ptr = footer.ptr.get();
1117                 // NB: we know it is non-overlapping because of the size check
1118                 // in the `if` condition.
1119                 ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), new_size);
1120                 return Ok(new_ptr);
1121             } else {
1122                 return Ok(ptr);
1123             }
1124         }
1125 
1126         if self.is_last_allocation(ptr) {
1127             // Try to allocate the delta size within this same block so we can
1128             // reuse the currently allocated space.
1129             let delta = new_size - old_size;
1130             if let Some(p) =
1131                 self.try_alloc_layout_fast(layout_from_size_align(delta, layout.align()))
1132             {
1133                 ptr::copy(ptr.as_ptr(), p.as_ptr(), old_size);
1134                 return Ok(p);
1135             }
1136         }
1137 
1138         // Fallback: do a fresh allocation and copy the existing data into it.
1139         let new_layout = layout_from_size_align(new_size, layout.align());
1140         let new_ptr = self.alloc_layout(new_layout);
1141         ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), old_size);
1142         Ok(new_ptr)
1143     }
1144 }
1145 
1146 #[cfg(test)]
1147 mod tests {
1148     use super::*;
1149 
1150     #[test]
    fn chunk_footer_is_five_words() {
1152         assert_eq!(mem::size_of::<ChunkFooter>(), mem::size_of::<usize>() * 5);
1153     }
1154 
1155     #[test]
1156     #[allow(clippy::cognitive_complexity)]
    fn test_realloc() {
1158         use crate::alloc::Alloc;
1159 
1160         unsafe {
1161             const CAPACITY: usize = 1024 - OVERHEAD;
1162             let mut b = Bump::with_capacity(CAPACITY);
1163 
1164             // `realloc` doesn't shrink allocations that aren't "worth it".
1165             let layout = Layout::from_size_align(100, 1).unwrap();
1166             let p = b.alloc_layout(layout);
1167             let q = (&b).realloc(p, layout, 51).unwrap();
1168             assert_eq!(p, q);
1169             b.reset();
1170 
1171             // `realloc` will shrink allocations that are "worth it".
1172             let layout = Layout::from_size_align(100, 1).unwrap();
1173             let p = b.alloc_layout(layout);
1174             let q = (&b).realloc(p, layout, 50).unwrap();
1175             assert!(p != q);
1176             b.reset();
1177 
1178             // `realloc` will reuse the last allocation when growing.
1179             let layout = Layout::from_size_align(10, 1).unwrap();
1180             let p = b.alloc_layout(layout);
1181             let q = (&b).realloc(p, layout, 11).unwrap();
1182             assert_eq!(q.as_ptr() as usize, p.as_ptr() as usize - 1);
1183             b.reset();
1184 
1185             // `realloc` will allocate a new chunk when growing the last
1186             // allocation, if need be.
1187             let layout = Layout::from_size_align(1, 1).unwrap();
1188             let p = b.alloc_layout(layout);
1189             let q = (&b).realloc(p, layout, CAPACITY + 1).unwrap();
1190             assert!(q.as_ptr() as usize != p.as_ptr() as usize - CAPACITY);
1191             b = Bump::with_capacity(CAPACITY);
1192 
1193             // `realloc` will allocate and copy when reallocating anything that
1194             // wasn't the last allocation.
1195             let layout = Layout::from_size_align(1, 1).unwrap();
1196             let p = b.alloc_layout(layout);
1197             let _ = b.alloc_layout(layout);
1198             let q = (&b).realloc(p, layout, 2).unwrap();
1199             assert!(q.as_ptr() as usize != p.as_ptr() as usize - 1);
1200             b.reset();
1201         }
1202     }
1203 
1204     #[test]
    fn invalid_read() {
1206         use alloc::Alloc;
1207 
1208         let mut b = &Bump::new();
1209 
1210         unsafe {
1211             let l1 = Layout::from_size_align(12000, 4).unwrap();
1212             let p1 = Alloc::alloc(&mut b, l1).unwrap();
1213 
1214             let l2 = Layout::from_size_align(1000, 4).unwrap();
1215             Alloc::alloc(&mut b, l2).unwrap();
1216 
1217             let p1 = b.realloc(p1, l1, 24000).unwrap();
1218             let l3 = Layout::from_size_align(24000, 4).unwrap();
1219             b.realloc(p1, l3, 48000).unwrap();
1220         }
1221     }
1222 }
1223