//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.
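//!
//! # Example
//!
//! A minimal usage sketch (illustrative only; this crate is internal to the
//! compiler and its API is unstable):
//!
//! ```ignore (illustrative)
//! let arena = TypedArena::default();
//! let x: &mut u32 = arena.alloc(41);
//! *x += 1;
//! // Everything allocated in `arena` is freed (and dropped) together when
//! // `arena` goes out of scope.
//! ```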

#![doc(
    html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
    test(no_crate_inject, attr(deny(warnings)))
)]
#![feature(dropck_eyepatch)]
#![feature(new_uninit)]
#![feature(maybe_uninit_slice)]
#![feature(min_specialization)]
#![cfg_attr(test, feature(test))]

use rustc_data_structures::sync;
use smallvec::SmallVec;

use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::cmp;
use std::marker::{PhantomData, Send};
use std::mem::{self, MaybeUninit};
use std::ptr;
use std::slice;

#[inline(never)]
#[cold]
fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<TypedArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

struct TypedArenaChunk<T> {
    /// The raw storage for the arena chunk.
    storage: Box<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

impl<T> TypedArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> TypedArenaChunk<T> {
        TypedArenaChunk { storage: Box::new_uninit_slice(capacity), entries: 0 }
    }

    /// Destroys this arena chunk, dropping its first `len` entries.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<u8> takes linear time.
        if mem::needs_drop::<T>() {
            ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(&mut self.storage[..len]));
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        MaybeUninit::slice_as_mut_ptr(&mut self.storage)
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if mem::size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                !0 as *mut T
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
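//
// For example (illustrative arithmetic, not from the original source): with
// 8-byte elements, chunk capacities in elements grow as 512 (4096 / 8), 1024,
// 2048, ..., up to 262144 (2 MiB / 8), and every chunk after that stays at
// 262144 elements.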
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to null so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: RefCell::new(vec![]),
            _own: PhantomData,
        }
    }
}

trait IterExt<T> {
    fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T];
}

impl<I, T> IterExt<T> for I
where
    I: IntoIterator<Item = T>,
{
    #[inline]
    default fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
        let vec: SmallVec<[_; 8]> = self.into_iter().collect();
        vec.alloc_from_iter(arena)
    }
}

impl<T, const N: usize> IterExt<T> for std::array::IntoIter<T, N> {
    #[inline]
    fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
        let len = self.len();
        if len == 0 {
            return &mut [];
        }
        // Move the content to the arena by copying and then forgetting it
        unsafe {
            let start_ptr = arena.alloc_raw_slice(len);
            self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len);
            mem::forget(self);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }
}

impl<T> IterExt<T> for Vec<T> {
    #[inline]
    fn alloc_from_iter(mut self, arena: &TypedArena<T>) -> &mut [T] {
        let len = self.len();
        if len == 0 {
            return &mut [];
        }
        // Move the content to the arena by copying and then forgetting it
        unsafe {
            let start_ptr = arena.alloc_raw_slice(len);
            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            self.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }
}

impl<A: smallvec::Array> IterExt<A::Item> for SmallVec<A> {
    #[inline]
    fn alloc_from_iter(mut self, arena: &TypedArena<A::Item>) -> &mut [A::Item] {
        let len = self.len();
        if len == 0 {
            return &mut [];
        }
        // Move the content to the arena by copying and then forgetting it
        unsafe {
            let start_ptr = arena.alloc_raw_slice(len);
            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            self.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
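    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let arena = TypedArena::default();
    /// let v: &mut Vec<u32> = arena.alloc(vec![1, 2, 3]);
    /// v.push(4);
    /// // `v` lives as long as `arena`; it is dropped when the arena drops.
    /// ```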
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if mem::size_of::<T>() == 0 {
                self.ptr.set((self.ptr.get() as *mut u8).wrapping_offset(1) as *mut T);
                let ptr = mem::align_of::<T>() as *mut T;
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().offset(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        let available_bytes = self.end.get() as usize - self.ptr.get() as usize;
        let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    /// Ensures there's enough space in the current chunk to fit `additional` objects.
    #[inline]
    fn ensure_capacity(&self, additional: usize) {
        if !self.can_allocate(additional) {
            self.grow(additional);
            debug_assert!(self.can_allocate(additional));
        }
    }

    #[inline]
    unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(mem::size_of::<T>() != 0);
        assert!(len != 0);

        self.ensure_capacity(len);

        let start_ptr = self.ptr.get();
        self.ptr.set(start_ptr.add(len));
        start_ptr
    }

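    /// Allocates the items yielded by `iter` in the arena, returning a
    /// mutable slice of them.
    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let arena = TypedArena::default();
    /// let slice: &mut [u32] = arena.alloc_from_iter(0..10);
    /// assert_eq!(slice.len(), 10);
    /// ```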
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        assert!(mem::size_of::<T>() != 0);
        iter.alloc_from_iter(self)
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, mem::size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize;
                    last_chunk.entries = used_bytes / mem::size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = TypedArenaChunk::<T>::new(new_cap);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut TypedArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start() as usize;
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get() as usize;
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if mem::size_of::<T>() == 0 {
            // `T` is a ZST. `self.ptr` (read into `end` above) was bumped by one byte
            // for each allocated value, so `end - start` is exactly the number of
            // zero-sized values in the last and only chunk.
            end - start
        } else {
            (end - start) / mem::size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}

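/// An arena that can hold objects of multiple types, as long as none of them
/// has a destructor to run: allocation asserts `!mem::needs_drop::<T>()`,
/// hence "dropless". Unlike `TypedArena`, it hands out memory from the end of
/// each chunk towards the start.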
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds from the end of the chunk towards the start.
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<TypedArenaChunk<u8>>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            start: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = TypedArenaChunk::<u8>::new(new_cap);
            self.start.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    /// Allocates a byte slice with specified layout from the current memory
    /// chunk. Returns `None` if there is no free space left to satisfy the
    /// request.
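    ///
    /// Worked example (illustrative numbers, not from the original source):
    /// with `end = 1000`, `size = 10`, and `align = 8`, the new end is
    /// `(1000 - 10) & !7 = 984`, so the allocation occupies bytes `984..994`
    /// and `self.end` moves down to 984.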
    #[inline]
    fn alloc_raw_without_grow(&self, layout: Layout) -> Option<*mut u8> {
        let start = self.start.get() as usize;
        let end = self.end.get() as usize;

        let align = layout.align();
        let bytes = layout.size();

        let new_end = end.checked_sub(bytes)? & !(align - 1);
        if start <= new_end {
            let new_end = new_end as *mut u8;
            self.end.set(new_end);
            Some(new_end)
        } else {
            None
        }
    }

    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        assert!(layout.size() != 0);
        loop {
            if let Some(a) = self.alloc_raw_without_grow(layout) {
                break a;
            }
            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout.size());
        }
    }

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());

        let mem = self.alloc_raw(Layout::for_value::<T>(&object)) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`,
    /// returning a mutable reference to it.
    ///
    /// # Panics
    ///
    /// Panics if passed a zero-sized type or a zero-length slice.
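    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let arena = DroplessArena::default();
    /// let copied: &mut [u8] = arena.alloc_slice(&[1, 2, 3]);
    /// assert_eq!(copied, &[1, 2, 3]);
    /// ```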
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(mem::size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators
        loop {
            let value = iter.next();
            if i >= len || value.is_none() {
                // We only return as many items as the iterator gave us, even
                // though it was supposed to give us `len`
                return slice::from_raw_parts_mut(mem, i);
            }
            ptr::write(mem.add(i), value.unwrap());
            i += 1;
        }
    }

    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        let iter = iter.into_iter();
        assert!(mem::size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator will produce here
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => {
                cold_path(move || -> &mut [T] {
                    let mut vec: SmallVec<[_; 8]> = iter.collect();
                    if vec.is_empty() {
                        return &mut [];
                    }
                    // Move the content to the arena by copying it and then forgetting
                    // the content of the SmallVec
                    unsafe {
                        let len = vec.len();
                        let start_ptr =
                            self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
                        vec.set_len(0);
                        slice::from_raw_parts_mut(start_ptr, len)
                    }
                })
            }
        }
    }
}

/// Calls the destructor for an object when dropped.
struct DropType {
    drop_fn: unsafe fn(*mut u8),
    obj: *mut u8,
}

// SAFETY: we require `T: Send` before type-erasing into `DropType`.
#[cfg(parallel_compiler)]
unsafe impl sync::Send for DropType {}

impl DropType {
    #[inline]
    unsafe fn new<T: sync::Send>(obj: *mut T) -> Self {
        unsafe fn drop_for_type<T>(to_drop: *mut u8) {
            std::ptr::drop_in_place(to_drop as *mut T)
        }

        DropType { drop_fn: drop_for_type::<T>, obj: obj as *mut u8 }
    }
}

impl Drop for DropType {
    fn drop(&mut self) {
        unsafe { (self.drop_fn)(self.obj) }
    }
}

/// An arena which can be used to allocate any type.
///
/// # Safety
///
/// Allocating in this arena is unsafe since the type system
/// doesn't know which types it contains. In order to
/// allocate safely, you must store a `PhantomData<T>`
/// alongside this arena for each type `T` you allocate.
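///
/// # Example
///
/// A sketch of that `PhantomData` discipline (illustrative only; `String`
/// stands in for any type that needs drop):
///
/// ```ignore (illustrative)
/// struct MyArena {
///     drop_arena: DropArena,
///     _strings: PhantomData<String>,
/// }
///
/// impl MyArena {
///     fn alloc_string(&self, s: String) -> &mut String {
///         // Sound because `_strings` records that this arena owns `String`s.
///         unsafe { self.drop_arena.alloc(s) }
///     }
/// }
/// ```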
#[derive(Default)]
pub struct DropArena {
    /// A list of destructors to run when the arena drops.
    /// Ordered so `destructors` gets dropped before the arena
    /// since its destructor can reference memory in the arena.
    destructors: RefCell<Vec<DropType>>,
    arena: DroplessArena,
}

impl DropArena {
    #[inline]
    pub unsafe fn alloc<T>(&self, object: T) -> &mut T
    where
        T: sync::Send,
    {
        let mem = self.arena.alloc_raw(Layout::new::<T>()) as *mut T;
        // Write into uninitialized memory.
        ptr::write(mem, object);
        let result = &mut *mem;
        // Record the destructor after doing the allocation as that may panic
        // and would cause `object`'s destructor to run twice if it was recorded before.
        self.destructors.borrow_mut().push(DropType::new(result));
        result
    }

    #[inline]
    pub unsafe fn alloc_from_iter<T, I>(&self, iter: I) -> &mut [T]
    where
        T: sync::Send,
        I: IntoIterator<Item = T>,
    {
        let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
        if vec.is_empty() {
            return &mut [];
        }
        let len = vec.len();

        let start_ptr = self.arena.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;

        let mut destructors = self.destructors.borrow_mut();
        // Reserve space for the destructors up front so pushing them below can't panic.
        destructors.reserve(len);

        // Move the content to the arena by copying it and then forgetting
        // the content of the SmallVec.
        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
        mem::forget(vec.drain(..));

        // Record the destructors after doing the allocation as that may panic
        // and would cause the objects' destructors to run twice if they were recorded before.
        for i in 0..len {
            destructors.push(DropType::new(start_ptr.add(i)));
        }

        slice::from_raw_parts_mut(start_ptr, len)
    }
}

#[macro_export]
macro_rules! arena_for_type {
    ([][$ty:ty]) => {
        $crate::TypedArena<$ty>
    };
    ([few $(, $attrs:ident)*][$ty:ty]) => {
        ::std::marker::PhantomData<$ty>
    };
    ([$ignore:ident $(, $attrs:ident)*]$args:tt) => {
        $crate::arena_for_type!([$($attrs),*]$args)
    };
}

#[macro_export]
macro_rules! which_arena_for_type {
    ([][$arena:expr]) => {
        ::std::option::Option::Some($arena)
    };
    ([few$(, $attrs:ident)*][$arena:expr]) => {
        ::std::option::Option::None
    };
    ([$ignore:ident$(, $attrs:ident)*]$args:tt) => {
        $crate::which_arena_for_type!([$($attrs),*]$args)
    };
}

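/// Declares an `Arena` struct with one field per listed type: a `TypedArena`
/// for ordinary types, or a `PhantomData` placeholder for types marked `few`,
/// which instead fall back to the `DropArena`.
///
/// A hypothetical invocation (illustrative only; `Foo` and `Bar` are made-up
/// types):
///
/// ```ignore (illustrative)
/// declare_arena!([], [
///     [] foo: Foo<'tcx>,
///     [few] bar: Bar<'tcx>,
/// ], 'tcx);
/// ```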
#[macro_export]
macro_rules! declare_arena {
    ([], [$($a:tt $name:ident: $ty:ty,)*], $tcx:lifetime) => {
        #[derive(Default)]
        pub struct Arena<$tcx> {
            pub dropless: $crate::DroplessArena,
            drop: $crate::DropArena,
            $($name: $crate::arena_for_type!($a[$ty]),)*
        }

        pub trait ArenaAllocatable<'tcx, T = Self>: Sized {
            fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self;
            fn allocate_from_iter<'a>(
                arena: &'a Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'a mut [Self];
        }

        impl<'tcx, T: Copy> ArenaAllocatable<'tcx, ()> for T {
            #[inline]
            fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self {
                arena.dropless.alloc(self)
            }
            #[inline]
            fn allocate_from_iter<'a>(
                arena: &'a Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'a mut [Self] {
                arena.dropless.alloc_from_iter(iter)
            }
        }
        $(
            impl<$tcx> ArenaAllocatable<$tcx, $ty> for $ty {
                #[inline]
                fn allocate_on<'a>(self, arena: &'a Arena<$tcx>) -> &'a mut Self {
                    if !::std::mem::needs_drop::<Self>() {
                        return arena.dropless.alloc(self);
                    }
                    match $crate::which_arena_for_type!($a[&arena.$name]) {
                        ::std::option::Option::<&$crate::TypedArena<Self>>::Some(ty_arena) => {
                            ty_arena.alloc(self)
                        }
                        ::std::option::Option::None => unsafe { arena.drop.alloc(self) },
                    }
                }

                #[inline]
                fn allocate_from_iter<'a>(
                    arena: &'a Arena<$tcx>,
                    iter: impl ::std::iter::IntoIterator<Item = Self>,
                ) -> &'a mut [Self] {
                    if !::std::mem::needs_drop::<Self>() {
                        return arena.dropless.alloc_from_iter(iter);
                    }
                    match $crate::which_arena_for_type!($a[&arena.$name]) {
                        ::std::option::Option::<&$crate::TypedArena<Self>>::Some(ty_arena) => {
                            ty_arena.alloc_from_iter(iter)
                        }
                        ::std::option::Option::None => unsafe { arena.drop.alloc_from_iter(iter) },
                    }
                }
            }
        )*

        impl<'tcx> Arena<'tcx> {
            #[inline]
            pub fn alloc<T: ArenaAllocatable<'tcx, U>, U>(&self, value: T) -> &mut T {
                value.allocate_on(self)
            }

            #[inline]
            pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
                if value.is_empty() {
                    return &mut [];
                }
                self.dropless.alloc_slice(value)
            }

            pub fn alloc_from_iter<'a, T: ArenaAllocatable<'tcx, U>, U>(
                &'a self,
                iter: impl ::std::iter::IntoIterator<Item = T>,
            ) -> &'a mut [T] {
                T::allocate_from_iter(self, iter)
            }
        }
    }
}

#[cfg(test)]
mod tests;