/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef ds_LifoAlloc_h
#define ds_LifoAlloc_h

#include "mozilla/Attributes.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/MemoryChecking.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/PodOperations.h"
#include "mozilla/TemplateLib.h"

#include <algorithm>
#include <new>
#include <stddef.h>  // size_t
#include <type_traits>
#include <utility>

// This data structure supports stacky LIFO allocation (mark/release and
// LifoAllocScope). It does not maintain one contiguous segment; instead, it
// maintains a bunch of linked memory segments. In order to prevent malloc/free
// thrashing, unused segments are deallocated when garbage collection occurs.
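//
// A minimal usage sketch (illustrative only; the chunk size and allocation
// sizes below are arbitrary example values, not recommendations):
//
//   js::LifoAlloc lifo(4 * 1024);
//   js::LifoAlloc::Mark m = lifo.mark();
//   void* p = lifo.alloc(64);  // bump-allocate 64 bytes (may return nullptr)
//   if (p) {
//     // ... use p; no destructors run for objects placed here ...
//   }
//   lifo.release(m);           // unwind everything allocated since |m|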

#include "js/UniquePtr.h"
#include "util/Memory.h"
#include "util/Poison.h"

namespace js {

namespace detail {

template <typename T, typename D>
class SingleLinkedList;

template <typename T, typename D = JS::DeletePolicy<T>>
class SingleLinkedListElement {
  friend class SingleLinkedList<T, D>;
  js::UniquePtr<T, D> next_;

 public:
  SingleLinkedListElement() : next_(nullptr) {}
  ~SingleLinkedListElement() { MOZ_ASSERT(!next_); }

  T* next() const { return next_.get(); }
};

// Singly linked list which uses UniquePtr to hold the next pointers.
// UniquePtr ensures that no element can be a member of two different lists
// simultaneously.
template <typename T, typename D = JS::DeletePolicy<T>>
class SingleLinkedList {
 private:
  // First element of the list, which owns the next element and ensures that
  // this list is the only owner of its elements.
  UniquePtr<T, D> head_;

  // Weak pointer to the last element of the list.
  T* last_;

  void assertInvariants() {
    MOZ_ASSERT(bool(head_) == bool(last_));
    MOZ_ASSERT_IF(last_, !last_->next_);
  }

 public:
  SingleLinkedList() : head_(nullptr), last_(nullptr) { assertInvariants(); }

  SingleLinkedList(SingleLinkedList&& other)
      : head_(std::move(other.head_)), last_(other.last_) {
    other.last_ = nullptr;
    assertInvariants();
    other.assertInvariants();
  }

  ~SingleLinkedList() {
    MOZ_ASSERT(!head_);
    MOZ_ASSERT(!last_);
  }

  // Move the elements of the |other| list into the current one, implicitly
  // removing all the elements of the current list.
  SingleLinkedList& operator=(SingleLinkedList&& other) {
    head_ = std::move(other.head_);
    last_ = other.last_;
    other.last_ = nullptr;
    assertInvariants();
    other.assertInvariants();
    return *this;
  }

  bool empty() const { return !last_; }

  // Iterates over the elements of the list; this is used to avoid raw
  // manipulation of pointers as much as possible.
  class Iterator {
    T* current_;

   public:
    explicit Iterator(T* current) : current_(current) {}

    T& operator*() const { return *current_; }
    T* operator->() const { return current_; }
    T* get() const { return current_; }

    const Iterator& operator++() {
      current_ = current_->next();
      return *this;
    }

    bool operator!=(const Iterator& other) const {
      return current_ != other.current_;
    }
    bool operator==(const Iterator& other) const {
      return current_ == other.current_;
    }
  };

  Iterator begin() const { return Iterator(head_.get()); }
  Iterator end() const { return Iterator(nullptr); }
  Iterator last() const { return Iterator(last_); }

  // Split the list into two singly linked lists after the element given as
  // argument. The front of the list remains in the current list, while the
  // back goes into the newly created linked list.
  //
  // This is used, for example, to extract one element from a list (see
  // LifoAlloc::getOrCreateChunk).
  SingleLinkedList splitAfter(T* newLast) {
    MOZ_ASSERT(newLast);
    SingleLinkedList result;
    if (newLast->next_) {
      result.head_ = std::move(newLast->next_);
      result.last_ = last_;
      last_ = newLast;
    }
    assertInvariants();
    result.assertInvariants();
    return result;
  }
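
  // Illustrative sketch (hypothetical nodes): given a list A -> B -> C -> D,
  // |list.splitAfter(b)| leaves A -> B in |list| and returns a new list
  // owning C -> D:
  //
  //   SingleLinkedList<Node> rest = list.splitAfter(b);
  //   MOZ_ASSERT(list.last().get() == b);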

  void pushFront(UniquePtr<T, D>&& elem) {
    if (!last_) {
      last_ = elem.get();
    }
    elem->next_ = std::move(head_);
    head_ = std::move(elem);
    assertInvariants();
  }

  void append(UniquePtr<T, D>&& elem) {
    if (last_) {
      last_->next_ = std::move(elem);
      last_ = last_->next_.get();
    } else {
      head_ = std::move(elem);
      last_ = head_.get();
    }
    assertInvariants();
  }
  void appendAll(SingleLinkedList&& list) {
    if (list.empty()) {
      return;
    }
    if (last_) {
      last_->next_ = std::move(list.head_);
    } else {
      head_ = std::move(list.head_);
    }
    last_ = list.last_;
    list.last_ = nullptr;
    assertInvariants();
    list.assertInvariants();
  }
  void steal(SingleLinkedList&& list) {
    head_ = std::move(list.head_);
    last_ = list.last_;
    list.last_ = nullptr;
    assertInvariants();
    list.assertInvariants();
  }
  void prependAll(SingleLinkedList&& list) {
    list.appendAll(std::move(*this));
    steal(std::move(list));
  }
  UniquePtr<T, D> popFirst() {
    MOZ_ASSERT(head_);
    UniquePtr<T, D> result = std::move(head_);
    head_ = std::move(result->next_);
    if (!head_) {
      last_ = nullptr;
    }
    assertInvariants();
    return result;
  }
};

static const size_t LIFO_ALLOC_ALIGN = 8;

MOZ_ALWAYS_INLINE
uint8_t* AlignPtr(uint8_t* orig) {
  static_assert(mozilla::IsPowerOfTwo(LIFO_ALLOC_ALIGN),
                "LIFO_ALLOC_ALIGN must be a power of two");

  uint8_t* result = (uint8_t*)AlignBytes(uintptr_t(orig), LIFO_ALLOC_ALIGN);
  MOZ_ASSERT(uintptr_t(result) % LIFO_ALLOC_ALIGN == 0);
  return result;
}

// A BumpChunk represents a single memory allocation made with the system
// allocator. As the owner of the memory, it is responsible for the allocation
// and the deallocation.
//
// This structure is only movable, not copyable.
class BumpChunk : public SingleLinkedListElement<BumpChunk> {
 private:
  // Pointer to the last byte allocated in this chunk.
  uint8_t* bump_;
  // Pointer to the first byte after this chunk.
  uint8_t* const capacity_;

#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
  // Magic number used to check against poisoned values.
  const uintptr_t magic_ : 24;
  static constexpr uintptr_t magicNumber = uintptr_t(0x4c6966);
#endif

#if defined(DEBUG)
#  define LIFO_CHUNK_PROTECT 1
#endif

  // Poison the memory with memset in order to catch use-after-free errors
  // (with the JS_LIFO_UNDEFINED_PATTERN pattern) and use-before-init errors
  // (with the JS_LIFO_UNINITIALIZED_PATTERN pattern).
#if defined(DEBUG)
#  define LIFO_HAVE_MEM_CHECKS 1
#  define LIFO_MAKE_MEM_NOACCESS(addr, size)       \
    do {                                           \
      uint8_t* base = (addr);                      \
      size_t sz = (size);                          \
      MOZ_MAKE_MEM_UNDEFINED(base, sz);            \
      memset(base, JS_LIFO_UNDEFINED_PATTERN, sz); \
      MOZ_MAKE_MEM_NOACCESS(base, sz);             \
    } while (0)

#  define LIFO_MAKE_MEM_UNDEFINED(addr, size)          \
    do {                                               \
      uint8_t* base = (addr);                          \
      size_t sz = (size);                              \
      MOZ_MAKE_MEM_UNDEFINED(base, sz);                \
      memset(base, JS_LIFO_UNINITIALIZED_PATTERN, sz); \
      MOZ_MAKE_MEM_UNDEFINED(base, sz);                \
    } while (0)

#elif defined(MOZ_HAVE_MEM_CHECKS)
#  define LIFO_HAVE_MEM_CHECKS 1
#  define LIFO_MAKE_MEM_NOACCESS(addr, size) \
    MOZ_MAKE_MEM_NOACCESS((addr), (size))
#  define LIFO_MAKE_MEM_UNDEFINED(addr, size) \
    MOZ_MAKE_MEM_UNDEFINED((addr), (size))
#endif

#ifdef LIFO_HAVE_MEM_CHECKS
  // Red zone reserved after each allocation.
  static constexpr size_t RedZoneSize = 16;
#else
  static constexpr size_t RedZoneSize = 0;
#endif

  void assertInvariants() {
    MOZ_DIAGNOSTIC_ASSERT(magic_ == magicNumber);
    MOZ_ASSERT(begin() <= end());
    MOZ_ASSERT(end() <= capacity_);
  }

  BumpChunk& operator=(const BumpChunk&) = delete;
  BumpChunk(const BumpChunk&) = delete;

  explicit BumpChunk(uintptr_t capacity)
      : bump_(begin()),
        capacity_(base() + capacity)
#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
        ,
        magic_(magicNumber)
#endif
  {
    assertInvariants();
#if defined(LIFO_HAVE_MEM_CHECKS)
    // The memory is freshly allocated and marked as undefined by the
    // allocator of the BumpChunk. Instead, we mark this memory as
    // no-access, as it has not been allocated within the BumpChunk.
    LIFO_MAKE_MEM_NOACCESS(bump_, capacity_ - bump_);
#endif
  }

  // Cast |this| into a uint8_t* pointer.
  //
  // Warning: Are you sure you do not want to use begin() instead?
  const uint8_t* base() const { return reinterpret_cast<const uint8_t*>(this); }
  uint8_t* base() { return reinterpret_cast<uint8_t*>(this); }

  // Update the bump pointer to any value contained in this chunk, which is
  // above the private fields of this chunk.
  //
  // The memory is poisoned and flagged as no-access when the bump pointer is
  // moving backward, such as when memory is released, or when a Mark is used
  // to unwind previous allocations.
  //
  // The memory is flagged as undefined when the bump pointer is moving
  // forward.
  void setBump(uint8_t* newBump) {
    assertInvariants();
    MOZ_ASSERT(begin() <= newBump);
    MOZ_ASSERT(newBump <= capacity_);
#if defined(LIFO_HAVE_MEM_CHECKS)
    // Poison/unpoison memory that we just freed/allocated.
    if (bump_ > newBump) {
      LIFO_MAKE_MEM_NOACCESS(newBump, bump_ - newBump);
    } else if (newBump > bump_) {
      MOZ_ASSERT(newBump - RedZoneSize >= bump_);
      LIFO_MAKE_MEM_UNDEFINED(bump_, newBump - RedZoneSize - bump_);
      // The area [newBump - RedZoneSize .. newBump[ is already flagged as
      // no-access, either by the previous if-branch or by the BumpChunk
      // constructor. No need to mark it twice.
    }
#endif
    bump_ = newBump;
  }

 public:
  ~BumpChunk() { release(); }

  // Returns true if this chunk contains no allocated content.
  bool empty() const { return end() == begin(); }

  // Returns the number of bytes of allocated space. This includes the space
  // consumed by the alignment of the allocations.
  size_t used() const { return end() - begin(); }

  // These are used for manipulating a chunk as if it was a vector of bytes,
  // and for iterating over the content of the buffer (see LifoAlloc::Enum).
  inline const uint8_t* begin() const;
  inline uint8_t* begin();
  uint8_t* end() const { return bump_; }

  // This function is the only way to allocate and construct a chunk. It
  // returns a UniquePtr to the newly allocated chunk. The size given as
  // argument includes the space needed for the header of the chunk.
  static UniquePtr<BumpChunk> newWithCapacity(size_t size);

  // Report allocation.
  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return mallocSizeOf(this);
  }

  // Report allocation size.
  size_t computedSizeOfIncludingThis() const { return capacity_ - base(); }

  // Opaque type used to carry a pointer to the current location of the bump_
  // pointer, within a BumpChunk.
  class Mark {
    // Chunk which owns the pointer.
    BumpChunk* chunk_;
    // Record of the bump_ pointer of the BumpChunk.
    uint8_t* bump_;

    friend class BumpChunk;
    Mark(BumpChunk* chunk, uint8_t* bump) : chunk_(chunk), bump_(bump) {}

   public:
    Mark() : chunk_(nullptr), bump_(nullptr) {}

    BumpChunk* markedChunk() const { return chunk_; }
  };

  // Return a mark to be able to unwind future allocations with the release
  // function. (see LifoAllocScope)
  Mark mark() { return Mark(this, end()); }

  // Check if a pointer is part of the allocated data of this chunk.
  bool contains(void* ptr) const {
    // Note: We cannot check "ptr < end()" because a mark can have a size of
    // zero.
    return begin() <= ptr && ptr <= end();
  }

  // Check if a mark is part of the allocated data of this chunk.
  bool contains(Mark m) const {
    MOZ_ASSERT(m.chunk_ == this);
    return contains(m.bump_);
  }

  // Release the memory allocated in this chunk. This function does not call
  // any of the destructors.
  void release() { setBump(begin()); }

  // Release the memory allocated in this chunk since the corresponding mark
  // got created. This function does not call any of the destructors.
  void release(Mark m) {
    MOZ_RELEASE_ASSERT(contains(m));
    setBump(m.bump_);
  }
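
  // Illustrative sketch of mark/release on a single chunk (hypothetical
  // sizes; assumes both allocations fit in this chunk):
  //
  //   BumpChunk::Mark m = chunk->mark();
  //   void* a = chunk->tryAlloc(16);
  //   void* b = chunk->tryAlloc(32);
  //   chunk->release(m);  // |a| and |b| are poisoned and no-access again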

  // Given an amount, compute the total size of a chunk for it: reserved
  // space before |begin()|, space for |amount| bytes, and red-zone space
  // after those bytes that will ultimately end at |capacity_|.
  static inline MOZ_MUST_USE bool allocSizeWithRedZone(size_t amount,
                                                       size_t* size);

  // Given a bump chunk pointer, find the next base/end pointers. This is
  // useful for having consistent allocations, and for iterating over
  // known-size allocations.
  static uint8_t* nextAllocBase(uint8_t* e) { return detail::AlignPtr(e); }
  static uint8_t* nextAllocEnd(uint8_t* b, size_t n) {
    return b + n + RedZoneSize;
  }

  // Returns true if the unused space is large enough for an allocation of
  // |n| bytes.
  bool canAlloc(size_t n) const {
    uint8_t* newBump = nextAllocEnd(nextAllocBase(end()), n);
    // bump_ <= newBump is necessary to catch overflow.
    return bump_ <= newBump && newBump <= capacity_;
  }
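
  // Worked example (hypothetical addresses, in a build where RedZoneSize is
  // 16): with end() == 0x1005, an 8-byte request first aligns the base up to
  // 0x1008 (LIFO_ALLOC_ALIGN == 8), then newBump = 0x1008 + 8 + 16 = 0x1020,
  // so canAlloc(8) holds only if 0x1020 <= capacity_.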

  // Space remaining in the current chunk.
  size_t unused() const {
    uint8_t* aligned = nextAllocBase(end());
    if (aligned < capacity_) {
      return capacity_ - aligned;
    }
    return 0;
  }

  // Try to perform an allocation of size |n|; returns nullptr if not
  // possible.
  MOZ_ALWAYS_INLINE
  void* tryAlloc(size_t n) {
    uint8_t* aligned = nextAllocBase(end());
    uint8_t* newBump = nextAllocEnd(aligned, n);

    if (newBump > capacity_) {
      return nullptr;
    }

    // Check for overflow.
    if (MOZ_UNLIKELY(newBump < bump_)) {
      return nullptr;
    }

    MOZ_ASSERT(canAlloc(n));  // Ensure consistency between "can" and "try".
    setBump(newBump);
    return aligned;
  }

#ifdef LIFO_CHUNK_PROTECT
  void setReadOnly();
  void setReadWrite();
#else
  void setReadOnly() const {}
  void setReadWrite() const {}
#endif
};

// Space reserved for the BumpChunk internal data, and the alignment of the
// first allocation content. This can be used to ensure there is enough space
// for the next allocation (see LifoAlloc::newChunkWithCapacity).
static constexpr size_t BumpChunkReservedSpace =
    AlignBytes(sizeof(BumpChunk), LIFO_ALLOC_ALIGN);

/* static */ inline MOZ_MUST_USE bool BumpChunk::allocSizeWithRedZone(
    size_t amount, size_t* size) {
  constexpr size_t SpaceBefore = BumpChunkReservedSpace;
  static_assert((SpaceBefore % LIFO_ALLOC_ALIGN) == 0,
                "reserved space presumed already aligned");

  constexpr size_t SpaceAfter = RedZoneSize;  // may be zero

  constexpr size_t SpaceBeforeAndAfter = SpaceBefore + SpaceAfter;
  static_assert(SpaceBeforeAndAfter >= SpaceBefore,
                "intermediate addition must not overflow");

  *size = SpaceBeforeAndAfter + amount;
  return MOZ_LIKELY(*size >= SpaceBeforeAndAfter);
}
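
// Worked example (hypothetical numbers): if BumpChunkReservedSpace is 32 and
// RedZoneSize is 16, then allocSizeWithRedZone(100, &size) stores
// 32 + 16 + 100 = 148 in |size| and returns true; it returns false only when
// the final addition wraps past SIZE_MAX.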

inline const uint8_t* BumpChunk::begin() const {
  return base() + BumpChunkReservedSpace;
}

inline uint8_t* BumpChunk::begin() { return base() + BumpChunkReservedSpace; }

}  // namespace detail

// LIFO bump allocator: used for phase-oriented and fast LIFO allocations.
//
// Note: We leave BumpChunks latent in the set of unused chunks after they've
// been released to avoid thrashing before a GC.
class LifoAlloc {
  using UniqueBumpChunk = js::UniquePtr<detail::BumpChunk>;
  using BumpChunkList = detail::SingleLinkedList<detail::BumpChunk>;

  // List of chunks containing allocated data of size smaller than the default
  // chunk size. In the common case, the last chunk of this list is always
  // used to perform the allocations. When the allocation cannot be performed,
  // we move a chunk from the unused set to the list of used chunks.
  BumpChunkList chunks_;

  // List of chunks containing allocated data where each allocation is larger
  // than the oversize threshold. Each chunk contains exactly one allocation.
  // This reduces wasted space in the chunk list.
  //
  // Oversize chunks are allocated on demand and freed as soon as they are
  // released, instead of being pushed to the unused list.
  BumpChunkList oversize_;

  // Set of unused chunks, which can be reused for future allocations.
  BumpChunkList unused_;

  size_t markCount;
  size_t defaultChunkSize_;
  size_t oversizeThreshold_;

  // Size of all chunks in the chunks_, oversize_, and unused_ lists.
  size_t curSize_;
  size_t peakSize_;

  // Size of all chunks containing small bump allocations. This heuristic is
  // used to compute growth rates while ignoring chunks such as oversized,
  // now-unused, or transferred chunks (which followed their own growth
  // patterns).
  size_t smallAllocsSize_;

#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
  bool fallibleScope_;
#endif

  void operator=(const LifoAlloc&) = delete;
  LifoAlloc(const LifoAlloc&) = delete;

  // Return a BumpChunk that can perform an allocation of at least size |n|.
  UniqueBumpChunk newChunkWithCapacity(size_t n, bool oversize);

  // Reuse or allocate a BumpChunk that can perform an allocation of at least
  // size |n|; if successful, it is placed at the end of the |chunks_| list.
  UniqueBumpChunk getOrCreateChunk(size_t n);

  void reset(size_t defaultChunkSize);

  // Append unused chunks to the end of this LifoAlloc.
  void appendUnused(BumpChunkList&& otherUnused) {
#ifdef DEBUG
    for (detail::BumpChunk& bc : otherUnused) {
      MOZ_ASSERT(bc.empty());
    }
#endif
    unused_.appendAll(std::move(otherUnused));
  }

  // Append used chunks to the end of this LifoAlloc. We act as if all the
  // chunks in |this| are used, even if they're not, so memory may be wasted.
  void appendUsed(BumpChunkList&& otherChunks) {
    chunks_.appendAll(std::move(otherChunks));
  }

  // Track the amount of space allocated in used and unused chunks.
  void incrementCurSize(size_t size) {
    curSize_ += size;
    if (curSize_ > peakSize_) {
      peakSize_ = curSize_;
    }
  }
  void decrementCurSize(size_t size) {
    MOZ_ASSERT(curSize_ >= size);
    curSize_ -= size;
    MOZ_ASSERT(curSize_ >= smallAllocsSize_);
  }

  void* allocImplColdPath(size_t n);
  void* allocImplOversize(size_t n);

  MOZ_ALWAYS_INLINE
  void* allocImpl(size_t n) {
    void* result;
    // Give oversized allocations their own chunk instead of wasting space
    // due to fragmentation at the end of a normal chunk.
    if (MOZ_UNLIKELY(n > oversizeThreshold_)) {
      return allocImplOversize(n);
    }
    if (MOZ_LIKELY(!chunks_.empty() &&
                   (result = chunks_.last()->tryAlloc(n)))) {
      return result;
    }
    return allocImplColdPath(n);
  }

  // Check for space in unused chunks or allocate a new unused chunk.
  MOZ_MUST_USE bool ensureUnusedApproximateColdPath(size_t n, size_t total);

 public:
  explicit LifoAlloc(size_t defaultChunkSize)
      : peakSize_(0)
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
        ,
        fallibleScope_(true)
#endif
  {
    reset(defaultChunkSize);
  }

  // Set the threshold above which data is allocated in its own chunk, outside
  // the space used for small allocations.
  void disableOversize() { oversizeThreshold_ = SIZE_MAX; }
  void setOversizeThreshold(size_t oversizeThreshold) {
    MOZ_ASSERT(oversizeThreshold <= defaultChunkSize_);
    oversizeThreshold_ = oversizeThreshold;
  }

  // Steal allocated chunks from |other|.
  void steal(LifoAlloc* other);

  // Append all chunks from |other|. They are removed from |other|.
  void transferFrom(LifoAlloc* other);

  // Append unused chunks from |other|. They are removed from |other|.
  void transferUnusedFrom(LifoAlloc* other);

  ~LifoAlloc() { freeAll(); }

  size_t defaultChunkSize() const { return defaultChunkSize_; }

  // Frees all held memory.
  void freeAll();

  static const unsigned HUGE_ALLOCATION = 50 * 1024 * 1024;
  void freeAllIfHugeAndUnused() {
    if (markCount == 0 && curSize_ > HUGE_ALLOCATION) {
      freeAll();
    }
  }

  MOZ_ALWAYS_INLINE
  void* alloc(size_t n) {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    // Only simulate OOMs when we are not using the LifoAlloc as an
    // infallible allocator.
    if (fallibleScope_) {
      JS_OOM_POSSIBLY_FAIL();
    }
#endif
    return allocImpl(n);
  }

  // Allocates |n| bytes if we can guarantee that we will have
  // |needed| unused bytes remaining. Returns nullptr if we can't.
  // This is useful for maintaining our ballast invariants while
  // attempting fallible allocations.
  MOZ_ALWAYS_INLINE
  void* allocEnsureUnused(size_t n, size_t needed) {
    JS_OOM_POSSIBLY_FAIL();
    MOZ_ASSERT(fallibleScope_);

    Mark m = mark();
    void* result = allocImpl(n);
    if (!ensureUnusedApproximate(needed)) {
      release(m);
      return nullptr;
    }
    cancelMark(m);
    return result;
  }
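
  // Illustrative sketch (hypothetical sizes): take |n| bytes only if a
  // ballast of |needed| bytes can still be guaranteed afterwards; on failure,
  // the allocation is unwound and nothing is leaked:
  //
  //   if (void* p = lifo.allocEnsureUnused(128, 1024)) {
  //     // use |p|; roughly 1024 unused bytes are still available
  //   }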

  template <typename T, typename... Args>
  MOZ_ALWAYS_INLINE T* newWithSize(size_t n, Args&&... args) {
    MOZ_ASSERT(n >= sizeof(T), "must request enough space to store a T");
    static_assert(alignof(T) <= detail::LIFO_ALLOC_ALIGN,
                  "LifoAlloc must provide enough alignment to store T");
    void* ptr = alloc(n);
    if (!ptr) {
      return nullptr;
    }

    return new (ptr) T(std::forward<Args>(args)...);
  }

  MOZ_ALWAYS_INLINE
  void* allocInfallible(size_t n) {
    AutoEnterOOMUnsafeRegion oomUnsafe;
    if (void* result = allocImpl(n)) {
      return result;
    }
    oomUnsafe.crash("LifoAlloc::allocInfallible");
    return nullptr;
  }

  // Ensures that enough space exists to satisfy N bytes worth of
  // allocation requests, not necessarily contiguous. Note that this does
  // not guarantee a successful single allocation of N bytes.
  MOZ_ALWAYS_INLINE
  MOZ_MUST_USE bool ensureUnusedApproximate(size_t n) {
    AutoFallibleScope fallibleAllocator(this);
    size_t total = 0;
    if (!chunks_.empty()) {
      total += chunks_.last()->unused();
      if (total >= n) {
        return true;
      }
    }

    return ensureUnusedApproximateColdPath(n, total);
  }

  MOZ_ALWAYS_INLINE
  void setAsInfallibleByDefault() {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    fallibleScope_ = false;
#endif
  }

  class MOZ_NON_TEMPORARY_CLASS AutoFallibleScope {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    LifoAlloc* lifoAlloc_;
    bool prevFallibleScope_;
    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER

   public:
    explicit AutoFallibleScope(
        LifoAlloc* lifoAlloc MOZ_GUARD_OBJECT_NOTIFIER_PARAM) {
      MOZ_GUARD_OBJECT_NOTIFIER_INIT;
      lifoAlloc_ = lifoAlloc;
      prevFallibleScope_ = lifoAlloc->fallibleScope_;
      lifoAlloc->fallibleScope_ = true;
    }

    ~AutoFallibleScope() { lifoAlloc_->fallibleScope_ = prevFallibleScope_; }
#else
   public:
    explicit AutoFallibleScope(LifoAlloc*) {}
#endif
  };

  template <typename T>
  T* newArray(size_t count) {
    static_assert(std::is_trivial_v<T>,
                  "T must be trivially constructible so that constructors "
                  "need not be called");
    static_assert(std::is_trivially_destructible_v<T>,
                  "T must be trivially destructible so destructors don't "
                  "need to be called when the LifoAlloc is freed");
    return newArrayUninitialized<T>(count);
  }
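
  // Illustrative sketch (hypothetical usage): POD arrays need no constructor
  // or destructor calls, so they can live directly in LifoAlloc memory:
  //
  //   uint32_t* table = lifo.newArray<uint32_t>(16);
  //   if (table) {
  //     table[0] = 42;  // elements are uninitialized until written
  //   }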

  // Create an array with uninitialized elements of type |T|.
  // The caller is responsible for initialization.
  template <typename T>
  T* newArrayUninitialized(size_t count) {
    size_t bytes;
    if (MOZ_UNLIKELY(!CalculateAllocSize<T>(count, &bytes))) {
      return nullptr;
    }
    return static_cast<T*>(alloc(bytes));
  }

  class Mark {
    friend class LifoAlloc;
    detail::BumpChunk::Mark chunk;
    detail::BumpChunk::Mark oversize;
  };

  // Note: MOZ_NEVER_INLINE is a workaround for a Clang 9 (PGO)
  // miscompilation. See bug 1583907.
  MOZ_NEVER_INLINE Mark mark();

  void release(Mark mark);

 private:
  void cancelMark(Mark mark) { markCount--; }

 public:
  void releaseAll() {
    MOZ_ASSERT(!markCount);

    // When releasing all chunks, we can no longer determine which chunks were
    // transferred and which were not, so simply clear the heuristic to zero
    // right away.
    smallAllocsSize_ = 0;

    for (detail::BumpChunk& bc : chunks_) {
      bc.release();
    }
    unused_.appendAll(std::move(chunks_));

    // On release, we free any oversize allocations instead of keeping them
    // in unused chunks.
    while (!oversize_.empty()) {
      UniqueBumpChunk bc = oversize_.popFirst();
      decrementCurSize(bc->computedSizeOfIncludingThis());
    }
  }

  // Protect the content of the LifoAlloc chunks.
#ifdef LIFO_CHUNK_PROTECT
  void setReadOnly();
  void setReadWrite();
#else
  void setReadOnly() const {}
  void setReadWrite() const {}
#endif

  // Get the total "used" (occupied bytes) count for the arena chunks.
  size_t used() const {
    size_t accum = 0;
    for (const detail::BumpChunk& chunk : chunks_) {
      accum += chunk.used();
    }
    return accum;
  }

  // Return true if the LifoAlloc does not currently contain any allocations.
  bool isEmpty() const {
    bool empty = chunks_.empty() ||
                 (chunks_.begin() == chunks_.last() && chunks_.last()->empty());
    MOZ_ASSERT_IF(!oversize_.empty(), !oversize_.last()->empty());
    return empty && oversize_.empty();
  }

  // Return the number of bytes remaining to allocate in the current chunk,
  // i.e. how many bytes we can allocate before needing a new chunk.
  size_t availableInCurrentChunk() const {
    if (chunks_.empty()) {
      return 0;
    }
    return chunks_.last()->unused();
  }

  // Get the total size of the arena chunks (including unused space).
  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    size_t n = 0;
    for (const detail::BumpChunk& chunk : chunks_) {
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    }
    for (const detail::BumpChunk& chunk : oversize_) {
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    }
    for (const detail::BumpChunk& chunk : unused_) {
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    }
    return n;
  }

  // Like sizeOfExcludingThis(), but includes the size of the LifoAlloc itself.
  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
  }

  // Get the current size of the arena chunks (including unused space and
  // bookkeeping space).
  size_t computedSizeOfExcludingThis() const { return curSize_; }

  // Get the peak size of the arena chunks (including unused space and
  // bookkeeping space).
  size_t peakSizeOfExcludingThis() const { return peakSize_; }

  // Doesn't perform construction; useful for lazily-initialized POD types.
  template <typename T>
  MOZ_ALWAYS_INLINE T* pod_malloc() {
    return static_cast<T*>(alloc(sizeof(T)));
  }

  JS_DECLARE_NEW_METHODS(new_, alloc, MOZ_ALWAYS_INLINE)
  JS_DECLARE_NEW_METHODS(newInfallible, allocInfallible, MOZ_ALWAYS_INLINE)

#ifdef DEBUG
  bool contains(void* ptr) const {
    for (const detail::BumpChunk& chunk : chunks_) {
      if (chunk.contains(ptr)) {
        return true;
      }
    }
    for (const detail::BumpChunk& chunk : oversize_) {
      if (chunk.contains(ptr)) {
        return true;
      }
    }
    return false;
  }
#endif

  // Iterate over the data allocated in a LifoAlloc, and interpret it.
  class Enum {
    friend class LifoAlloc;
    friend class detail::BumpChunk;

    // Iterator over the list of bump chunks.
    BumpChunkList::Iterator chunkIt_;
    BumpChunkList::Iterator chunkEnd_;
    // Read head (must be within the current chunk).
    uint8_t* head_;

    // If there is not enough room in the remaining block for |size|,
    // advance to the next block and update the position.
    uint8_t* seekBaseAndAdvanceBy(size_t size) {
      MOZ_ASSERT(!empty());

      uint8_t* aligned = detail::BumpChunk::nextAllocBase(head_);
      if (detail::BumpChunk::nextAllocEnd(aligned, size) > chunkIt_->end()) {
        ++chunkIt_;
        aligned = chunkIt_->begin();
        // The current code assumes that if we have a chunk, then we
        // have allocated something in it.
        MOZ_ASSERT(!chunkIt_->empty());
      }
      head_ = detail::BumpChunk::nextAllocEnd(aligned, size);
      MOZ_ASSERT(head_ <= chunkIt_->end());
      return aligned;
    }

   public:
    explicit Enum(LifoAlloc& alloc)
        : chunkIt_(alloc.chunks_.begin()),
          chunkEnd_(alloc.chunks_.end()),
          head_(nullptr) {
      MOZ_RELEASE_ASSERT(alloc.oversize_.empty());
      if (chunkIt_ != chunkEnd_) {
        head_ = chunkIt_->begin();
      }
    }

    // Return true if there are no more bytes to enumerate.
    bool empty() {
      return chunkIt_ == chunkEnd_ ||
             (chunkIt_->next() == chunkEnd_.get() && head_ >= chunkIt_->end());
    }

    // Move the read position forward by the size of one T.
    template <typename T>
    T* read(size_t size = sizeof(T)) {
      return reinterpret_cast<T*>(read(size));
    }

    // Return a pointer to the item at the current position. This returns a
    // pointer to the inline storage, not a copy, and moves the read-head by
    // the requested |size|.
    void* read(size_t size) { return seekBaseAndAdvanceBy(size); }
  };
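
  // Illustrative sketch (hypothetical |Record| type): replay fixed-size
  // allocations in order; this only makes sense if the caller knows the size
  // of every allocation that was made:
  //
  //   for (LifoAlloc::Enum e(lifo); !e.empty();) {
  //     Record* r = e.read<Record>();
  //     // ... interpret *r ...
  //   }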
};

class MOZ_NON_TEMPORARY_CLASS LifoAllocScope {
  LifoAlloc* lifoAlloc;
  LifoAlloc::Mark mark;
  LifoAlloc::AutoFallibleScope fallibleScope;
  MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER

 public:
  explicit LifoAllocScope(LifoAlloc* lifoAlloc MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : lifoAlloc(lifoAlloc),
        mark(lifoAlloc->mark()),
        fallibleScope(lifoAlloc) {
    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
  }

  ~LifoAllocScope() {
    lifoAlloc->release(mark);

    /*
     * The parser can allocate enormous amounts of memory for large functions.
     * Eagerly free the memory now (which otherwise won't be freed until the
     * next GC) to avoid unnecessary OOMs.
     */
    lifoAlloc->freeAllIfHugeAndUnused();
  }

  LifoAlloc& alloc() { return *lifoAlloc; }
};
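
// Illustrative sketch (hypothetical parse function): allocations made through
// the scope are released when it goes out of scope:
//
//   bool parseSomething(LifoAlloc& lifo) {
//     LifoAllocScope scope(&lifo);
//     void* tmp = scope.alloc().alloc(256);
//     if (!tmp) {
//       return false;
//     }
//     // ... use |tmp|; it is reclaimed when |scope| is destroyed ...
//     return true;
//   }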

enum Fallibility { Fallible, Infallible };

template <Fallibility fb>
class LifoAllocPolicy {
  LifoAlloc& alloc_;

 public:
  MOZ_IMPLICIT LifoAllocPolicy(LifoAlloc& alloc) : alloc_(alloc) {}
  template <typename T>
  T* maybe_pod_malloc(size_t numElems) {
    size_t bytes;
    if (MOZ_UNLIKELY(!CalculateAllocSize<T>(numElems, &bytes))) {
      return nullptr;
    }
    void* p =
        fb == Fallible ? alloc_.alloc(bytes) : alloc_.allocInfallible(bytes);
    return static_cast<T*>(p);
  }
  template <typename T>
  T* maybe_pod_calloc(size_t numElems) {
    T* p = maybe_pod_malloc<T>(numElems);
    if (MOZ_UNLIKELY(!p)) {
      return nullptr;
    }
    memset(p, 0, numElems * sizeof(T));
    return p;
  }
  template <typename T>
  T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
    T* n = maybe_pod_malloc<T>(newSize);
    if (MOZ_UNLIKELY(!n)) {
      return nullptr;
    }
    MOZ_ASSERT(!(oldSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value));
    memcpy(n, p, std::min(oldSize * sizeof(T), newSize * sizeof(T)));
    return n;
  }
  template <typename T>
  T* pod_malloc(size_t numElems) {
    return maybe_pod_malloc<T>(numElems);
  }
  template <typename T>
  T* pod_calloc(size_t numElems) {
    return maybe_pod_calloc<T>(numElems);
  }
  template <typename T>
  T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
    return maybe_pod_realloc<T>(p, oldSize, newSize);
  }
  template <typename T>
  void free_(T* p, size_t numElems) {}
  void reportAllocOverflow() const {}
  MOZ_MUST_USE bool checkSimulatedOOM() const {
    return fb == Infallible || !js::oom::ShouldFailWithOOM();
  }
};
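
// Illustrative sketch (assumes js::Vector from "js/Vector.h"): containers can
// draw their storage from a LifoAlloc through this policy, so their memory is
// reclaimed in bulk with the allocator rather than freed element-wise:
//
//   js::Vector<int, 8, LifoAllocPolicy<Fallible>> vec(
//       LifoAllocPolicy<Fallible>(lifo));
//   if (!vec.append(1)) {
//     return false;  // simulated or real OOM
//   }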

}  // namespace js

#endif /* ds_LifoAlloc_h */