/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef ds_LifoAlloc_h
#define ds_LifoAlloc_h

#include "mozilla/Attributes.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/MemoryChecking.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/PodOperations.h"
#include "mozilla/TemplateLib.h"

#include <algorithm>
#include <new>
#include <stddef.h>  // size_t
#include <type_traits>
#include <utility>

// [SMDOC] LifoAlloc bump allocator
//
// This file defines an allocator named LifoAlloc, a bump allocator which
// makes allocations fast but is not able to reclaim individual allocations.
//
// * Allocation principle
//
// In practice a LifoAlloc is implemented using a list of BumpChunks, which
// are contiguous memory areas chained in a single linked list.
//
// When an allocation is performed, we check if there is space in the last
// chunk. If there is we bump the pointer of the last chunk and return the
// previous value of the pointer. Otherwise we allocate a new chunk which is
// large enough and perform the allocation the same way.
//
// Each allocation is made with 2 main functions, called
// BumpChunk::nextAllocBase and BumpChunk::nextAllocEnd. These functions are
// made to avoid duplicating logic, such as allocating, checking if we can
// allocate or reserving a given buffer space. They are made to align the
// pointer for the next allocation (8-byte aligned), and also to reserve some
// red-zones to improve reports of our security instrumentation. (see Security
// features below)
//
// Chunk sizes follow the heuristic implemented in NextSize (LifoAlloc.cpp),
// which doubles the size until we reach 1 MB and then continues with a
// smaller geometric series. This heuristic is meant to reduce the number of
// allocations, such that we spend less time allocating/freeing chunks of a
// few KB at a time.
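//
// As a minimal sketch of the basic allocation path (hypothetical caller
// code; the 4096-byte default chunk size is an arbitrary choice):
//
//   LifoAlloc lifo(4096);      // default size used for new chunks
//   void* a = lifo.alloc(24);  // bumps the pointer of the last chunk
//   void* b = lifo.alloc(40);  // 8-byte aligned, placed after a's red-zone
//   if (!a || !b) {
//     // handle OOM
//   }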
//
// ** Oversize allocations
//
// When allocating with a LifoAlloc, we distinguish 2 different kinds of
// allocations, the small allocations and the large allocations. The reason
// for splitting them in 2 sets is to avoid wasting memory.
//
// With a single linked list of chunks, making oversized allocations can
// cause chunks to contain a lot of wasted space, as new chunks would have to
// be allocated to fit these allocations while the space of the previous
// chunk would remain unused.
//
// Oversize allocations can be disabled or the threshold customized with
// disableOversize and setOversizeThreshold; the threshold must be smaller
// than the default chunk size with which the LifoAlloc was initialized.
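//
// For illustration, a sketch of how the threshold could be tuned
// (hypothetical caller code):
//
//   LifoAlloc lifo(4096);
//   lifo.setOversizeThreshold(1024);  // allocations larger than 1024 bytes
//                                     // each get a dedicated oversize chunk
//   void* big = lifo.alloc(2048);     // goes in the oversize list
//   lifo.disableOversize();           // all allocations now share chunks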
//
// ** LifoAllocScope (mark & release)
//
// As the memory cannot be reclaimed except when the LifoAlloc structure is
// deleted, the LifoAllocScope structure is used to create scopes, related to
// a stacked task. When going out of a LifoAllocScope the memory associated
// with the scope is marked as unused but not reclaimed. This implies that
// the memory allocated for one task can be reused for a similar task later
// on. (see Safety)
//
// LifoAllocScope is based on mark and release functions. The mark function
// is used to record the offsets at which a LifoAllocScope got created. The
// release function takes the Mark as input and flags all memory allocated
// after the mark creation as unused.
//
// When all the memory of a BumpChunk is released, the chunk is moved to a
// list of unused chunks which will later be reused by new allocations.
//
// A bump chunk allocator normally has a single bump pointer, whereas we have
// 2 (see Oversize allocations). By doing so, we lose the total ordering of
// allocations that a single linked list would provide.
//
// However, we rely on the ordering of allocations with LifoAllocScope, i.e.
// when mark and release functions are used. Thus the LifoAlloc::Mark is
// composed of 2 marks, one for each single linked list of allocations, to
// keep both lists of allocations ordered.
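//
// A minimal sketch of scoped usage (hypothetical caller code):
//
//   LifoAlloc lifo(4096);
//   {
//     LifoAllocScope scope(&lifo);          // records a Mark on construction
//     void* tmp = scope.alloc().alloc(64);
//     // ... use tmp within the scope ...
//   }                                       // release(mark): the memory is
//                                           // flagged unused, chunks are kept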
//
// ** Infallible Allocator
//
// LifoAlloc can also be used as an infallible allocator. This requires the
// user to periodically ensure that enough space has been reserved to satisfy
// the upcoming set of allocations, by calling the
// LifoAlloc::ensureUnusedApproximate or LifoAlloc::allocEnsureUnused
// functions. Between 2 calls of these functions, functions such as
// allocInfallible can be used without checking against nullptr, as long as
// there is a bounded number of such calls and all allocations, including
// their red-zones, fit in the reserved space.
//
// The infallible allocator mode can be made the default by calling
// setAsInfallibleByDefault, in which case an AutoFallibleScope should be used
// to make any large allocations. Failing to do so will raise an issue when
// running the LifoAlloc with the OOM Simulator. (see Security features)
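//
// A sketch of the intended calling pattern (hypothetical caller code; the
// reserved byte count is an arbitrary ballast value):
//
//   LifoAlloc lifo(4096);
//   if (!lifo.ensureUnusedApproximate(256)) {
//     return false;                        // reserve space fallibly up-front
//   }
//   void* a = lifo.allocInfallible(32);    // no nullptr checks needed as long
//   void* b = lifo.allocInfallible(64);    // as allocations fit the reserve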
//
// * LifoAlloc::Enum Iterator
//
// A LifoAlloc is used for backing the store-buffer of the Garbage Collector
// (GC). The store-buffer appends data as it is being reported during
// incremental GC. The LifoAlloc::Enum class is used for iterating over the
// set of allocations made within the LifoAlloc.
//
// However, one must take extra care to use the proper associated types for
// the data written to and read out of the LifoAlloc. The iterator reuses the
// same logic as the allocator in order to skip red-zones.
//
// At the moment, the iterator will cause a hard failure if any oversize
// allocations were made.
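//
// A minimal read-back sketch (hypothetical caller code, error checks
// omitted; the reader must use the same sizes and types as the writer did):
//
//   LifoAlloc lifo(4096);
//   *lifo.pod_malloc<uint32_t>() = 42;
//   *lifo.pod_malloc<uint32_t>() = 43;
//   for (LifoAlloc::Enum e(lifo); !e.empty();) {
//     uint32_t* p = e.read<uint32_t>();  // advances past the red-zone
//     // ... process *p ...
//   }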
//
// * Safety
//
// A LifoAlloc is neither thread-safe nor interrupt-safe. It should only be
// manipulated in one thread of execution at a time. It can be transferred
// from one thread to another but should not be used concurrently.
//
// When using LifoAllocScope, no pointer to data allocated within a
// LifoAllocScope should be stored in data allocated before the latest
// LifoAllocScope. This kind of issue can hide in different forms, such as
// appending to a Vector backed by a LifoAlloc, which can resize and move the
// data below the LifoAllocScope, thus causing a use-after-free once the
// LifoAllocScope is left.
//
// * Security features
//
// ** Single Linked List
//
// For sanity reasons this LifoAlloc implementation makes use of its own
// single linked list implementation based on unique pointers (UniquePtr).
// The reason for this is to ensure that a BumpChunk is owned only once, thus
// preventing use-after-free issues.
//
// ** OOM Simulator
//
// The OOM simulator is controlled by the JS_OOM_BREAKPOINT macro, and used to
// check any fallible allocation for potential OOM. Fallible functions are
// instrumented with JS_OOM_POSSIBLY_FAIL(); function calls, and are expected
// to return null on failures.
//
// Besides simulating OOMs, LifoAlloc is instrumented in DEBUG and OOM
// Simulator builds to check the correctness of the Infallible Allocator
// state. When using a LifoAlloc as an infallible allocator, enough space
// should always be reserved for the next allocations. Therefore, to check
// this invariant, LifoAlloc::newChunkWithCapacity checks that any new chunks
// are allocated within a fallible scope, under AutoFallibleScope.
//
// ** Address Sanitizers & Valgrind
//
// When manipulating memory in a LifoAlloc, the memory remains contiguous and
// therefore subject to potential buffer overflow/underflow. To check for
// these memory corruptions, the macro LIFO_HAVE_MEM_CHECKS is used to add
// red-zones with LIFO_MAKE_MEM_NOACCESS and LIFO_MAKE_MEM_UNDEFINED.
//
// The red-zone is a minimum space left in between 2 allocations. Any access
// to these red-zones should warn in both Valgrind and ASan builds.
//
// The red-zone size is defined in BumpChunk::RedZoneSize and defaults to 0
// if no instrumentation is expected, and 16 otherwise.
//
// ** Magic Number
//
// A simple sanity check is present in every BumpChunk in the form of a
// constant field which is never mutated. The BumpChunk::magic_ field is
// initialized to the "Lif" string. Any mutation of this value indicates a
// memory corruption.
//
// This magic number is enabled in all MOZ_DIAGNOSTIC_ASSERT_ENABLED builds,
// which implies that all Nightly and dev-edition versions of
// Firefox/SpiderMonkey contain this instrumentation.
//
// ** Memory protection
//
// LifoAlloc chunks hold a lot of memory. When the memory is known to be
// unused and unchanged for some period of time, such as when moving from one
// thread to another, it can be set as read-only with LifoAlloc::setReadOnly
// and reset as read-write with LifoAlloc::setReadWrite.
//
// This code is guarded by LIFO_CHUNK_PROTECT and at the moment only enabled
// in DEBUG builds, in order to avoid fragmentation of the TLB, which might
// cause mprotect to run out of memory.
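//
// A sketch of the intended protection pattern (hypothetical caller code):
//
//   lifo.setReadOnly();   // e.g. while the LifoAlloc is parked, or owned by
//                         // another thread which must not mutate it
//   // ... later, before allocating or releasing again ...
//   lifo.setReadWrite();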

#include "js/UniquePtr.h"
#include "util/Memory.h"
#include "util/Poison.h"

namespace js {

namespace detail {

template <typename T, typename D>
class SingleLinkedList;

template <typename T, typename D = JS::DeletePolicy<T>>
class SingleLinkedListElement {
  friend class SingleLinkedList<T, D>;
  js::UniquePtr<T, D> next_;

 public:
  SingleLinkedListElement() : next_(nullptr) {}
  ~SingleLinkedListElement() { MOZ_ASSERT(!next_); }

  T* next() const { return next_.get(); }
};

// Single linked list which uses UniquePtr to hold the next pointers.
// UniquePtr is used to ensure that none of the elements are used
// simultaneously in 2 different lists.
template <typename T, typename D = JS::DeletePolicy<T>>
class SingleLinkedList {
 private:
  // First element of the list, which owns the next element and ensures that
  // this list is the only owner of the element.
  UniquePtr<T, D> head_;

  // Weak pointer to the last element of the list.
  T* last_;

  void assertInvariants() {
    MOZ_ASSERT(bool(head_) == bool(last_));
    MOZ_ASSERT_IF(last_, !last_->next_);
  }

 public:
  SingleLinkedList() : head_(nullptr), last_(nullptr) { assertInvariants(); }

  SingleLinkedList(SingleLinkedList&& other)
      : head_(std::move(other.head_)), last_(other.last_) {
    other.last_ = nullptr;
    assertInvariants();
    other.assertInvariants();
  }

  ~SingleLinkedList() {
    MOZ_ASSERT(!head_);
    MOZ_ASSERT(!last_);
  }

  // Move the elements of the |other| list into the current one, and
  // implicitly remove all the elements of the current list.
  SingleLinkedList& operator=(SingleLinkedList&& other) {
    head_ = std::move(other.head_);
    last_ = other.last_;
    other.last_ = nullptr;
    assertInvariants();
    other.assertInvariants();
    return *this;
  }

  bool empty() const { return !last_; }

  // Iterates over the elements of the list; this is used to avoid raw
  // manipulation of pointers as much as possible.
  class Iterator {
    T* current_;

   public:
    explicit Iterator(T* current) : current_(current) {}

    T& operator*() const { return *current_; }
    T* operator->() const { return current_; }
    T* get() const { return current_; }

    const Iterator& operator++() {
      current_ = current_->next();
      return *this;
    }

    bool operator!=(const Iterator& other) const {
      return current_ != other.current_;
    }
    bool operator==(const Iterator& other) const {
      return current_ == other.current_;
    }
  };

  Iterator begin() const { return Iterator(head_.get()); }
  Iterator end() const { return Iterator(nullptr); }
  Iterator last() const { return Iterator(last_); }

  // Split the list in 2 single linked lists after the element given as
  // argument.  The front of the list remains in the current list, while the
  // back goes in the newly created linked list.
  //
  // This is used for example to extract one element from a list. (see
  // LifoAlloc::getOrCreateChunk)
  SingleLinkedList splitAfter(T* newLast) {
    MOZ_ASSERT(newLast);
    SingleLinkedList result;
    if (newLast->next_) {
      result.head_ = std::move(newLast->next_);
      result.last_ = last_;
      last_ = newLast;
    }
    assertInvariants();
    result.assertInvariants();
    return result;
  }

  void pushFront(UniquePtr<T, D>&& elem) {
    if (!last_) {
      last_ = elem.get();
    }
    elem->next_ = std::move(head_);
    head_ = std::move(elem);
    assertInvariants();
  }

  void append(UniquePtr<T, D>&& elem) {
    if (last_) {
      last_->next_ = std::move(elem);
      last_ = last_->next_.get();
    } else {
      head_ = std::move(elem);
      last_ = head_.get();
    }
    assertInvariants();
  }
  void appendAll(SingleLinkedList&& list) {
    if (list.empty()) {
      return;
    }
    if (last_) {
      last_->next_ = std::move(list.head_);
    } else {
      head_ = std::move(list.head_);
    }
    last_ = list.last_;
    list.last_ = nullptr;
    assertInvariants();
    list.assertInvariants();
  }
  void steal(SingleLinkedList&& list) {
    head_ = std::move(list.head_);
    last_ = list.last_;
    list.last_ = nullptr;
    assertInvariants();
    list.assertInvariants();
  }
  void prependAll(SingleLinkedList&& list) {
    list.appendAll(std::move(*this));
    steal(std::move(list));
  }
  UniquePtr<T, D> popFirst() {
    MOZ_ASSERT(head_);
    UniquePtr<T, D> result = std::move(head_);
    head_ = std::move(result->next_);
    if (!head_) {
      last_ = nullptr;
    }
    assertInvariants();
    return result;
  }
};

static const size_t LIFO_ALLOC_ALIGN = 8;

MOZ_ALWAYS_INLINE
uint8_t* AlignPtr(uint8_t* orig) {
  static_assert(mozilla::IsPowerOfTwo(LIFO_ALLOC_ALIGN),
                "LIFO_ALLOC_ALIGN must be a power of two");

  uint8_t* result = (uint8_t*)AlignBytes(uintptr_t(orig), LIFO_ALLOC_ALIGN);
  MOZ_ASSERT(uintptr_t(result) % LIFO_ALLOC_ALIGN == 0);
  return result;
}

// A BumpChunk represents a single memory allocation made with the system
// allocator. As the owner of the memory, it is responsible for the allocation
// and the deallocation.
//
// This structure is only movable, not copyable.
class BumpChunk : public SingleLinkedListElement<BumpChunk> {
 private:
  // Pointer to the last byte allocated in this chunk.
  uint8_t* bump_;
  // Pointer to the first byte after this chunk.
  uint8_t* const capacity_;

#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
  // Magic number used to check against poisoned values.
  const uintptr_t magic_ : 24;
  static constexpr uintptr_t magicNumber = uintptr_t(0x4c6966);
#endif

#if defined(DEBUG)
#  define LIFO_CHUNK_PROTECT 1
#endif

  // Poison the memory with memset, in order to catch errors due to
  // use-after-free, with JS_LIFO_UNDEFINED_PATTERN pattern, or to catch
  // use-before-init with JS_LIFO_UNINITIALIZED_PATTERN.
#if defined(DEBUG)
#  define LIFO_HAVE_MEM_CHECKS 1
#  define LIFO_MAKE_MEM_NOACCESS(addr, size)       \
    do {                                           \
      uint8_t* base = (addr);                      \
      size_t sz = (size);                          \
      MOZ_MAKE_MEM_UNDEFINED(base, sz);            \
      memset(base, JS_LIFO_UNDEFINED_PATTERN, sz); \
      MOZ_MAKE_MEM_NOACCESS(base, sz);             \
    } while (0)

#  define LIFO_MAKE_MEM_UNDEFINED(addr, size)          \
    do {                                               \
      uint8_t* base = (addr);                          \
      size_t sz = (size);                              \
      MOZ_MAKE_MEM_UNDEFINED(base, sz);                \
      memset(base, JS_LIFO_UNINITIALIZED_PATTERN, sz); \
      MOZ_MAKE_MEM_UNDEFINED(base, sz);                \
    } while (0)

#elif defined(MOZ_HAVE_MEM_CHECKS)
#  define LIFO_HAVE_MEM_CHECKS 1
#  define LIFO_MAKE_MEM_NOACCESS(addr, size) \
    MOZ_MAKE_MEM_NOACCESS((addr), (size))
#  define LIFO_MAKE_MEM_UNDEFINED(addr, size) \
    MOZ_MAKE_MEM_UNDEFINED((addr), (size))
#endif

#ifdef LIFO_HAVE_MEM_CHECKS
  // Red zone reserved after each allocation.
  static constexpr size_t RedZoneSize = 16;
#else
  static constexpr size_t RedZoneSize = 0;
#endif

  void assertInvariants() {
    MOZ_DIAGNOSTIC_ASSERT(magic_ == magicNumber);
    MOZ_ASSERT(begin() <= end());
    MOZ_ASSERT(end() <= capacity_);
  }

  BumpChunk& operator=(const BumpChunk&) = delete;
  BumpChunk(const BumpChunk&) = delete;

  explicit BumpChunk(uintptr_t capacity)
      : bump_(begin()),
        capacity_(base() + capacity)
#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
        ,
        magic_(magicNumber)
#endif
  {
    assertInvariants();
#if defined(LIFO_HAVE_MEM_CHECKS)
    // The memory is freshly allocated and marked as undefined by the
    // allocator of the BumpChunk. Instead, we mark this memory as
    // no-access, as it has not been allocated within the BumpChunk.
    LIFO_MAKE_MEM_NOACCESS(bump_, capacity_ - bump_);
#endif
  }

  // Cast |this| into a uint8_t* pointer.
  //
  // Warning: Are you sure you do not want to use begin() instead?
  const uint8_t* base() const { return reinterpret_cast<const uint8_t*>(this); }
  uint8_t* base() { return reinterpret_cast<uint8_t*>(this); }

  // Update the bump pointer to any value contained in this chunk, which is
  // above the private fields of this chunk.
  //
  // The memory is poisoned and flagged as no-access when the bump pointer is
  // moving backward, such as when memory is released, or when a Mark is used
  // to unwind previous allocations.
  //
  // The memory is flagged as undefined when the bump pointer is moving
  // forward.
  void setBump(uint8_t* newBump) {
    assertInvariants();
    MOZ_ASSERT(begin() <= newBump);
    MOZ_ASSERT(newBump <= capacity_);
#if defined(LIFO_HAVE_MEM_CHECKS)
    // Poison/Unpoison memory that we just free'd/allocated.
    if (bump_ > newBump) {
      LIFO_MAKE_MEM_NOACCESS(newBump, bump_ - newBump);
    } else if (newBump > bump_) {
      MOZ_ASSERT(newBump - RedZoneSize >= bump_);
      LIFO_MAKE_MEM_UNDEFINED(bump_, newBump - RedZoneSize - bump_);
      // The area [newBump - RedZoneSize .. newBump[ is already flagged as
      // no-access either with the previous if-branch or with the
      // BumpChunk constructor. No need to mark it twice.
    }
#endif
    bump_ = newBump;
  }

 public:
  ~BumpChunk() { release(); }

  // Returns true if this chunk contains no allocated content.
  bool empty() const { return end() == begin(); }

  // Returns the size in bytes of the allocated space, including the space
  // consumed by the alignment of the allocations.
  size_t used() const { return end() - begin(); }

  // These are used for manipulating a chunk as if it was a vector of bytes,
  // and used for iterating over the content of the buffer (see
  // LifoAlloc::Enum)
  inline const uint8_t* begin() const;
  inline uint8_t* begin();
  uint8_t* end() const { return bump_; }

  // This function is the only way to allocate and construct a chunk. It
  // returns a UniquePtr to the newly allocated chunk.  The size given as
  // argument includes the space needed for the header of the chunk.
  static UniquePtr<BumpChunk> newWithCapacity(size_t size);

  // Report allocation.
  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return mallocSizeOf(this);
  }

  // Report allocation size.
  size_t computedSizeOfIncludingThis() const { return capacity_ - base(); }

  // Opaque type used to carry a pointer to the current location of the bump_
  // pointer, within a BumpChunk.
  class Mark {
    // Chunk which owns the pointer.
    BumpChunk* chunk_;
    // Record of the bump_ pointer of the BumpChunk.
    uint8_t* bump_;

    friend class BumpChunk;
    Mark(BumpChunk* chunk, uint8_t* bump) : chunk_(chunk), bump_(bump) {}

   public:
    Mark() : chunk_(nullptr), bump_(nullptr) {}

    BumpChunk* markedChunk() const { return chunk_; }
  };

  // Return a mark to be able to unwind future allocations with the release
  // function. (see LifoAllocScope)
  Mark mark() { return Mark(this, end()); }

  // Check if a pointer is part of the allocated data of this chunk.
  bool contains(const void* ptr) const {
    // Note: We cannot check "ptr < end()" because a mark has a 0-size
    // length.
    return begin() <= ptr && ptr <= end();
  }

  // Check if a mark is part of the allocated data of this chunk.
  bool contains(Mark m) const {
    MOZ_ASSERT(m.chunk_ == this);
    return contains(m.bump_);
  }

  // Release the memory allocated in this chunk. This function does not call
  // any of the destructors.
  void release() { setBump(begin()); }

  // Release the memory allocated in this chunk since the corresponding mark
  // got created. This function does not call any of the destructors.
  void release(Mark m) {
    MOZ_RELEASE_ASSERT(contains(m));
    setBump(m.bump_);
  }

  // Given an amount, compute the total size of a chunk for it: reserved
  // space before |begin()|, space for |amount| bytes, and red-zone space
  // after those bytes that will ultimately end at |capacity_|.
  [[nodiscard]] static inline bool allocSizeWithRedZone(size_t amount,
                                                        size_t* size);

  // Given a bump chunk pointer, find the next base/end pointers. This is
  // useful for having consistent allocations, and iterating over known size
  // allocations.
  static uint8_t* nextAllocBase(uint8_t* e) { return detail::AlignPtr(e); }
  static uint8_t* nextAllocEnd(uint8_t* b, size_t n) {
    return b + n + RedZoneSize;
  }

  // Returns true if the unused space is large enough for an allocation of
  // |n| bytes.
  bool canAlloc(size_t n) const {
    uint8_t* newBump = nextAllocEnd(nextAllocBase(end()), n);
    // bump_ <= newBump is necessary to catch overflow.
    return bump_ <= newBump && newBump <= capacity_;
  }

  // Space remaining in the current chunk.
  size_t unused() const {
    uint8_t* aligned = nextAllocBase(end());
    if (aligned < capacity_) {
      return capacity_ - aligned;
    }
    return 0;
  }

  // Try to perform an allocation of size |n|; returns nullptr if not possible.
  MOZ_ALWAYS_INLINE
  void* tryAlloc(size_t n) {
    uint8_t* aligned = nextAllocBase(end());
    uint8_t* newBump = nextAllocEnd(aligned, n);

    if (newBump > capacity_) {
      return nullptr;
    }

    // Check for overflow.
    if (MOZ_UNLIKELY(newBump < bump_)) {
      return nullptr;
    }

    MOZ_ASSERT(canAlloc(n));  // Ensure consistency between "can" and "try".
    setBump(newBump);
    return aligned;
  }

#ifdef LIFO_CHUNK_PROTECT
  void setReadOnly();
  void setReadWrite();
#else
  void setReadOnly() const {}
  void setReadWrite() const {}
#endif
};

// Space reserved for the BumpChunk internal data, and the alignment of the
// first allocation content. This can be used to ensure there is enough space
// for the next allocation (see LifoAlloc::newChunkWithCapacity).
static constexpr size_t BumpChunkReservedSpace =
    AlignBytes(sizeof(BumpChunk), LIFO_ALLOC_ALIGN);

[[nodiscard]] /* static */ inline bool BumpChunk::allocSizeWithRedZone(
    size_t amount, size_t* size) {
  constexpr size_t SpaceBefore = BumpChunkReservedSpace;
  static_assert((SpaceBefore % LIFO_ALLOC_ALIGN) == 0,
                "reserved space presumed already aligned");

  constexpr size_t SpaceAfter = RedZoneSize;  // may be zero

  constexpr size_t SpaceBeforeAndAfter = SpaceBefore + SpaceAfter;
  static_assert(SpaceBeforeAndAfter >= SpaceBefore,
                "intermediate addition must not overflow");

  *size = SpaceBeforeAndAfter + amount;
  return MOZ_LIKELY(*size >= SpaceBeforeAndAfter);
}
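
// As a worked example (assuming the 16-byte red-zone of instrumented builds):
// a request of |amount| = 100 bytes computes
// |*size| = BumpChunkReservedSpace + 16 + 100, i.e. the chunk header plus the
// payload plus the trailing red-zone, with overflow of the addition detected
// by the boolean return value.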

inline const uint8_t* BumpChunk::begin() const {
  return base() + BumpChunkReservedSpace;
}

inline uint8_t* BumpChunk::begin() { return base() + BumpChunkReservedSpace; }

}  // namespace detail

// LIFO bump allocator: used for phase-oriented and fast LIFO allocations.
//
// Note: We leave BumpChunks latent in the set of unused chunks after they've
// been released to avoid thrashing before a GC.
class LifoAlloc {
  using UniqueBumpChunk = js::UniquePtr<detail::BumpChunk>;
  using BumpChunkList = detail::SingleLinkedList<detail::BumpChunk>;

  // List of chunks containing allocated data of size smaller than the default
  // chunk size. In the common case, the last chunk of this list is always
  // used to perform the allocations. When the allocation cannot be performed,
  // we move a Chunk from the unused set to the list of used chunks.
  BumpChunkList chunks_;

  // List of chunks containing allocated data where each allocation is larger
  // than the oversize threshold. Each chunk contains exactly one allocation.
  // This reduces wasted space in the chunk list.
  //
  // Oversize chunks are allocated on demand and freed as soon as they are
  // released, instead of being pushed to the unused list.
  BumpChunkList oversize_;

  // Set of unused chunks, which can be reused for future allocations.
  BumpChunkList unused_;

  size_t markCount;
  size_t defaultChunkSize_;
  size_t oversizeThreshold_;

  // Size of all chunks in chunks_, oversize_, unused_ lists.
  size_t curSize_;
  size_t peakSize_;

  // Size of all chunks containing small bump allocations. This heuristic is
  // used to compute growth rate while ignoring chunks such as oversized,
  // now-unused, or transferred (which followed their own growth patterns).
  size_t smallAllocsSize_;

#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
  bool fallibleScope_;
#endif

  void operator=(const LifoAlloc&) = delete;
  LifoAlloc(const LifoAlloc&) = delete;

  // Return a BumpChunk that can perform an allocation of at least size |n|.
  UniqueBumpChunk newChunkWithCapacity(size_t n, bool oversize);

  // Reuse or allocate a BumpChunk that can perform an allocation of at least
  // size |n|; if successful it is placed at the end of the list of |chunks_|.
  UniqueBumpChunk getOrCreateChunk(size_t n);

  void reset(size_t defaultChunkSize);

  // Append unused chunks to the end of this LifoAlloc.
  void appendUnused(BumpChunkList&& otherUnused) {
#ifdef DEBUG
    for (detail::BumpChunk& bc : otherUnused) {
      MOZ_ASSERT(bc.empty());
    }
#endif
    unused_.appendAll(std::move(otherUnused));
  }

  // Append used chunks to the end of this LifoAlloc. We act as if all the
  // chunks in |this| are used, even if they're not, so memory may be wasted.
  void appendUsed(BumpChunkList&& otherChunks) {
    chunks_.appendAll(std::move(otherChunks));
  }

  // Track the amount of space allocated in used and unused chunks.
  void incrementCurSize(size_t size) {
    curSize_ += size;
    if (curSize_ > peakSize_) {
      peakSize_ = curSize_;
    }
  }
  void decrementCurSize(size_t size) {
    MOZ_ASSERT(curSize_ >= size);
    curSize_ -= size;
    MOZ_ASSERT(curSize_ >= smallAllocsSize_);
  }

  void* allocImplColdPath(size_t n);
  void* allocImplOversize(size_t n);

  MOZ_ALWAYS_INLINE
  void* allocImpl(size_t n) {
    void* result;
    // Give oversized allocations their own chunk instead of wasting space
    // due to fragmentation at the end of a normal chunk.
    if (MOZ_UNLIKELY(n > oversizeThreshold_)) {
      return allocImplOversize(n);
    }
    if (MOZ_LIKELY(!chunks_.empty() &&
                   (result = chunks_.last()->tryAlloc(n)))) {
      return result;
    }
    return allocImplColdPath(n);
  }

  // Check for space in unused chunks or allocate a new unused chunk.
  [[nodiscard]] bool ensureUnusedApproximateColdPath(size_t n, size_t total);

 public:
  explicit LifoAlloc(size_t defaultChunkSize)
      : peakSize_(0)
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
        ,
        fallibleScope_(true)
#endif
  {
    reset(defaultChunkSize);
  }

  // Set the threshold to allocate data in its own chunk outside the space for
  // small allocations.
  void disableOversize() { oversizeThreshold_ = SIZE_MAX; }
  void setOversizeThreshold(size_t oversizeThreshold) {
    MOZ_ASSERT(oversizeThreshold <= defaultChunkSize_);
    oversizeThreshold_ = oversizeThreshold;
  }

  // Steal allocated chunks from |other|.
  void steal(LifoAlloc* other);

  // Append all chunks from |other|. They are removed from |other|.
  void transferFrom(LifoAlloc* other);

  // Append unused chunks from |other|. They are removed from |other|.
  void transferUnusedFrom(LifoAlloc* other);

  ~LifoAlloc() { freeAll(); }

  size_t defaultChunkSize() const { return defaultChunkSize_; }

  // Frees all held memory.
  void freeAll();

  static const unsigned HUGE_ALLOCATION = 50 * 1024 * 1024;
  void freeAllIfHugeAndUnused() {
    if (markCount == 0 && curSize_ > HUGE_ALLOCATION) {
      freeAll();
    }
  }

  MOZ_ALWAYS_INLINE
  void* alloc(size_t n) {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    // Only simulate OOMs when we are not using the LifoAlloc as an
    // infallible allocator.
    if (fallibleScope_) {
      JS_OOM_POSSIBLY_FAIL();
    }
#endif
    return allocImpl(n);
  }

  // Allocates |n| bytes if we can guarantee that we will have
  // |needed| unused bytes remaining. Returns nullptr if we can't.
  // This is useful for maintaining our ballast invariants while
  // attempting fallible allocations.
  MOZ_ALWAYS_INLINE
  void* allocEnsureUnused(size_t n, size_t needed) {
    JS_OOM_POSSIBLY_FAIL();
    MOZ_ASSERT(fallibleScope_);

    Mark m = mark();
    void* result = allocImpl(n);
    if (!ensureUnusedApproximate(needed)) {
      release(m);
      return nullptr;
    }
    cancelMark(m);
    return result;
  }

  template <typename T, typename... Args>
  MOZ_ALWAYS_INLINE T* newWithSize(size_t n, Args&&... args) {
    MOZ_ASSERT(n >= sizeof(T), "must request enough space to store a T");
    static_assert(alignof(T) <= detail::LIFO_ALLOC_ALIGN,
                  "LifoAlloc must provide enough alignment to store T");
    void* ptr = alloc(n);
    if (!ptr) {
      return nullptr;
    }

    return new (ptr) T(std::forward<Args>(args)...);
  }

  MOZ_ALWAYS_INLINE
  void* allocInfallible(size_t n) {
    AutoEnterOOMUnsafeRegion oomUnsafe;
    if (void* result = allocImpl(n)) {
      return result;
    }
    oomUnsafe.crash("LifoAlloc::allocInfallible");
    return nullptr;
  }

  // Ensures that enough space exists to satisfy N bytes worth of
  // allocation requests, not necessarily contiguous. Note that this does
  // not guarantee a successful single allocation of N bytes.
  [[nodiscard]] MOZ_ALWAYS_INLINE bool ensureUnusedApproximate(size_t n) {
    AutoFallibleScope fallibleAllocator(this);
    size_t total = 0;
    if (!chunks_.empty()) {
      total += chunks_.last()->unused();
      if (total >= n) {
        return true;
      }
    }

    return ensureUnusedApproximateColdPath(n, total);
  }

  MOZ_ALWAYS_INLINE
  void setAsInfallibleByDefault() {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    fallibleScope_ = false;
#endif
  }

  class MOZ_NON_TEMPORARY_CLASS AutoFallibleScope {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    LifoAlloc* lifoAlloc_;
    bool prevFallibleScope_;

   public:
    explicit AutoFallibleScope(LifoAlloc* lifoAlloc) {
      lifoAlloc_ = lifoAlloc;
      prevFallibleScope_ = lifoAlloc->fallibleScope_;
      lifoAlloc->fallibleScope_ = true;
    }

    ~AutoFallibleScope() { lifoAlloc_->fallibleScope_ = prevFallibleScope_; }
#else
   public:
    explicit AutoFallibleScope(LifoAlloc*) {}
#endif
  };

  template <typename T>
  T* newArray(size_t count) {
    static_assert(std::is_trivial_v<T>,
                  "T must be trivially constructible so that constructors need "
                  "not be called");
    static_assert(std::is_trivially_destructible_v<T>,
                  "T must be trivially destructible so destructors don't need "
                  "to be called when the LifoAlloc is freed");
    return newArrayUninitialized<T>(count);
  }

  // Create an array with uninitialized elements of type |T|.
  // The caller is responsible for initialization.
  template <typename T>
  T* newArrayUninitialized(size_t count) {
    size_t bytes;
    if (MOZ_UNLIKELY(!CalculateAllocSize<T>(count, &bytes))) {
      return nullptr;
    }
    return static_cast<T*>(alloc(bytes));
  }

  class Mark {
    friend class LifoAlloc;
    detail::BumpChunk::Mark chunk;
    detail::BumpChunk::Mark oversize;
  };

  // Note: MOZ_NEVER_INLINE is a workaround for a Clang 9 (PGO) miscompilation.
  // See bug 1583907.
  MOZ_NEVER_INLINE Mark mark();

  void release(Mark mark);

 private:
  void cancelMark(Mark mark) { markCount--; }

 public:
  void releaseAll() {
    MOZ_ASSERT(!markCount);

    // When releasing all chunks, we can no longer determine which chunks were
    // transferred and which were not, so simply clear the heuristic to zero
    // right away.
    smallAllocsSize_ = 0;

    for (detail::BumpChunk& bc : chunks_) {
      bc.release();
    }
    unused_.appendAll(std::move(chunks_));

    // On release, we free any oversize allocations instead of keeping them
    // in unused chunks.
    while (!oversize_.empty()) {
      UniqueBumpChunk bc = oversize_.popFirst();
      decrementCurSize(bc->computedSizeOfIncludingThis());
    }
  }

  // Protect the content of the LifoAlloc chunks.
#ifdef LIFO_CHUNK_PROTECT
  void setReadOnly();
  void setReadWrite();
#else
  void setReadOnly() const {}
  void setReadWrite() const {}
#endif

  // Get the total "used" (occupied bytes) count for the arena chunks.
  size_t used() const {
    size_t accum = 0;
    for (const detail::BumpChunk& chunk : chunks_) {
      accum += chunk.used();
    }
    return accum;
  }

  // Return true if the LifoAlloc does not currently contain any allocations.
  bool isEmpty() const {
    bool empty = chunks_.empty() ||
                 (chunks_.begin() == chunks_.last() && chunks_.last()->empty());
    MOZ_ASSERT_IF(!oversize_.empty(), !oversize_.last()->empty());
    return empty && oversize_.empty();
  }

  // Return the number of bytes remaining to allocate in the current chunk,
  // i.e. how many bytes we can allocate before needing a new chunk.
  size_t availableInCurrentChunk() const {
    if (chunks_.empty()) {
      return 0;
    }
    return chunks_.last()->unused();
  }

  // Get the total size of the arena chunks (including unused space).
  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    size_t n = 0;
    for (const detail::BumpChunk& chunk : chunks_) {
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    }
    for (const detail::BumpChunk& chunk : oversize_) {
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    }
    for (const detail::BumpChunk& chunk : unused_) {
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    }
    return n;
  }

  // Like sizeOfExcludingThis(), but includes the size of the LifoAlloc itself.
  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
  }

  // Get the current size of the arena chunks (including unused space and
  // bookkeeping space).
  size_t computedSizeOfExcludingThis() const { return curSize_; }

  // Get the peak size of the arena chunks (including unused space and
  // bookkeeping space).
  size_t peakSizeOfExcludingThis() const { return peakSize_; }

  // Doesn't perform construction; useful for lazily-initialized POD types.
  template <typename T>
  MOZ_ALWAYS_INLINE T* pod_malloc() {
    return static_cast<T*>(alloc(sizeof(T)));
  }

  JS_DECLARE_NEW_METHODS(new_, alloc, MOZ_ALWAYS_INLINE)
  JS_DECLARE_NEW_METHODS(newInfallible, allocInfallible, MOZ_ALWAYS_INLINE)

#ifdef DEBUG
  bool contains(const void* ptr) const {
    for (const detail::BumpChunk& chunk : chunks_) {
      if (chunk.contains(ptr)) {
        return true;
      }
    }
    for (const detail::BumpChunk& chunk : oversize_) {
      if (chunk.contains(ptr)) {
        return true;
      }
    }
    return false;
  }
#endif

  // Iterate over the data allocated in a LifoAlloc, and interpret it.
  class Enum {
    friend class LifoAlloc;
    friend class detail::BumpChunk;

    // Iterator over the list of bump chunks.
    BumpChunkList::Iterator chunkIt_;
    BumpChunkList::Iterator chunkEnd_;
    // Read head (must be within chunk_).
    uint8_t* head_;

    // If there is not enough room in the remaining block for |size|,
    // advance to the next block and update the position.
    uint8_t* seekBaseAndAdvanceBy(size_t size) {
      MOZ_ASSERT(!empty());

      uint8_t* aligned = detail::BumpChunk::nextAllocBase(head_);
      if (detail::BumpChunk::nextAllocEnd(aligned, size) > chunkIt_->end()) {
        ++chunkIt_;
        aligned = chunkIt_->begin();
        // The current code assumes that if we have a chunk, then we
        // have allocated something in it.
        MOZ_ASSERT(!chunkIt_->empty());
      }
      head_ = detail::BumpChunk::nextAllocEnd(aligned, size);
      MOZ_ASSERT(head_ <= chunkIt_->end());
      return aligned;
    }

   public:
    explicit Enum(LifoAlloc& alloc)
        : chunkIt_(alloc.chunks_.begin()),
          chunkEnd_(alloc.chunks_.end()),
          head_(nullptr) {
      MOZ_RELEASE_ASSERT(alloc.oversize_.empty());
      if (chunkIt_ != chunkEnd_) {
        head_ = chunkIt_->begin();
      }
    }

    // Return true if there are no more bytes to enumerate.
    bool empty() {
      return chunkIt_ == chunkEnd_ ||
             (chunkIt_->next() == chunkEnd_.get() && head_ >= chunkIt_->end());
    }

    // Move the read position forward by the size of one T.
    template <typename T>
    T* read(size_t size = sizeof(T)) {
      return reinterpret_cast<T*>(read(size));
    }

    // Return a pointer to the item at the current position. This returns a
    // pointer to the inline storage, not a copy, and moves the read-head by
    // the requested |size|.
    void* read(size_t size) { return seekBaseAndAdvanceBy(size); }
  };
};

class MOZ_NON_TEMPORARY_CLASS LifoAllocScope {
  LifoAlloc* lifoAlloc;
  LifoAlloc::Mark mark;
  LifoAlloc::AutoFallibleScope fallibleScope;

 public:
  explicit LifoAllocScope(LifoAlloc* lifoAlloc)
      : lifoAlloc(lifoAlloc),
        mark(lifoAlloc->mark()),
        fallibleScope(lifoAlloc) {}

  ~LifoAllocScope() {
    lifoAlloc->release(mark);

    /*
     * The parser can allocate enormous amounts of memory for large functions.
     * Eagerly free the memory now (which otherwise won't be freed until the
     * next GC) to avoid unnecessary OOMs.
     */
    lifoAlloc->freeAllIfHugeAndUnused();
  }

  LifoAlloc& alloc() { return *lifoAlloc; }
};

enum Fallibility { Fallible, Infallible };

template <Fallibility fb>
class LifoAllocPolicy {
  LifoAlloc& alloc_;

 public:
  MOZ_IMPLICIT LifoAllocPolicy(LifoAlloc& alloc) : alloc_(alloc) {}
  template <typename T>
  T* maybe_pod_malloc(size_t numElems) {
    size_t bytes;
    if (MOZ_UNLIKELY(!CalculateAllocSize<T>(numElems, &bytes))) {
      return nullptr;
    }
    void* p =
        fb == Fallible ? alloc_.alloc(bytes) : alloc_.allocInfallible(bytes);
    return static_cast<T*>(p);
  }
  template <typename T>
  T* maybe_pod_calloc(size_t numElems) {
    T* p = maybe_pod_malloc<T>(numElems);
    if (MOZ_UNLIKELY(!p)) {
      return nullptr;
    }
    memset(p, 0, numElems * sizeof(T));
    return p;
  }
  template <typename T>
  T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
    T* n = maybe_pod_malloc<T>(newSize);
    if (MOZ_UNLIKELY(!n)) {
      return nullptr;
    }
    MOZ_ASSERT(!(oldSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value));
    memcpy(n, p, std::min(oldSize * sizeof(T), newSize * sizeof(T)));
    return n;
  }
  template <typename T>
  T* pod_malloc(size_t numElems) {
    return maybe_pod_malloc<T>(numElems);
  }
  template <typename T>
  T* pod_calloc(size_t numElems) {
    return maybe_pod_calloc<T>(numElems);
  }
  template <typename T>
  T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
    return maybe_pod_realloc<T>(p, oldSize, newSize);
  }
  template <typename T>
  void free_(T* p, size_t numElems) {}
  void reportAllocOverflow() const {}
  [[nodiscard]] bool checkSimulatedOOM() const {
    return fb == Infallible || !js::oom::ShouldFailWithOOM();
  }
};
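
// A minimal usage sketch (hypothetical caller code, assuming js::Vector from
// "js/Vector.h"): LifoAllocPolicy is typically used as the allocation policy
// of a js::Vector, so that the vector's storage is bump-allocated from a
// LifoAlloc instead of the malloc heap:
//
//   LifoAlloc lifo(4096);
//   js::Vector<int, 8, LifoAllocPolicy<Fallible>> vec(
//       LifoAllocPolicy<Fallible>(lifo));
//   if (!vec.append(1)) {
//     // handle OOM
//   }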

}  // namespace js

#endif /* ds_LifoAlloc_h */