/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef ds_LifoAlloc_h
#define ds_LifoAlloc_h

#include "mozilla/Attributes.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/MemoryChecking.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Move.h"
#include "mozilla/PodOperations.h"
#include "mozilla/TemplateLib.h"
#include "mozilla/TypeTraits.h"

// This data structure supports stacky LIFO allocation (mark/release and
// LifoAllocScope). It does not maintain one contiguous segment; instead, it
// maintains a bunch of linked memory segments. In order to prevent malloc/free
// thrashing, unused segments are deallocated when garbage collection occurs.
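//
// A minimal usage sketch (illustrative only; the chunk size below is made up
// for the example):
//
//   LifoAlloc lifo(4096);             // Chunks default to 4096 bytes.
//   LifoAlloc::Mark m = lifo.mark();  // Record the current position.
//   void* p = lifo.alloc(64);         // Bump-allocate 64 bytes.
//   if (p) {
//     // ... use |p| ...
//   }
//   lifo.release(m);                  // Unwind every allocation made since
//                                     // the mark, including |p|.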

#include "jsutil.h"

#include "js/UniquePtr.h"

namespace js {

namespace detail {

template <typename T>
class SingleLinkedList;

template <typename T>
class SingleLinkedListElement {
  friend class SingleLinkedList<T>;
  js::UniquePtr<T> next_;

 public:
  SingleLinkedListElement() : next_(nullptr) {}
  ~SingleLinkedListElement() { MOZ_ASSERT(!next_); }

  T* next() const { return next_.get(); }
};

// Singly linked list which uses UniquePtr to hold the next pointers. The
// UniquePtr ensures that none of the elements are used simultaneously in two
// different lists.
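//
// A sketch of how the list is meant to be used (illustrative only; |Node| is
// a hypothetical element type):
//
//   struct Node : public SingleLinkedListElement<Node> { int value; };
//
//   SingleLinkedList<Node> list;
//   list.append(js::MakeUnique<Node>());      // The list owns the element.
//   js::UniquePtr<Node> n = list.popFirst();  // Ownership moves back out.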
template <typename T>
class SingleLinkedList {
 private:
  // First element of the list, which owns the next element and thereby
  // ensures that this list is the only owner of its elements.
  UniquePtr<T> head_;

  // Weak pointer to the last element of the list.
  T* last_;

  void assertInvariants() {
    MOZ_ASSERT(bool(head_) == bool(last_));
    MOZ_ASSERT_IF(last_, !last_->next_);
  }

 public:
  SingleLinkedList() : head_(nullptr), last_(nullptr) { assertInvariants(); }

  SingleLinkedList(SingleLinkedList&& other)
      : head_(mozilla::Move(other.head_)), last_(other.last_) {
    other.last_ = nullptr;
    assertInvariants();
    other.assertInvariants();
  }

  ~SingleLinkedList() {
    MOZ_ASSERT(!head_);
    MOZ_ASSERT(!last_);
  }

  // Move the elements of the |other| list into the current one, implicitly
  // removing all the elements of the current list.
  SingleLinkedList& operator=(SingleLinkedList&& other) {
    head_ = mozilla::Move(other.head_);
    last_ = other.last_;
    other.last_ = nullptr;
    assertInvariants();
    other.assertInvariants();
    return *this;
  }

  bool empty() const { return !last_; }

  // Iterates over the elements of the list. This is used to avoid raw
  // manipulation of pointers as much as possible.
  class Iterator {
    T* current_;

   public:
    explicit Iterator(T* current) : current_(current) {}

    T& operator*() const { return *current_; }
    T* operator->() const { return current_; }
    T* get() const { return current_; }

    const Iterator& operator++() {
      current_ = current_->next();
      return *this;
    }

    bool operator!=(const Iterator& other) const {
      return current_ != other.current_;
    }
    bool operator==(const Iterator& other) const {
      return current_ == other.current_;
    }
  };

  Iterator begin() const { return Iterator(head_.get()); }
  Iterator end() const { return Iterator(nullptr); }
  Iterator last() const { return Iterator(last_); }

  // Split the list in two singly linked lists after the element given as
  // argument. The front of the list remains in the current list, while the
  // back goes into the newly created linked list.
  //
  // This is used, for example, to extract one element from a list. (see
  // LifoAlloc::getOrCreateChunk)
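  //
  // For example (illustrative only): if the list holds [A, B, C, D], then
  // splitAfter(B) leaves [A, B] in this list and returns the list [C, D].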
  SingleLinkedList splitAfter(T* newLast) {
    MOZ_ASSERT(newLast);
    SingleLinkedList result;
    if (newLast->next_) {
      result.head_ = mozilla::Move(newLast->next_);
      result.last_ = last_;
      last_ = newLast;
    }
    assertInvariants();
    result.assertInvariants();
    return result;
  }

  void pushFront(UniquePtr<T>&& elem) {
    if (!last_) last_ = elem.get();
    elem->next_ = mozilla::Move(head_);
    head_ = mozilla::Move(elem);
    assertInvariants();
  }

  void append(UniquePtr<T>&& elem) {
    if (last_) {
      last_->next_ = mozilla::Move(elem);
      last_ = last_->next_.get();
    } else {
      head_ = mozilla::Move(elem);
      last_ = head_.get();
    }
    assertInvariants();
  }
  void appendAll(SingleLinkedList&& list) {
    if (list.empty()) return;
    if (last_)
      last_->next_ = mozilla::Move(list.head_);
    else
      head_ = mozilla::Move(list.head_);
    last_ = list.last_;
    list.last_ = nullptr;
    assertInvariants();
    list.assertInvariants();
  }
  UniquePtr<T> popFirst() {
    MOZ_ASSERT(head_);
    UniquePtr<T> result = mozilla::Move(head_);
    head_ = mozilla::Move(result->next_);
    if (!head_) last_ = nullptr;
    assertInvariants();
    return result;
  }
};

static const size_t LIFO_ALLOC_ALIGN = 8;

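// Align a pointer up to the next LIFO_ALLOC_ALIGN boundary. For example
// (illustrative only), with LIFO_ALLOC_ALIGN == 8 an address of 13 is
// rounded up to 16, while an address of 16 is returned unchanged.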
MOZ_ALWAYS_INLINE
uint8_t* AlignPtr(uint8_t* orig) {
  static_assert(mozilla::tl::FloorLog2<LIFO_ALLOC_ALIGN>::value ==
                    mozilla::tl::CeilingLog2<LIFO_ALLOC_ALIGN>::value,
                "LIFO_ALLOC_ALIGN must be a power of two");

  uint8_t* result = (uint8_t*)AlignBytes(uintptr_t(orig), LIFO_ALLOC_ALIGN);
  MOZ_ASSERT(uintptr_t(result) % LIFO_ALLOC_ALIGN == 0);
  return result;
}

// A BumpChunk represents a single memory allocation made with the system
// allocator. As the owner of the memory, it is responsible for the allocation
// and the deallocation.
//
// This structure is only movable, not copyable.
class BumpChunk : public SingleLinkedListElement<BumpChunk> {
 private:
  // Pointer to one past the last byte allocated in this chunk.
  uint8_t* bump_;
  // Pointer to one past the last byte available in this chunk.
  const uint8_t* capacity_;

#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
  // Magic number used to check against poisoned values.
  const uintptr_t magic_;
  static constexpr uintptr_t magicNumber = sizeof(uintptr_t) == 4
                                               ? uintptr_t(0x4c69666f)
                                               : uintptr_t(0x4c69666f42756d70);
#endif

  // Poison the memory with memset, in order to catch errors due to
  // use-after-free (with the undefinedChunkMemory pattern) or due to
  // use-before-init (with the uninitializedChunkMemory pattern).
#if defined(DEBUG)
#define LIFO_HAVE_MEM_CHECKS 1

  // Byte used for poisoning unused memory after releasing memory.
  static constexpr int undefinedChunkMemory = 0xcd;
  // Byte used for poisoning uninitialized memory after reserving memory.
  static constexpr int uninitializedChunkMemory = 0xce;

#define LIFO_MAKE_MEM_NOACCESS(addr, size)  \
  do {                                      \
    uint8_t* base = (addr);                 \
    size_t sz = (size);                     \
    memset(base, undefinedChunkMemory, sz); \
    MOZ_MAKE_MEM_NOACCESS(base, sz);        \
  } while (0)

#define LIFO_MAKE_MEM_UNDEFINED(addr, size)     \
  do {                                          \
    uint8_t* base = (addr);                     \
    size_t sz = (size);                         \
    MOZ_MAKE_MEM_UNDEFINED(base, sz);           \
    memset(base, uninitializedChunkMemory, sz); \
    MOZ_MAKE_MEM_UNDEFINED(base, sz);           \
  } while (0)

#elif defined(MOZ_HAVE_MEM_CHECKS)
#define LIFO_HAVE_MEM_CHECKS 1
#define LIFO_MAKE_MEM_NOACCESS(addr, size) MOZ_MAKE_MEM_NOACCESS((addr), (size))
#define LIFO_MAKE_MEM_UNDEFINED(addr, size) \
  MOZ_MAKE_MEM_UNDEFINED((addr), (size))
#endif

  void assertInvariants() {
    MOZ_DIAGNOSTIC_ASSERT(magic_ == magicNumber);
    MOZ_ASSERT(begin() <= end());
    MOZ_ASSERT(end() <= capacity_);
  }

  BumpChunk& operator=(const BumpChunk&) = delete;
  BumpChunk(const BumpChunk&) = delete;

  explicit BumpChunk(uintptr_t capacity)
      : bump_(begin()),
        capacity_(base() + capacity)
#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
        ,
        magic_(magicNumber)
#endif
  {
    // We cannot bake this value inside the BumpChunk class, because
    // sizeof(BumpChunk) can only be computed after the closing brace of the
    // BumpChunk class, or within one of its methods. As a work-around, the
    // reservedSpace value is baked in, and we check that it indeed matches
    // the space taken by the data of the BumpChunk class, rounded up to the
    // LIFO allocation alignment.
    MOZ_ASSERT(BumpChunk::reservedSpace ==
                   AlignBytes(sizeof(BumpChunk), LIFO_ALLOC_ALIGN),
               "Check that the baked-in value corresponds to the computed value");

    assertInvariants();
#if defined(LIFO_HAVE_MEM_CHECKS)
    // The memory is freshly allocated and marked as undefined by the
    // allocator of the BumpChunk. Instead, we mark this memory as
    // no-access, as it has not yet been allocated within the BumpChunk.
    LIFO_MAKE_MEM_NOACCESS(bump_, capacity_ - bump_);
#endif
  }

  // Cast |this| into a uint8_t* pointer.
  //
  // Warning: Are you sure you do not want to use begin() instead?
  const uint8_t* base() const { return reinterpret_cast<const uint8_t*>(this); }
  uint8_t* base() { return reinterpret_cast<uint8_t*>(this); }

  // Update the bump pointer to any value contained in this chunk, which is
  // above the private fields of this chunk.
  //
  // The memory is poisoned and flagged as no-access when the bump pointer is
  // moving backward, such as when memory is released, or when a Mark is used
  // to unwind previous allocations.
  //
  // The memory is flagged as undefined when the bump pointer is moving
  // forward.
  void setBump(uint8_t* newBump) {
    assertInvariants();
    MOZ_ASSERT(begin() <= newBump);
    MOZ_ASSERT(newBump <= capacity_);
#if defined(LIFO_HAVE_MEM_CHECKS)
    // Poison/Unpoison memory that we just free'd/allocated.
    if (bump_ > newBump)
      LIFO_MAKE_MEM_NOACCESS(newBump, bump_ - newBump);
    else if (newBump > bump_)
      LIFO_MAKE_MEM_UNDEFINED(bump_, newBump - bump_);
#endif
    bump_ = newBump;
  }

 public:
  ~BumpChunk() { release(); }

  // Space reserved for the BumpChunk internal data, and the alignment of the
  // first allocation content. This can be used to ensure there is enough
  // space for the next allocation. (see LifoAlloc::newChunkWithCapacity)
  static constexpr size_t reservedSpace = 4 * sizeof(uintptr_t);

  // Returns true if this chunk contains no allocated content.
  bool empty() const { return end() == begin(); }

  // Returns the number of bytes allocated in this chunk. This includes the
  // bytes consumed by the alignment of the allocations.
  size_t used() const { return end() - begin(); }

  // These are used for manipulating a chunk as if it was a vector of bytes,
  // and for iterating over the content of the buffer (see LifoAlloc::Enum).
  const uint8_t* begin() const { return base() + reservedSpace; }
  uint8_t* begin() { return base() + reservedSpace; }
  uint8_t* end() const { return bump_; }

  // This function is the only way to allocate and construct a chunk. It
  // returns a UniquePtr to the newly allocated chunk. The size given as
  // argument includes the space needed for the header of the chunk.
  static UniquePtr<BumpChunk> newWithCapacity(size_t size);

  // Report allocation.
  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return mallocSizeOf(this);
  }

  // Report allocation size.
  size_t computedSizeOfIncludingThis() const { return capacity_ - base(); }

  // Opaque type used to carry the location of the bump_ pointer within a
  // BumpChunk.
  class Mark {
    // Chunk which owns the pointer.
    BumpChunk* chunk_;
    // Recorded value of the bump_ pointer of the BumpChunk.
    uint8_t* bump_;

    friend class BumpChunk;
    explicit Mark(BumpChunk* chunk, uint8_t* bump)
        : chunk_(chunk), bump_(bump) {}

   public:
    Mark() : chunk_(nullptr), bump_(nullptr) {}

    BumpChunk* markedChunk() const { return chunk_; }
  };

  // Return a mark which can be used to unwind future allocations with the
  // release function. (see LifoAllocScope)
  Mark mark() { return Mark(this, end()); }

  // Check if a pointer is part of the allocated data of this chunk.
  bool contains(void* ptr) const {
    // Note: We cannot check "ptr < end()" because a mark denotes a zero-size
    // region, which may sit exactly at end().
    return begin() <= ptr && ptr <= end();
  }

  // Check if a mark is part of the allocated data of this chunk.
  bool contains(Mark m) const {
    MOZ_ASSERT(m.chunk_ == this);
    return contains(m.bump_);
  }

  // Release the memory allocated in this chunk. This function does not call
  // any of the destructors.
  void release() { setBump(begin()); }

  // Release the memory allocated in this chunk since the corresponding mark
  // was created. This function does not call any of the destructors.
  void release(Mark m) {
    MOZ_RELEASE_ASSERT(contains(m));
    setBump(m.bump_);
  }

  // Returns true if the unused space is large enough for an allocation of
  // |n| bytes.
  bool canAlloc(size_t n);

  // Space remaining in the current chunk.
  size_t unused() const {
    uint8_t* aligned = AlignPtr(end());
    if (aligned < capacity_) return capacity_ - aligned;
    return 0;
  }

  // Try to perform an allocation of size |n|; returns nullptr if not possible.
  MOZ_ALWAYS_INLINE
  void* tryAlloc(size_t n) {
    uint8_t* aligned = AlignPtr(end());
    uint8_t* newBump = aligned + n;

    if (newBump > capacity_) return nullptr;

    // Check for overflow.
    if (MOZ_UNLIKELY(newBump < bump_)) return nullptr;

    MOZ_ASSERT(canAlloc(n));  // Ensure consistency between "can" and "try".
    setBump(newBump);
    return aligned;
  }
};

}  // namespace detail

// LIFO bump allocator: used for phase-oriented and fast LIFO allocations.
//
// Note: We leave BumpChunks latent in the set of unused chunks after they've
// been released to avoid thrashing before a GC.
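//
// A sketch of typical use (illustrative only; the chunk size and element
// count are made up for the example):
//
//   LifoAlloc lifo(1024);                        // 1 KiB default chunks.
//   int* n = lifo.new_<int>(42);                 // Allocate and construct.
//   uint8_t* buf = lifo.newArray<uint8_t>(100);  // Allocate a POD array.
//   if (!n || !buf) return;                      // Both calls are fallible.
//   lifo.freeAll();                              // No destructors are run.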
class LifoAlloc {
  using BumpChunk = js::UniquePtr<detail::BumpChunk>;
  using BumpChunkList = detail::SingleLinkedList<detail::BumpChunk>;

  // List of chunks containing allocated data. In the common case, the last
  // chunk of this list is always used to perform the allocations. When an
  // allocation cannot be performed, we move a chunk from the unused set to
  // the list of used chunks.
  BumpChunkList chunks_;

  // Set of unused chunks, which can be reused for future allocations.
  BumpChunkList unused_;

  size_t markCount;
  size_t defaultChunkSize_;
  size_t curSize_;
  size_t peakSize_;
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
  bool fallibleScope_;
#endif

  void operator=(const LifoAlloc&) = delete;
  LifoAlloc(const LifoAlloc&) = delete;

  // Return a BumpChunk that can perform an allocation of at least size |n|.
  BumpChunk newChunkWithCapacity(size_t n);

  // Reuse or allocate a BumpChunk that can perform an allocation of at least
  // size |n|; if successful it is placed at the end of the list of |chunks_|.
  MOZ_MUST_USE bool getOrCreateChunk(size_t n);

  void reset(size_t defaultChunkSize) {
    MOZ_ASSERT(mozilla::RoundUpPow2(defaultChunkSize) == defaultChunkSize);
    while (!chunks_.empty()) chunks_.popFirst();
    while (!unused_.empty()) unused_.popFirst();
    defaultChunkSize_ = defaultChunkSize;
    markCount = 0;
    curSize_ = 0;
  }

  // Append unused chunks to the end of this LifoAlloc.
  void appendUnused(BumpChunkList&& otherUnused) {
#ifdef DEBUG
    for (detail::BumpChunk& bc : otherUnused) MOZ_ASSERT(bc.empty());
#endif
    unused_.appendAll(mozilla::Move(otherUnused));
  }

  // Append used chunks to the end of this LifoAlloc. We act as if all the
  // chunks in |this| are used, even if they're not, so memory may be wasted.
  void appendUsed(BumpChunkList&& otherChunks) {
    chunks_.appendAll(mozilla::Move(otherChunks));
  }

  // Track the amount of space allocated in used and unused chunks.
  void incrementCurSize(size_t size) {
    curSize_ += size;
    if (curSize_ > peakSize_) peakSize_ = curSize_;
  }
  void decrementCurSize(size_t size) {
    MOZ_ASSERT(curSize_ >= size);
    curSize_ -= size;
  }

  MOZ_ALWAYS_INLINE
  void* allocImpl(size_t n) {
    void* result;
    if (!chunks_.empty() && (result = chunks_.last()->tryAlloc(n)))
      return result;

    if (!getOrCreateChunk(n)) return nullptr;

    // Since we just created a large enough chunk, this can't fail.
    result = chunks_.last()->tryAlloc(n);
    MOZ_ASSERT(result);
    return result;
  }

 public:
  explicit LifoAlloc(size_t defaultChunkSize)
      : peakSize_(0)
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
        ,
        fallibleScope_(true)
#endif
  {
    reset(defaultChunkSize);
  }

  // Steal allocated chunks from |other|.
  void steal(LifoAlloc* other) {
    MOZ_ASSERT(!other->markCount);
    MOZ_ASSERT(chunks_.empty());

    // Copy everything from |other| to |this| except for |peakSize_|, which
    // requires some care.
    chunks_ = mozilla::Move(other->chunks_);
    unused_ = mozilla::Move(other->unused_);
    markCount = other->markCount;
    defaultChunkSize_ = other->defaultChunkSize_;
    curSize_ = other->curSize_;
    peakSize_ = Max(peakSize_, other->peakSize_);
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    fallibleScope_ = other->fallibleScope_;
#endif

    other->reset(defaultChunkSize_);
  }

  // Append all chunks from |other|. They are removed from |other|.
  void transferFrom(LifoAlloc* other);

  // Append unused chunks from |other|. They are removed from |other|.
  void transferUnusedFrom(LifoAlloc* other);

  ~LifoAlloc() { freeAll(); }

  size_t defaultChunkSize() const { return defaultChunkSize_; }

  // Frees all held memory.
  void freeAll();

  static const unsigned HUGE_ALLOCATION = 50 * 1024 * 1024;
  void freeAllIfHugeAndUnused() {
    if (markCount == 0 && curSize_ > HUGE_ALLOCATION) freeAll();
  }

  MOZ_ALWAYS_INLINE
  void* alloc(size_t n) {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    // Only simulate OOMs when we are not using the LifoAlloc as an
    // infallible allocator.
    if (fallibleScope_) JS_OOM_POSSIBLY_FAIL();
#endif
    return allocImpl(n);
  }

  MOZ_ALWAYS_INLINE
  void* allocInfallible(size_t n) {
    AutoEnterOOMUnsafeRegion oomUnsafe;
    if (void* result = allocImpl(n)) return result;
    oomUnsafe.crash("LifoAlloc::allocInfallible");
    return nullptr;
  }

  // Ensures that enough space exists to satisfy N bytes worth of
  // allocation requests, not necessarily contiguous. Note that this does
  // not guarantee a successful single allocation of N bytes.
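  //
  // For example (illustrative only): after a successful call to
  // ensureUnusedApproximate(512), several small allocations adding up to
  // roughly 512 bytes can be satisfied without hitting the system allocator,
  // but a single alloc(512) may still need a new chunk.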
  MOZ_ALWAYS_INLINE
  MOZ_MUST_USE bool ensureUnusedApproximate(size_t n) {
    AutoFallibleScope fallibleAllocator(this);
    size_t total = 0;
    if (!chunks_.empty()) {
      total += chunks_.last()->unused();
      if (total >= n) return true;
    }

    for (detail::BumpChunk& bc : unused_) {
      total += bc.unused();
      if (total >= n) return true;
    }

    BumpChunk newChunk = newChunkWithCapacity(n);
    if (!newChunk) return false;
    size_t size = newChunk->computedSizeOfIncludingThis();
    unused_.pushFront(mozilla::Move(newChunk));
    incrementCurSize(size);
    return true;
  }

  MOZ_ALWAYS_INLINE
  void setAsInfallibleByDefault() {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    fallibleScope_ = false;
#endif
  }

  class MOZ_NON_TEMPORARY_CLASS AutoFallibleScope {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    LifoAlloc* lifoAlloc_;
    bool prevFallibleScope_;
    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER

   public:
    explicit AutoFallibleScope(
        LifoAlloc* lifoAlloc MOZ_GUARD_OBJECT_NOTIFIER_PARAM) {
      MOZ_GUARD_OBJECT_NOTIFIER_INIT;
      lifoAlloc_ = lifoAlloc;
      prevFallibleScope_ = lifoAlloc->fallibleScope_;
      lifoAlloc->fallibleScope_ = true;
    }

    ~AutoFallibleScope() { lifoAlloc_->fallibleScope_ = prevFallibleScope_; }
#else
   public:
    explicit AutoFallibleScope(LifoAlloc*) {}
#endif
  };

  template <typename T>
  T* newArray(size_t count) {
    static_assert(mozilla::IsPod<T>::value,
                  "T must be POD so that constructors (and destructors, "
                  "when the LifoAlloc is freed) need not be called");
    return newArrayUninitialized<T>(count);
  }

  // Create an array with uninitialized elements of type |T|.
  // The caller is responsible for initialization.
  template <typename T>
  T* newArrayUninitialized(size_t count) {
    size_t bytes;
    if (MOZ_UNLIKELY(!CalculateAllocSize<T>(count, &bytes))) return nullptr;
    return static_cast<T*>(alloc(bytes));
  }

  using Mark = detail::BumpChunk::Mark;

  Mark mark() {
    markCount++;
    if (chunks_.empty()) return Mark();
    return chunks_.last()->mark();
  }

  void release(Mark mark) {
    markCount--;

    // Move the blocks which are after the mark to the set of unused chunks.
    BumpChunkList released;
    if (!mark.markedChunk())
      released = mozilla::Move(chunks_);
    else
      released = mozilla::Move(chunks_.splitAfter(mark.markedChunk()));

    // Release the content of all the blocks which are after the mark.
    for (detail::BumpChunk& bc : released) bc.release();
    unused_.appendAll(mozilla::Move(released));

    // Release everything which follows the mark in the last chunk.
    if (!chunks_.empty()) chunks_.last()->release(mark);
  }

  void releaseAll() {
    MOZ_ASSERT(!markCount);
    for (detail::BumpChunk& bc : chunks_) bc.release();
    unused_.appendAll(mozilla::Move(chunks_));
  }

  // Get the total "used" (occupied bytes) count for the arena chunks.
  size_t used() const {
    size_t accum = 0;
    for (const detail::BumpChunk& chunk : chunks_) accum += chunk.used();
    return accum;
  }

  // Return true if the LifoAlloc does not currently contain any allocations.
  bool isEmpty() const { return chunks_.empty() || chunks_.last()->empty(); }

  // Return the number of bytes remaining to allocate in the current chunk,
  // i.e. how many bytes we can allocate before needing a new block.
  size_t availableInCurrentChunk() const {
    if (chunks_.empty()) return 0;
    return chunks_.last()->unused();
  }

  // Get the total size of the arena chunks (including unused space).
  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    size_t n = 0;
    for (const detail::BumpChunk& chunk : chunks_)
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    for (const detail::BumpChunk& chunk : unused_)
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    return n;
  }

  // Get the total size of the arena chunks (including unused space).
  size_t computedSizeOfExcludingThis() const {
    size_t n = 0;
    for (const detail::BumpChunk& chunk : chunks_)
      n += chunk.computedSizeOfIncludingThis();
    for (const detail::BumpChunk& chunk : unused_)
      n += chunk.computedSizeOfIncludingThis();
    return n;
  }

  // Like sizeOfExcludingThis(), but includes the size of the LifoAlloc itself.
  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
  }

  // Get the peak size of the arena chunks (including unused space and
  // bookkeeping space).
  size_t peakSizeOfExcludingThis() const { return peakSize_; }

  // Doesn't perform construction; useful for lazily-initialized POD types.
  template <typename T>
  MOZ_ALWAYS_INLINE T* pod_malloc() {
    return static_cast<T*>(alloc(sizeof(T)));
  }

  JS_DECLARE_NEW_METHODS(new_, alloc, MOZ_ALWAYS_INLINE)
  JS_DECLARE_NEW_METHODS(newInfallible, allocInfallible, MOZ_ALWAYS_INLINE)

#ifdef DEBUG
  bool contains(void* ptr) const {
    for (const detail::BumpChunk& chunk : chunks_) {
      if (chunk.contains(ptr)) return true;
    }
    return false;
  }
#endif

  // Iterate over the data allocated in a LifoAlloc, and interpret it.
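  //
  // The enumerator assumes the reader knows the layout of the data. For
  // example (illustrative only), reading back values which were each
  // allocated with new_<uint32_t>(...):
  //
  //   LifoAlloc::Enum e(lifo);
  //   while (!e.empty()) {
  //     uint32_t* v = e.read<uint32_t>();
  //     // ... interpret |v| ...
  //   }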
  class Enum {
    friend class LifoAlloc;
    friend class detail::BumpChunk;

    // Iterator over the list of bump chunks.
    BumpChunkList::Iterator chunkIt_;
    BumpChunkList::Iterator chunkEnd_;
    // Read head (must be within chunk_).
    uint8_t* head_;

    // If there is not enough room in the remaining block for |size|,
    // advance to the next block and update the position.
    uint8_t* seekBaseAndAdvanceBy(size_t size) {
      MOZ_ASSERT(!empty());

      uint8_t* aligned = detail::AlignPtr(head_);
      if (aligned + size > chunkIt_->end()) {
        ++chunkIt_;
        aligned = chunkIt_->begin();
        // The current code assumes that if we have a chunk, then we
        // have allocated something in it.
        MOZ_ASSERT(!chunkIt_->empty());
      }
      head_ = aligned + size;
      MOZ_ASSERT(head_ <= chunkIt_->end());
      return aligned;
    }

   public:
    explicit Enum(LifoAlloc& alloc)
        : chunkIt_(alloc.chunks_.begin()),
          chunkEnd_(alloc.chunks_.end()),
          head_(nullptr) {
      if (chunkIt_ != chunkEnd_) head_ = chunkIt_->begin();
    }

    // Return true if there are no more bytes to enumerate.
    bool empty() {
      return chunkIt_ == chunkEnd_ ||
             (chunkIt_->next() == chunkEnd_.get() && head_ >= chunkIt_->end());
    }

    // Move the read position forward by the size of one T.
    template <typename T>
    T* read(size_t size = sizeof(T)) {
      return reinterpret_cast<T*>(read(size));
    }

    // Return a pointer to the item at the current position. This returns a
    // pointer to the inline storage, not a copy, and moves the read head by
    // the requested |size|.
    void* read(size_t size) { return seekBaseAndAdvanceBy(size); }
  };
};

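// RAII class which records a mark on construction, and releases every
// allocation made during its lifetime on destruction (unless releaseEarly()
// was called). For example (illustrative only):
//
//   {
//     LifoAllocScope scope(&lifo);
//     void* scratch = scope.alloc().alloc(128);  // Temporary storage.
//     // ... use |scratch| ...
//   }  // |scratch| is released here.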
class MOZ_NON_TEMPORARY_CLASS LifoAllocScope {
  LifoAlloc* lifoAlloc;
  LifoAlloc::Mark mark;
  LifoAlloc::AutoFallibleScope fallibleScope;
  bool shouldRelease;
  MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER

 public:
  explicit LifoAllocScope(LifoAlloc* lifoAlloc MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : lifoAlloc(lifoAlloc),
        mark(lifoAlloc->mark()),
        fallibleScope(lifoAlloc),
        shouldRelease(true) {
    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
  }

  ~LifoAllocScope() {
    if (shouldRelease) lifoAlloc->release(mark);
  }

  LifoAlloc& alloc() { return *lifoAlloc; }

  void releaseEarly() {
    MOZ_ASSERT(shouldRelease);
    lifoAlloc->release(mark);
    shouldRelease = false;
  }
};

enum Fallibility { Fallible, Infallible };

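// Allocation policy which lets standard containers, such as js::Vector,
// allocate their storage from a LifoAlloc. A sketch (illustrative only; the
// element type and inline capacity are made up for the example):
//
//   Vector<int, 8, LifoAllocPolicy<Fallible>> vec(
//       LifoAllocPolicy<Fallible>(lifo));
//   if (!vec.append(7)) return false;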
template <Fallibility fb>
class LifoAllocPolicy {
  LifoAlloc& alloc_;

 public:
  MOZ_IMPLICIT LifoAllocPolicy(LifoAlloc& alloc) : alloc_(alloc) {}
  template <typename T>
  T* maybe_pod_malloc(size_t numElems) {
    size_t bytes;
    if (MOZ_UNLIKELY(!CalculateAllocSize<T>(numElems, &bytes))) return nullptr;
    void* p =
        fb == Fallible ? alloc_.alloc(bytes) : alloc_.allocInfallible(bytes);
    return static_cast<T*>(p);
  }
  template <typename T>
  T* maybe_pod_calloc(size_t numElems) {
    T* p = maybe_pod_malloc<T>(numElems);
    if (MOZ_UNLIKELY(!p)) return nullptr;
    memset(p, 0, numElems * sizeof(T));
    return p;
  }
  template <typename T>
  T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
    T* n = maybe_pod_malloc<T>(newSize);
    if (MOZ_UNLIKELY(!n)) return nullptr;
    MOZ_ASSERT(!(oldSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value));
    memcpy(n, p, Min(oldSize * sizeof(T), newSize * sizeof(T)));
    return n;
  }
  template <typename T>
  T* pod_malloc(size_t numElems) {
    return maybe_pod_malloc<T>(numElems);
  }
  template <typename T>
  T* pod_calloc(size_t numElems) {
    return maybe_pod_calloc<T>(numElems);
  }
  template <typename T>
  T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
    return maybe_pod_realloc<T>(p, oldSize, newSize);
  }
  void free_(void* p) {}
  void reportAllocOverflow() const {}
  MOZ_MUST_USE bool checkSimulatedOOM() const {
    return fb == Infallible || !js::oom::ShouldFailWithOOM();
  }
};

}  // namespace js

#endif /* ds_LifoAlloc_h */