1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_HEAP_SPACES_H_
6 #define V8_HEAP_SPACES_H_
7 
8 #include <atomic>
9 #include <list>
10 #include <map>
11 #include <memory>
12 #include <unordered_map>
13 #include <unordered_set>
14 #include <vector>
15 
16 #include "src/base/atomic-utils.h"
17 #include "src/base/bounded-page-allocator.h"
18 #include "src/base/export-template.h"
19 #include "src/base/iterator.h"
20 #include "src/base/list.h"
21 #include "src/base/macros.h"
22 #include "src/base/platform/mutex.h"
23 #include "src/common/globals.h"
24 #include "src/flags/flags.h"
25 #include "src/heap/basic-memory-chunk.h"
26 #include "src/heap/heap.h"
27 #include "src/heap/invalidated-slots.h"
28 #include "src/heap/marking.h"
29 #include "src/heap/slot-set.h"
30 #include "src/objects/free-space.h"
31 #include "src/objects/heap-object.h"
32 #include "src/objects/map.h"
33 #include "src/objects/objects.h"
34 #include "src/tasks/cancelable-task.h"
35 #include "src/utils/allocation.h"
36 #include "src/utils/utils.h"
37 #include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck
38 
39 namespace v8 {
40 namespace internal {
41 
42 namespace heap {
43 class HeapTester;
44 class TestCodePageAllocatorScope;
45 }  // namespace heap
46 
47 class AllocationObserver;
48 class CompactionSpace;
49 class CompactionSpaceCollection;
50 class FreeList;
51 class Isolate;
52 class LargeObjectSpace;
53 class LinearAllocationArea;
54 class LocalArrayBufferTracker;
55 class LocalSpace;
56 class MemoryAllocator;
57 class MemoryChunk;
58 class MemoryChunkLayout;
59 class OffThreadSpace;
60 class Page;
61 class PagedSpace;
62 class SemiSpace;
63 class SlotsBuffer;
64 class SlotSet;
65 class TypedSlotSet;
66 class Space;
67 
68 // -----------------------------------------------------------------------------
69 // Heap structures:
70 //
71 // A JS heap consists of a young generation, an old generation, and a large
72 // object space. The young generation is divided into two semispaces. A
73 // scavenger implements Cheney's copying algorithm. The old generation is
74 // separated into a map space and an old object space. The map space contains
75 // all (and only) map objects, the rest of old objects go into the old space.
76 // The old generation is collected by a mark-sweep-compact collector.
77 //
78 // The semispaces of the young generation are contiguous.  The old and map
79 // spaces consist of a list of pages. A page has a page header and an object
80 // area.
81 //
82 // There is a separate large object space for objects larger than
83 // kMaxRegularHeapObjectSize, so that they do not have to move during
84 // collection. The large object space is paged. Pages in large object space
85 // may be larger than the page size.
86 //
87 // A store-buffer based write barrier is used to keep track of intergenerational
88 // references.  See heap/store-buffer.h.
89 //
90 // During scavenges and mark-sweep collections we sometimes (after a store
91 // buffer overflow) iterate intergenerational pointers without decoding heap
92 // object maps, so if the page belongs to old space or large object space
93 // it is essential to guarantee that the page does not contain any
94 // garbage pointers to new space: every pointer aligned word which satisfies
95 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
96 // new space. Thus objects in old space and large object spaces should have a
97 // special layout (e.g. no bare integer fields). This requirement does not
98 // apply to map space which is iterated in a special fashion. However we still
99 // require pointer fields of dead maps to be cleaned.
100 //
101 // To enable lazy cleaning of old space pages we can mark chunks of the page
102 // as being garbage.  Garbage sections are marked with a special map.  These
103 // sections are skipped when scanning the page, even if we are otherwise
104 // scanning without regard for object boundaries.  Garbage sections are chained
105 // together to form a free list after a GC.  Garbage sections created outside
106 // of GCs by object truncation etc. may not be in the free list chain.  Very
107 // small free spaces are ignored, they need only be cleaned of bogus pointers
108 // into new space.
109 //
110 // Each page may have up to one special garbage section.  The start of this
111 // section is denoted by the top field in the space.  The end of the section
112 // is denoted by the limit field in the space.  This special garbage section
113 // is not marked with a free space map in the data.  The point of this section
114 // is to enable linear allocation without having to constantly update the byte
115 // array every time the top field is updated and a new object is created.  The
116 // special garbage section is not in the chain of garbage sections.
117 //
118 // Since the top and limit fields are in the space, not the page, only one page
119 // has a special garbage section, and if the top and limit are equal then there
120 // is no special garbage section.
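//
// As an illustrative sketch (not the actual allocation fast path declared in
// this file), linear allocation in the special garbage section amounts to
// bumping the top pointer towards the limit; |TryBump| is a hypothetical
// helper used only for exposition:
//
//   Address TryBump(Address* top, Address limit, size_t size_in_bytes) {
//     if (*top + size_in_bytes > limit) return kNullAddress;  // Need new page.
//     Address result = *top;
//     *top += size_in_bytes;
//     return result;
//   }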
121 
122 // Some assertion macros used in debug mode.
123 
124 #define DCHECK_OBJECT_SIZE(size) \
125   DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
126 
127 #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
128   DCHECK((0 < size) && (size <= code_space->AreaSize()))
129 
130 using FreeListCategoryType = int32_t;
131 
132 static const FreeListCategoryType kFirstCategory = 0;
133 static const FreeListCategoryType kInvalidCategory = -1;
134 
135 enum FreeMode { kLinkCategory, kDoNotLinkCategory };
136 
137 enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
138 
139 // A free list category maintains a linked list of free memory blocks.
140 class FreeListCategory {
141  public:
142   void Initialize(FreeListCategoryType type) {
143     type_ = type;
144     available_ = 0;
145     prev_ = nullptr;
146     next_ = nullptr;
147   }
148 
149   void Reset(FreeList* owner);
150 
151   void RepairFreeList(Heap* heap);
152 
153   // Relinks the category into the currently owning free list. Requires that the
154   // category is currently unlinked.
155   void Relink(FreeList* owner);
156 
157   void Free(Address address, size_t size_in_bytes, FreeMode mode,
158             FreeList* owner);
159 
160   // Performs a single try to pick a node of at least |minimum_size| from the
161   // category. Stores the actual size in |node_size|. Returns nullptr if no
162   // node is found.
163   FreeSpace PickNodeFromList(size_t minimum_size, size_t* node_size);
164 
165   // Picks a node of at least |minimum_size| from the category. Stores the
166   // actual size in |node_size|. Returns nullptr if no node is found.
167   FreeSpace SearchForNodeInList(size_t minimum_size, size_t* node_size);
168 
169   inline bool is_linked(FreeList* owner) const;
170   bool is_empty() { return top().is_null(); }
171   uint32_t available() const { return available_; }
172 
173   size_t SumFreeList();
174   int FreeListLength();
175 
176  private:
177   // For debug builds we accurately compute free list lengths up until
178   // {kVeryLongFreeList} by manually walking the list.
179   static const int kVeryLongFreeList = 500;
180 
181   // Updates |available_|, |length_| and free_list_->Available() after an
182   // allocation of size |allocation_size|.
183   inline void UpdateCountersAfterAllocation(size_t allocation_size);
184 
185   FreeSpace top() { return top_; }
186   void set_top(FreeSpace top) { top_ = top; }
187   FreeListCategory* prev() { return prev_; }
188   void set_prev(FreeListCategory* prev) { prev_ = prev; }
189   FreeListCategory* next() { return next_; }
190   void set_next(FreeListCategory* next) { next_ = next; }
191 
192   // |type_|: The type of this free list category.
193   FreeListCategoryType type_ = kInvalidCategory;
194 
195   // |available_|: Total available bytes in all blocks of this free list
196   // category.
197   uint32_t available_ = 0;
198 
199   // |top_|: Points to the top FreeSpace in the free list category.
200   FreeSpace top_;
201 
202   FreeListCategory* prev_ = nullptr;
203   FreeListCategory* next_ = nullptr;
204 
205   friend class FreeList;
206   friend class FreeListManyCached;
207   friend class PagedSpace;
208   friend class MapSpace;
209 };
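
// Illustrative sketch of what FreeListCategory::SumFreeList() computes: inside
// the class, walk the chain of FreeSpace nodes starting at |top_| and add up
// their sizes. This assumes the FreeSpace accessors next() and size() declared
// in src/objects/free-space.h.
//
//   size_t sum = 0;
//   for (FreeSpace cur = top(); !cur.is_null(); cur = cur.next()) {
//     sum += cur.size();
//   }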
210 
211 // A free list maintains free blocks of memory. The free list is organized in
212 // a way to encourage objects allocated around the same time to be near each
213 // other. The normal way to allocate is intended to be by bumping a 'top'
214 // pointer until it hits a 'limit' pointer.  When the limit is hit we need to
215 // find a new space to allocate from. This is done with the free list, which is
216 // divided up into rough categories to cut down on waste. Having finer
217 // categories would scatter allocation more.
218 class FreeList {
219  public:
220   // Creates a FreeList of the default class (FreeListLegacy for now).
221   V8_EXPORT_PRIVATE static FreeList* CreateFreeList();
222 
223   virtual ~FreeList() = default;
224 
225   // Returns how much memory can be allocated after freeing maximum_freed
226   // memory.
227   virtual size_t GuaranteedAllocatable(size_t maximum_freed) = 0;
228 
229   // Adds a node on the free list. The block of size {size_in_bytes} starting
230   // at {start} is placed on the free list. The return value is the number of
231   // bytes that were not added to the free list, because the freed memory block
232   // was too small. Bookkeeping information will be written to the block, i.e.,
233   // its contents will be destroyed. The start address should be word aligned,
234   // and the size should be a non-zero multiple of the word size.
235   virtual size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
236 
237   // Allocates a free space node from the free list of at least size_in_bytes
238   // bytes. Returns the actual node size in node_size which can be bigger than
239   // size_in_bytes. This method returns null if the allocation request cannot be
240   // handled by the free list.
241   virtual V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
242                                                    size_t* node_size,
243                                                    AllocationOrigin origin) = 0;
244 
245   // Returns a page containing an entry for a given type, or nullptr otherwise.
246   V8_EXPORT_PRIVATE virtual Page* GetPageForSize(size_t size_in_bytes) = 0;
247 
248   virtual void Reset();
249 
250   // Return the number of bytes available on the free list.
251   size_t Available() {
252     DCHECK(available_ == SumFreeLists());
253     return available_;
254   }
255 
256   // Updates the number of available bytes on the free lists.
257   void IncreaseAvailableBytes(size_t bytes) { available_ += bytes; }
258   void DecreaseAvailableBytes(size_t bytes) { available_ -= bytes; }
259 
260   bool IsEmpty() {
261     bool empty = true;
262     ForAllFreeListCategories([&empty](FreeListCategory* category) {
263       if (!category->is_empty()) empty = false;
264     });
265     return empty;
266   }
267 
268   // Used after booting the VM.
269   void RepairLists(Heap* heap);
270 
271   V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
272 
273   int number_of_categories() { return number_of_categories_; }
274   FreeListCategoryType last_category() { return last_category_; }
275 
276   size_t wasted_bytes() { return wasted_bytes_; }
277 
278   template <typename Callback>
279   void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
280     FreeListCategory* current = categories_[type];
281     while (current != nullptr) {
282       FreeListCategory* next = current->next();
283       callback(current);
284       current = next;
285     }
286   }
287 
288   template <typename Callback>
289   void ForAllFreeListCategories(Callback callback) {
290     for (int i = kFirstCategory; i < number_of_categories(); i++) {
291       ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
292     }
293   }
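
  // Illustrative usage of the helpers above, mirroring IsEmpty(): summing the
  // bytes every category reports via a lambda (|free_list| is an assumed name
  // for a FreeList*):
  //
  //   size_t total = 0;
  //   free_list->ForAllFreeListCategories(
  //       [&total](FreeListCategory* category) { total += category->available(); });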
294 
295   virtual bool AddCategory(FreeListCategory* category);
296   virtual V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
297   void PrintCategories(FreeListCategoryType type);
298 
299  protected:
300   class FreeListCategoryIterator final {
301    public:
302     FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
303         : current_(free_list->categories_[type]) {}
304 
305     bool HasNext() const { return current_ != nullptr; }
306 
307     FreeListCategory* Next() {
308       DCHECK(HasNext());
309       FreeListCategory* tmp = current_;
310       current_ = current_->next();
311       return tmp;
312     }
313 
314    private:
315     FreeListCategory* current_;
316   };
317 
318 #ifdef DEBUG
319   V8_EXPORT_PRIVATE size_t SumFreeLists();
320   bool IsVeryLong();
321 #endif
322 
323   // Tries to retrieve a node from the first category in a given |type|.
324   // Returns nullptr if the category is empty or the top entry is smaller
325   // than minimum_size.
326   FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
327                           size_t* node_size);
328 
329   // Searches a given |type| for a node of at least |minimum_size|.
330   FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t minimum_size,
331                                 size_t* node_size);
332 
333   // Returns the smallest category in which an object of |size_in_bytes| could
334   // fit.
335   virtual FreeListCategoryType SelectFreeListCategoryType(
336       size_t size_in_bytes) = 0;
337 
338   FreeListCategory* top(FreeListCategoryType type) const {
339     return categories_[type];
340   }
341 
342   inline Page* GetPageForCategoryType(FreeListCategoryType type);
343 
344   int number_of_categories_ = 0;
345   FreeListCategoryType last_category_ = 0;
346   size_t min_block_size_ = 0;
347 
348   std::atomic<size_t> wasted_bytes_{0};
349   FreeListCategory** categories_ = nullptr;
350 
351   // |available_|: The number of bytes in this freelist.
352   size_t available_ = 0;
353 
354   friend class FreeListCategory;
355   friend class Page;
356   friend class MemoryChunk;
357   friend class ReadOnlyPage;
358   friend class MapSpace;
359 };
360 
361 // FreeList used for spaces that don't have freelists
362 // (only the LargeObject space for now).
363 class NoFreeList final : public FreeList {
364  public:
365   size_t GuaranteedAllocatable(size_t maximum_freed) final {
366     FATAL("NoFreeList can't be used as a standard FreeList. ");
367   }
368   size_t Free(Address start, size_t size_in_bytes, FreeMode mode) final {
369     FATAL("NoFreeList can't be used as a standard FreeList.");
370   }
371   V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
372                                            size_t* node_size,
373                                            AllocationOrigin origin) final {
374     FATAL("NoFreeList can't be used as a standard FreeList.");
375   }
376   Page* GetPageForSize(size_t size_in_bytes) final {
377     FATAL("NoFreeList can't be used as a standard FreeList.");
378   }
379 
380  private:
381   FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) final {
382     FATAL("NoFreeList can't be used as a standard FreeList.");
383   }
384 };
385 
386 // ----------------------------------------------------------------------------
387 // Space is the abstract superclass for all allocation spaces.
388 class V8_EXPORT_PRIVATE Space : public Malloced {
389  public:
390   Space(Heap* heap, AllocationSpace id, FreeList* free_list)
391       : allocation_observers_paused_(false),
392         heap_(heap),
393         id_(id),
394         committed_(0),
395         max_committed_(0),
396         free_list_(std::unique_ptr<FreeList>(free_list)) {
397     external_backing_store_bytes_ =
398         new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
399     external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
400     external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
401         0;
402   }
403 
404   static inline void MoveExternalBackingStoreBytes(
405       ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
406 
407   virtual ~Space() {
408     delete[] external_backing_store_bytes_;
409     external_backing_store_bytes_ = nullptr;
410   }
411 
412   Heap* heap() const {
413     DCHECK_NOT_NULL(heap_);
414     return heap_;
415   }
416 
417   bool IsDetached() const { return heap_ == nullptr; }
418 
419   AllocationSpace identity() { return id_; }
420 
421   const char* name() { return Heap::GetSpaceName(id_); }
422 
423   virtual void AddAllocationObserver(AllocationObserver* observer);
424 
425   virtual void RemoveAllocationObserver(AllocationObserver* observer);
426 
427   virtual void PauseAllocationObservers();
428 
429   virtual void ResumeAllocationObservers();
430 
431   virtual void StartNextInlineAllocationStep() {}
432 
433   void AllocationStep(int bytes_since_last, Address soon_object, int size);
434 
435   // An AllocationStep equivalent to be called after merging a contiguous
436   // chunk of an off-thread space into this space. The chunk is treated as a
437   // single allocation-folding group.
438   void AllocationStepAfterMerge(Address first_object_in_chunk, int size);
439 
440   // Returns the total amount of committed memory for this space, i.e., allocatable
441   // memory and page headers.
442   virtual size_t CommittedMemory() { return committed_; }
443 
444   virtual size_t MaximumCommittedMemory() { return max_committed_; }
445 
446   // Returns allocated size.
447   virtual size_t Size() = 0;
448 
449   // Returns size of objects. Can differ from the allocated size
450   // (e.g. see OldLargeObjectSpace).
451   virtual size_t SizeOfObjects() { return Size(); }
452 
453   // Approximate amount of physical memory committed for this space.
454   virtual size_t CommittedPhysicalMemory() = 0;
455 
456   // Return the available bytes without growing.
457   virtual size_t Available() = 0;
458 
459   virtual int RoundSizeDownToObjectAlignment(int size) {
460     if (id_ == CODE_SPACE) {
461       return RoundDown(size, kCodeAlignment);
462     } else {
463       return RoundDown(size, kTaggedSize);
464     }
465   }
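
  // Worked example (illustrative; the constants are architecture-dependent):
  // with kTaggedSize == 8, RoundSizeDownToObjectAlignment(30) yields 24 for
  // non-code spaces, while CODE_SPACE rounds down to kCodeAlignment instead.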
466 
467   virtual std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) = 0;
468 
469   void AccountCommitted(size_t bytes) {
470     DCHECK_GE(committed_ + bytes, committed_);
471     committed_ += bytes;
472     if (committed_ > max_committed_) {
473       max_committed_ = committed_;
474     }
475   }
476 
477   void AccountUncommitted(size_t bytes) {
478     DCHECK_GE(committed_, committed_ - bytes);
479     committed_ -= bytes;
480   }
481 
482   inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
483                                                  size_t amount);
484 
485   inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
486                                                  size_t amount);
487 
488   // Returns the amount of off-heap memory in use by objects in this Space.
489   virtual size_t ExternalBackingStoreBytes(
490       ExternalBackingStoreType type) const {
491     return external_backing_store_bytes_[type];
492   }
493 
494   void* GetRandomMmapAddr();
495 
496   MemoryChunk* first_page() { return memory_chunk_list_.front(); }
497   MemoryChunk* last_page() { return memory_chunk_list_.back(); }
498 
499   base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
500 
501   FreeList* free_list() { return free_list_.get(); }
502 
503 #ifdef DEBUG
504   virtual void Print() = 0;
505 #endif
506 
507  protected:
508   intptr_t GetNextInlineAllocationStepSize();
509   bool AllocationObserversActive() {
510     return !allocation_observers_paused_ && !allocation_observers_.empty();
511   }
512 
513   void DetachFromHeap() { heap_ = nullptr; }
514 
515   std::vector<AllocationObserver*> allocation_observers_;
516 
517   // The List manages the pages that belong to the given space.
518   base::List<MemoryChunk> memory_chunk_list_;
519 
520   // Tracks off-heap memory used by this space.
521   std::atomic<size_t>* external_backing_store_bytes_;
522 
523   bool allocation_observers_paused_;
524   Heap* heap_;
525   AllocationSpace id_;
526 
527   // Keeps track of committed memory in a space.
528   size_t committed_;
529   size_t max_committed_;
530 
531   std::unique_ptr<FreeList> free_list_;
532 
533   DISALLOW_COPY_AND_ASSIGN(Space);
534 };
535 
536 // The CodeObjectRegistry holds all start addresses of code objects of a given
537 // MemoryChunk. Each MemoryChunk owns a separate CodeObjectRegistry. The
538 // CodeObjectRegistry allows fast lookup from an inner pointer of a code object
539 // to the actual code object.
540 class V8_EXPORT_PRIVATE CodeObjectRegistry {
541  public:
542   void RegisterNewlyAllocatedCodeObject(Address code);
543   void RegisterAlreadyExistingCodeObject(Address code);
544   void Clear();
545   void Finalize();
546   bool Contains(Address code) const;
547   Address GetCodeObjectStartFromInnerAddress(Address address) const;
548 
549  private:
550   std::vector<Address> code_object_registry_already_existing_;
551   std::set<Address> code_object_registry_newly_allocated_;
552 };
553 
554 class V8_EXPORT_PRIVATE MemoryChunkLayout {
555  public:
556   static size_t CodePageGuardStartOffset();
557   static size_t CodePageGuardSize();
558   static intptr_t ObjectStartOffsetInCodePage();
559   static intptr_t ObjectEndOffsetInCodePage();
560   static size_t AllocatableMemoryInCodePage();
561   static intptr_t ObjectStartOffsetInDataPage();
562   static size_t AllocatableMemoryInDataPage();
563   static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
564   static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
565 };
566 
567 // MemoryChunk represents a memory region owned by a specific space.
568 // It is divided into the header and the body. Chunk start is always
569 // 1MB aligned. Start of the body is aligned so it can accommodate
570 // any heap object.
571 class MemoryChunk : public BasicMemoryChunk {
572  public:
573   // Use with std data structures.
574   struct Hasher {
575     size_t operator()(MemoryChunk* const chunk) const {
576       return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
577     }
578   };
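
  // Illustrative use of Hasher with standard containers (no such container is
  // declared in this file; this is just the intended pattern):
  //
  //   std::unordered_set<MemoryChunk*, MemoryChunk::Hasher> visited_chunks;
  //   visited_chunks.insert(chunk);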
579 
580   using Flags = uintptr_t;
581 
582   static const Flags kPointersToHereAreInterestingMask =
583       POINTERS_TO_HERE_ARE_INTERESTING;
584 
585   static const Flags kPointersFromHereAreInterestingMask =
586       POINTERS_FROM_HERE_ARE_INTERESTING;
587 
588   static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
589 
590   static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
591 
592   static const Flags kIsLargePageMask = LARGE_PAGE;
593 
594   static const Flags kSkipEvacuationSlotsRecordingMask =
595       kEvacuationCandidateMask | kIsInYoungGenerationMask;
596 
597   // |kDone|: The page state when sweeping is complete or sweeping must not be
598   //   performed on that page. Sweeper threads that are done with their work
599   //   will set this value and not touch the page anymore.
600   // |kPending|: This page is ready for parallel sweeping.
601   // |kInProgress|: This page is currently swept by a sweeper thread.
602   enum class ConcurrentSweepingState : intptr_t {
603     kDone,
604     kPending,
605     kInProgress,
606   };
607 
608   static const size_t kHeaderSize =
609       BasicMemoryChunk::kHeaderSize  // Parent size.
610       + 3 * kSystemPointerSize       // VirtualMemory reservation_
611       + kSystemPointerSize           // Address owner_
612       + kSizetSize                   // size_t progress_bar_
613       + kIntptrSize                  // intptr_t live_byte_count_
614       + kSystemPointerSize           // SlotSet* sweeping_slot_set_
615       + kSystemPointerSize *
616             NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
617       + kSystemPointerSize *
618             NUMBER_OF_REMEMBERED_SET_TYPES  // InvalidatedSlots* array
619       + kSystemPointerSize  // std::atomic<intptr_t> high_water_mark_
620       + kSystemPointerSize  // base::Mutex* mutex_
621       + kSystemPointerSize  // std::atomic<ConcurrentSweepingState>
622                             // concurrent_sweeping_
623       + kSystemPointerSize  // base::Mutex* page_protection_change_mutex_
624       + kSystemPointerSize  // uintptr_t write_unprotect_counter_
625       + kSizetSize * ExternalBackingStoreType::kNumTypes
626       // std::atomic<size_t> external_backing_store_bytes_
627       + kSizetSize              // size_t allocated_bytes_
628       + kSizetSize              // size_t wasted_memory_
629       + kSystemPointerSize * 2  // base::ListNode
630       + kSystemPointerSize      // FreeListCategory** categories_
631       + kSystemPointerSize      // LocalArrayBufferTracker* local_tracker_
632       + kIntptrSize  // std::atomic<intptr_t> young_generation_live_byte_count_
633       + kSystemPointerSize   // Bitmap* young_generation_bitmap_
634       + kSystemPointerSize   // CodeObjectRegistry* code_object_registry_
635       + kSystemPointerSize;  // PossiblyEmptyBuckets possibly_empty_buckets_
636 
637   // Page size in bytes.  This must be a multiple of the OS page size.
638   static const int kPageSize = 1 << kPageSizeBits;
639 
640   // Maximum number of nested code memory modification scopes.
641   static const int kMaxWriteUnprotectCounter = 3;
642 
643   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
644   static MemoryChunk* FromAddress(Address a) {
645     DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
646     return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
647   }
648   // Only works if the object is in the first kPageSize of the MemoryChunk.
649   static MemoryChunk* FromHeapObject(HeapObject o) {
650     DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
651     return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
652   }
653 
654   void SetOldGenerationPageFlags(bool is_marking);
655   void SetYoungGenerationPageFlags(bool is_marking);
656 
657   static inline void UpdateHighWaterMark(Address mark) {
658     if (mark == kNullAddress) return;
659     // Need to subtract one from the mark because when a chunk is full the
660     // top points to the next address after the chunk, which effectively belongs
661     // to another chunk. See the comment to Page::FromAllocationAreaAddress.
662     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
663     intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
664     intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
665     while ((new_mark > old_mark) &&
666            !chunk->high_water_mark_.compare_exchange_weak(
667                old_mark, new_mark, std::memory_order_acq_rel)) {
668     }
669   }
670 
671   static inline void MoveExternalBackingStoreBytes(
672       ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
673       size_t amount);
674 
675   void DiscardUnusedMemory(Address addr, size_t size);
676 
677   base::Mutex* mutex() { return mutex_; }
678 
679   void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
680     concurrent_sweeping_ = state;
681   }
682 
683   ConcurrentSweepingState concurrent_sweeping_state() {
684     return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
685   }
686 
687   bool SweepingDone() {
688     return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
689   }
690 
691   inline Heap* heap() const {
692     DCHECK_NOT_NULL(heap_);
693     return heap_;
694   }
695 
696 #ifdef THREAD_SANITIZER
697   // Perform a dummy acquire load to tell TSAN that there is no data race in
698   // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
699   // release store.
700   void SynchronizedHeapLoad();
701 #endif
702 
703   template <RememberedSetType type>
704   bool ContainsSlots() {
705     return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
706            invalidated_slots<type>() != nullptr;
707   }
708 
709   template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
710   SlotSet* slot_set() {
711     if (access_mode == AccessMode::ATOMIC)
712       return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
713     return slot_set_[type];
714   }
715 
716   template <AccessMode access_mode = AccessMode::ATOMIC>
717   SlotSet* sweeping_slot_set() {
718     if (access_mode == AccessMode::ATOMIC)
719       return base::AsAtomicPointer::Acquire_Load(&sweeping_slot_set_);
720     return sweeping_slot_set_;
721   }
722 
723   template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
724   TypedSlotSet* typed_slot_set() {
725     if (access_mode == AccessMode::ATOMIC)
726       return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
727     return typed_slot_set_[type];
728   }
729 
730   template <RememberedSetType type>
731   V8_EXPORT_PRIVATE SlotSet* AllocateSlotSet();
732   SlotSet* AllocateSweepingSlotSet();
733   SlotSet* AllocateSlotSet(SlotSet** slot_set);
734 
735   // Not safe to be called concurrently.
736   template <RememberedSetType type>
737   void ReleaseSlotSet();
738   void ReleaseSlotSet(SlotSet** slot_set);
739   void ReleaseSweepingSlotSet();
740   template <RememberedSetType type>
741   TypedSlotSet* AllocateTypedSlotSet();
742   // Not safe to be called concurrently.
743   template <RememberedSetType type>
744   void ReleaseTypedSlotSet();
745 
746   template <RememberedSetType type>
747   InvalidatedSlots* AllocateInvalidatedSlots();
748   template <RememberedSetType type>
749   void ReleaseInvalidatedSlots();
750   template <RememberedSetType type>
751   V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
752   void InvalidateRecordedSlots(HeapObject object);
753   template <RememberedSetType type>
754   bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
755   template <RememberedSetType type>
756   InvalidatedSlots* invalidated_slots() {
757     return invalidated_slots_[type];
758   }
759 
760   void ReleaseLocalTracker();
761 
762   void AllocateYoungGenerationBitmap();
763   void ReleaseYoungGenerationBitmap();
764 
765   int FreeListsLength();
766 
767   // Approximate amount of physical memory committed for this chunk.
768   V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
769 
770   Address HighWaterMark() { return address() + high_water_mark_; }
771 
772   size_t ProgressBar() {
773     DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
774     return progress_bar_.load(std::memory_order_acquire);
775   }
776 
777   bool TrySetProgressBar(size_t old_value, size_t new_value) {
778     DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
779     return progress_bar_.compare_exchange_strong(old_value, new_value,
780                                                  std::memory_order_acq_rel);
781   }
782 
783   void ResetProgressBar() {
784     if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
785       progress_bar_.store(0, std::memory_order_release);
786     }
787   }
788 
789   inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
790                                                  size_t amount);
791 
792   inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
793                                                  size_t amount);
794 
795   size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
796     return external_backing_store_bytes_[type];
797   }
798 
799   // Some callers rely on the fact that this can operate on both
800   // tagged and aligned object addresses.
801   inline uint32_t AddressToMarkbitIndex(Address addr) const {
802     return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
803   }
804 
805   inline Address MarkbitIndexToAddress(uint32_t index) const {
806     return this->address() + (index << kTaggedSizeLog2);
807   }
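
  // Worked example (illustrative): on a configuration where kTaggedSizeLog2 is
  // 3, an address 64 bytes past the chunk start maps to mark-bit index
  // 64 >> 3 == 8, and MarkbitIndexToAddress(8) maps back to chunk start + 64.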
808 
809   bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
810 
811   void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
812 
813   bool CanAllocate() {
814     return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
815   }
816 
817   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
818   bool IsEvacuationCandidate() {
819     DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
820              IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
821     return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
822   }
823 
824   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
825   bool ShouldSkipEvacuationSlotRecording() {
826     uintptr_t flags = GetFlags<access_mode>();
827     return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
828            ((flags & COMPACTION_WAS_ABORTED) == 0);
829   }
830 
831   Executability executable() {
832     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
833   }
834 
835   bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
836   bool IsToPage() const { return IsFlagSet(TO_PAGE); }
837   bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
838   bool InYoungGeneration() const {
839     return (GetFlags() & kIsInYoungGenerationMask) != 0;
840   }
841   bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
842   bool InNewLargeObjectSpace() const {
843     return InYoungGeneration() && IsLargePage();
844   }
845   bool InOldSpace() const;
846   V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
847 
848   // Gets the chunk's owner or null if the space has been detached.
849   Space* owner() const { return owner_; }
850 
851   void set_owner(Space* space) { owner_ = space; }
852 
853   bool IsWritable() const {
854     // If this is a read-only space chunk but heap_ is non-null, it has not yet
855     // been sealed and can be written to.
856     return !InReadOnlySpace() || heap_ != nullptr;
857   }
858 
859   // Gets the chunk's allocation space, potentially dealing with a null owner_
860   // (like read-only chunks have).
861   inline AllocationSpace owner_identity() const;
862 
863   // Emits a memory barrier. For TSAN builds the other thread needs to perform
864   // MemoryChunk::synchronized_heap() to simulate the barrier.
865   void InitializationMemoryFence();
866 
867   V8_EXPORT_PRIVATE void SetReadable();
868   V8_EXPORT_PRIVATE void SetReadAndExecutable();
869   V8_EXPORT_PRIVATE void SetReadAndWritable();
870 
871   void SetDefaultCodePermissions() {
872     if (FLAG_jitless) {
873       SetReadable();
874     } else {
875       SetReadAndExecutable();
876     }
877   }
878 
879   base::ListNode<MemoryChunk>& list_node() { return list_node_; }
880 
881   CodeObjectRegistry* GetCodeObjectRegistry() { return code_object_registry_; }
882 
883   FreeList* free_list() { return owner()->free_list(); }
884 
885   PossiblyEmptyBuckets* possibly_empty_buckets() {
886     return &possibly_empty_buckets_;
887   }
888 
889  protected:
890   static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
891                                  Address area_start, Address area_end,
892                                  Executability executable, Space* owner,
893                                  VirtualMemory reservation);
894 
895   // Release all memory allocated by the chunk. Should be called when memory
896   // chunk is about to be freed.
897   void ReleaseAllAllocatedMemory();
898   // Release memory allocated by the chunk, except that which is needed by
899   // read-only space chunks.
900   void ReleaseAllocatedMemoryNeededForWritableChunk();
901 
902   // Sets the requested page permissions only if the write unprotect counter
903   // has reached 0.
904   void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
905       PageAllocator::Permission permission);
906 
907   VirtualMemory* reserved_memory() { return &reservation_; }
908 
909   template <AccessMode mode>
910   ConcurrentBitmap<mode>* marking_bitmap() const {
911     return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
912   }
913 
914   template <AccessMode mode>
915   ConcurrentBitmap<mode>* young_generation_bitmap() const {
916     return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
917   }
918 
919   // If the chunk needs to remember its memory reservation, it is stored here.
920   VirtualMemory reservation_;
921 
922   // The space owning this memory chunk.
923   std::atomic<Space*> owner_;
924 
925   // Used by the incremental marker to keep track of the scanning progress in
926   // large objects that have a progress bar and are scanned in increments.
927   std::atomic<size_t> progress_bar_;
928 
929   // Count of bytes marked black on page.
930   intptr_t live_byte_count_;
931 
932   // A single slot set for small pages (of size kPageSize) or an array of slot
933   // sets for large pages. In the latter case the number of entries in the array
934   // is ceil(size() / kPageSize).
935   SlotSet* sweeping_slot_set_;
936   TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
937   InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
938 
939   // Assuming the initial allocation on a page is sequential,
940   // count highest number of bytes ever allocated on the page.
941   std::atomic<intptr_t> high_water_mark_;
942 
943   base::Mutex* mutex_;
944 
945   std::atomic<ConcurrentSweepingState> concurrent_sweeping_;
946 
947   base::Mutex* page_protection_change_mutex_;
948 
949   // This field is only relevant for code pages. It depicts the number of
950   // times a component requested this page to be read+writable. The
951   // counter is decremented when a component resets to read+executable.
952   // If Value() == 0 => The memory is read and executable.
953   // If Value() >= 1 => The Memory is read and writable (and maybe executable).
954   // The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
955   // excessive nesting of scopes.
956   // All executable MemoryChunks are allocated rw based on the assumption that
957   // they will be used immediately for an allocation. They are initialized
958   // with the number of open CodeSpaceMemoryModificationScopes. The caller
959   // that triggers the page allocation is responsible for decrementing the
960   // counter.
961   uintptr_t write_unprotect_counter_;
962 
963   // Bytes allocated on the page, which includes all objects on the page
964   // and the linear allocation area.
965   size_t allocated_bytes_;
966 
967   // Tracks off-heap memory used by this memory chunk.
968   std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
969 
970   // Freed memory that was not added to the free list.
971   size_t wasted_memory_;
972 
973   base::ListNode<MemoryChunk> list_node_;
974 
975   FreeListCategory** categories_;
976 
977   LocalArrayBufferTracker* local_tracker_;
978 
979   std::atomic<intptr_t> young_generation_live_byte_count_;
980   Bitmap* young_generation_bitmap_;
981 
982   CodeObjectRegistry* code_object_registry_;
983 
984   PossiblyEmptyBuckets possibly_empty_buckets_;
985 
986  private:
987   void InitializeReservedMemory() { reservation_.Reset(); }
988 
989   friend class ConcurrentMarkingState;
990   friend class MajorMarkingState;
991   friend class MajorAtomicMarkingState;
992   friend class MajorNonAtomicMarkingState;
993   friend class MemoryAllocator;
994   friend class MinorMarkingState;
995   friend class MinorNonAtomicMarkingState;
996   friend class PagedSpace;
997 };
998 
999 STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
1000 
1001 // -----------------------------------------------------------------------------
1002 // A page is a memory chunk of size 256K. Large object pages may be larger.
1003 //
1004 // The only way to get a page pointer is by calling factory methods:
1005 //   Page* p = Page::FromAddress(addr); or
1006 //   Page* p = Page::FromAllocationAreaAddress(address);
1007 class Page : public MemoryChunk {
1008  public:
1009   static const intptr_t kCopyAllFlags = ~0;
1010 
1011   // Page flags copied from from-space to to-space when flipping semispaces.
1012   static const intptr_t kCopyOnFlipFlagsMask =
1013       static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
1014       static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
1015       static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);
1016 
1017   // Returns the page containing a given address. The address ranges
1018   // from [page_addr .. page_addr + kPageSize[. This only works if the object
1019   // is in fact in a page.
1020   static Page* FromAddress(Address addr) {
1021     return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
1022   }
1023   static Page* FromHeapObject(HeapObject o) {
1024     return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
1025   }
1026 
1027   // Returns the page containing the address provided. The address can
1028   // potentially point right after the page. To also be safe for tagged values
1029   // we subtract a hole word. The valid address ranges from
1030   // [page_addr + area_start_ .. page_addr + kPageSize + kTaggedSize].
1031   static Page* FromAllocationAreaAddress(Address address) {
1032     return Page::FromAddress(address - kTaggedSize);
1033   }
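
  // Illustrative use: when a linear allocation area is completely full, its
  // top equals the page's end address, which formally belongs to the next
  // page; subtracting kTaggedSize first keeps the lookup on the page that owns
  // the allocation area (|top| here is an assumed name for such an address):
  //
  //   Page* p = Page::FromAllocationAreaAddress(top);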
1034 
1035   // Checks if address1 and address2 are on the same new space page.
1036   static bool OnSamePage(Address address1, Address address2) {
1037     return Page::FromAddress(address1) == Page::FromAddress(address2);
1038   }
1039 
1040   // Checks whether an address is page aligned.
1041   static bool IsAlignedToPageSize(Address addr) {
1042     return (addr & kPageAlignmentMask) == 0;
1043   }
1044 
1045   static Page* ConvertNewToOld(Page* old_page);
1046 
1047   inline void MarkNeverAllocateForTesting();
1048   inline void MarkEvacuationCandidate();
1049   inline void ClearEvacuationCandidate();
1050 
1051   Page* next_page() { return static_cast<Page*>(list_node_.next()); }
1052   Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }
1053 
1054   template <typename Callback>
1055   inline void ForAllFreeListCategories(Callback callback) {
1056     for (int i = kFirstCategory; i < free_list()->number_of_categories(); i++) {
1057       callback(categories_[i]);
1058     }
1059   }
1060 
1061   // Returns the offset of a given address to this page.
1062   inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
1063 
1064   // Returns the address for a given offset in this page.
1065   Address OffsetToAddress(size_t offset) {
1066     Address address_in_page = address() + offset;
1067     DCHECK_GE(address_in_page, area_start());
1068     DCHECK_LT(address_in_page, area_end());
1069     return address_in_page;
1070   }
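
  // Illustrative round trip: for any address |a| in [area_start(), area_end()),
  // OffsetToAddress(Offset(a)) == a.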
1071 
1072   void AllocateLocalTracker();
1073   inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
1074   bool contains_array_buffers();
1075 
1076   size_t AvailableInFreeList();
1077 
1078   size_t AvailableInFreeListFromAllocatedBytes() {
1079     DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
1080     return area_size() - wasted_memory() - allocated_bytes();
1081   }
1082 
1083   FreeListCategory* free_list_category(FreeListCategoryType type) {
1084     return categories_[type];
1085   }
1086 
1087   size_t wasted_memory() { return wasted_memory_; }
1088   void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
1089   size_t allocated_bytes() { return allocated_bytes_; }
1090   void IncreaseAllocatedBytes(size_t bytes) {
1091     DCHECK_LE(bytes, area_size());
1092     allocated_bytes_ += bytes;
1093   }
1094   void DecreaseAllocatedBytes(size_t bytes) {
1095     DCHECK_LE(bytes, area_size());
1096     DCHECK_GE(allocated_bytes(), bytes);
1097     allocated_bytes_ -= bytes;
1098   }
1099 
1100   void ResetAllocationStatistics();
1101 
1102   size_t ShrinkToHighWaterMark();
1103 
1104   V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
1105   void DestroyBlackArea(Address start, Address end);
1106 
1107   void InitializeFreeListCategories();
1108   void AllocateFreeListCategories();
1109   void ReleaseFreeListCategories();
1110 
1111   void MoveOldToNewRememberedSetForSweeping();
1112   void MergeOldToNewRememberedSets();
1113 
1114  private:
1115   friend class MemoryAllocator;
1116 };
1117 
1118 class ReadOnlyPage : public Page {
1119  public:
1120   // Clears any pointers in the header that point out of the page that would
1121   // otherwise make the header non-relocatable.
1122   void MakeHeaderRelocatable();
1123 
1124  private:
1125   friend class ReadOnlySpace;
1126 };
1127 
1128 class LargePage : public MemoryChunk {
1129  public:
1130   // A limit to guarantee that we do not overflow typed slot offset in
1131   // the old to old remembered set.
1132   // Note that this limit is higher than what assembler already imposes on
1133   // x64 and ia32 architectures.
1134   static const int kMaxCodePageSize = 512 * MB;
1135 
1136   static LargePage* FromHeapObject(HeapObject o) {
1137     return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
1138   }
1139 
1140   inline HeapObject GetObject();
1141 
1142   inline LargePage* next_page() {
1143     return static_cast<LargePage*>(list_node_.next());
1144   }
1145 
1146   // Uncommit memory that is not in use anymore by the object. If the object
1147   // cannot be shrunk, 0 is returned.
1148   Address GetAddressToShrink(Address object_address, size_t object_size);
1149 
1150   void ClearOutOfLiveRangeSlots(Address free_start);
1151 
1152  private:
1153   static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
1154                                Executability executable);
1155 
1156   friend class MemoryAllocator;
1157 };
1158 
1159 // Validate our estimates on the header size.
1160 STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
1161 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
1162 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
1163 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
1164 
1165 // The process-wide singleton that keeps track of code range regions with the
1166 // intention to reuse free code range regions as a workaround for CFG memory
1167 // leaks (see crbug.com/870054).
1168 class CodeRangeAddressHint {
1169  public:
1170   // Returns the most recently freed code range start address for the given
1171   // size. If there is no such entry, then a random address is returned.
1172   V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
1173 
1174   V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
1175                                               size_t code_range_size);
1176 
1177  private:
1178   base::Mutex mutex_;
1179   // A map from code range size to an array of recently freed code range
1180   // addresses. There should be O(1) different code range sizes.
1181   // The length of each array is limited by the peak number of code ranges,
1182   // which should be also O(1).
1183   std::unordered_map<size_t, std::vector<Address>> recently_freed_;
1184 };
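
// Illustrative call pattern (a sketch, not code in this file): when a code
// range is freed its region is reported back, and a later reservation of the
// same size asks for a hint first (|hint| and |region| are assumed names):
//
//   hint.NotifyFreedCodeRange(region.begin(), region.size());
//   Address start = hint.GetAddressHint(region.size());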
1185 
1186 // ----------------------------------------------------------------------------
1187 // A space acquires chunks of memory from the operating system. The memory
1188 // allocator allocates and deallocates pages for the paged heap spaces and large
1189 // pages for large object space.
1190 class MemoryAllocator {
1191  public:
1192   // Unmapper takes care of concurrently unmapping and uncommitting memory
1193   // chunks.
1194   class Unmapper {
1195    public:
1196     class UnmapFreeMemoryTask;
1197 
1198     Unmapper(Heap* heap, MemoryAllocator* allocator)
1199         : heap_(heap),
1200           allocator_(allocator),
1201           pending_unmapping_tasks_semaphore_(0),
1202           pending_unmapping_tasks_(0),
1203           active_unmapping_tasks_(0) {
1204       chunks_[kRegular].reserve(kReservedQueueingSlots);
1205       chunks_[kPooled].reserve(kReservedQueueingSlots);
1206     }
1207 
1208     void AddMemoryChunkSafe(MemoryChunk* chunk) {
1209       if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
1210         AddMemoryChunkSafe<kRegular>(chunk);
1211       } else {
1212         AddMemoryChunkSafe<kNonRegular>(chunk);
1213       }
1214     }
1215 
1216     MemoryChunk* TryGetPooledMemoryChunkSafe() {
1217       // Procedure:
1218       // (1) Try to get a chunk that was declared as pooled and already has
1219       // been uncommitted.
1220       // (2) Try to steal any memory chunk of kPageSize that would've been
1221       // unmapped.
1222       MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
1223       if (chunk == nullptr) {
1224         chunk = GetMemoryChunkSafe<kRegular>();
1225         if (chunk != nullptr) {
1226           // For stolen chunks we need to manually free any allocated memory.
1227           chunk->ReleaseAllAllocatedMemory();
1228         }
1229       }
1230       return chunk;
1231     }
1232 
1233     V8_EXPORT_PRIVATE void FreeQueuedChunks();
1234     void CancelAndWaitForPendingTasks();
1235     void PrepareForGC();
1236     V8_EXPORT_PRIVATE void EnsureUnmappingCompleted();
1237     V8_EXPORT_PRIVATE void TearDown();
1238     size_t NumberOfCommittedChunks();
1239     V8_EXPORT_PRIVATE int NumberOfChunks();
1240     size_t CommittedBufferedMemory();
1241 
1242    private:
1243     static const int kReservedQueueingSlots = 64;
1244     static const int kMaxUnmapperTasks = 4;
1245 
1246     enum ChunkQueueType {
1247       kRegular,     // Pages of kPageSize that do not live in a CodeRange and
1248                     // can thus be used for stealing.
1249       kNonRegular,  // Large chunks and executable chunks.
1250       kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
1251       kNumberOfChunkQueues,
1252     };
1253 
1254     enum class FreeMode {
1255       kUncommitPooled,
1256       kReleasePooled,
1257     };
1258 
1259     template <ChunkQueueType type>
1260     void AddMemoryChunkSafe(MemoryChunk* chunk) {
1261       base::MutexGuard guard(&mutex_);
1262       chunks_[type].push_back(chunk);
1263     }
1264 
1265     template <ChunkQueueType type>
1266     MemoryChunk* GetMemoryChunkSafe() {
1267       base::MutexGuard guard(&mutex_);
1268       if (chunks_[type].empty()) return nullptr;
1269       MemoryChunk* chunk = chunks_[type].back();
1270       chunks_[type].pop_back();
1271       return chunk;
1272     }
1273 
1274     bool MakeRoomForNewTasks();
1275 
1276     template <FreeMode mode>
1277     void PerformFreeMemoryOnQueuedChunks();
1278 
1279     void PerformFreeMemoryOnQueuedNonRegularChunks();
1280 
1281     Heap* const heap_;
1282     MemoryAllocator* const allocator_;
1283     base::Mutex mutex_;
1284     std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
1285     CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
1286     base::Semaphore pending_unmapping_tasks_semaphore_;
1287     intptr_t pending_unmapping_tasks_;
1288     std::atomic<intptr_t> active_unmapping_tasks_;
1289 
1290     friend class MemoryAllocator;
1291   };
1292 
1293   enum AllocationMode {
1294     kRegular,
1295     kPooled,
1296   };
1297 
1298   enum FreeMode {
1299     kFull,
1300     kAlreadyPooled,
1301     kPreFreeAndQueue,
1302     kPooledAndQueue,
1303   };
1304 
1305   V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();
1306 
1307   // Computes the memory area of discardable memory within a given memory area
1308   // [addr, addr+size) and returns the result as base::AddressRegion. If the
1309   // memory is not discardable base::AddressRegion is an empty region.
1310   V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
1311       Address addr, size_t size);
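
  // Worked example (illustrative, assuming a 4 KiB commit page size and that
  // only fully contained pages are discardable): for the area
  // [0x1800, 0x1800 + 0x2800), the returned region would cover
  // [0x2000, 0x4000).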
1312 
1313   V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate, size_t max_capacity,
1314                                     size_t code_range_size);
1315 
1316   V8_EXPORT_PRIVATE void TearDown();
1317 
1318   // Allocates a Page from the allocator. AllocationMode is used to indicate
1319   // whether pooled allocation, which only works for MemoryChunk::kPageSize,
1320   // should be tried first.
1321   template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
1322             typename SpaceType>
1323   EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
1324   Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
1325 
1326   LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
1327                                Executability executable);
1328 
1329   template <MemoryAllocator::FreeMode mode = kFull>
1330   EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
1331   void Free(MemoryChunk* chunk);
1332 
1333   // Returns allocated spaces in bytes.
1334   size_t Size() { return size_; }
1335 
1336   // Returns allocated executable spaces in bytes.
1337   size_t SizeExecutable() { return size_executable_; }
1338 
1339   // Returns the maximum available bytes of heaps.
1340   size_t Available() {
1341     const size_t size = Size();
1342     return capacity_ < size ? 0 : capacity_ - size;
1343   }
1344 
1345   // Returns an indication of whether a pointer is in a space that has
1346   // been allocated by this MemoryAllocator.
1347   V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
1348     return address < lowest_ever_allocated_ ||
1349            address >= highest_ever_allocated_;
1350   }
1351 
  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed; it can
  // be committed later by calling MemoryChunk::CommitArea.
  V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
                                               size_t commit_area_size,
                                               Executability executable,
                                               Space* space);

  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                size_t alignment, Executability executable,
                                void* hint, VirtualMemory* controller);

  void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);

  // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
  // internally memory is freed from |start_free| to the end of the reservation.
  // Additional memory beyond the page is not accounted for, though, so
  // |bytes_to_free| is computed by the caller.
  void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
                         size_t bytes_to_free, Address new_area_end);

  // Checks if an allocated MemoryChunk was intended to be used for executable
  // memory.
  bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
    return executable_memory_.find(chunk) != executable_memory_.end();
  }

  // Commit memory region owned by given reservation object.  Returns true if
  // it succeeded and false otherwise.
  bool CommitMemory(VirtualMemory* reservation);

  // Uncommit memory region owned by given reservation object. Returns true if
  // it succeeded and false otherwise.
  bool UncommitMemory(VirtualMemory* reservation);

  // Zaps a contiguous block of memory [start..(start+size)[ with
  // a given zap value.
  void ZapBlock(Address start, size_t size, uintptr_t zap_value);

  V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
                                                    Address start,
                                                    size_t commit_size,
                                                    size_t reserved_size);

  // Page allocator instance for allocating non-executable pages.
  // Guaranteed to be a valid pointer.
  v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }

  // Page allocator instance for allocating executable pages.
  // Guaranteed to be a valid pointer.
  v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }

  // Returns page allocator suitable for allocating pages with requested
  // executability.
  v8::PageAllocator* page_allocator(Executability executable) {
    return executable == EXECUTABLE ? code_page_allocator_
                                    : data_page_allocator_;
  }

  // A region of memory that may contain executable code, including a reserved
  // OS page with read-write access at the beginning.
  const base::AddressRegion& code_range() const {
    // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
    DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
    DCHECK_IMPLIES(!code_range_.is_empty(),
                   code_range_.contains(code_page_allocator_instance_->begin(),
                                        code_page_allocator_instance_->size()));
    return code_range_;
  }

  Unmapper* unmapper() { return &unmapper_; }

  // Performs all necessary bookkeeping to free the memory, but does not free
  // it.
  void UnregisterMemory(MemoryChunk* chunk);

 private:
  void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
                                   size_t requested);

  // PreFreeMemory logically frees the object, i.e., it unregisters the memory,
  // logs a delete event and adds the chunk to remembered unmapped pages.
  void PreFreeMemory(MemoryChunk* chunk);

  // PerformFreeMemory can be called concurrently when PreFreeMemory was
  // executed beforehand.
  void PerformFreeMemory(MemoryChunk* chunk);

  // See AllocatePage for public interface. Note that currently we only support
  // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
  template <typename SpaceType>
  MemoryChunk* AllocatePagePooled(SpaceType* owner);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  void UpdateAllocatedSpaceLimits(Address low, Address high) {
    // The use of atomic primitives does not guarantee correctness (wrt.
    // desired semantics) by default. The loop here ensures that we update the
    // values only if they did not change in between.
    Address ptr = lowest_ever_allocated_.load(std::memory_order_relaxed);
    while ((low < ptr) && !lowest_ever_allocated_.compare_exchange_weak(
                              ptr, low, std::memory_order_acq_rel)) {
    }
    ptr = highest_ever_allocated_.load(std::memory_order_relaxed);
    while ((high > ptr) && !highest_ever_allocated_.compare_exchange_weak(
                               ptr, high, std::memory_order_acq_rel)) {
    }
  }

  void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
    DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
    DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.insert(chunk);
  }

  void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
    DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.erase(chunk);
    chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
  }

  Isolate* isolate_;

  // This object controls virtual space reserved for code on the V8 heap. This
  // is only valid for 64-bit architectures where kRequiresCodeRange.
  VirtualMemory code_reservation_;

  // Page allocator used for allocating data pages. Depending on the
  // configuration it may be a page allocator instance provided by v8::Platform
  // or a BoundedPageAllocator (when pointer compression is enabled).
  v8::PageAllocator* data_page_allocator_;

  // Page allocator used for allocating code pages. Depending on the
  // configuration it may be a page allocator instance provided by v8::Platform
  // or a BoundedPageAllocator (when pointer compression is enabled or
  // on those 64-bit architectures where pc-relative 32-bit displacement
  // can be used for call and jump instructions).
  v8::PageAllocator* code_page_allocator_;

  // A part of the |code_reservation_| that may contain executable code,
  // including a reserved page with read-write access at the beginning.
  // See details below.
  base::AddressRegion code_range_;

  // This unique pointer owns the instance of bounded code allocator
  // that controls executable pages allocation. It does not control the
  // optionally existing page in the beginning of the |code_range_|.
  // So, summarizing all above, the following conditions hold:
  // 1) |code_reservation_| >= |code_range_|
  // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
  // 3) |code_reservation_| is AllocatePageSize()-aligned
  // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
  // 5) |code_range_| is CommitPageSize()-aligned
  std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;

  // Maximum space size in bytes.
  size_t capacity_;

  // Allocated space size in bytes.
  std::atomic<size_t> size_;
  // Allocated executable space size in bytes.
  std::atomic<size_t> size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
  std::atomic<Address> lowest_ever_allocated_;
  std::atomic<Address> highest_ever_allocated_;

  VirtualMemory last_chunk_;
  Unmapper unmapper_;

  // Data structure to remember allocated executable memory chunks.
  std::unordered_set<MemoryChunk*> executable_memory_;

  friend class heap::TestCodePageAllocatorScope;

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};

extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
        size_t size, PagedSpace* owner, Executability executable);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
        size_t size, SemiSpace* owner, Executability executable);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
        size_t size, SemiSpace* owner, Executability executable);

extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);

// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.

class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() = default;
  virtual HeapObject Next() = 0;
};

template <class PAGE_TYPE>
class PageIteratorImpl
    : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
 public:
  explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
  PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
  PAGE_TYPE* operator*() { return p_; }
  bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    return rhs.p_ == p_;
  }
  bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    return rhs.p_ != p_;
  }
  inline PageIteratorImpl<PAGE_TYPE>& operator++();
  inline PageIteratorImpl<PAGE_TYPE> operator++(int);

 private:
  PAGE_TYPE* p_;
};

using PageIterator = PageIteratorImpl<Page>;
using LargePageIterator = PageIteratorImpl<LargePage>;

class PageRange {
 public:
  using iterator = PageIterator;
  PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
  explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
  inline PageRange(Address start, Address limit);

  iterator begin() { return iterator(begin_); }
  iterator end() { return iterator(end_); }

 private:
  Page* begin_;
  Page* end_;
};
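
// Illustrative usage (sketch): a PageRange can be traversed with a
// range-based for loop, e.g.
//   for (Page* p : PageRange(page)) { ... }  // visits exactly |page|
// since the single-page constructor spans [page, page->next_page()).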

// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A PagedSpaceObjectIterator iterates objects from the bottom of the given
// space to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects.  The caller must create a new
// iterator in order to be sure to visit these new objects.
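//
// Example (illustrative sketch, mirroring the contract that Next() returns a
// null object once the iteration has ended):
//   PagedSpaceObjectIterator it(heap, space);
//   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
//     // visit |obj|
//   }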
class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  PagedSpaceObjectIterator(Heap* heap, PagedSpace* space);
  PagedSpaceObjectIterator(Heap* heap, PagedSpace* space, Page* page);

  // Creates a new object iterator in a given off-thread space.
  explicit PagedSpaceObjectIterator(OffThreadSpace* space);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns a null object when the iteration has ended.
  inline HeapObject Next() override;

 private:
  // Fast (inlined) path of next().
  inline HeapObject FromCurrentPage();

  // Slow path of next(), goes into the next page.  Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  PagedSpace* space_;
  PageRange page_range_;
  PageRange::iterator current_page_;
};

// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page().

// An abstraction of allocation and relocation pointers in a page-structured
// space.
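//
// Example (illustrative sketch of the bump-pointer pattern this class
// supports; |area| is a LinearAllocationArea and |size| is assumed to be
// properly aligned):
//   Address top = area.top();
//   if (top != kNullAddress && top + size <= area.limit()) {
//     area.set_top(top + size);
//     // [top, top + size) is now available for a new object.
//   }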
class LinearAllocationArea {
 public:
  LinearAllocationArea() : top_(kNullAddress), limit_(kNullAddress) {}
  LinearAllocationArea(Address top, Address limit) : top_(top), limit_(limit) {}

  void Reset(Address top, Address limit) {
    set_top(top);
    set_limit(limit);
  }

  V8_INLINE void set_top(Address top) {
    SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
    top_ = top;
  }

  V8_INLINE Address top() const {
    SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
    return top_;
  }

  Address* top_address() { return &top_; }

  V8_INLINE void set_limit(Address limit) { limit_ = limit; }

  V8_INLINE Address limit() const { return limit_; }

  Address* limit_address() { return &limit_; }

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationAreaAddress(top_) ==
            Page::FromAllocationAreaAddress(limit_)) &&
           (top_ <= limit_);
  }
#endif

 private:
  // Current allocation top.
  Address top_;
  // Current allocation limit.
  Address limit_;
};

// An abstraction of the accounting statistics of a page-structured space.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in conjunction
// with capacity, or else they always balance increases and decreases to the
// non-capacity stats.
class AllocationStats {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    max_capacity_ = 0;
    ClearSize();
  }

  void ClearSize() {
    size_ = 0;
#ifdef DEBUG
    allocated_on_page_.clear();
#endif
  }

  // Accessors for the allocation statistics.
  size_t Capacity() { return capacity_; }
  size_t MaxCapacity() { return max_capacity_; }
  size_t Size() { return size_; }
#ifdef DEBUG
  size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
#endif

  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    DCHECK_GE(size_ + bytes, size_);
    size_ += bytes;
#ifdef DEBUG
    allocated_on_page_[page] += bytes;
#endif
  }

  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    DCHECK_GE(size_, bytes);
    size_ -= bytes;
#ifdef DEBUG
    DCHECK_GE(allocated_on_page_[page], bytes);
    allocated_on_page_[page] -= bytes;
#endif
  }

  void DecreaseCapacity(size_t bytes) {
    DCHECK_GE(capacity_, bytes);
    DCHECK_GE(capacity_ - bytes, size_);
    capacity_ -= bytes;
  }

  void IncreaseCapacity(size_t bytes) {
    DCHECK_GE(capacity_ + bytes, capacity_);
    capacity_ += bytes;
    if (capacity_ > max_capacity_) {
      max_capacity_ = capacity_;
    }
  }

 private:
  // |capacity_|: The number of object-area bytes (i.e., not including page
  // bookkeeping structures) currently in the space.
  // During evacuation capacity of the main spaces is accessed from multiple
  // threads to check the old generation hard limit.
  std::atomic<size_t> capacity_;

  // |max_capacity_|: The maximum capacity ever observed.
  size_t max_capacity_;

  // |size_|: The number of allocated bytes.
  size_t size_;

#ifdef DEBUG
  std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
#endif
};

// The free list is organized in categories as follows:
// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
//   allocation, when categories >= small do not have entries anymore.
// 11-31 words (tiny): The tiny blocks are only used for allocation, when
//   categories >= small do not have entries anymore.
// 32-255 words (small): Used for allocating free space between 1-31 words in
//   size.
// 256-2047 words (medium): Used for allocating free space between 32-255 words
//   in size.
// 2048-8191 words (large): Used for allocating free space between 256-2047
//   words in size.
// At least 8192 words (huge): This list is for objects of 2048 words or
//   larger. Empty pages are also added to this list.
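//
// For example (illustrative): a freed block of 16 words is linked into the
// tiny list and a block of 100 words into the small list, while a request
// for 20 words is served from the small list first, since the tiniest and
// tiny lists are skipped on the fast allocation path.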
class V8_EXPORT_PRIVATE FreeListLegacy : public FreeList {
 public:
  size_t GuaranteedAllocatable(size_t maximum_freed) override {
    if (maximum_freed <= kTiniestListMax) {
      // Since we are not iterating over all list entries, we cannot guarantee
      // that we can find the maximum freed block in that free list.
      return 0;
    } else if (maximum_freed <= kTinyListMax) {
      return kTinyAllocationMax;
    } else if (maximum_freed <= kSmallListMax) {
      return kSmallAllocationMax;
    } else if (maximum_freed <= kMediumListMax) {
      return kMediumAllocationMax;
    } else if (maximum_freed <= kLargeListMax) {
      return kLargeAllocationMax;
    }
    return maximum_freed;
  }

  inline Page* GetPageForSize(size_t size_in_bytes) override;

  FreeListLegacy();
  ~FreeListLegacy();

  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size,
                                           AllocationOrigin origin) override;

 private:
  enum { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge };

  static const size_t kMinBlockSize = 3 * kTaggedSize;

  // This is a conservative upper bound. The actual maximum block size takes
  // padding and alignment of data and code pages into account.
  static const size_t kMaxBlockSize = Page::kPageSize;

  static const size_t kTiniestListMax = 0xa * kTaggedSize;
  static const size_t kTinyListMax = 0x1f * kTaggedSize;
  static const size_t kSmallListMax = 0xff * kTaggedSize;
  static const size_t kMediumListMax = 0x7ff * kTaggedSize;
  static const size_t kLargeListMax = 0x1fff * kTaggedSize;
  static const size_t kTinyAllocationMax = kTiniestListMax;
  static const size_t kSmallAllocationMax = kTinyListMax;
  static const size_t kMediumAllocationMax = kSmallListMax;
  static const size_t kLargeAllocationMax = kMediumListMax;

  FreeListCategoryType SelectFreeListCategoryType(
      size_t size_in_bytes) override {
    if (size_in_bytes <= kTiniestListMax) {
      return kTiniest;
    } else if (size_in_bytes <= kTinyListMax) {
      return kTiny;
    } else if (size_in_bytes <= kSmallListMax) {
      return kSmall;
    } else if (size_in_bytes <= kMediumListMax) {
      return kMedium;
    } else if (size_in_bytes <= kLargeListMax) {
      return kLarge;
    }
    return kHuge;
  }

  // Returns the category to be used to allocate |size_in_bytes| in the fast
  // path. The tiny categories are not used for fast allocation.
  FreeListCategoryType SelectFastAllocationFreeListCategoryType(
      size_t size_in_bytes) {
    if (size_in_bytes <= kSmallAllocationMax) {
      return kSmall;
    } else if (size_in_bytes <= kMediumAllocationMax) {
      return kMedium;
    } else if (size_in_bytes <= kLargeAllocationMax) {
      return kLarge;
    }
    return kHuge;
  }

  friend class FreeListCategory;
  friend class heap::HeapTester;
};

// Inspired by FreeListLegacy.
// Only has 3 categories: Medium, Large and Huge.
// Any block that would have belonged to tiniest, tiny or small in
// FreeListLegacy is considered wasted.
// Allocation is done only in Huge, Medium and Large (in that order),
// using a first-fit strategy (only the first block of each freelist is ever
// considered though). Performance is expected to be better than
// FreeListLegacy, but memory usage should be higher (because fragmentation
// will probably be higher).
class V8_EXPORT_PRIVATE FreeListFastAlloc : public FreeList {
 public:
  size_t GuaranteedAllocatable(size_t maximum_freed) override {
    if (maximum_freed <= kMediumListMax) {
      // Since we are not iterating over all list entries, we cannot guarantee
      // that we can find the maximum freed block in that free list.
      return 0;
    } else if (maximum_freed <= kLargeListMax) {
      return kLargeAllocationMax;
    }
    return kHugeAllocationMax;
  }

  inline Page* GetPageForSize(size_t size_in_bytes) override;

  FreeListFastAlloc();
  ~FreeListFastAlloc();

  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size,
                                           AllocationOrigin origin) override;

 private:
  enum { kMedium, kLarge, kHuge };

  static const size_t kMinBlockSize = 0xff * kTaggedSize;

  // This is a conservative upper bound. The actual maximum block size takes
  // padding and alignment of data and code pages into account.
  static const size_t kMaxBlockSize = Page::kPageSize;

  static const size_t kMediumListMax = 0x7ff * kTaggedSize;
  static const size_t kLargeListMax = 0x1fff * kTaggedSize;
  static const size_t kMediumAllocationMax = kMinBlockSize;
  static const size_t kLargeAllocationMax = kMediumListMax;
  static const size_t kHugeAllocationMax = kLargeListMax;

  // Returns the category used to hold an object of size |size_in_bytes|.
  FreeListCategoryType SelectFreeListCategoryType(
      size_t size_in_bytes) override {
    if (size_in_bytes <= kMediumListMax) {
      return kMedium;
    } else if (size_in_bytes <= kLargeListMax) {
      return kLarge;
    }
    return kHuge;
  }
};

// Uses 24 free lists: one per 16 bytes between 24 and 256 bytes, and then a
// few more for larger sizes. See the variable |categories_min| for the size of
// each free list.  Allocation is done using a best-fit strategy (considering
// only the first element of each category though).
// Performance is expected to be worse than FreeListLegacy, but memory
// consumption should be lower (since fragmentation should be lower).
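//
// For example (illustrative): with the |categories_min| boundaries below, a
// 40-byte request maps to the 32-byte category and a 1000-byte request maps
// to the 512-byte category; sizes up to kPreciseCategoryMaxSize are mapped in
// constant time via (size_in_bytes >> 4) - 1.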
class V8_EXPORT_PRIVATE FreeListMany : public FreeList {
 public:
  size_t GuaranteedAllocatable(size_t maximum_freed) override;

  Page* GetPageForSize(size_t size_in_bytes) override;

  FreeListMany();
  ~FreeListMany();

  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size,
                                           AllocationOrigin origin) override;

 protected:
  static const size_t kMinBlockSize = 3 * kTaggedSize;

  // This is a conservative upper bound. The actual maximum block size takes
  // padding and alignment of data and code pages into account.
  static const size_t kMaxBlockSize = Page::kPageSize;
  // Largest size for which categories are still precise, and for which we can
  // therefore compute the category in constant time.
  static const size_t kPreciseCategoryMaxSize = 256;

  // Categories boundaries generated with:
  // perl -E '
  //      @cat = (24, map {$_*16} 2..16, 48, 64);
  //      while ($cat[-1] <= 32768) {
  //        push @cat, $cat[-1]*2
  //      }
  //      say join ", ", @cat;
  //      say "\n", scalar @cat'
  static const int kNumberOfCategories = 24;
  static constexpr unsigned int categories_min[kNumberOfCategories] = {
      24,  32,  48,  64,  80,  96,   112,  128,  144,  160,   176,   192,
      208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};

  // Return the smallest category that could hold |size_in_bytes| bytes.
  FreeListCategoryType SelectFreeListCategoryType(
      size_t size_in_bytes) override {
    if (size_in_bytes <= kPreciseCategoryMaxSize) {
      if (size_in_bytes < categories_min[1]) return 0;
      return static_cast<FreeListCategoryType>(size_in_bytes >> 4) - 1;
    }
    for (int cat = (kPreciseCategoryMaxSize >> 4) - 1; cat < last_category_;
         cat++) {
      if (size_in_bytes < categories_min[cat + 1]) {
        return cat;
      }
    }
    return last_category_;
  }

  FRIEND_TEST(SpacesTest, FreeListManySelectFreeListCategoryType);
  FRIEND_TEST(SpacesTest, FreeListManyGuaranteedAllocatable);
};

// Same as FreeListMany but uses a cache to know which categories are empty.
// The cache (|next_nonempty_category|) is maintained in a way such that for
// each category c, next_nonempty_category[c] contains the first non-empty
// category greater than or equal to c that may hold an object of size c.
// Allocation is done using the same strategy as FreeListMany (i.e., best fit).
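//
// For instance (illustrative): if only categories 3 and 7 currently have
// entries, then next_nonempty_category[0..3] == 3,
// next_nonempty_category[4..7] == 7, and every entry above 7 points past
// last_category_ (i.e., "no suitable non-empty category").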
class V8_EXPORT_PRIVATE FreeListManyCached : public FreeListMany {
 public:
  FreeListManyCached();

  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size,
                                           AllocationOrigin origin) override;

  size_t Free(Address start, size_t size_in_bytes, FreeMode mode) override;

  void Reset() override;

  bool AddCategory(FreeListCategory* category) override;
  void RemoveCategory(FreeListCategory* category) override;

 protected:
  // Updates the cache after adding something in the category |cat|.
  void UpdateCacheAfterAddition(FreeListCategoryType cat) {
    for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] > cat;
         i--) {
      next_nonempty_category[i] = cat;
    }
  }

  // Updates the cache after emptying category |cat|.
  void UpdateCacheAfterRemoval(FreeListCategoryType cat) {
    for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] == cat;
         i--) {
      next_nonempty_category[i] = next_nonempty_category[cat + 1];
    }
  }

#ifdef DEBUG
  void CheckCacheIntegrity() {
    for (int i = 0; i <= last_category_; i++) {
      DCHECK(next_nonempty_category[i] == last_category_ + 1 ||
             categories_[next_nonempty_category[i]] != nullptr);
      for (int j = i; j < next_nonempty_category[i]; j++) {
        DCHECK(categories_[j] == nullptr);
      }
    }
  }
#endif

  // The cache is overallocated by one so that the last element is always
  // defined, and when updating the cache, we can always use cache[i+1] as long
  // as i is < kNumberOfCategories.
  int next_nonempty_category[kNumberOfCategories + 1];

 private:
  void ResetCache() {
    for (int i = 0; i < kNumberOfCategories; i++) {
      next_nonempty_category[i] = kNumberOfCategories;
    }
    // Setting the after-last element as well, as explained in the cache's
    // declaration.
    next_nonempty_category[kNumberOfCategories] = kNumberOfCategories;
  }
};

// Same as FreeListManyCached but uses a fast path.
// The fast path overallocates by at least 1.85k bytes. The idea of this 1.85k
// is: we want the fast path to always overallocate, even for larger
// categories. Therefore, we have two choices: either overallocate by
// "size_in_bytes * something" or overallocate by "size_in_bytes +
// something". We choose the latter, as the former will tend to overallocate
// too much for larger objects. The 1.85k (= 2048 - 128) has been chosen such
// that for tiny objects (size <= 128 bytes), the first category considered is
// the 18th (which holds objects of 2k to 4k), while for larger objects, the
// first category considered will be one that guarantees a 1.85k+ bytes
// overallocation. Using 2k rather than 1.85k would have resulted in either a
// more complex logic for SelectFastAllocationFreeListCategoryType, or the 18th
// category (2k to 4k) not being used; both of which are undesirable.
// A secondary fast path is used for tiny objects (size <= 128), in order to
// consider categories from 256 to 2048 bytes for them.
// Note that this class uses a precise GetPageForSize (inherited from
// FreeListMany), which makes its fast path less fast in the Scavenger. This is
// done on purpose, since this class's only purpose is to be used by
// FreeListManyCachedOrigin, which is precise for the scavenger.
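//
// Worked example (illustrative): a 300-byte request becomes 300 + 1920 =
// 2220 bytes after adding kFastPathOffset and therefore first considers the
// 4096-byte category, guaranteeing well over 1.85k bytes of overallocation;
// a 64-byte request first considers the 2048-byte category and, failing
// that, the secondary tiny-object path starting at the 256-byte category.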
class V8_EXPORT_PRIVATE FreeListManyCachedFastPath : public FreeListManyCached {
 public:
  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size,
                                           AllocationOrigin origin) override;

 protected:
  // Objects in the 18th category are at least 2048 bytes
  static const FreeListCategoryType kFastPathFirstCategory = 18;
  static const size_t kFastPathStart = 2048;
  static const size_t kTinyObjectMaxSize = 128;
  static const size_t kFastPathOffset = kFastPathStart - kTinyObjectMaxSize;
  // Objects in the 15th category are at least 256 bytes
  static const FreeListCategoryType kFastPathFallBackTiny = 15;

  STATIC_ASSERT(categories_min[kFastPathFirstCategory] == kFastPathStart);
  STATIC_ASSERT(categories_min[kFastPathFallBackTiny] ==
                kTinyObjectMaxSize * 2);

  FreeListCategoryType SelectFastAllocationFreeListCategoryType(
      size_t size_in_bytes) {
    DCHECK(size_in_bytes < kMaxBlockSize);

    if (size_in_bytes >= categories_min[last_category_]) return last_category_;

    size_in_bytes += kFastPathOffset;
    for (int cat = kFastPathFirstCategory; cat < last_category_; cat++) {
      if (size_in_bytes <= categories_min[cat]) {
        return cat;
      }
    }
    return last_category_;
  }

  FRIEND_TEST(
      SpacesTest,
      FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType);
};

// Uses FreeListManyCached if in the GC; FreeListManyCachedFastPath otherwise.
// The reasoning behind this FreeList is the following: the GC runs in
// parallel, and therefore, more expensive allocations there are less
// noticeable. On the other hand, the generated code and runtime need to be
// very fast. Therefore, the strategy for the former is one that is not very
// efficient, but reduces fragmentation (FreeListManyCached), while the
// strategy for the latter is one that is very efficient, but introduces some
// fragmentation (FreeListManyCachedFastPath).
class V8_EXPORT_PRIVATE FreeListManyCachedOrigin
    : public FreeListManyCachedFastPath {
 public:
  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size,
                                           AllocationOrigin origin) override;
};

// FreeList for maps: since maps are all the same size, uses a single freelist.
class V8_EXPORT_PRIVATE FreeListMap : public FreeList {
 public:
  size_t GuaranteedAllocatable(size_t maximum_freed) override;

  Page* GetPageForSize(size_t size_in_bytes) override;

  FreeListMap();
  ~FreeListMap();

  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size,
                                           AllocationOrigin origin) override;

 private:
  static const size_t kMinBlockSize = Map::kSize;
  static const size_t kMaxBlockSize = Page::kPageSize;
  static const FreeListCategoryType kOnlyCategory = 0;

  FreeListCategoryType SelectFreeListCategoryType(
      size_t size_in_bytes) override {
    return kOnlyCategory;
  }
};

// LocalAllocationBuffer represents a linear allocation area that is created
// from a given {AllocationResult} and can be used to allocate memory without
// synchronization.
//
// The buffer is properly closed upon destruction and reassignment.
// Example:
//   {
//     AllocationResult result = ...;
//     LocalAllocationBuffer a(heap, result, size);
//     LocalAllocationBuffer b = a;
//     CHECK(!a.IsValid());
//     CHECK(b.IsValid());
//     // {a} is invalid now and cannot be used for further allocations.
//   }
//   // Since {b} went out of scope, the LAB is closed, resulting in creating a
//   // filler object for the remaining area.
class LocalAllocationBuffer {
 public:
  // Indicates that a buffer cannot be used for allocations anymore. Can result
  // from either reassigning a buffer, or trying to construct it from an
  // invalid {AllocationResult}.
  static LocalAllocationBuffer InvalidBuffer() {
    return LocalAllocationBuffer(
        nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
  }

  // Creates a new LAB from a given {AllocationResult}. Results in
  // InvalidBuffer if the result indicates a retry.
  static inline LocalAllocationBuffer FromResult(Heap* heap,
                                                 AllocationResult result,
                                                 intptr_t size);

  ~LocalAllocationBuffer() { Close(); }

  // Convert to C++11 move-semantics once allowed by the style guide.
  LocalAllocationBuffer(const LocalAllocationBuffer& other) V8_NOEXCEPT;
  LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other)
      V8_NOEXCEPT;

  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment);

  inline bool IsValid() { return allocation_info_.top() != kNullAddress; }

  // Try to merge LABs, which is only possible when they are adjacent in memory.
  // Returns true if the merge was successful, false otherwise.
  inline bool TryMerge(LocalAllocationBuffer* other);

  inline bool TryFreeLast(HeapObject object, int object_size);

  // Close a LAB, effectively invalidating it. Returns the unused area.
  V8_EXPORT_PRIVATE LinearAllocationArea Close();

 private:
  V8_EXPORT_PRIVATE LocalAllocationBuffer(
      Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT;

  Heap* heap_;
  LinearAllocationArea allocation_info_;
};

class SpaceWithLinearArea : public Space {
 public:
  SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list)
      : Space(heap, id, free_list), top_on_previous_step_(0) {
    allocation_info_.Reset(kNullAddress, kNullAddress);
  }

  virtual bool SupportsInlineAllocation() = 0;

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top(); }
  Address limit() { return allocation_info_.limit(); }

  // The allocation top address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  V8_EXPORT_PRIVATE void AddAllocationObserver(
      AllocationObserver* observer) override;
  V8_EXPORT_PRIVATE void RemoveAllocationObserver(
      AllocationObserver* observer) override;
  V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
  V8_EXPORT_PRIVATE void PauseAllocationObservers() override;

  // When allocation observers are active we may use a lower limit to allow the
  // observers to 'interrupt' earlier than the natural limit. Given a linear
  // area bounded by [start, end), this function computes the limit to use to
  // allow proper observation based on existing observers. min_size specifies
  // the minimum size that the limited area should have.
  Address ComputeLimit(Address start, Address end, size_t min_size);
  V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
      size_t min_size) = 0;

  V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);

  void PrintAllocationsOrigins();

 protected:
  // If we are doing inline allocation in steps, this method performs the
  // 'step' operation. top is the memory address of the bump pointer at the
  // last inline allocation (i.e. it determines the number of bytes actually
  // allocated since the last step.) top_for_next_step is the address of the
  // bump pointer where the next byte is going to be allocated from. top and
  // top_for_next_step may be different when we cross a page boundary or reset
  // the space.
  // TODO(ofrobots): clarify the precise difference between this and
  // Space::AllocationStep.
  void InlineAllocationStep(Address top, Address top_for_next_step,
                            Address soon_object, size_t size);
  V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;

  // TODO(ofrobots): make these private after refactoring is complete.
  LinearAllocationArea allocation_info_;
  Address top_on_previous_step_;

  size_t allocations_origins_[static_cast<int>(
      AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};

class V8_EXPORT_PRIVATE PagedSpace
    : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
 public:
  using iterator = PageIterator;

  static const size_t kCompactionMemoryWanted = 500 * KB;

  // Creates a space with an id.
  PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
             FreeList* free_list,
             LocalSpaceKind local_space_kind = LocalSpaceKind::kNone);

  ~PagedSpace() override { TearDown(); }

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  inline bool Contains(Object o);
  bool ContainsSlow(Address addr);

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Prepares for a mark-compact GC.
  void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  size_t Capacity() { return accounting_stats_.Capacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  // Sets the capacity, the available space and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the
  // capacity and the size when it is encountered.  As free spaces are
  // discovered during the sweeping they are subtracted from the size and added
  // to the available and wasted totals. The free list is cleared as well.
  void ClearAllocatorState() {
    accounting_stats_.ClearSize();
    free_list_->Reset();
  }

  // Available bytes without growing.  These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation.  New pages are
  // immediately added to the free list so they show up here.
  size_t Available() override { return free_list_->Available(); }

  // Allocated bytes in this space.  Garbage bytes that were not found due to
  // concurrent sweeping are counted as being allocated!  The bytes in the
  // current linear allocation area (between top and limit) are also counted
  // here.
  size_t Size() override { return accounting_stats_.Size(); }

  // As size, but the bytes in lazily swept pages are estimated and the bytes
  // in the current linear allocation area are not included.
  size_t SizeOfObjects() override;

  // Wasted bytes in this space.  These are just the bytes that were thrown away
  // due to being too small to use for allocation.
  virtual size_t Waste() { return free_list_->wasted_bytes(); }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);

  // Allocate the requested number of bytes in the space double-aligned if
  // possible, return a failure object if not.
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment,
      AllocationOrigin origin = AllocationOrigin::kRuntime);

  // Allocate the requested number of bytes in the space and consider allocation
  // alignment if needed.
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationAlignment alignment,
      AllocationOrigin origin = AllocationOrigin::kRuntime);

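  // Illustrative usage of Free() below (sketch): when a caller hands back an
  // unused block, e.g.
  //   space->Free(start, size_in_bytes, SpaceAccountingMode::kSpaceAccounted);
  // the block is turned into a filler object and returned to the free list,
  // and the accounted mode additionally updates the space's allocation stats;
  // the other mode leaves the accounting stats untouched.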
  size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
    if (size_in_bytes == 0) return 0;
    heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
                                 ClearRecordedSlots::kNo);
    if (mode == SpaceAccountingMode::kSpaceAccounted) {
      return AccountedFree(start, size_in_bytes);
    } else {
      return UnaccountedFree(start, size_in_bytes);
    }
  }

  // Give a block of memory to the space's free list.  It might be added to
  // the free list or accounted as waste.
  // AccountedFree() additionally updates the space's accounting stats, while
  // UnaccountedFree() leaves them untouched.
  size_t AccountedFree(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
    Page* page = Page::FromAddress(start);
    accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }

  size_t UnaccountedFree(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }

  inline bool TryFreeLast(HeapObject object, int object_size);

  void ResetFreeList();

  // Empty space linear allocation area, returning unused area to free list.
  void FreeLinearAllocationArea();

  void MarkLinearAllocationAreaBlack();
  void UnmarkLinearAllocationArea();

  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.DecreaseAllocatedBytes(bytes, page);
  }
  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.IncreaseAllocatedBytes(bytes, page);
  }
  void DecreaseCapacity(size_t bytes) {
    accounting_stats_.DecreaseCapacity(bytes);
  }
  void IncreaseCapacity(size_t bytes) {
    accounting_stats_.IncreaseCapacity(bytes);
  }

  void RefineAllocatedBytesAfterSweeping(Page* page);

  Page* InitializePage(MemoryChunk* chunk);

  void ReleasePage(Page* page);

  // Adds the page to this space and returns the number of bytes added to the
  // free list of the space.
  size_t AddPage(Page* page);
  void RemovePage(Page* page);
  // Remove a page if it has at least |size_in_bytes| bytes available that can
  // be used for allocation.
  Page* RemovePageSafe(int size_in_bytes);

  void SetReadable();
  void SetReadAndExecutable();
  void SetReadAndWritable();

  void SetDefaultCodePermissions() {
    if (FLAG_jitless) {
      SetReadable();
    } else {
      SetReadAndExecutable();
    }
  }

#ifdef VERIFY_HEAP
  // Verify integrity of this space.
  virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);

  void VerifyLiveBytes();

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject obj) {}
#endif

#ifdef DEBUG
  void VerifyCountersAfterSweeping(Heap* heap);
  void VerifyCountersBeforeConcurrentSweeping();
  // Print meta info and objects in this space.
  void Print() override;

  // Report code object related statistics
  static void ReportCodeStatistics(Isolate* isolate);
  static void ResetCodeStatistics(Isolate* isolate);
#endif

  bool CanExpand(size_t size);

  // Returns the number of total pages in this space.
  int CountTotalPages();

  // Return size of allocatable area on a page in this space.
  inline int AreaSize() { return static_cast<int>(area_size_); }

  bool is_local_space() { return local_space_kind_ != LocalSpaceKind::kNone; }

  bool is_off_thread_space() {
    return local_space_kind_ == LocalSpaceKind::kOffThreadSpace;
  }

  bool is_compaction_space() {
    return base::IsInRange(local_space_kind_,
                           LocalSpaceKind::kFirstCompactionSpace,
                           LocalSpaceKind::kLastCompactionSpace);
  }

  LocalSpaceKind local_space_kind() { return local_space_kind_; }

  // Merges {other} into the current space. Note that this modifies {other},
  // e.g., removes its bump pointer area and resets statistics.
  void MergeLocalSpace(LocalSpace* other);

  // Refills the free list from the corresponding free list filled by the
  // sweeper.
  virtual void RefillFreeList();

  base::Mutex* mutex() { return &space_mutex_; }

  inline void UnlinkFreeListCategories(Page* page);
  inline size_t RelinkFreeListCategories(Page* page);

  Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }

  // Shrink immortal immovable pages of the space to be exactly the size needed
  // using the high water mark.
  void ShrinkImmortalImmovablePages();

  size_t ShrinkPageToHighWaterMark(Page* page);

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

  void SetLinearAllocationArea(Address top, Address limit);

 private:
  // Set space linear allocation area.
  void SetTopAndLimit(Address top, Address limit) {
    DCHECK(top == limit ||
           Page::FromAddress(top) == Page::FromAddress(limit - 1));
    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    allocation_info_.Reset(top, limit);
  }
  void DecreaseLimit(Address new_limit);
  void UpdateInlineAllocationLimit(size_t min_size) override;
  bool SupportsInlineAllocation() override {
    return identity() == OLD_SPACE && !is_local_space();
  }

 protected:
  // PagedSpaces that should be included in snapshots have different, i.e.,
  // smaller, initial pages.
  virtual bool snapshotable() { return true; }

  bool HasPages() { return first_page() != nullptr; }

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Expands the space by allocating a fixed number of pages. Returns false if
  // it cannot allocate requested number of pages from OS, or if the hard heap
  // size limit has been hit.
  bool Expand();

  // Sets up a linear allocation area that fits the given number of bytes.
  // Returns false if there is not enough space and the caller has to retry
  // after collecting garbage.
  inline bool EnsureLinearAllocationArea(int size_in_bytes,
                                         AllocationOrigin origin);
  // Allocates an object from the linear allocation area. Assumes that the
  // linear allocation area is large enough to fit the object.
  inline HeapObject AllocateLinearly(int size_in_bytes);
  // Tries to allocate an aligned object from the linear allocation area.
  // Returns nullptr if the linear allocation area does not fit the object.
  // Otherwise, returns the object pointer and writes the allocation size
  // (object size + alignment filler size) to the size_in_bytes.
  inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
                                               AllocationAlignment alignment);

  V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
      size_t size_in_bytes, AllocationOrigin origin);

  // If sweeping is still in progress try to sweep unswept pages. If that is
  // not successful, wait for the sweeper threads and retry free-list
  // allocation. Returns false if there is not enough space and the caller
  // has to retry after collecting garbage.
  V8_WARN_UNUSED_RESULT bool EnsureSweptAndRetryAllocation(
      int size_in_bytes, AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(int required_freed_bytes,
                                                     int max_pages,
                                                     int size_in_bytes,
                                                     AllocationOrigin origin);

  // Slow path of AllocateRaw. This function is space-dependent. Returns false
  // if there is not enough space and the caller has to retry after
  // collecting garbage.
  V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
      int size_in_bytes, AllocationOrigin origin);

  // Implementation of SlowAllocateRaw. Returns false if there is not enough
  // space and the caller has to retry after collecting garbage.
  V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
      int size_in_bytes, AllocationOrigin origin);

  Executability executable_;

  LocalSpaceKind local_space_kind_;

  size_t area_size_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // Mutex guarding any concurrent access to the space.
  base::Mutex space_mutex_;

  friend class IncrementalMarking;
  friend class MarkCompactCollector;

  // Used in cctest.
  friend class heap::HeapTester;
};
2597 
2598 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
2599 
2600 // -----------------------------------------------------------------------------
2601 // SemiSpace in young generation
2602 //
2603 // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
2604 // The mark-compact collector uses the memory of the first page in the from
2605 // space as a marking stack when tracing live objects.
2606 class SemiSpace : public Space {
2607  public:
2608   using iterator = PageIterator;
2609 
2610   static void Swap(SemiSpace* from, SemiSpace* to);
2611 
2612   SemiSpace(Heap* heap, SemiSpaceId semispace)
2613       : Space(heap, NEW_SPACE, new NoFreeList()),
2614         current_capacity_(0),
2615         maximum_capacity_(0),
2616         minimum_capacity_(0),
2617         age_mark_(kNullAddress),
2618         committed_(false),
2619         id_(semispace),
2620         current_page_(nullptr),
2621         pages_used_(0) {}
2622 
2623   inline bool Contains(HeapObject o);
2624   inline bool Contains(Object o);
2625   inline bool ContainsSlow(Address a);
2626 
2627   void SetUp(size_t initial_capacity, size_t maximum_capacity);
2628   void TearDown();
2629 
2630   bool Commit();
2631   bool Uncommit();
2632   bool is_committed() { return committed_; }
2633 
2634   // Grow the semispace to the new capacity.  The new capacity requested must
2635   // be larger than the current capacity and less than the maximum capacity.
2636   bool GrowTo(size_t new_capacity);
2637 
2638   // Shrinks the semispace to the new capacity.  The new capacity requested
2639   // must be more than the amount of used memory in the semispace and less
2640   // than the current capacity.
2641   bool ShrinkTo(size_t new_capacity);
2642 
2643   bool EnsureCurrentCapacity();
2644 
2645   Address space_end() { return memory_chunk_list_.back()->area_end(); }
2646 
2647   // Returns the start address of the first page of the space.
2648   Address space_start() {
2649     DCHECK_NE(memory_chunk_list_.front(), nullptr);
2650     return memory_chunk_list_.front()->area_start();
2651   }
2652 
2653   Page* current_page() { return current_page_; }
2654   int pages_used() { return pages_used_; }
2655 
2656   // Returns the start address of the current page of the space.
2657   Address page_low() { return current_page_->area_start(); }
2658 
2659   // Returns one past the end address of the current page of the space.
2660   Address page_high() { return current_page_->area_end(); }
2661 
2662   bool AdvancePage() {
2663     Page* next_page = current_page_->next_page();
2664     // We cannot expand if we reached the maximum number of pages already. Note
2665     // that we need to account for the next page already for this check as we
2666     // could potentially fill the whole page after advancing.
2667     const bool reached_max_pages = (pages_used_ + 1) == max_pages();
2668     if (next_page == nullptr || reached_max_pages) {
2669       return false;
2670     }
2671     current_page_ = next_page;
2672     pages_used_++;
2673     return true;
2674   }
2675 
2676   // Resets the space to using the first page.
2677   void Reset();
2678 
2679   void RemovePage(Page* page);
2680   void PrependPage(Page* page);
2681 
2682   Page* InitializePage(MemoryChunk* chunk);
2683 
2684   // Age mark accessors.
2685   Address age_mark() { return age_mark_; }
2686   void set_age_mark(Address mark);
2687 
2688   // Returns the current capacity of the semispace.
2689   size_t current_capacity() { return current_capacity_; }
2690 
2691   // Returns the maximum capacity of the semispace.
2692   size_t maximum_capacity() { return maximum_capacity_; }
2693 
2694   // Returns the initial capacity of the semispace.
2695   size_t minimum_capacity() { return minimum_capacity_; }
2696 
2697   SemiSpaceId id() { return id_; }
2698 
2699   // Approximate amount of physical memory committed for this space.
2700   size_t CommittedPhysicalMemory() override;
2701 
2702   // If we don't have these here then SemiSpace will be abstract. However,
2703   // they should never be called:
2704 
2705   size_t Size() override { UNREACHABLE(); }
2706 
2707   size_t SizeOfObjects() override { return Size(); }
2708 
2709   size_t Available() override { UNREACHABLE(); }
2710 
2711   Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
2712   Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
2713 
2714   iterator begin() { return iterator(first_page()); }
2715   iterator end() { return iterator(nullptr); }
2716 
2717   std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
2718 
2719 #ifdef DEBUG
2720   V8_EXPORT_PRIVATE void Print() override;
2721   // Validate a range of addresses in a SemiSpace.
2722   // The "from" address must be on a page prior to the "to" address,
2723   // in the linked page order, or it must be earlier on the same page.
2724   static void AssertValidRange(Address from, Address to);
2725 #else
2726   // Do nothing.
2727   inline static void AssertValidRange(Address from, Address to) {}
2728 #endif
2729 
2730 #ifdef VERIFY_HEAP
2731   virtual void Verify();
2732 #endif
2733 
2734  private:
2735   void RewindPages(int num_pages);
2736 
2737   inline int max_pages() {
2738     return static_cast<int>(current_capacity_ / Page::kPageSize);
2739   }
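  // Example (illustrative): with current_capacity_ equal to two pages,
  // max_pages() returns 2, so AdvancePage() refuses to move past the second
  // page even if further pages are linked into the space.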
2740 
2741   // Copies the flags into the masked positions on all pages in the space.
2742   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
2743 
2744   // The currently committed space capacity.
2745   size_t current_capacity_;
2746 
2747   // The maximum capacity that can be used by this space. A space cannot grow
2748   // beyond that size.
2749   size_t maximum_capacity_;
2750 
2751   // The minimum capacity for the space. A space cannot shrink below this size.
2752   size_t minimum_capacity_;
2753 
2754   // Used to govern object promotion during mark-compact collection.
2755   Address age_mark_;
2756 
2757   bool committed_;
2758   SemiSpaceId id_;
2759 
2760   Page* current_page_;
2761 
2762   int pages_used_;
2763 
2764   friend class NewSpace;
2765   friend class SemiSpaceObjectIterator;
2766 };
2767 
2768 // A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
2769 // semispace of the heap's new space.  It iterates over the objects in the
2770 // semispace from a given start address (defaulting to the bottom of the
2771 // semispace) to the top of the semispace.  New objects allocated after the
2772 // iterator is created are not iterated.
2773 class SemiSpaceObjectIterator : public ObjectIterator {
2774  public:
2775   // Create an iterator over the allocated objects in the given to-space.
2776   explicit SemiSpaceObjectIterator(NewSpace* space);
2777 
2778   inline HeapObject Next() override;
2779 
2780  private:
2781   void Initialize(Address start, Address end);
2782 
2783   // The current iteration point.
2784   Address current_;
2785   // The end of iteration.
2786   Address limit_;
2787 };
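// Illustrative usage of the iterator above (a sketch, not code from this
// file; `heap->new_space()` is assumed to return the NewSpace of interest):
//
//   SemiSpaceObjectIterator it(heap->new_space());
//   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
//     // ... visit obj ...
//   }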
2788 
2789 // -----------------------------------------------------------------------------
2790 // The young generation space.
2791 //
2792 // The new space consists of a contiguous pair of semispaces.  It simply
2793 // forwards most functions to the appropriate semispace.
2794 
2795 class V8_EXPORT_PRIVATE NewSpace
2796     : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
2797  public:
2798   using iterator = PageIterator;
2799 
2800   NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
2801            size_t initial_semispace_capacity, size_t max_semispace_capacity);
2802 
2803   ~NewSpace() override { TearDown(); }
2804 
2805   inline bool ContainsSlow(Address a);
2806   inline bool Contains(Object o);
2807   inline bool Contains(HeapObject o);
2808 
2809   // Tears down the space.  Heap memory was not allocated by the space, so it
2810   // is not deallocated here.
2811   void TearDown();
2812 
2813   // Flip the pair of spaces.
2814   void Flip();
2815 
2816   // Grow the capacity of the semispaces.  Assumes that they are not at
2817   // their maximum capacity.
2818   void Grow();
2819 
2820   // Shrink the capacity of the semispaces.
2821   void Shrink();
2822 
2823   // Return the allocated bytes in the active semispace.
2824   size_t Size() final {
2825     DCHECK_GE(top(), to_space_.page_low());
2826     return to_space_.pages_used() *
2827                MemoryChunkLayout::AllocatableMemoryInDataPage() +
2828            static_cast<size_t>(top() - to_space_.page_low());
2829   }
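  // Worked example (illustrative): with two fully used pages behind the
  // current allocation page and top() sitting 4 KB past that page's
  // area_start(), Size() is
  // 2 * MemoryChunkLayout::AllocatableMemoryInDataPage() + 4 KB.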
2830 
2831   size_t SizeOfObjects() final { return Size(); }
2832 
2833   // Return the allocatable capacity of a semispace.
2834   size_t Capacity() {
2835     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2836     return (to_space_.current_capacity() / Page::kPageSize) *
2837            MemoryChunkLayout::AllocatableMemoryInDataPage();
2838   }
2839 
2840   // Return the current size of a semispace, allocatable and non-allocatable
2841   // memory.
2842   size_t TotalCapacity() {
2843     DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2844     return to_space_.current_capacity();
2845   }
2846 
2847   // Committed memory for NewSpace is the committed memory of both semi-spaces
2848   // combined.
2849   size_t CommittedMemory() final {
2850     return from_space_.CommittedMemory() + to_space_.CommittedMemory();
2851   }
2852 
2853   size_t MaximumCommittedMemory() final {
2854     return from_space_.MaximumCommittedMemory() +
2855            to_space_.MaximumCommittedMemory();
2856   }
2857 
2858   // Approximate amount of physical memory committed for this space.
2859   size_t CommittedPhysicalMemory() final;
2860 
2861   // Return the available bytes without growing.
2862   size_t Available() final {
2863     DCHECK_GE(Capacity(), Size());
2864     return Capacity() - Size();
2865   }
2866 
2867   size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
2868     if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
2869         type == ExternalBackingStoreType::kArrayBuffer)
2870       return heap()->YoungArrayBufferBytes();
2871     DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
2872     return to_space_.ExternalBackingStoreBytes(type);
2873   }
2874 
2875   size_t ExternalBackingStoreBytes() {
2876     size_t result = 0;
2877     for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
2878       result +=
2879           ExternalBackingStoreBytes(static_cast<ExternalBackingStoreType>(i));
2880     }
2881     return result;
2882   }
2883 
2884   size_t AllocatedSinceLastGC() {
2885     const Address age_mark = to_space_.age_mark();
2886     DCHECK_NE(age_mark, kNullAddress);
2887     DCHECK_NE(top(), kNullAddress);
2888     Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
2889     Page* const last_page = Page::FromAllocationAreaAddress(top());
2890     Page* current_page = age_mark_page;
2891     size_t allocated = 0;
2892     if (current_page != last_page) {
2893       DCHECK_EQ(current_page, age_mark_page);
2894       DCHECK_GE(age_mark_page->area_end(), age_mark);
2895       allocated += age_mark_page->area_end() - age_mark;
2896       current_page = current_page->next_page();
2897     } else {
2898       DCHECK_GE(top(), age_mark);
2899       return top() - age_mark;
2900     }
2901     while (current_page != last_page) {
2902       DCHECK_NE(current_page, age_mark_page);
2903       allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
2904       current_page = current_page->next_page();
2905     }
2906     DCHECK_GE(top(), current_page->area_start());
2907     allocated += top() - current_page->area_start();
2908     DCHECK_LE(allocated, Size());
2909     return allocated;
2910   }
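  // Worked example (illustrative): if the age mark sits 1 KB before the end
  // of its page, exactly one full page lies between that page and the page
  // containing top(), and top() is 2 KB past that last page's area_start(),
  // the result is
  // 1 KB + MemoryChunkLayout::AllocatableMemoryInDataPage() + 2 KB.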
2911 
2912   void MovePageFromSpaceToSpace(Page* page) {
2913     DCHECK(page->IsFromPage());
2914     from_space_.RemovePage(page);
2915     to_space_.PrependPage(page);
2916   }
2917 
2918   bool Rebalance();
2919 
2920   // Return the maximum capacity of a semispace.
2921   size_t MaximumCapacity() {
2922     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
2923     return to_space_.maximum_capacity();
2924   }
2925 
2926   bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2927 
2928   // Returns the initial capacity of a semispace.
2929   size_t InitialTotalCapacity() {
2930     DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
2931     return to_space_.minimum_capacity();
2932   }
2933 
2934   void ResetOriginalTop() {
2935     DCHECK_GE(top(), original_top_);
2936     DCHECK_LE(top(), original_limit_);
2937     original_top_.store(top(), std::memory_order_release);
2938   }
2939 
2940   Address original_top_acquire() {
2941     return original_top_.load(std::memory_order_acquire);
2942   }
2943   Address original_limit_relaxed() {
2944     return original_limit_.load(std::memory_order_relaxed);
2945   }
2946 
2947   // Returns the first allocatable address in the active semispace. This may
2948   // be the address where the first object resides.
2949   Address first_allocatable_address() { return to_space_.space_start(); }
2950 
2951   // Get the age mark of the inactive semispace.
2952   Address age_mark() { return from_space_.age_mark(); }
2953   // Set the age mark in the active semispace.
2954   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2955 
2956   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2957   AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
2958                      AllocationOrigin origin = AllocationOrigin::kRuntime);
2959 
2960   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
2961       int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
2962 
2963   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2964   AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
2965               AllocationOrigin origin = AllocationOrigin::kRuntime);
2966 
2967   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
2968       int size_in_bytes, AllocationAlignment alignment,
2969       AllocationOrigin origin = AllocationOrigin::kRuntime);
2970 
2971   // Reset the allocation pointer to the beginning of the active semispace.
2972   void ResetLinearAllocationArea();
2973 
2974   // When inline allocation stepping is active, either because of incremental
2975   // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
2976   // inline allocation every once in a while. This is done by setting
2977   // allocation_info_.limit to be lower than the actual limit and increasing
2978   // it in steps to guarantee that the observers are notified periodically.
2979   void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
2980 
2981   inline bool ToSpaceContainsSlow(Address a);
2982   inline bool ToSpaceContains(Object o);
2983   inline bool FromSpaceContains(Object o);
2984 
2985   // Try to switch the active semispace to a new, empty page.
2986   // Returns false if this isn't possible or reasonable (i.e., there
2987   // are no pages, or the current page is already empty), or true
2988   // if successful.
2989   bool AddFreshPage();
2990   bool AddFreshPageSynchronized();
2991 
2992 #ifdef VERIFY_HEAP
2993   // Verify the active semispace.
2994   virtual void Verify(Isolate* isolate);
2995 #endif
2996 
2997 #ifdef DEBUG
2998   // Print the active semispace.
2999   void Print() override { to_space_.Print(); }
3000 #endif
3001 
3002   // Return whether the operation succeeded.
3003   bool CommitFromSpaceIfNeeded() {
3004     if (from_space_.is_committed()) return true;
3005     return from_space_.Commit();
3006   }
3007 
3008   bool UncommitFromSpace() {
3009     if (!from_space_.is_committed()) return true;
3010     return from_space_.Uncommit();
3011   }
3012 
3013   bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
3014 
3015   SemiSpace* active_space() { return &to_space_; }
3016 
3017   Page* first_page() { return to_space_.first_page(); }
3018   Page* last_page() { return to_space_.last_page(); }
3019 
3020   iterator begin() { return to_space_.begin(); }
3021   iterator end() { return to_space_.end(); }
3022 
3023   std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
3024 
3025   SemiSpace& from_space() { return from_space_; }
3026   SemiSpace& to_space() { return to_space_; }
3027 
3028  private:
3029   // Update linear allocation area to match the current to-space page.
3030   void UpdateLinearAllocationArea();
3031 
3032   base::Mutex mutex_;
3033 
3034   // The top and the limit at the time of setting the linear allocation area.
3035   // These values can be accessed by background tasks.
3036   std::atomic<Address> original_top_;
3037   std::atomic<Address> original_limit_;
3038 
3039   // The semispaces.
3040   SemiSpace to_space_;
3041   SemiSpace from_space_;
3042   VirtualMemory reservation_;
3043 
3044   bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
3045   bool SupportsInlineAllocation() override { return true; }
3046 
3047   friend class SemiSpaceObjectIterator;
3048 };
3049 
3050 class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
3051  public:
3052   explicit PauseAllocationObserversScope(Heap* heap);
3053   ~PauseAllocationObserversScope();
3054 
3055  private:
3056   Heap* heap_;
3057   DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
3058 };
3059 
3060 // -----------------------------------------------------------------------------
3061 // Base class for compaction space and off-thread space.
3062 
3063 class V8_EXPORT_PRIVATE LocalSpace : public PagedSpace {
3064  public:
3065   LocalSpace(Heap* heap, AllocationSpace id, Executability executable,
3066              LocalSpaceKind local_space_kind)
3067       : PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
3068                    local_space_kind) {
3069     DCHECK_NE(local_space_kind, LocalSpaceKind::kNone);
3070   }
3071 
3072  protected:
3073   // The space is temporary and not included in any snapshots.
3074   bool snapshotable() override { return false; }
3075 };
3076 
3077 // -----------------------------------------------------------------------------
3078 // Compaction space that is used temporarily during compaction.
3079 
3080 class V8_EXPORT_PRIVATE CompactionSpace : public LocalSpace {
3081  public:
3082   CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
3083                   LocalSpaceKind local_space_kind)
3084       : LocalSpace(heap, id, executable, local_space_kind) {
3085     DCHECK(is_compaction_space());
3086   }
3087 
3088  protected:
3089   V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
3090       int size_in_bytes, AllocationOrigin origin) override;
3091 };
3092 
3093 // A collection of |CompactionSpace|s used by a single compaction task.
3094 class CompactionSpaceCollection : public Malloced {
3095  public:
3096   explicit CompactionSpaceCollection(Heap* heap,
3097                                      LocalSpaceKind local_space_kind)
3098       : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
3099                    local_space_kind),
3100         code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
3101                     local_space_kind) {}
3102 
3103   CompactionSpace* Get(AllocationSpace space) {
3104     switch (space) {
3105       case OLD_SPACE:
3106         return &old_space_;
3107       case CODE_SPACE:
3108         return &code_space_;
3109       default:
3110         UNREACHABLE();
3111     }
3112     UNREACHABLE();
3113   }
3114 
3115  private:
3116   CompactionSpace old_space_;
3117   CompactionSpace code_space_;
3118 };
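// Illustrative sketch of how a compaction task might use the collection above
// (not code from this file; the LocalSpaceKind enumerator shown is an
// assumption, since only kNone and kOffThreadSpace appear in this header):
//
//   CompactionSpaceCollection spaces(heap, LocalSpaceKind::kCompactionSpace);
//   CompactionSpace* old_space = spaces.Get(OLD_SPACE);
//   CompactionSpace* code_space = spaces.Get(CODE_SPACE);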
3119 
3120 // -----------------------------------------------------------------------------
3121 // Old generation regular object space.
3122 
3123 class OldSpace : public PagedSpace {
3124  public:
3125   // Creates an old space object. The constructor does not allocate pages
3126   // from the OS.
3127   explicit OldSpace(Heap* heap)
3128       : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
3129                    FreeList::CreateFreeList()) {}
3130 
3131   static bool IsAtPageStart(Address addr) {
3132     return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
3133            MemoryChunkLayout::ObjectStartOffsetInDataPage();
3134   }
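  // Example (illustrative): an address that is exactly
  // MemoryChunkLayout::ObjectStartOffsetInDataPage() bytes past a page
  // boundary has exactly that value in its kPageAlignmentMask bits, so
  // IsAtPageStart() returns true; any other in-page offset yields false.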
3135 
3136   size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
3137     if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
3138         type == ExternalBackingStoreType::kArrayBuffer)
3139       return heap()->OldArrayBufferBytes();
3140     return external_backing_store_bytes_[type];
3141   }
3142 };
3143 
3144 // -----------------------------------------------------------------------------
3145 // Old generation code object space.
3146 
3147 class CodeSpace : public PagedSpace {
3148  public:
3149   // Creates a code space object. The constructor does not allocate pages
3150   // from the OS.
3151   explicit CodeSpace(Heap* heap)
3152       : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
3153 };
3154 
3155 // For contiguous spaces, top should be in the space (or at the end) and limit
3156 // should be the end of the space.
3157 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
3158   SLOW_DCHECK((space).page_low() <= (info).top() &&   \
3159               (info).top() <= (space).page_high() &&  \
3160               (info).limit() <= (space).page_high())
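// Illustrative use of the macro above (an assumption about call sites, which
// live in the .cc/-inl.h files):
//
//   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
//
// i.e. the linear allocation area must lie entirely within the current
// to-space page.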
3161 
3162 // -----------------------------------------------------------------------------
3163 // Old space for all map objects
3164 
3165 class MapSpace : public PagedSpace {
3166  public:
3167   // Creates a map space object.
3168   explicit MapSpace(Heap* heap)
3169       : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, new FreeListMap()) {}
3170 
3171   int RoundSizeDownToObjectAlignment(int size) override {
3172     if (base::bits::IsPowerOfTwo(Map::kSize)) {
3173       return RoundDown(size, Map::kSize);
3174     } else {
3175       return (size / Map::kSize) * Map::kSize;
3176     }
3177   }
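  // Worked example (illustrative): if Map::kSize were 80 (not a power of
  // two), a size of 200 would be rounded down to (200 / 80) * 80 = 160; with
  // a power-of-two Map::kSize the same result comes from RoundDown().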
3178 
3179   void SortFreeList();
3180 
3181 #ifdef VERIFY_HEAP
3182   void VerifyObject(HeapObject obj) override;
3183 #endif
3184 };
3185 
3186 // -----------------------------------------------------------------------------
3187 // Off-thread space that is used for folded allocation on a different thread.
3188 
3189 class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
3190  public:
3191   explicit OffThreadSpace(Heap* heap)
3192       : LocalSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
3193                    LocalSpaceKind::kOffThreadSpace) {
3194 #ifdef V8_ENABLE_THIRD_PARTY_HEAP
3195     // OffThreadSpace doesn't work with third-party heap.
3196     UNREACHABLE();
3197 #endif
3198   }
3199 
3200  protected:
3201   V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
3202       int size_in_bytes, AllocationOrigin origin) override;
3203 
3204   void RefillFreeList() override;
3205 };
3206 
3207 // -----------------------------------------------------------------------------
3208 // Read-only space for all immortal, immovable and immutable objects
3209 
3210 class ReadOnlySpace : public PagedSpace {
3211  public:
3212   explicit ReadOnlySpace(Heap* heap);
3213 
3214   // TODO(v8:7464): Remove this once PagedSpace::Unseal no longer writes to
3215   // memory_chunk_list_.
3216   ~ReadOnlySpace() override { Unseal(); }
3217 
3218   bool writable() const { return !is_marked_read_only_; }
3219 
3220   bool Contains(Address a) = delete;
3221   bool Contains(Object o) = delete;
3222 
3223   V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
3224 
3225   enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
3226 
3227   // Seal the space by marking it read-only, optionally detaching it
3228   // from the heap and forgetting it for memory bookkeeping purposes (e.g.
3229   // prevent the space's memory from registering as leaked).
3230   void Seal(SealMode ro_mode);
3231 
3232   // During boot the free_space_map is created, and afterwards we may need
3233   // to write it into the free list nodes that were already created.
3234   void RepairFreeListsAfterDeserialization();
3235 
3236   size_t Available() override { return 0; }
3237 
3238  private:
3239   // Unseal the space after it has been sealed, by making it writable.
3240   // TODO(v8:7464): Only possible if the space hasn't been detached.
3241   void Unseal();
3242   void SetPermissionsForPages(MemoryAllocator* memory_allocator,
3243                               PageAllocator::Permission access);
3244 
3245   bool is_marked_read_only_ = false;
3246 
3247   //
3248   // String padding must be cleared just before serialization and therefore the
3249   // string padding in the space will already have been cleared if the space was
3250   // deserialized.
3251   bool is_string_padding_cleared_;
3252 };
3253 
3254 // -----------------------------------------------------------------------------
3255 // Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
3256 // managed by the large object space.
3257 // Large objects do not move during garbage collections.
3258 
3259 class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
3260  public:
3261   using iterator = LargePageIterator;
3262 
3263   ~LargeObjectSpace() override { TearDown(); }
3264 
3265   // Releases internal resources, frees objects in this space.
3266   void TearDown();
3267 
3268   // Available bytes for objects in this space.
3269   size_t Available() override;
3270 
3271   size_t Size() override { return size_; }
3272   size_t SizeOfObjects() override { return objects_size_; }
3273 
3274   // Approximate amount of physical memory committed for this space.
3275   size_t CommittedPhysicalMemory() override;
3276 
3277   int PageCount() { return page_count_; }
3278 
3279   // Frees unmarked objects.
3280   virtual void FreeUnmarkedObjects();
3281 
3282   // Checks whether a heap object is in this space; O(1).
3283   bool Contains(HeapObject obj);
3284   // Checks whether an address is in the object area in this space. Iterates
3285   // all objects in the space. May be slow.
3286   bool ContainsSlow(Address addr);
3287 
3288   // Checks whether the space is empty.
3289   bool IsEmpty() { return first_page() == nullptr; }
3290 
3291   virtual void AddPage(LargePage* page, size_t object_size);
3292   virtual void RemovePage(LargePage* page, size_t object_size);
3293 
3294   LargePage* first_page() {
3295     return reinterpret_cast<LargePage*>(Space::first_page());
3296   }
3297 
3298   iterator begin() { return iterator(first_page()); }
3299   iterator end() { return iterator(nullptr); }
3300 
3301   std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
3302 
3303   virtual bool is_off_thread() const { return false; }
3304 
3305 #ifdef VERIFY_HEAP
3306   virtual void Verify(Isolate* isolate);
3307 #endif
3308 
3309 #ifdef DEBUG
3310   void Print() override;
3311 #endif
3312 
3313  protected:
3314   LargeObjectSpace(Heap* heap, AllocationSpace id);
3315 
3316   LargePage* AllocateLargePage(int object_size, Executability executable);
3317 
3318   size_t size_;          // allocated bytes
3319   int page_count_;       // number of chunks
3320   size_t objects_size_;  // size of objects
3321 
3322  private:
3323   friend class LargeObjectSpaceObjectIterator;
3324 };
3325 
3326 class OffThreadLargeObjectSpace;
3327 
3328 class OldLargeObjectSpace : public LargeObjectSpace {
3329  public:
3330   explicit OldLargeObjectSpace(Heap* heap);
3331 
3332   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
3333   AllocateRaw(int object_size);
3334 
3335   // Clears the marking state of live objects.
3336   void ClearMarkingStateOfLiveObjects();
3337 
3338   void PromoteNewLargeObject(LargePage* page);
3339 
3340   V8_EXPORT_PRIVATE void MergeOffThreadSpace(OffThreadLargeObjectSpace* other);
3341 
3342  protected:
3343   explicit OldLargeObjectSpace(Heap* heap, AllocationSpace id);
3344   V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
3345                                                      Executability executable);
3346 };
3347 
3348 class NewLargeObjectSpace : public LargeObjectSpace {
3349  public:
3350   NewLargeObjectSpace(Heap* heap, size_t capacity);
3351 
3352   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
3353   AllocateRaw(int object_size);
3354 
3355   // Available bytes for objects in this space.
3356   size_t Available() override;
3357 
3358   void Flip();
3359 
3360   void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);
3361 
3362   void SetCapacity(size_t capacity);
3363 
3364   // The last allocated object that is not guaranteed to be initialized when
3365   // the concurrent marker visits it.
3366   Address pending_object() {
3367     return pending_object_.load(std::memory_order_relaxed);
3368   }
3369 
3370   void ResetPendingObject() { pending_object_.store(0); }
3371 
3372  private:
3373   std::atomic<Address> pending_object_;
3374   size_t capacity_;
3375 };
3376 
3377 class CodeLargeObjectSpace : public OldLargeObjectSpace {
3378  public:
3379   explicit CodeLargeObjectSpace(Heap* heap);
3380 
3381   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
3382   AllocateRaw(int object_size);
3383 
3384   // Finds a large object page containing the given address, returns nullptr
3385   // if such a page doesn't exist.
3386   LargePage* FindPage(Address a);
3387 
3388  protected:
3389   void AddPage(LargePage* page, size_t object_size) override;
3390   void RemovePage(LargePage* page, size_t object_size) override;
3391 
3392  private:
3393   static const size_t kInitialChunkMapCapacity = 1024;
3394   void InsertChunkMapEntries(LargePage* page);
3395   void RemoveChunkMapEntries(LargePage* page);
3396 
3397   // Page-aligned addresses to their corresponding LargePage.
3398   std::unordered_map<Address, LargePage*> chunk_map_;
3399 };
3400 
3401 class V8_EXPORT_PRIVATE OffThreadLargeObjectSpace : public LargeObjectSpace {
3402  public:
3403   explicit OffThreadLargeObjectSpace(Heap* heap);
3404 
3405   V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
3406 
3407   void FreeUnmarkedObjects() override;
3408 
3409   bool is_off_thread() const override { return true; }
3410 
3411  protected:
3412   // OldLargeObjectSpace can mess with OffThreadLargeObjectSpace during merging.
3413   friend class OldLargeObjectSpace;
3414 
3415   V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
3416                                                      Executability executable);
3417 };
3418 
3419 class LargeObjectSpaceObjectIterator : public ObjectIterator {
3420  public:
3421   explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);
3422 
3423   HeapObject Next() override;
3424 
3425  private:
3426   LargePage* current_;
3427 };
3428 
3429 // Iterates over the chunks (pages and large object pages) that can contain
3430 // pointers to new space or to evacuation candidates.
3431 class OldGenerationMemoryChunkIterator {
3432  public:
3433   inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
3434 
3435   // Return nullptr when the iterator is done.
3436   inline MemoryChunk* next();
3437 
3438  private:
3439   enum State {
3440     kOldSpaceState,
3441     kMapState,
3442     kCodeState,
3443     kLargeObjectState,
3444     kCodeLargeObjectState,
3445     kFinishedState
3446   };
3447   Heap* heap_;
3448   State state_;
3449   PageIterator old_iterator_;
3450   PageIterator code_iterator_;
3451   PageIterator map_iterator_;
3452   LargePageIterator lo_iterator_;
3453   LargePageIterator code_lo_iterator_;
3454 };
3455 
3456 }  // namespace internal
3457 }  // namespace v8
3458 
3459 #endif  // V8_HEAP_SPACES_H_
3460