// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include <list>
#include <map>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/iterator.h"
#include "src/base/platform/mutex.h"
#include "src/cancelable-task.h"
#include "src/flags.h"
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
#include "src/objects.h"
#include "src/objects/map.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

namespace heap {
class HeapTester;
class TestCodeRangeScope;
}  // namespace heap

class AllocationObserver;
class CompactionSpace;
class CompactionSpaceCollection;
class FreeList;
class Isolate;
class LinearAllocationArea;
class LocalArrayBufferTracker;
class MemoryAllocator;
class MemoryChunk;
class Page;
class PagedSpace;
class SemiSpace;
class SkipList;
class SlotsBuffer;
class SlotSet;
class TypedSlotSet;
class Space;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous.  The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of intergenerational
// references.  See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old space or large object space
// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space which is iterated in a special fashion. However we still
// require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage.  Garbage sections are marked with a special map.  These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries.  Garbage sections are chained
// together to form a free list after a GC.  Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain.  Very
// small free spaces are ignored, they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section.  The start of this
// section is denoted by the top field in the space.  The end of the section
// is denoted by the limit field in the space.  This special garbage section
// is not marked with a free space map in the data.  The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created.  The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.
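//
// A minimal sketch of the bump-pointer allocation that the top/limit pair
// makes possible (illustrative names only, not code from this file):
//
//   Address TryLinearAllocate(size_t size_in_bytes) {
//     Address new_top = top + size_in_bytes;
//     if (new_top > limit) return kNullAddress;  // special section exhausted
//     Address result = top;
//     top = new_top;  // no free-space map has to be written into the data
//     return result;
//   }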

// Some assertion macros used in the debugging mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
  DCHECK((0 < size) && (size <= code_space->AreaSize()))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

enum FreeListCategoryType {
  kTiniest,
  kTiny,
  kSmall,
  kMedium,
  kLarge,
  kHuge,

  kFirstCategory = kTiniest,
  kLastCategory = kHuge,
  kNumberOfCategories = kLastCategory + 1,
  kInvalidCategory
};

enum FreeMode { kLinkCategory, kDoNotLinkCategory };

enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };

enum RememberedSetType {
  OLD_TO_NEW,
  OLD_TO_OLD,
  NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
};

// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
 public:
  FreeListCategory(FreeList* free_list, Page* page)
      : free_list_(free_list),
        page_(page),
        type_(kInvalidCategory),
        available_(0),
        top_(nullptr),
        prev_(nullptr),
        next_(nullptr) {}

  void Initialize(FreeListCategoryType type) {
    type_ = type;
    available_ = 0;
    top_ = nullptr;
    prev_ = nullptr;
    next_ = nullptr;
  }

  void Reset();

  void ResetStats() { Reset(); }

  void RepairFreeList(Heap* heap);

  // Relinks the category into the currently owning free list. Requires that
  // the category is currently unlinked.
  void Relink();

  void Free(Address address, size_t size_in_bytes, FreeMode mode);

  // Performs a single try to pick a node of at least |minimum_size| from the
  // category. Stores the actual size in |node_size|. Returns nullptr if no
  // node is found.
  FreeSpace* PickNodeFromList(size_t minimum_size, size_t* node_size);

  // Picks a node of at least |minimum_size| from the category. Stores the
  // actual size in |node_size|. Returns nullptr if no node is found.
  FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);

  inline FreeList* owner();
  inline Page* page() const { return page_; }
  inline bool is_linked();
  bool is_empty() { return top() == nullptr; }
  size_t available() const { return available_; }

  void set_free_list(FreeList* free_list) { free_list_ = free_list; }

#ifdef DEBUG
  size_t SumFreeList();
  int FreeListLength();
#endif

 private:
  // For debug builds we accurately compute free list lengths up until
  // {kVeryLongFreeList} by manually walking the list.
  static const int kVeryLongFreeList = 500;

  FreeSpace* top() { return top_; }
  void set_top(FreeSpace* top) { top_ = top; }
  FreeListCategory* prev() { return prev_; }
  void set_prev(FreeListCategory* prev) { prev_ = prev; }
  FreeListCategory* next() { return next_; }
  void set_next(FreeListCategory* next) { next_ = next; }

  // This FreeListCategory is owned by the given free_list_.
  FreeList* free_list_;

  // This FreeListCategory holds free list entries of the given page_.
  Page* const page_;

  // |type_|: The type of this free list category.
  FreeListCategoryType type_;

  // |available_|: Total available bytes in all blocks of this free list
  // category.
  size_t available_;

  // |top_|: Points to the top FreeSpace* in the free list category.
  FreeSpace* top_;

  FreeListCategory* prev_;
  FreeListCategory* next_;

  friend class FreeList;
  friend class PagedSpace;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
};
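
// Illustrative usage sketch (hypothetical call site, not taken from this
// file): a category collects the free blocks of its page and later serves
// allocation requests of at least a given size from them.
//
//   FreeListCategory* category = page->free_list_category(kSmall);
//   category->Free(free_start, free_size_in_bytes, kLinkCategory);
//   size_t node_size = 0;
//   FreeSpace* node = category->PickNodeFromList(minimum_size, &node_size);
//   if (node != nullptr) {
//     // |node_size| holds the actual size of the returned block.
//   }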

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  // Use with std data structures.
  struct Hasher {
    size_t operator()(MemoryChunk* const chunk) const {
      return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
    }
  };

  enum Flag {
    NO_FLAGS = 0u,
    IS_EXECUTABLE = 1u << 0,
    POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
    POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
    // A page in new space has one of the next two flags set.
    IN_FROM_SPACE = 1u << 3,
    IN_TO_SPACE = 1u << 4,
    NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
    EVACUATION_CANDIDATE = 1u << 6,
    NEVER_EVACUATE = 1u << 7,

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black and
    // a white to grey transition is performed on the value.
    HAS_PROGRESS_BAR = 1u << 8,

    // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
    // from new to old space during evacuation.
    PAGE_NEW_OLD_PROMOTION = 1u << 9,

    // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
    // within the new space during evacuation.
    PAGE_NEW_NEW_PROMOTION = 1u << 10,

    // This flag is intended to be used for testing. Works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at the
    // next candidate selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,

    // This flag is intended to be used for testing.
    NEVER_ALLOCATE_ON_PAGE = 1u << 12,

    // The memory chunk is already logically freed; however, the actual freeing
    // still has to be performed.
    PRE_FREED = 1u << 13,

    // |POOLED|: When actually freeing this chunk, only uncommit and do not
    // give up the reservation as we still reuse the chunk at some point.
    POOLED = 1u << 14,

    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
    //   has been aborted and needs special handling by the sweeper.
    COMPACTION_WAS_ABORTED = 1u << 15,

    // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
    // on pages is sometimes aborted. The flag is used to avoid repeatedly
    // triggering on the same page.
    COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,

    // |ANCHOR|: Flag is set if page is an anchor.
    ANCHOR = 1u << 17,

    // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
    // to iterate the page.
    SWEEP_TO_ITERATE = 1u << 18
  };

  using Flags = uintptr_t;

  static const Flags kPointersToHereAreInterestingMask =
      POINTERS_TO_HERE_ARE_INTERESTING;

  static const Flags kPointersFromHereAreInterestingMask =
      POINTERS_FROM_HERE_ARE_INTERESTING;

  static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;

  static const Flags kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;

  static const Flags kSkipEvacuationSlotsRecordingMask =
      kEvacuationCandidateMask | kIsInNewSpaceMask;

  // |kSweepingDone|: The page state when sweeping is complete or sweeping must
  //   not be performed on that page. Sweeper threads that are done with their
  //   work will set this value and not touch the page anymore.
  // |kSweepingPending|: This page is ready for parallel sweeping.
  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
  enum ConcurrentSweepingState {
    kSweepingDone,
    kSweepingPending,
    kSweepingInProgress,
  };

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;
  static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
  static const intptr_t kAreaStartOffset = kFlagsOffset + kIntptrSize;
  static const intptr_t kAreaEndOffset = kAreaStartOffset + kPointerSize;
  static const intptr_t kReservationOffset = kAreaEndOffset + kPointerSize;
  static const intptr_t kOwnerOffset = kReservationOffset + 2 * kPointerSize;

  static const size_t kMinHeaderSize =
      kSizeOffset         // NOLINT
      + kSizetSize        // size_t size
      + kUIntptrSize      // uintptr_t flags_
      + kPointerSize      // Address area_start_
      + kPointerSize      // Address area_end_
      + 2 * kPointerSize  // VirtualMemory reservation_
      + kPointerSize      // Address owner_
      + kPointerSize      // Heap* heap_
      + kIntptrSize       // intptr_t progress_bar_
      + kIntptrSize       // intptr_t live_byte_count_
      + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // SlotSet* array
      + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
      + kPointerSize  // InvalidatedSlots* invalidated_slots_
      + kPointerSize  // SkipList* skip_list_
      + kPointerSize  // AtomicValue high_water_mark_
      + kPointerSize  // base::Mutex* mutex_
      + kPointerSize  // base::AtomicWord concurrent_sweeping_
      + kPointerSize  // base::Mutex* page_protection_change_mutex_
      + kPointerSize  // uintptr_t write_unprotect_counter_
      + kSizetSize    // size_t allocated_bytes_
      + kSizetSize    // size_t wasted_memory_
      + kPointerSize  // AtomicValue next_chunk_
      + kPointerSize  // AtomicValue prev_chunk_
      + kPointerSize * kNumberOfCategories
      // FreeListCategory categories_[kNumberOfCategories]
      + kPointerSize   // LocalArrayBufferTracker* local_tracker_
      + kIntptrSize    // intptr_t young_generation_live_byte_count_
      + kPointerSize;  // Bitmap* young_generation_bitmap_

  // We add some more space to the computed header size to account for missing
  // alignment requirements in our computation.
  // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
  static const size_t kHeaderSize = kMinHeaderSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both.  Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset =
      kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
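
  // Worked example of the rounding above (assuming a 64-bit build where
  // kPointerSize == 8, so kObjectStartAlignment == 256; the kBodyOffset value
  // is hypothetical): if kBodyOffset were 0x250, then
  //   0x250 - 1 + (256 - (0x250 - 1) % 256) == 0x300,
  // i.e. kObjectStartOffset is kBodyOffset rounded up to the next multiple of
  // kObjectStartAlignment, and left unchanged if it is already aligned.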

  // Page size in bytes.  This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;

  // Maximum number of nested code memory modification scopes.
  // TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
  static const int kMaxWriteUnprotectCounter = 4;

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }
  // Only works if the object is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromHeapObject(HeapObject* o) {
    return reinterpret_cast<MemoryChunk*>(reinterpret_cast<Address>(o) &
                                          ~kAlignmentMask);
  }
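
  // Both lookups above rely on the chunk alignment. A hedged sketch of the
  // masking arithmetic (illustrative numbers, assuming the 512K page size
  // stated below for Page):
  //
  //   Address addr = chunk_start + 0x1234;  // somewhere inside a chunk
  //   MemoryChunk* c = MemoryChunk::FromAddress(addr);
  //   // c == chunk_start, because 0x1234 < kAlignment and the low
  //   // kPageSizeBits bits are cleared by ~kAlignmentMask.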

  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  static inline void UpdateHighWaterMark(Address mark) {
    if (mark == kNullAddress) return;
    // Need to subtract one from the mark because when a chunk is full the
    // top points to the next address after the chunk, which effectively belongs
    // to another chunk. See the comment to Page::FromTopOrLimit.
    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
    intptr_t old_mark = 0;
    do {
      old_mark = chunk->high_water_mark_.Value();
    } while ((new_mark > old_mark) &&
             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
  }

  Address address() const {
    return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
  }

  base::Mutex* mutex() { return mutex_; }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether |addr| can be a limit of addresses in this page. It's a
  // limit if it's in the page, or if it's just after the last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
    return concurrent_sweeping_;
  }

  bool SweepingDone() {
    return concurrent_sweeping_state().Value() == kSweepingDone;
  }

  size_t size() const { return size_; }
  void set_size(size_t size) { size_ = size; }

  inline Heap* heap() const { return heap_; }

  Heap* synchronized_heap();

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  template <RememberedSetType type>
  bool ContainsSlots() {
    return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
           invalidated_slots() != nullptr;
  }

  template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
  SlotSet* slot_set() {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
    return slot_set_[type];
  }

  template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
  TypedSlotSet* typed_slot_set() {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
    return typed_slot_set_[type];
  }

  template <RememberedSetType type>
  SlotSet* AllocateSlotSet();
  // Not safe to be called concurrently.
  template <RememberedSetType type>
  void ReleaseSlotSet();
  template <RememberedSetType type>
  TypedSlotSet* AllocateTypedSlotSet();
  // Not safe to be called concurrently.
  template <RememberedSetType type>
  void ReleaseTypedSlotSet();

  InvalidatedSlots* AllocateInvalidatedSlots();
  void ReleaseInvalidatedSlots();
  void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
  InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }

  void AllocateLocalTracker();
  void ReleaseLocalTracker();
  inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
  bool contains_array_buffers();

  void AllocateYoungGenerationBitmap();
  void ReleaseYoungGenerationBitmap();

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory();

  Address HighWaterMark() { return address() + high_water_mark_.Value(); }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return static_cast<int>(progress_bar_);
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
    }
  }

  inline uint32_t AddressToMarkbitIndex(Address addr) const {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) const {
    return this->address() + (index << kPointerSizeLog2);
  }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  void SetFlag(Flag flag) {
    if (access_mode == AccessMode::NON_ATOMIC) {
      flags_ |= flag;
    } else {
      base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
    }
  }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  bool IsFlagSet(Flag flag) {
    return (GetFlags<access_mode>() & flag) != 0;
  }

  void ClearFlag(Flag flag) { flags_ &= ~flag; }
  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in "flags", the rest retain the current value in |flags_|.
  void SetFlags(uintptr_t flags, uintptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }
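
  // Illustrative sketch of the mask semantics (hypothetical call site, not
  // code from this file): copy only the new-space related flags from another
  // chunk while leaving every other bit of |flags_| untouched.
  //
  //   uintptr_t incoming = other_chunk->GetFlags();
  //   this->SetFlags(incoming, MemoryChunk::kIsInNewSpaceMask);
  //   // Bits outside kIsInNewSpaceMask keep their previous values.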

  // Return all current flags.
  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  uintptr_t GetFlags() {
    if (access_mode == AccessMode::NON_ATOMIC) {
      return flags_;
    } else {
      return base::AsAtomicWord::Relaxed_Load(&flags_);
    }
  }

  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool CanAllocate() {
    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
  }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  bool IsEvacuationCandidate() {
    DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
             IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
    return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
  }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  bool ShouldSkipEvacuationSlotRecording() {
    uintptr_t flags = GetFlags<access_mode>();
    return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
           ((flags & COMPACTION_WAS_ABORTED) == 0);
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  MemoryChunk* next_chunk() { return next_chunk_.Value(); }

  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }

  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }

  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }

  Space* owner() const { return owner_.Value(); }

  void set_owner(Space* space) { owner_.SetValue(space); }

  bool IsPagedSpace() const;

  void InsertAfter(MemoryChunk* other);
  void Unlink();

  // Emits a memory barrier. For TSAN builds the other thread needs to perform
  // MemoryChunk::synchronized_heap() to simulate the barrier.
  void InitializationMemoryFence();

  void SetReadAndExecutable();
  void SetReadAndWritable();

 protected:
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner,
                                 VirtualMemory* reservation);

  // Should be called when memory chunk is about to be freed.
  void ReleaseAllocatedMemory();

  VirtualMemory* reserved_memory() { return &reservation_; }

  size_t size_;
  uintptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;

  // The space owning this memory chunk.
  base::AtomicValue<Space*> owner_;

  Heap* heap_;

  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  intptr_t progress_bar_;

  // Count of bytes marked black on page.
  intptr_t live_byte_count_;

  // A single slot set for small pages (of size kPageSize) or an array of slot
  // sets for large pages. In the latter case the number of entries in the
  // array is ceil(size() / kPageSize).
  SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
  TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
  InvalidatedSlots* invalidated_slots_;

  SkipList* skip_list_;

  // Assuming the initial allocation on a page is sequential, counts the
  // highest number of bytes ever allocated on the page.
  base::AtomicValue<intptr_t> high_water_mark_;

  base::Mutex* mutex_;

  base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;

  base::Mutex* page_protection_change_mutex_;

  // This field is only relevant for code pages. It depicts the number of
  // times a component requested this page to be read+writeable. The
  // counter is decremented when a component resets to read+executable.
  // If Value() == 0 => The memory is read and executable.
  // If Value() >= 1 => The memory is read and writable (and maybe executable).
  // The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
  // excessive nesting of scopes.
  // All executable MemoryChunks are allocated rw based on the assumption that
  // they will be used immediately for an allocation. They are initialized
  // with the number of open CodeSpaceMemoryModificationScopes. The caller
  // that triggers the page allocation is responsible for decrementing the
  // counter.
  uintptr_t write_unprotect_counter_;

  // Bytes allocated on the page, including all objects on the page
  // and the linear allocation area.
  size_t allocated_bytes_;
  // Freed memory that was not added to the free list.
  size_t wasted_memory_;

  // next_chunk_ holds a pointer of type MemoryChunk
  base::AtomicValue<MemoryChunk*> next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk
  base::AtomicValue<MemoryChunk*> prev_chunk_;

  FreeListCategory* categories_[kNumberOfCategories];

  LocalArrayBufferTracker* local_tracker_;

  intptr_t young_generation_live_byte_count_;
  Bitmap* young_generation_bitmap_;

 private:
  void InitializeReservedMemory() { reservation_.Reset(); }

  friend class ConcurrentMarkingState;
  friend class IncrementalMarkingState;
  friend class MajorAtomicMarkingState;
  friend class MajorMarkingState;
  friend class MajorNonAtomicMarkingState;
  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
  friend class MinorMarkingState;
  friend class MinorNonAtomicMarkingState;
  friend class PagedSpace;
};

static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
              "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");


// -----------------------------------------------------------------------------
// A page is a memory chunk of size 512K. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromTopOrLimit(top);
class Page : public MemoryChunk {
 public:
  static const intptr_t kCopyAllFlags = ~0;

  // Page flags copied from from-space to to-space when flipping semispaces.
  static const intptr_t kCopyOnFlipFlagsMask =
      static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
      static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[. This only works if the object
  // is in fact in a page.
  static Page* FromAddress(Address addr) {
    return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
  }

  // Returns the page containing the address provided. The address can
  // potentially point right after the page. To also be safe for tagged values
  // we subtract a hole word. The valid address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
  static Page* FromAllocationAreaAddress(Address address) {
    return Page::FromAddress(address - kPointerSize);
  }

  // Checks if address1 and address2 are on the same new space page.
  static bool OnSamePage(Address address1, Address address2) {
    return Page::FromAddress(address1) == Page::FromAddress(address2);
  }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address addr) {
    return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
  }

  static bool IsAtObjectStart(Address addr) {
    return (addr & kPageAlignmentMask) == kObjectStartOffset;
  }

  static Page* ConvertNewToOld(Page* old_page);

  // Create a Page object that is only used as an anchor for the doubly-linked
  // list of real pages.
  explicit Page(Space* owner) { InitializeAsAnchor(owner); }

  inline void MarkNeverAllocateForTesting();
  inline void MarkEvacuationCandidate();
  inline void ClearEvacuationCandidate();

  Page* next_page() { return static_cast<Page*>(next_chunk()); }
  Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
  void set_next_page(Page* page) { set_next_chunk(page); }
  void set_prev_page(Page* page) { set_prev_chunk(page); }

  template <typename Callback>
  inline void ForAllFreeListCategories(Callback callback) {
    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
      callback(categories_[i]);
    }
  }

  // Returns the offset of a given address relative to this page.
  inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(size_t offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
  // progress, in particular when we know that right before this call a
  // sweeper thread was sweeping this page.
  void WaitUntilSweepingCompleted() {
    mutex_->Lock();
    mutex_->Unlock();
    DCHECK(SweepingDone());
  }

  void ResetFreeListStatistics();

  size_t AvailableInFreeList();

  size_t AvailableInFreeListFromAllocatedBytes() {
    DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
    return area_size() - wasted_memory() - allocated_bytes();
  }

  FreeListCategory* free_list_category(FreeListCategoryType type) {
    return categories_[type];
  }

  bool is_anchor() { return IsFlagSet(Page::ANCHOR); }

  size_t wasted_memory() { return wasted_memory_; }
  void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
  size_t allocated_bytes() { return allocated_bytes_; }
  void IncreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    allocated_bytes_ += bytes;
  }
  void DecreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    DCHECK_GE(allocated_bytes(), bytes);
    allocated_bytes_ -= bytes;
  }

  void ResetAllocatedBytes();

  size_t ShrinkToHighWaterMark();

  V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
  void DestroyBlackArea(Address start, Address end);

  void InitializeFreeListCategories();
  void AllocateFreeListCategories();
  void ReleaseFreeListCategories();

#ifdef DEBUG
  void Print();
#endif  // DEBUG

 private:
  enum InitializationMode { kFreeMemory, kDoNotFreeMemory };

  void InitializeAsAnchor(Space* owner);

  friend class MemoryAllocator;
};

class ReadOnlyPage : public Page {
 public:
  // Clears any pointers in the header that point out of the page that would
  // otherwise make the header non-relocatable.
  void MakeHeaderRelocatable();
};

class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  inline LargePage* next_page() {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) { set_next_chunk(page); }

  // Uncommit memory that is not in use anymore by the object. If the object
  // cannot be shrunk, 0 is returned.
  Address GetAddressToShrink(Address object_address, size_t object_size);

  void ClearOutOfLiveRangeSlots(Address free_start);

  // A limit to guarantee that we do not overflow the typed slot offset in
  // the old-to-old remembered set.
  // Note that this limit is higher than what the assembler already imposes on
  // x64 and ia32 architectures.
  static const int kMaxCodePageSize = 512 * MB;

 private:
  static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
                               Executability executable);

  friend class MemoryAllocator;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id)
      : allocation_observers_paused_(false),
        heap_(heap),
        id_(id),
        committed_(0),
        max_committed_(0),
        external_backing_store_bytes_(0) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
      AllocationObserver* observer);

  V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
      AllocationObserver* observer);

  V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();

  V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();

  V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}

  void AllocationStep(int bytes_since_last, Address soon_object, int size);

  // Return the total amount of committed memory for this space, i.e.,
  // allocatable memory and page headers.
  virtual size_t CommittedMemory() { return committed_; }

  virtual size_t MaximumCommittedMemory() { return max_committed_; }

  // Returns allocated size.
  virtual size_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual size_t SizeOfObjects() { return Size(); }

  // Returns amount of off-heap memory in-use by objects in this Space.
  virtual size_t ExternalBackingStoreBytes() const {
    return external_backing_store_bytes_;
  }

  // Approximate amount of physical memory committed for this space.
  virtual size_t CommittedPhysicalMemory() = 0;

  // Return the available bytes without growing.
  virtual size_t Available() = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

  virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;

  void AccountCommitted(size_t bytes) {
    DCHECK_GE(committed_ + bytes, committed_);
    committed_ += bytes;
    if (committed_ > max_committed_) {
      max_committed_ = committed_;
    }
  }

  void AccountUncommitted(size_t bytes) {
    DCHECK_GE(committed_, committed_ - bytes);
    committed_ -= bytes;
  }

  void IncrementExternalBackingStoreBytes(size_t amount) {
    external_backing_store_bytes_ += amount;
  }
  void DecrementExternalBackingStoreBytes(size_t amount) {
    external_backing_store_bytes_ -= amount;
  }

  V8_EXPORT_PRIVATE void* GetRandomMmapAddr();

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 protected:
  intptr_t GetNextInlineAllocationStepSize();
  bool AllocationObserversActive() {
    return !allocation_observers_paused_ && !allocation_observers_.empty();
  }

  std::vector<AllocationObserver*> allocation_observers_;

 private:
  bool allocation_observers_paused_;
  Heap* heap_;
  AllocationSpace id_;

  // Keeps track of committed memory in a space.
  size_t committed_;
  size_t max_committed_;

  // Tracks off-heap memory used by this space.
  std::atomic<size_t> external_backing_store_bytes_;

  DISALLOW_COPY_AND_ASSIGN(Space);
};
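
// A hedged sketch (hypothetical subclass code, not part of this file) of how
// the accounting helpers above are meant to be used: a space that maps or
// unmaps a page mirrors the change so CommittedMemory() and
// MaximumCommittedMemory() stay consistent, and off-heap allocations owned by
// the space go through the external backing store counters.
//
//   void OnPageCommitted(size_t page_size) { AccountCommitted(page_size); }
//   void OnPageReleased(size_t page_size) { AccountUncommitted(page_size); }
//   void OnBackingStoreAllocated(size_t bytes) {
//     IncrementExternalBackingStoreBytes(bytes);
//   }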


class MemoryChunkValidator {
  // Computed offsets should match the compiler generated ones.
  STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));

  // Validate our estimates on the header size.
  STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements.  This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4 GB virtual address space.  On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() {
    if (virtual_memory_.IsReserved()) virtual_memory_.Free();
  }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(size_t requested_size);

  bool valid() { return virtual_memory_.IsReserved(); }
  Address start() {
    DCHECK(valid());
    return virtual_memory_.address();
  }
  size_t size() {
    DCHECK(valid());
    return virtual_memory_.size();
  }
  bool contains(Address address) {
    if (!valid()) return false;
    Address start = virtual_memory_.address();
    return start <= address && address < start + virtual_memory_.size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range.  On platforms with no separate code range, should
  // not be called.
  V8_WARN_UNUSED_RESULT Address AllocateRawMemory(const size_t requested_size,
                                                  const size_t commit_size,
                                                  size_t* allocated);
  void FreeRawMemory(Address buf, size_t length);

 private:
  class FreeBlock {
   public:
    FreeBlock() : start(0), size(0) {}
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(reinterpret_cast<Address>(start_arg)), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory.  If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, returns false.
  bool GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static bool CompareFreeBlockAddress(const FreeBlock& left,
                                      const FreeBlock& right);
  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
  void ReleaseBlock(const FreeBlock* block);

  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  VirtualMemory virtual_memory_;

  // The global mutex guards free_list_ and allocation_list_ as GC threads may
  // access both lists concurrently with the main thread.
  base::Mutex code_range_mutex_;

  // Freed blocks of memory are added to the free list.  When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  std::vector<FreeBlock> free_list_;

  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  std::vector<FreeBlock> allocation_list_;
  size_t current_allocation_block_index_;

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};


class SkipList {
 public:
  SkipList() { Clear(); }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = static_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) {
        starts_[idx] = addr;
      } else {
        // In the first region, there may already be an object closer to the
        // start of the region. Do not change the start in that case. If this
        // is not the first region, you probably added overlapping objects.
        DCHECK_EQ(start_region, idx);
      }
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == nullptr) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};
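
// Usage sketch (hypothetical call sites): the skip list is refreshed whenever
// an object is placed on a page and consulted when scanning part of that
// page, so iteration can start at the last recorded object that begins before
// the queried address instead of at the page start.
//
//   SkipList::Update(object_address, object_size_in_bytes);  // on allocation
//   Address scan_start = page->skip_list()->StartFor(slot_address);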
1182 
1183 
1184 // ----------------------------------------------------------------------------
1185 // A space acquires chunks of memory from the operating system. The memory
1186 // allocator allocates and deallocates pages for the paged heap spaces and large
1187 // pages for large object space.
1188 class V8_EXPORT_PRIVATE MemoryAllocator {
1189  public:
1190   // Unmapper takes care of concurrently unmapping and uncommitting memory
1191   // chunks.
1192   class Unmapper {
1193    public:
1194     class UnmapFreeMemoryTask;
1195 
Unmapper(Heap * heap,MemoryAllocator * allocator)1196     Unmapper(Heap* heap, MemoryAllocator* allocator)
1197         : heap_(heap),
1198           allocator_(allocator),
1199           pending_unmapping_tasks_semaphore_(0),
1200           pending_unmapping_tasks_(0),
1201           active_unmapping_tasks_(0) {
1202       chunks_[kRegular].reserve(kReservedQueueingSlots);
1203       chunks_[kPooled].reserve(kReservedQueueingSlots);
1204     }
1205 
AddMemoryChunkSafe(MemoryChunk * chunk)1206     void AddMemoryChunkSafe(MemoryChunk* chunk) {
1207       if (chunk->IsPagedSpace() && chunk->executable() != EXECUTABLE) {
1208         AddMemoryChunkSafe<kRegular>(chunk);
1209       } else {
1210         AddMemoryChunkSafe<kNonRegular>(chunk);
1211       }
1212     }
1213 
TryGetPooledMemoryChunkSafe()1214     MemoryChunk* TryGetPooledMemoryChunkSafe() {
1215       // Procedure:
1216       // (1) Try to get a chunk that was declared as pooled and already has
1217       // been uncommitted.
1218       // (2) Try to steal any memory chunk of kPageSize that would've been
1219       // unmapped.
1220       MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
1221       if (chunk == nullptr) {
1222         chunk = GetMemoryChunkSafe<kRegular>();
1223         if (chunk != nullptr) {
1224           // For stolen chunks we need to manually free any allocated memory.
1225           chunk->ReleaseAllocatedMemory();
1226         }
1227       }
1228       return chunk;
1229     }
1230 
1231     void FreeQueuedChunks();
1232     void CancelAndWaitForPendingTasks();
1233     void PrepareForMarkCompact();
1234     void EnsureUnmappingCompleted();
1235     void TearDown();
1236     int NumberOfChunks();
1237 
1238    private:
1239     static const int kReservedQueueingSlots = 64;
1240     static const int kMaxUnmapperTasks = 4;
1241 
1242     enum ChunkQueueType {
1243       kRegular,     // Pages of kPageSize that do not live in a CodeRange and
1244                     // can thus be used for stealing.
1245       kNonRegular,  // Large chunks and executable chunks.
1246       kPooled,      // Pooled chunks, already uncommited and ready for reuse.
1247       kNumberOfChunkQueues,
1248     };
1249 
1250     enum class FreeMode {
1251       kUncommitPooled,
1252       kReleasePooled,
1253     };
1254 
1255     template <ChunkQueueType type>
AddMemoryChunkSafe(MemoryChunk * chunk)1256     void AddMemoryChunkSafe(MemoryChunk* chunk) {
1257       base::LockGuard<base::Mutex> guard(&mutex_);
1258       chunks_[type].push_back(chunk);
1259     }
1260 
1261     template <ChunkQueueType type>
GetMemoryChunkSafe()1262     MemoryChunk* GetMemoryChunkSafe() {
1263       base::LockGuard<base::Mutex> guard(&mutex_);
1264       if (chunks_[type].empty()) return nullptr;
1265       MemoryChunk* chunk = chunks_[type].back();
1266       chunks_[type].pop_back();
1267       return chunk;
1268     }
1269 
1270     bool MakeRoomForNewTasks();
1271 
1272     template <FreeMode mode>
1273     void PerformFreeMemoryOnQueuedChunks();
1274 
1275     void PerformFreeMemoryOnQueuedNonRegularChunks();
1276 
1277     Heap* const heap_;
1278     MemoryAllocator* const allocator_;
1279     base::Mutex mutex_;
1280     std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
1281     CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
1282     base::Semaphore pending_unmapping_tasks_semaphore_;
1283     intptr_t pending_unmapping_tasks_;
1284     base::AtomicNumber<intptr_t> active_unmapping_tasks_;
1285 
1286     friend class MemoryAllocator;
1287   };
1288 
1289   enum AllocationMode {
1290     kRegular,
1291     kPooled,
1292   };
1293 
1294   enum FreeMode {
1295     kFull,
1296     kAlreadyPooled,
1297     kPreFreeAndQueue,
1298     kPooledAndQueue,
1299   };
1300 
1301   static size_t CodePageGuardStartOffset();
1302 
1303   static size_t CodePageGuardSize();
1304 
1305   static size_t CodePageAreaStartOffset();
1306 
1307   static size_t CodePageAreaEndOffset();
1308 
CodePageAreaSize()1309   static size_t CodePageAreaSize() {
1310     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1311   }
1312 
PageAreaSize(AllocationSpace space)1313   static size_t PageAreaSize(AllocationSpace space) {
1314     DCHECK_NE(LO_SPACE, space);
1315     return (space == CODE_SPACE) ? CodePageAreaSize()
1316                                  : Page::kAllocatableMemory;
1317   }
1318 
1319   static intptr_t GetCommitPageSize();
1320 
1321   explicit MemoryAllocator(Isolate* isolate);
1322 
1323   // Initializes its internal bookkeeping structures.
1324   // Max capacity of the total space and executable memory limit.
1325   bool SetUp(size_t max_capacity, size_t code_range_size);
1326 
1327   void TearDown();
1328 
1329   // Allocates a Page from the allocator. AllocationMode is used to indicate
1330   // whether pooled allocation, which only works for MemoryChunk::kPageSize,
1331   // should be tried first.
1332   template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
1333             typename SpaceType>
1334   Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
1335 
1336   LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
1337                                Executability executable);
1338 
1339   template <MemoryAllocator::FreeMode mode = kFull>
1340   void Free(MemoryChunk* chunk);
1341 
1342   // Returns allocated spaces in bytes.
Size()1343   size_t Size() { return size_.Value(); }
1344 
1345   // Returns allocated executable spaces in bytes.
SizeExecutable()1346   size_t SizeExecutable() { return size_executable_.Value(); }
1347 
1348   // Returns the maximum available bytes of heaps.
Available()1349   size_t Available() {
1350     const size_t size = Size();
1351     return capacity_ < size ? 0 : capacity_ - size;
1352   }
1353 
1354   // Returns maximum available bytes that the old space can have.
MaxAvailable()1355   size_t MaxAvailable() {
1356     return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
1357   }
1358 
1359   // Returns an indication of whether a pointer is in a space that has
1360   // been allocated by this MemoryAllocator.
IsOutsideAllocatedSpace(Address address)1361   V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
1362     return address < lowest_ever_allocated_.Value() ||
1363            address >= highest_ever_allocated_.Value();
1364   }
1365 
1366   // Returns a MemoryChunk in which the memory region from commit_area_size to
1367   // reserve_area_size of the chunk area is reserved but not committed, it
1368   // could be committed later by calling MemoryChunk::CommitArea.
1369   MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
1370                              Executability executable, Space* space);
1371 
1372   Address ReserveAlignedMemory(size_t requested, size_t alignment, void* hint,
1373                                VirtualMemory* controller);
1374   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1375                                 size_t alignment, Executability executable,
1376                                 void* hint, VirtualMemory* controller);
1377 
1378   bool CommitMemory(Address addr, size_t size);
1379 
1380   void FreeMemory(VirtualMemory* reservation, Executability executable);
1381   void FreeMemory(Address addr, size_t size, Executability executable);
1382 
1383   // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
1384   // internally memory is freed from |start_free| to the end of the reservation.
1385   // Additional memory beyond the page is not accounted though, so
1386   // |bytes_to_free| is computed by the caller.
1387   void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
1388                          size_t bytes_to_free, Address new_area_end);
1389 
1390   // Commit a contiguous block of memory from the initial chunk.  Assumes that
1391   // the address is not kNullAddress, the size is greater than zero, and that
1392   // the block is contained in the initial chunk.  Returns true if it succeeded
1393   // and false otherwise.
1394   bool CommitBlock(Address start, size_t size);
1395 
1396   // Checks if an allocated MemoryChunk was intended to be used for executable
1397   // memory.
  bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
    return executable_memory_.find(chunk) != executable_memory_.end();
  }
1401 
1402   // Uncommit a contiguous block of memory [start..(start+size)[.
1403   // start is not kNullAddress, the size is greater than zero, and the
1404   // block is contained in the initial chunk.  Returns true if it succeeded
1405   // and false otherwise.
1406   bool UncommitBlock(Address start, size_t size);
1407 
1408   // Zaps a contiguous block of memory [start..(start+size)[ thus
1409   // filling it up with a recognizable non-nullptr bit pattern.
1410   void ZapBlock(Address start, size_t size);
1411 
1412   V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
1413                                                     Address start,
1414                                                     size_t commit_size,
1415                                                     size_t reserved_size);
1416 
  CodeRange* code_range() { return code_range_; }
  Unmapper* unmapper() { return &unmapper_; }
1419 
1420  private:
1421   // PreFree logically frees the object, i.e., it takes care of the size
1422   // bookkeeping and calls the allocation callback.
1423   void PreFreeMemory(MemoryChunk* chunk);
1424 
1425   // FreeMemory can be called concurrently when PreFree was executed before.
1426   void PerformFreeMemory(MemoryChunk* chunk);
1427 
1428   // See AllocatePage for public interface. Note that currently we only support
1429   // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
1430   template <typename SpaceType>
1431   MemoryChunk* AllocatePagePooled(SpaceType* owner);
1432 
1433   // Initializes pages in a chunk. Returns the first page address.
1434   // This function and GetChunkId() are provided for the mark-compact
1435   // collector to rebuild page headers in the from space, which is
1436   // used as a marking stack and its page headers are destroyed.
1437   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1438                                PagedSpace* owner);
1439 
  void UpdateAllocatedSpaceLimits(Address low, Address high) {
    // The use of atomic primitives does not guarantee correctness (wrt.
    // desired semantics) by default. The loop here ensures that we update the
    // values only if they did not change in between.
    Address ptr = kNullAddress;
    do {
      ptr = lowest_ever_allocated_.Value();
    } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
    do {
      ptr = highest_ever_allocated_.Value();
    } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
  }

  void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
    DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
    DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.insert(chunk);
  }

  void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
    DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.erase(chunk);
    chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
  }
1464 
1465   Isolate* isolate_;
1466   CodeRange* code_range_;
1467 
1468   // Maximum space size in bytes.
1469   size_t capacity_;
1470 
1471   // Allocated space size in bytes.
1472   base::AtomicNumber<size_t> size_;
1473   // Allocated executable space size in bytes.
1474   base::AtomicNumber<size_t> size_executable_;
1475 
1476   // We keep the lowest and highest addresses allocated as a quick way
1477   // of determining that pointers are outside the heap. The estimate is
1478   // conservative, i.e. not all addresses in 'allocated' space are allocated
1479   // to our heap. The range is [lowest, highest[, inclusive on the low end
1480   // and exclusive on the high end.
1481   base::AtomicValue<Address> lowest_ever_allocated_;
1482   base::AtomicValue<Address> highest_ever_allocated_;
1483 
1484   VirtualMemory last_chunk_;
1485   Unmapper unmapper_;
1486 
1487   // Data structure to remember allocated executable memory chunks.
1488   std::unordered_set<MemoryChunk*> executable_memory_;
1489 
1490   friend class heap::TestCodeRangeScope;
1491 
1492   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1493 };
1494 
1495 extern template Page*
1496 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
1497     size_t size, PagedSpace* owner, Executability executable);
1498 extern template Page*
1499 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
1500     size_t size, SemiSpace* owner, Executability executable);
1501 extern template Page*
1502 MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
1503     size_t size, SemiSpace* owner, Executability executable);
1504 
1505 // -----------------------------------------------------------------------------
1506 // Interface for heap object iterator to be implemented by all object space
1507 // object iterators.
1508 //
// NOTE: The space-specific object iterators also implement their own Next()
//       method, which is used to avoid virtual function calls when iterating
//       over a specific space.
1512 
1513 class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
1514  public:
  virtual ~ObjectIterator() {}
1516   virtual HeapObject* Next() = 0;
1517 };
1518 
1519 template <class PAGE_TYPE>
1520 class PageIteratorImpl
1521     : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
1522  public:
  explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
  PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
1525   PAGE_TYPE* operator*() { return p_; }
1526   bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1527     return rhs.p_ == p_;
1528   }
1529   bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1530     return rhs.p_ != p_;
1531   }
1532   inline PageIteratorImpl<PAGE_TYPE>& operator++();
1533   inline PageIteratorImpl<PAGE_TYPE> operator++(int);
1534 
1535  private:
1536   PAGE_TYPE* p_;
1537 };
1538 
1539 typedef PageIteratorImpl<Page> PageIterator;
1540 typedef PageIteratorImpl<LargePage> LargePageIterator;
1541 
1542 class PageRange {
1543  public:
1544   typedef PageIterator iterator;
  PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
  explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
  inline PageRange(Address start, Address limit);

  iterator begin() { return iterator(begin_); }
  iterator end() { return iterator(end_); }
1551 
1552  private:
1553   Page* begin_;
1554   Page* end_;
1555 };
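// Example (illustrative sketch; |first| and |last| are hypothetical Page
// pointers, with |last| exclusive): iterating the pages of a range.
//   for (Page* page : PageRange(first, last)) {
//     // Visit |page|.
//   }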
1556 
1557 // -----------------------------------------------------------------------------
1558 // Heap object iterator in new/old/map spaces.
1559 //
1560 // A HeapObjectIterator iterates objects from the bottom of the given space
1561 // to its top or from the bottom of the given page to its top.
1562 //
1563 // If objects are allocated in the page during iteration the iterator may
1564 // or may not iterate over those objects.  The caller must create a new
1565 // iterator in order to be sure to visit these new objects.
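// Example (minimal sketch; assumes |space| points to an initialized
// PagedSpace): walking every object currently in the space, with free-space
// fillers skipped by Next().
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // Process |obj|; objects allocated after the iterator was created may
//     // or may not be visited.
//   }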
1566 class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
1567  public:
1568   // Creates a new object iterator in a given space.
1569   explicit HeapObjectIterator(PagedSpace* space);
1570   explicit HeapObjectIterator(Page* page);
1571 
1572   // Advance to the next object, skipping free spaces and other fillers and
1573   // skipping the special garbage section of which there is one per space.
1574   // Returns nullptr when the iteration has ended.
1575   inline HeapObject* Next() override;
1576 
1577  private:
1578   // Fast (inlined) path of next().
1579   inline HeapObject* FromCurrentPage();
1580 
1581   // Slow path of next(), goes into the next page.  Returns false if the
1582   // iteration has ended.
1583   bool AdvanceToNextPage();
1584 
1585   Address cur_addr_;  // Current iteration point.
1586   Address cur_end_;   // End iteration point.
1587   PagedSpace* space_;
1588   PageRange page_range_;
1589   PageRange::iterator current_page_;
1590 };
1591 
1592 
1593 // -----------------------------------------------------------------------------
1594 // A space has a circular list of pages. The next page can be accessed via
1595 // Page::next_page() call.
1596 
1597 // An abstraction of allocation and relocation pointers in a page-structured
1598 // space.
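// Example (sketch; |area_start|, |area_end| and |object_size| are hypothetical
// pointer-aligned values): bump-pointer allocation within a linear area.
//   LinearAllocationArea info(area_start, area_end);
//   Address object = info.top();
//   info.set_top(object + object_size);  // Bump the allocation pointer.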
1599 class LinearAllocationArea {
1600  public:
  LinearAllocationArea() : top_(kNullAddress), limit_(kNullAddress) {}
  LinearAllocationArea(Address top, Address limit) : top_(top), limit_(limit) {}

  void Reset(Address top, Address limit) {
    set_top(top);
    set_limit(limit);
  }

  INLINE(void set_top(Address top)) {
    SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
    top_ = top;
  }

  INLINE(Address top()) const {
    SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
    return top_;
  }

  Address* top_address() { return &top_; }

  INLINE(void set_limit(Address limit)) {
    limit_ = limit;
  }

  INLINE(Address limit()) const {
    return limit_;
  }

  Address* limit_address() { return &limit_; }

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationAreaAddress(top_) ==
            Page::FromAllocationAreaAddress(limit_)) &&
           (top_ <= limit_);
  }
#endif
1638 
1639  private:
1640   // Current allocation top.
1641   Address top_;
1642   // Current allocation limit.
1643   Address limit_;
1644 };
1645 
1646 
1647 // An abstraction of the accounting statistics of a page-structured space.
1648 //
1649 // The stats are only set by functions that ensure they stay balanced. These
1650 // functions increase or decrease one of the non-capacity stats in conjunction
1651 // with capacity, or else they always balance increases and decreases to the
1652 // non-capacity stats.
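// Example (sketch; |page| and |object_size| are hypothetical): keeping the
// stats balanced when growing the space and allocating from it.
//   AllocationStats stats;
//   stats.IncreaseCapacity(Page::kAllocatableMemory);   // Page added.
//   stats.IncreaseAllocatedBytes(object_size, page);    // Object allocated.
//   stats.DecreaseAllocatedBytes(object_size, page);    // Object freed.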
1653 class AllocationStats BASE_EMBEDDED {
1654  public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    max_capacity_ = 0;
    ClearSize();
  }

  void ClearSize() {
    size_ = 0;
#ifdef DEBUG
    allocated_on_page_.clear();
#endif
  }

  // Accessors for the allocation statistics.
  size_t Capacity() { return capacity_.Value(); }
  size_t MaxCapacity() { return max_capacity_; }
  size_t Size() { return size_; }
#ifdef DEBUG
  size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
#endif

  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    DCHECK_GE(size_ + bytes, size_);
    size_ += bytes;
#ifdef DEBUG
    allocated_on_page_[page] += bytes;
#endif
  }

  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    DCHECK_GE(size_, bytes);
    size_ -= bytes;
#ifdef DEBUG
    DCHECK_GE(allocated_on_page_[page], bytes);
    allocated_on_page_[page] -= bytes;
#endif
  }

  void DecreaseCapacity(size_t bytes) {
    size_t capacity = capacity_.Value();
    DCHECK_GE(capacity, bytes);
    DCHECK_GE(capacity - bytes, size_);
    USE(capacity);
    capacity_.Decrement(bytes);
  }

  void IncreaseCapacity(size_t bytes) {
    size_t capacity = capacity_.Value();
    DCHECK_GE(capacity + bytes, capacity);
    capacity_.Increment(bytes);
    if (capacity > max_capacity_) {
      max_capacity_ = capacity;
    }
  }
1712 
1713  private:
1714   // |capacity_|: The number of object-area bytes (i.e., not including page
1715   // bookkeeping structures) currently in the space.
1716   // During evacuation capacity of the main spaces is accessed from multiple
1717   // threads to check the old generation hard limit.
1718   base::AtomicNumber<size_t> capacity_;
1719 
1720   // |max_capacity_|: The maximum capacity ever observed.
1721   size_t max_capacity_;
1722 
1723   // |size_|: The number of allocated bytes.
1724   size_t size_;
1725 
1726 #ifdef DEBUG
1727   std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
1728 #endif
1729 };
1730 
1731 // A free list maintaining free blocks of memory. The free list is organized in
1732 // a way to encourage objects allocated around the same time to be near each
1733 // other. The normal way to allocate is intended to be by bumping a 'top'
1734 // pointer until it hits a 'limit' pointer.  When the limit is hit we need to
1735 // find a new space to allocate from. This is done with the free list, which is
1736 // divided up into rough categories to cut down on waste. Having finer
1737 // categories would scatter allocation more.
1738 
1739 // The free list is organized in categories as follows:
1740 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
1741 //   allocation, when categories >= small do not have entries anymore.
1742 // 11-31 words (tiny): The tiny blocks are only used for allocation, when
1743 //   categories >= small do not have entries anymore.
1744 // 32-255 words (small): Used for allocating free space between 1-31 words in
1745 //   size.
1746 // 256-2047 words (medium): Used for allocating free space between 32-255 words
1747 //   in size.
// 2048-16383 words (large): Used for allocating free space between 256-2047
//   words in size.
1750 // At least 16384 words (huge): This list is for objects of 2048 words or
1751 //   larger. Empty pages are also added to this list.
1752 class V8_EXPORT_PRIVATE FreeList {
1753  public:
1754   // This method returns how much memory can be allocated after freeing
1755   // maximum_freed memory.
  static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
1757     if (maximum_freed <= kTiniestListMax) {
1758       // Since we are not iterating over all list entries, we cannot guarantee
1759       // that we can find the maximum freed block in that free list.
1760       return 0;
1761     } else if (maximum_freed <= kTinyListMax) {
1762       return kTinyAllocationMax;
1763     } else if (maximum_freed <= kSmallListMax) {
1764       return kSmallAllocationMax;
1765     } else if (maximum_freed <= kMediumListMax) {
1766       return kMediumAllocationMax;
1767     } else if (maximum_freed <= kLargeListMax) {
1768       return kLargeAllocationMax;
1769     }
1770     return maximum_freed;
1771   }
1772 
  static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
1774     if (size_in_bytes <= kTiniestListMax) {
1775       return kTiniest;
1776     } else if (size_in_bytes <= kTinyListMax) {
1777       return kTiny;
1778     } else if (size_in_bytes <= kSmallListMax) {
1779       return kSmall;
1780     } else if (size_in_bytes <= kMediumListMax) {
1781       return kMedium;
1782     } else if (size_in_bytes <= kLargeListMax) {
1783       return kLarge;
1784     }
1785     return kHuge;
1786   }
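  // Example (illustrative, assuming kPointerSize == 8): a request of 64 bytes
  // (8 words) maps to kTiniest, 1 KB (128 words) maps to kSmall, and anything
  // above 16383 words maps to kHuge.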
1787 
1788   FreeList();
1789 
  // Adds a node on the free list. The block of size {size_in_bytes} starting
  // at {start} is placed on the free list. The return value is the number of
  // bytes that were not added to the free list, because the freed memory block
  // was too small. Bookkeeping information will be written to the block, i.e.,
  // its contents will be destroyed. The start address should be word aligned,
  // and the size should be a non-zero multiple of the word size.
1796   size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
1797 
  // Allocates a free space node from the free list of at least size_in_bytes
  // bytes. Returns the actual node size in node_size, which can be bigger than
  // size_in_bytes. This method returns null if the allocation request cannot be
  // handled by the free list.
1802   V8_WARN_UNUSED_RESULT FreeSpace* Allocate(size_t size_in_bytes,
1803                                             size_t* node_size);
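  // Example (sketch; |start| and |size| are hypothetical word-aligned values):
  //   size_t wasted = free_list->Free(start, size, kLinkCategory);
  //   size_t node_size = 0;
  //   FreeSpace* node = free_list->Allocate(128, &node_size);
  //   if (node != nullptr) {
  //     // node_size >= 128; carve the requested object out of |node|.
  //   }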
1804 
1805   // Clear the free list.
1806   void Reset();
1807 
  void ResetStats() {
1809     wasted_bytes_.SetValue(0);
1810     ForAllFreeListCategories(
1811         [](FreeListCategory* category) { category->ResetStats(); });
1812   }
1813 
1814   // Return the number of bytes available on the free list.
  size_t Available() {
    size_t available = 0;
    ForAllFreeListCategories([&available](FreeListCategory* category) {
      available += category->available();
    });
    return available;
  }

  bool IsEmpty() {
    bool empty = true;
    ForAllFreeListCategories([&empty](FreeListCategory* category) {
      if (!category->is_empty()) empty = false;
    });
    return empty;
  }
1830 
1831   // Used after booting the VM.
1832   void RepairLists(Heap* heap);
1833 
1834   size_t EvictFreeListItems(Page* page);
1835   bool ContainsPageFreeListItems(Page* page);
1836 
  size_t wasted_bytes() { return wasted_bytes_.Value(); }

  template <typename Callback>
  void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1841     FreeListCategory* current = categories_[type];
1842     while (current != nullptr) {
1843       FreeListCategory* next = current->next();
1844       callback(current);
1845       current = next;
1846     }
1847   }
1848 
1849   template <typename Callback>
  void ForAllFreeListCategories(Callback callback) {
1851     for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1852       ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1853     }
1854   }
1855 
1856   bool AddCategory(FreeListCategory* category);
1857   void RemoveCategory(FreeListCategory* category);
1858   void PrintCategories(FreeListCategoryType type);
1859 
1860   // Returns a page containing an entry for a given type, or nullptr otherwise.
1861   inline Page* GetPageForCategoryType(FreeListCategoryType type);
1862 
1863 #ifdef DEBUG
1864   size_t SumFreeLists();
1865   bool IsVeryLong();
1866 #endif
1867 
1868  private:
1869   class FreeListCategoryIterator {
1870    public:
    FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
        : current_(free_list->categories_[type]) {}

    bool HasNext() { return current_ != nullptr; }

    FreeListCategory* Next() {
1877       DCHECK(HasNext());
1878       FreeListCategory* tmp = current_;
1879       current_ = current_->next();
1880       return tmp;
1881     }
1882 
1883    private:
1884     FreeListCategory* current_;
1885   };
1886 
1887   // The size range of blocks, in bytes.
1888   static const size_t kMinBlockSize = 3 * kPointerSize;
1889   static const size_t kMaxBlockSize = Page::kAllocatableMemory;
1890 
1891   static const size_t kTiniestListMax = 0xa * kPointerSize;
1892   static const size_t kTinyListMax = 0x1f * kPointerSize;
1893   static const size_t kSmallListMax = 0xff * kPointerSize;
1894   static const size_t kMediumListMax = 0x7ff * kPointerSize;
1895   static const size_t kLargeListMax = 0x3fff * kPointerSize;
1896   static const size_t kTinyAllocationMax = kTiniestListMax;
1897   static const size_t kSmallAllocationMax = kTinyListMax;
1898   static const size_t kMediumAllocationMax = kSmallListMax;
1899   static const size_t kLargeAllocationMax = kMediumListMax;
1900 
1901   // Walks all available categories for a given |type| and tries to retrieve
1902   // a node. Returns nullptr if the category is empty.
1903   FreeSpace* FindNodeIn(FreeListCategoryType type, size_t minimum_size,
1904                         size_t* node_size);
1905 
1906   // Tries to retrieve a node from the first category in a given |type|.
1907   // Returns nullptr if the category is empty or the top entry is smaller
1908   // than minimum_size.
1909   FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
1910                            size_t* node_size);
1911 
1912   // Searches a given |type| for a node of at least |minimum_size|.
1913   FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
1914                                  size_t minimum_size);
1915 
1916   // The tiny categories are not used for fast allocation.
  FreeListCategoryType SelectFastAllocationFreeListCategoryType(
      size_t size_in_bytes) {
1919     if (size_in_bytes <= kSmallAllocationMax) {
1920       return kSmall;
1921     } else if (size_in_bytes <= kMediumAllocationMax) {
1922       return kMedium;
1923     } else if (size_in_bytes <= kLargeAllocationMax) {
1924       return kLarge;
1925     }
1926     return kHuge;
1927   }
1928 
  FreeListCategory* top(FreeListCategoryType type) const {
1930     return categories_[type];
1931   }
1932 
1933   base::AtomicNumber<size_t> wasted_bytes_;
1934   FreeListCategory* categories_[kNumberOfCategories];
1935 
1936   friend class FreeListCategory;
1937 };
1938 
1939 // LocalAllocationBuffer represents a linear allocation area that is created
1940 // from a given {AllocationResult} and can be used to allocate memory without
1941 // synchronization.
1942 //
1943 // The buffer is properly closed upon destruction and reassignment.
1944 // Example:
1945 //   {
1946 //     AllocationResult result = ...;
1947 //     LocalAllocationBuffer a(heap, result, size);
1948 //     LocalAllocationBuffer b = a;
1949 //     CHECK(!a.IsValid());
1950 //     CHECK(b.IsValid());
1951 //     // {a} is invalid now and cannot be used for further allocations.
1952 //   }
1953 //   // Since {b} went out of scope, the LAB is closed, resulting in creating a
1954 //   // filler object for the remaining area.
1955 class LocalAllocationBuffer {
1956  public:
1957   // Indicates that a buffer cannot be used for allocations anymore. Can result
1958   // from either reassigning a buffer, or trying to construct it from an
1959   // invalid {AllocationResult}.
1960   static inline LocalAllocationBuffer InvalidBuffer();
1961 
1962   // Creates a new LAB from a given {AllocationResult}. Results in
1963   // InvalidBuffer if the result indicates a retry.
1964   static inline LocalAllocationBuffer FromResult(Heap* heap,
1965                                                  AllocationResult result,
1966                                                  intptr_t size);
1967 
  ~LocalAllocationBuffer() { Close(); }
1969 
1970   // Convert to C++11 move-semantics once allowed by the style guide.
1971   LocalAllocationBuffer(const LocalAllocationBuffer& other);
1972   LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
1973 
1974   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
1975       int size_in_bytes, AllocationAlignment alignment);
1976 
  inline bool IsValid() { return allocation_info_.top() != kNullAddress; }
1978 
1979   // Try to merge LABs, which is only possible when they are adjacent in memory.
1980   // Returns true if the merge was successful, false otherwise.
1981   inline bool TryMerge(LocalAllocationBuffer* other);
1982 
1983   inline bool TryFreeLast(HeapObject* object, int object_size);
1984 
1985   // Close a LAB, effectively invalidating it. Returns the unused area.
1986   LinearAllocationArea Close();
1987 
1988  private:
1989   LocalAllocationBuffer(Heap* heap, LinearAllocationArea allocation_info);
1990 
1991   Heap* heap_;
1992   LinearAllocationArea allocation_info_;
1993 };
1994 
1995 class SpaceWithLinearArea : public Space {
1996  public:
  SpaceWithLinearArea(Heap* heap, AllocationSpace id)
1998       : Space(heap, id), top_on_previous_step_(0) {
1999     allocation_info_.Reset(kNullAddress, kNullAddress);
2000   }
2001 
2002   virtual bool SupportsInlineAllocation() = 0;
2003 
2004   // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top(); }
  Address limit() { return allocation_info_.limit(); }

  // The allocation top address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }
2015 
2016   V8_EXPORT_PRIVATE void AddAllocationObserver(
2017       AllocationObserver* observer) override;
2018   V8_EXPORT_PRIVATE void RemoveAllocationObserver(
2019       AllocationObserver* observer) override;
2020   V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
2021   V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
2022 
2023   // When allocation observers are active we may use a lower limit to allow the
2024   // observers to 'interrupt' earlier than the natural limit. Given a linear
2025   // area bounded by [start, end), this function computes the limit to use to
2026   // allow proper observation based on existing observers. min_size specifies
2027   // the minimum size that the limited area should have.
2028   Address ComputeLimit(Address start, Address end, size_t min_size);
2029   V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
2030       size_t min_size) = 0;
2031 
2032  protected:
2033   // If we are doing inline allocation in steps, this method performs the 'step'
2034   // operation. top is the memory address of the bump pointer at the last
2035   // inline allocation (i.e. it determines the numbers of bytes actually
2036   // allocated since the last step.) top_for_next_step is the address of the
2037   // bump pointer where the next byte is going to be allocated from. top and
2038   // top_for_next_step may be different when we cross a page boundary or reset
2039   // the space.
2040   // TODO(ofrobots): clarify the precise difference between this and
2041   // Space::AllocationStep.
2042   void InlineAllocationStep(Address top, Address top_for_next_step,
2043                             Address soon_object, size_t size);
2044   V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;
2045 
2046   // TODO(ofrobots): make these private after refactoring is complete.
2047   LinearAllocationArea allocation_info_;
2048   Address top_on_previous_step_;
2049 };
2050 
2051 class V8_EXPORT_PRIVATE PagedSpace
    : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
2053  public:
2054   typedef PageIterator iterator;
2055 
2056   static const size_t kCompactionMemoryWanted = 500 * KB;
2057 
2058   // Creates a space with an id.
2059   PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
2060 
2061   ~PagedSpace() override { TearDown(); }
2062 
2063   // Set up the space using the given address range of virtual memory (from
2064   // the memory allocator's initial chunk) if possible.  If the block of
2065   // addresses is not big enough to contain a single page-aligned page, a
2066   // fresh chunk will be allocated.
2067   bool SetUp();
2068 
2069   // Returns true if the space has been successfully set up and not
2070   // subsequently torn down.
2071   bool HasBeenSetUp();
2072 
2073   // Checks whether an object/address is in this space.
2074   inline bool Contains(Address a);
2075   inline bool Contains(Object* o);
2076   bool ContainsSlow(Address addr);
2077 
2078   // Does the space need executable memory?
2079   Executability executable() { return executable_; }
2080 
2081   // During boot the free_space_map is created, and afterwards we may need
2082   // to write it into the free list nodes that were already created.
2083   void RepairFreeListsAfterDeserialization();
2084 
2085   // Prepares for a mark-compact GC.
2086   void PrepareForMarkCompact();
2087 
2088   // Current capacity without growing (Size() + Available()).
2089   size_t Capacity() { return accounting_stats_.Capacity(); }
2090 
2091   // Approximate amount of physical memory committed for this space.
2092   size_t CommittedPhysicalMemory() override;
2093 
2094   void ResetFreeListStatistics();
2095 
2096   // Sets the capacity, the available space and the wasted space to zero.
2097   // The stats are rebuilt during sweeping by adding each page to the
2098   // capacity and the size when it is encountered.  As free spaces are
2099   // discovered during the sweeping they are subtracted from the size and added
2100   // to the available and wasted totals.
2101   void ClearStats() {
2102     accounting_stats_.ClearSize();
2103     free_list_.ResetStats();
2104     ResetFreeListStatistics();
2105   }
2106 
2107   // Available bytes without growing.  These are the bytes on the free list.
2108   // The bytes in the linear allocation area are not included in this total
2109   // because updating the stats would slow down allocation.  New pages are
2110   // immediately added to the free list so they show up here.
2111   size_t Available() override { return free_list_.Available(); }
2112 
2113   // Allocated bytes in this space.  Garbage bytes that were not found due to
2114   // concurrent sweeping are counted as being allocated!  The bytes in the
2115   // current linear allocation area (between top and limit) are also counted
2116   // here.
2117   size_t Size() override { return accounting_stats_.Size(); }
2118 
2119   // As size, but the bytes in lazily swept pages are estimated and the bytes
2120   // in the current linear allocation area are not included.
2121   size_t SizeOfObjects() override;
2122 
2123   // Wasted bytes in this space.  These are just the bytes that were thrown away
2124   // due to being too small to use for allocation.
2125   virtual size_t Waste() { return free_list_.wasted_bytes(); }
2126 
2127   enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
2128 
2129   // Allocate the requested number of bytes in the space if possible, return a
2130   // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
2131   // to be manually updated later.
2132   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
2133       int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
2134 
2135   // Allocate the requested number of bytes in the space double aligned if
2136   // possible, return a failure object if not.
2137   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
2138       int size_in_bytes, AllocationAlignment alignment);
2139 
2140   // Allocate the requested number of bytes in the space and consider allocation
2141   // alignment if needed.
2142   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
2143       int size_in_bytes, AllocationAlignment alignment);
2144 
2145   size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
2146     if (size_in_bytes == 0) return 0;
2147     heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
2148                                  ClearRecordedSlots::kNo);
2149     if (mode == SpaceAccountingMode::kSpaceAccounted) {
2150       return AccountedFree(start, size_in_bytes);
2151     } else {
2152       return UnaccountedFree(start, size_in_bytes);
2153     }
2154   }
2155 
2156   // Give a block of memory to the space's free list.  It might be added to
2157   // the free list or accounted as waste.
2158   // If add_to_freelist is false then just accounting stats are updated and
2159   // no attempt to add area to free list is made.
2160   size_t AccountedFree(Address start, size_t size_in_bytes) {
2161     size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
2162     Page* page = Page::FromAddress(start);
2163     accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
2164     DCHECK_GE(size_in_bytes, wasted);
2165     return size_in_bytes - wasted;
2166   }
2167 
2168   size_t UnaccountedFree(Address start, size_t size_in_bytes) {
2169     size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
2170     DCHECK_GE(size_in_bytes, wasted);
2171     return size_in_bytes - wasted;
2172   }
2173 
2174   inline bool TryFreeLast(HeapObject* object, int object_size);
2175 
2176   void ResetFreeList();
2177 
2178   // Empty space linear allocation area, returning unused area to free list.
2179   void FreeLinearAllocationArea();
2180 
2181   void MarkLinearAllocationAreaBlack();
2182   void UnmarkLinearAllocationArea();
2183 
2184   void DecreaseAllocatedBytes(size_t bytes, Page* page) {
2185     accounting_stats_.DecreaseAllocatedBytes(bytes, page);
2186   }
2187   void IncreaseAllocatedBytes(size_t bytes, Page* page) {
2188     accounting_stats_.IncreaseAllocatedBytes(bytes, page);
2189   }
2190   void DecreaseCapacity(size_t bytes) {
2191     accounting_stats_.DecreaseCapacity(bytes);
2192   }
2193   void IncreaseCapacity(size_t bytes) {
2194     accounting_stats_.IncreaseCapacity(bytes);
2195   }
2196 
2197   void RefineAllocatedBytesAfterSweeping(Page* page);
2198 
2199   // The dummy page that anchors the linked list of pages.
2200   Page* anchor() { return &anchor_; }
2201 
2202   Page* InitializePage(MemoryChunk* chunk, Executability executable);
2203   void ReleasePage(Page* page);
2204   // Adds the page to this space and returns the number of bytes added to the
2205   // free list of the space.
2206   size_t AddPage(Page* page);
2207   void RemovePage(Page* page);
2208   // Remove a page if it has at least |size_in_bytes| bytes available that can
2209   // be used for allocation.
2210   Page* RemovePageSafe(int size_in_bytes);
2211 
2212   void SetReadAndExecutable();
2213   void SetReadAndWritable();
2214 
2215 #ifdef VERIFY_HEAP
2216   // Verify integrity of this space.
2217   virtual void Verify(ObjectVisitor* visitor);
2218 
2219   void VerifyLiveBytes();
2220 
2221   // Overridden by subclasses to verify space-specific object
2222   // properties (e.g., only maps or free-list nodes are in map space).
2223   virtual void VerifyObject(HeapObject* obj) {}
2224 #endif
2225 
2226 #ifdef DEBUG
2227   void VerifyCountersAfterSweeping();
2228   void VerifyCountersBeforeConcurrentSweeping();
2229   // Print meta info and objects in this space.
2230   void Print() override;
2231 
2232   // Report code object related statistics
2233   static void ReportCodeStatistics(Isolate* isolate);
2234   static void ResetCodeStatistics(Isolate* isolate);
2235 #endif
2236 
2237   Page* FirstPage() { return anchor_.next_page(); }
2238   Page* LastPage() { return anchor_.prev_page(); }
2239 
2240   bool CanExpand(size_t size);
2241 
2242   // Returns the number of total pages in this space.
2243   int CountTotalPages();
2244 
2245   // Return size of allocatable area on a page in this space.
2246   inline int AreaSize() { return static_cast<int>(area_size_); }
2247 
2248   virtual bool is_local() { return false; }
2249 
2250   // Merges {other} into the current space. Note that this modifies {other},
2251   // e.g., removes its bump pointer area and resets statistics.
2252   void MergeCompactionSpace(CompactionSpace* other);
2253 
2254   // Refills the free list from the corresponding free list filled by the
2255   // sweeper.
2256   virtual void RefillFreeList();
2257 
2258   FreeList* free_list() { return &free_list_; }
2259 
2260   base::Mutex* mutex() { return &space_mutex_; }
2261 
2262   inline void UnlinkFreeListCategories(Page* page);
2263   inline size_t RelinkFreeListCategories(Page* page);
2264 
2265   iterator begin() { return iterator(anchor_.next_page()); }
2266   iterator end() { return iterator(&anchor_); }
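  // Example (sketch; assumes |space| points to a set-up PagedSpace): the
  // begin()/end() pair above makes the space usable in a range-based for loop
  // over its pages.
  //   for (Page* page : *space) {
  //     // Visit |page|.
  //   }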
2267 
2268   // Shrink immortal immovable pages of the space to be exactly the size needed
2269   // using the high water mark.
2270   void ShrinkImmortalImmovablePages();
2271 
2272   size_t ShrinkPageToHighWaterMark(Page* page);
2273 
2274   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2275 
2276   void SetLinearAllocationArea(Address top, Address limit);
2277 
2278  private:
2279   // Set space linear allocation area.
2280   void SetTopAndLimit(Address top, Address limit) {
2281     DCHECK(top == limit ||
2282            Page::FromAddress(top) == Page::FromAddress(limit - 1));
2283     MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2284     allocation_info_.Reset(top, limit);
2285   }
2286   void DecreaseLimit(Address new_limit);
2287   void UpdateInlineAllocationLimit(size_t min_size) override;
2288   bool SupportsInlineAllocation() override {
2289     return identity() == OLD_SPACE && !is_local();
2290   }
2291 
2292  protected:
2293   // PagedSpaces that should be included in snapshots have different, i.e.,
2294   // smaller, initial pages.
2295   virtual bool snapshotable() { return true; }
2296 
2297   bool HasPages() { return anchor_.next_page() != &anchor_; }
2298 
2299   // Cleans up the space, frees all pages in this space except those belonging
2300   // to the initial chunk, uncommits addresses in the initial chunk.
2301   void TearDown();
2302 
2303   // Expands the space by allocating a fixed number of pages. Returns false if
2304   // it cannot allocate requested number of pages from OS, or if the hard heap
2305   // size limit has been hit.
2306   bool Expand();
2307 
2308   // Sets up a linear allocation area that fits the given number of bytes.
2309   // Returns false if there is not enough space and the caller has to retry
2310   // after collecting garbage.
2311   inline bool EnsureLinearAllocationArea(int size_in_bytes);
2312   // Allocates an object from the linear allocation area. Assumes that the
  // linear allocation area is large enough to fit the object.
2314   inline HeapObject* AllocateLinearly(int size_in_bytes);
2315   // Tries to allocate an aligned object from the linear allocation area.
2316   // Returns nullptr if the linear allocation area does not fit the object.
2317   // Otherwise, returns the object pointer and writes the allocation size
2318   // (object size + alignment filler size) to the size_in_bytes.
2319   inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
2320                                                 AllocationAlignment alignment);
2321 
2322   V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
2323       size_t size_in_bytes);
2324 
2325   // If sweeping is still in progress try to sweep unswept pages. If that is
2326   // not successful, wait for the sweeper threads and retry free-list
2327   // allocation. Returns false if there is not enough space and the caller
2328   // has to retry after collecting garbage.
2329   V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
2330 
2331   // Slow path of AllocateRaw. This function is space-dependent. Returns false
2332   // if there is not enough space and the caller has to retry after
2333   // collecting garbage.
2334   V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
2335       int size_in_bytes);
2336 
2337   // Implementation of SlowAllocateRaw. Returns false if there is not enough
2338   // space and the caller has to retry after collecting garbage.
2339   V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
2340       int size_in_bytes);
2341 
2342   Executability executable_;
2343 
2344   size_t area_size_;
2345 
2346   // Accounting information for this space.
2347   AllocationStats accounting_stats_;
2348 
2349   // The dummy page that anchors the double linked list of pages.
2350   Page anchor_;
2351 
2352   // The space's free list.
2353   FreeList free_list_;
2354 
2355   // Mutex guarding any concurrent access to the space.
2356   base::Mutex space_mutex_;
2357 
2358   friend class IncrementalMarking;
2359   friend class MarkCompactCollector;
2360 
2361   // Used in cctest.
2362   friend class heap::HeapTester;
2363 };
2364 
2365 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
2366 
2367 // -----------------------------------------------------------------------------
2368 // SemiSpace in young generation
2369 //
2370 // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
2371 // The mark-compact collector  uses the memory of the first page in the from
2372 // space as a marking stack when tracing live objects.
2373 class SemiSpace : public Space {
2374  public:
2375   typedef PageIterator iterator;
2376 
2377   static void Swap(SemiSpace* from, SemiSpace* to);
2378 
  SemiSpace(Heap* heap, SemiSpaceId semispace)
2380       : Space(heap, NEW_SPACE),
2381         current_capacity_(0),
2382         maximum_capacity_(0),
2383         minimum_capacity_(0),
2384         age_mark_(kNullAddress),
2385         committed_(false),
2386         id_(semispace),
2387         anchor_(this),
2388         current_page_(nullptr),
2389         pages_used_(0) {}
2390 
2391   inline bool Contains(HeapObject* o);
2392   inline bool Contains(Object* o);
2393   inline bool ContainsSlow(Address a);
2394 
2395   void SetUp(size_t initial_capacity, size_t maximum_capacity);
2396   void TearDown();
  bool HasBeenSetUp() { return maximum_capacity_ != 0; }

  bool Commit();
  bool Uncommit();
  bool is_committed() { return committed_; }
2402 
2403   // Grow the semispace to the new capacity.  The new capacity requested must
2404   // be larger than the current capacity and less than the maximum capacity.
2405   bool GrowTo(size_t new_capacity);
2406 
2407   // Shrinks the semispace to the new capacity.  The new capacity requested
2408   // must be more than the amount of used memory in the semispace and less
2409   // than the current capacity.
2410   bool ShrinkTo(size_t new_capacity);
2411 
2412   bool EnsureCurrentCapacity();
2413 
2414   // Returns the start address of the first page of the space.
  Address space_start() {
    DCHECK_NE(anchor_.next_page(), anchor());
    return anchor_.next_page()->area_start();
  }

  Page* first_page() { return anchor_.next_page(); }
  Page* current_page() { return current_page_; }
  int pages_used() { return pages_used_; }

  // Returns one past the end address of the space.
  Address space_end() { return anchor_.prev_page()->area_end(); }

  // Returns the start address of the current page of the space.
  Address page_low() { return current_page_->area_start(); }

  // Returns one past the end address of the current page of the space.
  Address page_high() { return current_page_->area_end(); }

  bool AdvancePage() {
    Page* next_page = current_page_->next_page();
    // We cannot expand if we reached the maximum number of pages already. Note
    // that we need to account for the next page already for this check as we
    // could potentially fill the whole page after advancing.
    const bool reached_max_pages = (pages_used_ + 1) == max_pages();
    if (next_page == anchor() || reached_max_pages) {
      return false;
    }
    current_page_ = next_page;
    pages_used_++;
    return true;
  }
2446 
2447   // Resets the space to using the first page.
2448   void Reset();
2449 
2450   void RemovePage(Page* page);
2451   void PrependPage(Page* page);
2452   Page* InitializePage(MemoryChunk* chunk, Executability executable);
2453 
2454   // Age mark accessors.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark);

  // Returns the current capacity of the semispace.
  size_t current_capacity() { return current_capacity_; }

  // Returns the maximum capacity of the semispace.
  size_t maximum_capacity() { return maximum_capacity_; }

  // Returns the initial capacity of the semispace.
  size_t minimum_capacity() { return minimum_capacity_; }

  SemiSpaceId id() { return id_; }
2468 
2469   // Approximate amount of physical memory committed for this space.
2470   size_t CommittedPhysicalMemory() override;
2471 
2472   // If we don't have these here then SemiSpace will be abstract.  However
2473   // they should never be called:
2474 
  size_t Size() override {
    UNREACHABLE();
  }

  size_t SizeOfObjects() override { return Size(); }

  size_t Available() override {
    UNREACHABLE();
  }

  iterator begin() { return iterator(anchor_.next_page()); }
  iterator end() { return iterator(anchor()); }
2487 
2488   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2489 
2490 #ifdef DEBUG
2491   void Print() override;
  // Validate a range of addresses in a SemiSpace.
2493   // The "from" address must be on a page prior to the "to" address,
2494   // in the linked page order, or it must be earlier on the same page.
2495   static void AssertValidRange(Address from, Address to);
2496 #else
2497   // Do nothing.
  inline static void AssertValidRange(Address from, Address to) {}
2499 #endif
2500 
2501 #ifdef VERIFY_HEAP
2502   virtual void Verify();
2503 #endif
2504 
2505  private:
2506   void RewindPages(Page* start, int num_pages);
2507 
  inline Page* anchor() { return &anchor_; }
  inline int max_pages() {
    return static_cast<int>(current_capacity_ / Page::kPageSize);
  }
2512 
2513   // Copies the flags into the masked positions on all pages in the space.
2514   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
2515 
2516   // The currently committed space capacity.
2517   size_t current_capacity_;
2518 
2519   // The maximum capacity that can be used by this space. A space cannot grow
2520   // beyond that size.
2521   size_t maximum_capacity_;
2522 
2523   // The minimum capacity for the space. A space cannot shrink below this size.
2524   size_t minimum_capacity_;
2525 
2526   // Used to govern object promotion during mark-compact collection.
2527   Address age_mark_;
2528 
2529   bool committed_;
2530   SemiSpaceId id_;
2531 
2532   Page anchor_;
2533   Page* current_page_;
2534   int pages_used_;
2535 
2536   friend class NewSpace;
2537   friend class SemiSpaceIterator;
2538 };
2539 
2540 
2541 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2542 // semispace of the heap's new space.  It iterates over the objects in the
2543 // semispace from a given start address (defaulting to the bottom of the
2544 // semispace) to the top of the semispace.  New objects allocated after the
2545 // iterator is created are not iterated.
2546 class SemiSpaceIterator : public ObjectIterator {
2547  public:
2548   // Create an iterator over the allocated objects in the given to-space.
2549   explicit SemiSpaceIterator(NewSpace* space);
2550 
2551   inline HeapObject* Next() override;
2552 
2553  private:
2554   void Initialize(Address start, Address end);
2555 
2556   // The current iteration point.
2557   Address current_;
2558   // The end of iteration.
2559   Address limit_;
2560 };
2561 
2562 // -----------------------------------------------------------------------------
2563 // The young generation space.
2564 //
2565 // The new space consists of a contiguous pair of semispaces.  It simply
2566 // forwards most functions to the appropriate semispace.
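// Example (minimal sketch; |new_space| and |size| are hypothetical, with
// |new_space| pointing to a set-up NewSpace):
//   AllocationResult result = new_space->AllocateRaw(size, kWordAligned);
//   HeapObject* obj = nullptr;
//   if (result.To(&obj)) {
//     // |obj| lives in the active semispace until the next scavenge.
//   }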
2567 
2568 class NewSpace : public SpaceWithLinearArea {
2569  public:
2570   typedef PageIterator iterator;
2571 
  explicit NewSpace(Heap* heap)
2573       : SpaceWithLinearArea(heap, NEW_SPACE),
2574         to_space_(heap, kToSpace),
2575         from_space_(heap, kFromSpace),
2576         reservation_() {}
2577 
2578   inline bool Contains(HeapObject* o);
2579   inline bool ContainsSlow(Address a);
2580   inline bool Contains(Object* o);
2581 
2582   bool SetUp(size_t initial_semispace_capacity, size_t max_semispace_capacity);
2583 
2584   // Tears down the space.  Heap memory was not allocated by the space, so it
2585   // is not deallocated here.
2586   void TearDown();
2587 
2588   // True if the space has been set up but not torn down.
  bool HasBeenSetUp() {
    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
  }
2592 
2593   // Flip the pair of spaces.
2594   void Flip();
2595 
2596   // Grow the capacity of the semispaces.  Assumes that they are not at
2597   // their maximum capacity.
2598   void Grow();
2599 
2600   // Shrink the capacity of the semispaces.
2601   void Shrink();
2602 
2603   // Return the allocated bytes in the active semispace.
  size_t Size() override {
    DCHECK_GE(top(), to_space_.page_low());
    return to_space_.pages_used() * Page::kAllocatableMemory +
           static_cast<size_t>(top() - to_space_.page_low());
  }

  size_t SizeOfObjects() override { return Size(); }

  // Return the allocatable capacity of a semispace.
  size_t Capacity() {
    SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    return (to_space_.current_capacity() / Page::kPageSize) *
           Page::kAllocatableMemory;
  }
2618 
2619   // Return the current size of a semispace, allocatable and non-allocatable
2620   // memory.
  size_t TotalCapacity() {
    DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    return to_space_.current_capacity();
  }
2625 
2626   // Committed memory for NewSpace is the committed memory of both semi-spaces
2627   // combined.
  size_t CommittedMemory() override {
    return from_space_.CommittedMemory() + to_space_.CommittedMemory();
  }

  size_t MaximumCommittedMemory() override {
    return from_space_.MaximumCommittedMemory() +
           to_space_.MaximumCommittedMemory();
  }
2636 
2637   // Approximate amount of physical memory committed for this space.
2638   size_t CommittedPhysicalMemory() override;
2639 
2640   // Return the available bytes without growing.
  size_t Available() override {
    DCHECK_GE(Capacity(), Size());
    return Capacity() - Size();
  }

  size_t ExternalBackingStoreBytes() const override {
    DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes());
    return to_space_.ExternalBackingStoreBytes();
  }
2650 
  size_t AllocatedSinceLastGC() {
2652     const Address age_mark = to_space_.age_mark();
2653     DCHECK_NE(age_mark, kNullAddress);
2654     DCHECK_NE(top(), kNullAddress);
2655     Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
2656     Page* const last_page = Page::FromAllocationAreaAddress(top());
2657     Page* current_page = age_mark_page;
2658     size_t allocated = 0;
2659     if (current_page != last_page) {
2660       DCHECK_EQ(current_page, age_mark_page);
2661       DCHECK_GE(age_mark_page->area_end(), age_mark);
2662       allocated += age_mark_page->area_end() - age_mark;
2663       current_page = current_page->next_page();
2664     } else {
2665       DCHECK_GE(top(), age_mark);
2666       return top() - age_mark;
2667     }
2668     while (current_page != last_page) {
2669       DCHECK_NE(current_page, age_mark_page);
2670       allocated += Page::kAllocatableMemory;
2671       current_page = current_page->next_page();
2672     }
2673     DCHECK_GE(top(), current_page->area_start());
2674     allocated += top() - current_page->area_start();
2675     DCHECK_LE(allocated, Size());
2676     return allocated;
2677   }
2678 
  void MovePageFromSpaceToSpace(Page* page) {
2680     DCHECK(page->InFromSpace());
2681     from_space_.RemovePage(page);
2682     to_space_.PrependPage(page);
2683   }
2684 
2685   bool Rebalance();
2686 
2687   // Return the maximum capacity of a semispace.
  size_t MaximumCapacity() {
    DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
    return to_space_.maximum_capacity();
  }

  bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }

  // Returns the initial capacity of a semispace.
  size_t InitialTotalCapacity() {
    DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
    return to_space_.minimum_capacity();
  }
2700 
  void ResetOriginalTop() {
    DCHECK_GE(top(), original_top());
    DCHECK_LE(top(), original_limit());
    original_top_.SetValue(top());
  }

  Address original_top() { return original_top_.Value(); }
  Address original_limit() { return original_limit_.Value(); }

  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.space_start(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2717 
2718   V8_WARN_UNUSED_RESULT INLINE(AllocationResult AllocateRawAligned(
2719       int size_in_bytes, AllocationAlignment alignment));
2720 
2721   V8_WARN_UNUSED_RESULT INLINE(
2722       AllocationResult AllocateRawUnaligned(int size_in_bytes));
2723 
2724   V8_WARN_UNUSED_RESULT INLINE(AllocationResult AllocateRaw(
2725       int size_in_bytes, AllocationAlignment alignment));
2726 
2727   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
2728       int size_in_bytes, AllocationAlignment alignment);
2729 
2730   // Reset the allocation pointer to the beginning of the active semispace.
2731   void ResetLinearAllocationArea();
2732 
2733   // When inline allocation stepping is active, either because of incremental
2734   // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
2735   // inline allocation every once in a while. This is done by setting
  // allocation_info_.limit to be lower than the actual limit and increasing
2737   // it in steps to guarantee that the observers are notified periodically.
2738   void UpdateInlineAllocationLimit(size_t size_in_bytes) override;

  // Get the extent of the inactive semispace (for use as a marking stack,
  // or to zap it). Notice: space-addresses are not necessarily on the
  // same page, so FromSpaceStart() might be above FromSpaceEnd().
  Address FromSpacePageLow() { return from_space_.page_low(); }
  Address FromSpacePageHigh() { return from_space_.page_high(); }
  Address FromSpaceStart() { return from_space_.space_start(); }
  Address FromSpaceEnd() { return from_space_.space_end(); }

  // Get the extent of the active semispace's pages' memory.
  Address ToSpaceStart() { return to_space_.space_start(); }
  Address ToSpaceEnd() { return to_space_.space_end(); }

  inline bool ToSpaceContainsSlow(Address a);
  inline bool FromSpaceContainsSlow(Address a);
  inline bool ToSpaceContains(Object* o);
  inline bool FromSpaceContains(Object* o);

  // Try to switch the active semispace to a new, empty, page.
  // Returns false if this isn't possible or reasonable (i.e., there
  // are no pages, or the current page is already empty), or true
  // if successful.
  bool AddFreshPage();
  bool AddFreshPageSynchronized();

#ifdef VERIFY_HEAP
  // Verify the active semispace.
  virtual void Verify();
#endif

#ifdef DEBUG
  // Print the active semispace.
  void Print() override { to_space_.Print(); }
#endif

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

  bool IsFromSpaceCommitted() { return from_space_.is_committed(); }

  SemiSpace* active_space() { return &to_space_; }

  iterator begin() { return to_space_.begin(); }
  iterator end() { return to_space_.end(); }

  std::unique_ptr<ObjectIterator> GetObjectIterator() override;

  SemiSpace& from_space() { return from_space_; }
  SemiSpace& to_space() { return to_space_; }

 private:
  // Update linear allocation area to match the current to-space page.
  void UpdateLinearAllocationArea();

  base::Mutex mutex_;

  // The top and the limit at the time of setting the linear allocation area.
  // These values can be accessed by background tasks.
  base::AtomicValue<Address> original_top_;
  base::AtomicValue<Address> original_limit_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;
  VirtualMemory reservation_;

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
  bool SupportsInlineAllocation() override { return true; }

  friend class SemiSpaceIterator;
};

class PauseAllocationObserversScope {
 public:
  explicit PauseAllocationObserversScope(Heap* heap);
  ~PauseAllocationObserversScope();

 private:
  Heap* heap_;
  DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
};

// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.

class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
 public:
  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}

  bool is_local() override { return true; }

 protected:
  // The space is temporary and not included in any snapshots.
  bool snapshotable() override { return false; }

  V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
      int size_in_bytes) override;

  V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
      int size_in_bytes) override;
};


// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
 public:
  explicit CompactionSpaceCollection(Heap* heap)
      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}

  CompactionSpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE:
        return &old_space_;
      case CODE_SPACE:
        return &code_space_;
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
  }

 private:
  CompactionSpace old_space_;
  CompactionSpace code_space_;
};
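
// Usage sketch (illustrative only): each compaction task owns one collection
// and evacuates into the matching local space, e.g.
//
//   CompactionSpaceCollection spaces(heap);
//   CompactionSpace* old_space = spaces.Get(OLD_SPACE);
//   // ... allocate evacuated objects in |old_space| ...
//
// When the task finishes, the owning collector merges the local spaces back
// into the main heap spaces (the merge itself is not part of this header).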

// -----------------------------------------------------------------------------
// Old generation regular object space.

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object. The constructor does not allocate pages
  // from the OS.
  explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
};

// -----------------------------------------------------------------------------
// Old generation code object space.

class CodeSpace : public PagedSpace {
 public:
  // Creates a code space object. The constructor does not allocate pages
  // from the OS.
  explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
};


// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
              (info).top() <= (space).page_high() &&  \
              (info).limit() <= (space).page_high())
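
// Usage sketch (illustrative only): the macro is intended to be invoked on a
// LinearAllocationArea together with the semispace backing it, e.g.
//
//   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
//
// which checks page_low() <= top() <= page_high() and limit() <= page_high()
// in slow-DCHECK builds.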


// -----------------------------------------------------------------------------
// Old space for all map objects.

class MapSpace : public PagedSpace {
 public:
  // Creates a map space object.
  MapSpace(Heap* heap, AllocationSpace id)
      : PagedSpace(heap, id, NOT_EXECUTABLE) {}

  int RoundSizeDownToObjectAlignment(int size) override {
    if (base::bits::IsPowerOfTwo(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }
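
  // Worked example (sizes are illustrative, not the actual Map::kSize): with
  // an object size of 88 bytes, a request of 200 rounds down to
  // (200 / 88) * 88 = 176; with a power-of-two object size such as 64,
  // RoundDown(200, 64) == 192 is computed with a mask instead of a division.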

#ifdef VERIFY_HEAP
  void VerifyObject(HeapObject* obj) override;
#endif
};

// -----------------------------------------------------------------------------
// Read Only space for all Immortal, Immovable and Immutable objects.

class ReadOnlySpace : public PagedSpace {
 public:
  class WritableScope {
   public:
    explicit WritableScope(ReadOnlySpace* space) : space_(space) {
      space_->MarkAsReadWrite();
    }

    ~WritableScope() { space_->MarkAsReadOnly(); }

   private:
    ReadOnlySpace* space_;
  };
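
  // Usage sketch (illustrative only): temporarily lift the write protection
  // while mutating objects in this space, e.g.
  //
  //   {
  //     ReadOnlySpace::WritableScope scope(read_only_space);
  //     // ... write to objects in the read-only space ...
  //   }  // Pages revert to read-only when the scope ends.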

  ReadOnlySpace(Heap* heap, AllocationSpace id, Executability executable);

  void ClearStringPaddingIfNeeded();
  void MarkAsReadOnly();

 private:
  void MarkAsReadWrite();
  void SetPermissionsForPages(PageAllocator::Permission access);

  bool is_marked_read_only_ = false;

  // String padding must be cleared just before serialization and therefore
  // the string padding in the space will already have been cleared if the
  // space was deserialized.
  bool is_string_padding_cleared_;
};

// -----------------------------------------------------------------------------
// Large objects (> kMaxRegularHeapObjectSize) are allocated and
// managed by the large object space. A large object is allocated from the OS
// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at offset Page::kObjectStartOffset within its
// page. Large objects do not move during garbage collections.

class LargeObjectSpace : public Space {
 public:
  typedef LargePageIterator iterator;

  LargeObjectSpace(Heap* heap, AllocationSpace id);
  virtual ~LargeObjectSpace();

  // Initializes internal data structures.
  bool SetUp();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  static size_t ObjectSizeFor(size_t chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }
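
  // Worked example (constants are illustrative, not the real values): with a
  // 512 KB chunk, a 4 KB Page::kPageSize and a 1 KB Page::kObjectStartOffset,
  // ObjectSizeFor returns 512 - 4 - 1 = 507 KB of usable object payload;
  // chunks no larger than the padding yield 0.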

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                     Executability executable);
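
  // Usage sketch (illustrative only): allocating a large, non-executable
  // object and handling failure, e.g.
  //
  //   AllocationResult result =
  //       lo_space->AllocateRaw(object_size, NOT_EXECUTABLE);
  //   HeapObject* object;
  //   if (!result.To(&object)) {
  //     // Retry is expected to happen after a full GC has freed memory.
  //   }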

  // Available bytes for objects in this space.
  inline size_t Available() override;

  size_t Size() override { return size_; }
  size_t SizeOfObjects() override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  int PageCount() { return page_count_; }

  // Finds an object for a given address, returns a Smi if it is not found.
  // The function iterates through all objects in this space and may be slow.
  Object* FindObject(Address a);

  // Takes the chunk_map_mutex_ and calls FindPage after that.
  LargePage* FindPageThreadSafe(Address a);

  // Finds a large object page containing the given address, returns nullptr
  // if such a page doesn't exist.
  LargePage* FindPage(Address a);

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  void InsertChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page, Address free_start);

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);
  // Checks whether an address is in the object area in this space. Iterates
  // all objects in the space. May be slow.
  bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page_ == nullptr; }

  LargePage* first_page() { return first_page_; }

  // Collect code statistics.
  void CollectCodeStatistics();

  iterator begin() { return iterator(first_page_); }
  iterator end() { return iterator(nullptr); }

  std::unique_ptr<ObjectIterator> GetObjectIterator() override;

  base::Mutex* chunk_map_mutex() { return &chunk_map_mutex_; }

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

#ifdef DEBUG
  void Print() override;
#endif

 private:
  // The head of the linked list of large object chunks.
  LargePage* first_page_;
  size_t size_;          // allocated bytes
  int page_count_;       // number of chunks
  size_t objects_size_;  // size of objects

  // The chunk_map_mutex_ has to be used when the chunk map is accessed
  // concurrently.
  base::Mutex chunk_map_mutex_;

  // Page-aligned addresses to their corresponding LargePage.
  std::unordered_map<Address, LargePage*> chunk_map_;

  friend class LargeObjectIterator;
};


class LargeObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);

  HeapObject* Next() override;

 private:
  LargePage* current_;
};

// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class MemoryChunkIterator BASE_EMBEDDED {
 public:
  inline explicit MemoryChunkIterator(Heap* heap);

  // Return nullptr when the iterator is done.
  inline MemoryChunk* next();

 private:
  enum State {
    kOldSpaceState,
    kMapState,
    kCodeState,
    kLargeObjectState,
    kFinishedState
  };
  Heap* heap_;
  State state_;
  PageIterator old_iterator_;
  PageIterator code_iterator_;
  PageIterator map_iterator_;
  LargePageIterator lo_iterator_;
};
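
// Usage sketch (illustrative only): visiting every chunk that can contain
// pointers to new space or to evacuation candidates, e.g.
//
//   MemoryChunkIterator it(heap);
//   for (MemoryChunk* chunk = it.next(); chunk != nullptr;
//        chunk = it.next()) {
//     // ... process slots recorded on |chunk| ...
//   }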

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_H_