// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <atomic>
#include <vector>

#include "src/heap/concurrent-marking.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"

namespace v8 {
namespace internal {

// Forward declarations.
class EvacuationJobTraits;
class HeapObjectVisitor;
class ItemParallelJob;
class MigrationObserver;
class RecordMigratedSlotVisitor;
class UpdatingItem;
class YoungGenerationMarkingVisitor;

class MarkBitCellIterator {
 public:
  MarkBitCellIterator(MemoryChunk* chunk, Bitmap* bitmap) : chunk_(chunk) {
    last_cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
    cell_base_ = chunk_->address();
    cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
    cells_ = bitmap->cells();
  }

  inline bool Done() { return cell_index_ >= last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  V8_WARN_UNUSED_RESULT inline bool Advance() {
    cell_base_ += Bitmap::kBitsPerCell * kTaggedSize;
    return ++cell_index_ != last_cell_index_;
  }

  inline bool Advance(unsigned int new_cell_index) {
    if (new_cell_index != cell_index_) {
      DCHECK_GT(new_cell_index, cell_index_);
      DCHECK_LE(new_cell_index, last_cell_index_);
      unsigned int diff = new_cell_index - cell_index_;
      cell_index_ = new_cell_index;
      cell_base_ += diff * (Bitmap::kBitsPerCell * kTaggedSize);
      return true;
    }
    return false;
  }

  // Returns the next mark bit cell. If there is no next cell, returns 0.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
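
// Illustrative usage sketch (an editorial comment only, not code used by V8
// itself): walking the mark bit cells of a chunk with the iterator above.
// |chunk| and |bitmap| are assumed to be a valid page and its marking bitmap.
//
//   MarkBitCellIterator it(chunk, bitmap);
//   while (!it.Done()) {
//     MarkBit::CellType cell = *it.CurrentCell();
//     Address cell_base = it.CurrentCellBase();
//     // Each set bit in |cell| is a markbit for an address relative to
//     // |cell_base|.
//     if (!it.Advance()) break;
//   }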

enum LiveObjectIterationMode { kBlackObjects, kGreyObjects, kAllLiveObjects };

template <LiveObjectIterationMode mode>
class LiveObjectRange {
 public:
  class iterator {
   public:
    using value_type = std::pair<HeapObject, int /* size */>;
    using pointer = const value_type*;
    using reference = const value_type&;
    using iterator_category = std::forward_iterator_tag;

    inline iterator(MemoryChunk* chunk, Bitmap* bitmap, Address start);

    inline iterator& operator++();
    inline iterator operator++(int);

    bool operator==(iterator other) const {
      return current_object_ == other.current_object_;
    }

    bool operator!=(iterator other) const { return !(*this == other); }

    value_type operator*() {
      return std::make_pair(current_object_, current_size_);
    }

   private:
    inline void AdvanceToNextValidObject();

    MemoryChunk* const chunk_;
    Map const one_word_filler_map_;
    Map const two_word_filler_map_;
    Map const free_space_map_;
    MarkBitCellIterator it_;
    Address cell_base_;
    MarkBit::CellType current_cell_;
    HeapObject current_object_;
    int current_size_;
  };

  LiveObjectRange(MemoryChunk* chunk, Bitmap* bitmap)
      : chunk_(chunk),
        bitmap_(bitmap),
        start_(chunk_->area_start()),
        end_(chunk->area_end()) {
    DCHECK(!chunk->IsLargePage());
  }

  inline iterator begin();
  inline iterator end();

 private:
  MemoryChunk* const chunk_;
  Bitmap* bitmap_;
  Address start_;
  Address end_;
};
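
// Illustrative usage sketch (assumes |chunk| is a regular, non-large page and
// |bitmap| is its marking bitmap): LiveObjectRange is a range over
// (object, size) pairs, so it composes with range-based for loops.
//
//   for (auto object_and_size :
//        LiveObjectRange<kBlackObjects>(chunk, bitmap)) {
//     HeapObject object = object_and_size.first;
//     int size = object_and_size.second;
//     // ... process the black |object| occupying |size| bytes ...
//   }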

class LiveObjectVisitor : AllStatic {
 public:
  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  // Visits black objects on a MemoryChunk until the Visitor returns |false|
  // for an object. If IterationMode::kClearMarkbits is passed, the markbits
  // and slots of each successfully visited object are cleared.
  template <class Visitor, typename MarkingState>
  static bool VisitBlackObjects(MemoryChunk* chunk, MarkingState* state,
                                Visitor* visitor, IterationMode iteration_mode,
                                HeapObject* failed_object);

  // Visits black objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitBlackObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                      Visitor* visitor,
                                      IterationMode iteration_mode);

  // Visits grey objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitGreyObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                     Visitor* visitor,
                                     IterationMode iteration_mode);

  template <typename MarkingState>
  static void RecomputeLiveBytes(MemoryChunk* chunk, MarkingState* state);
};
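
// Illustrative usage sketch (assumes |chunk|, a marking |state|, and a
// |visitor| whose visit call returns false to abort, as described above):
// visiting black objects and learning which object caused the failure.
//
//   HeapObject failed_object;
//   if (!LiveObjectVisitor::VisitBlackObjects(
//           chunk, state, &visitor, LiveObjectVisitor::kClearMarkbits,
//           &failed_object)) {
//     // Visitation stopped at |failed_object|; only objects visited
//     // successfully before it had their markbits cleared.
//   }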

enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum MarkingTreatmentMode { KEEP, CLEAR };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };

// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
 public:
  virtual ~MarkCompactCollectorBase() = default;

  virtual void SetUp() = 0;
  virtual void TearDown() = 0;
  virtual void CollectGarbage() = 0;

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate();

 protected:
  explicit MarkCompactCollectorBase(Heap* heap)
      : heap_(heap), old_to_new_slots_(0) {}

  // Marking operations for objects reachable from roots.
  virtual void MarkLiveObjects() = 0;
  // Mark objects reachable (transitively) from objects in the marking
  // work list.
  virtual void DrainMarkingWorklist() = 0;
  // Clear non-live references held in side data structures.
  virtual void ClearNonLiveReferences() = 0;
  virtual void EvacuatePrologue() = 0;
  virtual void EvacuateEpilogue() = 0;
  virtual void Evacuate() = 0;
  virtual void EvacuatePagesInParallel() = 0;
  virtual void UpdatePointersAfterEvacuation() = 0;
  virtual UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk,
                                                  Address start,
                                                  Address end) = 0;
  virtual UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;

  template <class Evacuator, class Collector>
  void CreateAndExecuteEvacuationTasks(Collector* collector,
                                       ItemParallelJob* job,
                                       MigrationObserver* migration_observer,
                                       const intptr_t live_bytes);

  // Returns whether this page should be moved according to heuristics.
  bool ShouldMovePage(Page* p, intptr_t live_bytes, bool promote_young);

  int CollectToSpaceUpdatingItems(ItemParallelJob* job);
  template <typename IterateableSpace>
  int CollectRememberedSetUpdatingItems(ItemParallelJob* job,
                                        IterateableSpace* space,
                                        RememberedSetUpdatingMode mode);

  int NumberOfParallelCompactionTasks(int pages);
  int NumberOfParallelPointerUpdateTasks(int pages, int slots);
  int NumberOfParallelToSpacePointerUpdateTasks(int pages);

  Heap* heap_;
  // Number of old to new slots. Should be computed during MarkLiveObjects.
  // -1 indicates that the value couldn't be computed.
  int old_to_new_slots_;
};

class MinorMarkingState final
    : public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
 public:
  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
    return chunk->young_generation_bitmap<AccessMode::ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_ = value;
  }
};

class MinorNonAtomicMarkingState final
    : public MarkingStateBase<MinorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
      const MemoryChunk* chunk) const {
    return chunk->young_generation_bitmap<AccessMode::NON_ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_.fetch_add(
        by, std::memory_order_relaxed);
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_.load(
        std::memory_order_relaxed);
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_.store(value,
                                                   std::memory_order_relaxed);
  }
};

// This is used by marking visitors.
class MajorMarkingState final
    : public MarkingStateBase<MajorMarkingState, AccessMode::ATOMIC> {
 public:
  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                  reinterpret_cast<intptr_t>(chunk),
              MemoryChunk::kMarkBitmapOffset);
    return chunk->marking_bitmap<AccessMode::ATOMIC>();
  }

  // Concurrent marking uses local live bytes so we may do these accesses
  // non-atomically.
  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_ = value;
  }
};

// This is used by Scavenger and Evacuator in TransferColor.
// Live byte increments have to be atomic.
class MajorAtomicMarkingState final
    : public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
 public:
  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                  reinterpret_cast<intptr_t>(chunk),
              MemoryChunk::kMarkBitmapOffset);
    return chunk->marking_bitmap<AccessMode::ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    std::atomic_fetch_add(
        reinterpret_cast<std::atomic<intptr_t>*>(&chunk->live_byte_count_), by);
  }
};

class MajorNonAtomicMarkingState final
    : public MarkingStateBase<MajorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
      const MemoryChunk* chunk) const {
    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                  reinterpret_cast<intptr_t>(chunk),
              MemoryChunk::kMarkBitmapOffset);
    return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_ = value;
  }
};
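
// Illustrative usage sketch (assumptions of this comment: |collector| is the
// heap's full MarkCompactCollector declared below, and |object| was just
// marked black with size |size| on a regular page): the marking state is the
// single interface for per-chunk live-byte accounting and bitmap access.
//
//   MarkCompactCollector::MarkingState* state = collector->marking_state();
//   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
//   state->IncrementLiveBytes(chunk, size);
//   intptr_t live = state->live_bytes(chunk);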

// This visitor is used for marking on the main thread. It is cheaper than
// the concurrent marking visitor because it does not snapshot JSObjects.
template <typename MarkingState>
class MainMarkingVisitor final
    : public MarkingVisitorBase<MainMarkingVisitor<MarkingState>,
                                MarkingState> {
 public:
  // This is used for revisiting objects that were black allocated.
  class RevisitScope {
   public:
    explicit RevisitScope(MainMarkingVisitor* visitor) : visitor_(visitor) {
      DCHECK(!visitor->revisiting_object_);
      visitor->revisiting_object_ = true;
    }
    ~RevisitScope() {
      DCHECK(visitor_->revisiting_object_);
      visitor_->revisiting_object_ = false;
    }

   private:
    MainMarkingVisitor<MarkingState>* visitor_;
  };

  MainMarkingVisitor(MarkingState* marking_state,
                     MarkingWorklists* marking_worklists,
                     WeakObjects* weak_objects, Heap* heap,
                     unsigned mark_compact_epoch,
                     BytecodeFlushMode bytecode_flush_mode,
                     bool embedder_tracing_enabled, bool is_forced_gc)
      : MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
            kMainThreadTask, marking_worklists, weak_objects, heap,
            mark_compact_epoch, bytecode_flush_mode, embedder_tracing_enabled,
            is_forced_gc),
        marking_state_(marking_state),
        revisiting_object_(false) {}

  // HeapVisitor override to allow revisiting of black objects.
  bool ShouldVisit(HeapObject object) {
    return marking_state_->GreyToBlack(object) ||
           V8_UNLIKELY(revisiting_object_);
  }

  void MarkDescriptorArrayFromWriteBarrier(HeapObject host,
                                           DescriptorArray descriptors,
                                           int number_of_own_descriptors);

 private:
  // Functions required by MarkingVisitorBase.

  template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
  int VisitJSObjectSubclass(Map map, T object);

  template <typename T>
  int VisitLeftTrimmableArray(Map map, T object);

  template <typename TSlot>
  void RecordSlot(HeapObject object, TSlot slot, HeapObject target);

  void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);

  void SynchronizePageAccess(HeapObject heap_object) {
    // Nothing to do on the main thread.
  }

  MarkingState* marking_state() { return marking_state_; }

  TraceRetainingPathMode retaining_path_mode() {
    return (V8_UNLIKELY(FLAG_track_retaining_path))
               ? TraceRetainingPathMode::kEnabled
               : TraceRetainingPathMode::kDisabled;
  }

  MarkingState* const marking_state_;

  friend class MarkingVisitorBase<MainMarkingVisitor<MarkingState>,
                                  MarkingState>;
  bool revisiting_object_;
};
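
// Illustrative usage sketch (assumes |visitor| is the main-thread marking
// visitor and |object| is an already-black object whose layout changed, e.g.
// a black-allocated object): RevisitScope makes ShouldVisit() accept a black
// object again so that its body can be re-visited through the inherited
// HeapVisitor entry point.
//
//   {
//     MainMarkingVisitor<MarkingState>::RevisitScope scope(visitor);
//     visitor->Visit(object.map(), object);
//   }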

// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
 public:
#ifdef V8_CONCURRENT_MARKING
  using MarkingState = MajorMarkingState;
#else
  using MarkingState = MajorNonAtomicMarkingState;
#endif  // V8_CONCURRENT_MARKING
  using AtomicMarkingState = MajorAtomicMarkingState;
  using NonAtomicMarkingState = MajorNonAtomicMarkingState;

  using MarkingVisitor = MainMarkingVisitor<MarkingState>;

  class RootMarkingVisitor;
  class CustomRootBodyMarkingVisitor;

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  enum class MarkingWorklistProcessingMode {
    kDefault,
    kTrackNewlyDiscoveredObjects
  };

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  // Performs a global garbage collection.
  void CollectGarbage() override;

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Stop concurrent marking (either by preempting it right away or waiting for
  // it to complete as requested by |stop_request|).
  void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);

  bool StartCompaction();

  void AbortCompaction();

  void StartMarking();

  static inline bool IsOnEvacuationCandidate(Object obj) {
    return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
  }

  static bool IsOnEvacuationCandidate(MaybeObject obj);

  struct RecordRelocSlotInfo {
    MemoryChunk* memory_chunk;
    SlotType slot_type;
    bool should_record;
    uint32_t offset;
  };
  static RecordRelocSlotInfo PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
                                                    HeapObject target);
  static void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);
  V8_INLINE static void RecordSlot(HeapObject object, ObjectSlot slot,
                                   HeapObject target);
  V8_INLINE static void RecordSlot(HeapObject object, HeapObjectSlot slot,
                                   HeapObject target);
  V8_INLINE static void RecordSlot(MemoryChunk* source_page,
                                   HeapObjectSlot slot, HeapObject target);
  void RecordLiveSlotsOnPage(Page* page);

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  bool is_compacting() const { return compacting_; }

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  V8_EXPORT_PRIVATE void EnsureSweepingCompleted();

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  MarkingWorklistsHolder* marking_worklists_holder() {
    return &marking_worklists_holder_;
  }
  MarkingWorklists* marking_worklists() { return marking_worklists_.get(); }

  WeakObjects* weak_objects() { return &weak_objects_; }

  inline void AddTransitionArray(TransitionArray array);

  void AddNewlyDiscovered(HeapObject object) {
    if (ephemeron_marking_.newly_discovered_overflowed) return;

    if (ephemeron_marking_.newly_discovered.size() <
        ephemeron_marking_.newly_discovered_limit) {
      ephemeron_marking_.newly_discovered.push_back(object);
    } else {
      ephemeron_marking_.newly_discovered_overflowed = true;
    }
  }

  void ResetNewlyDiscovered() {
    ephemeron_marking_.newly_discovered_overflowed = false;
    ephemeron_marking_.newly_discovered.clear();
  }

  Sweeper* sweeper() { return sweeper_; }

#ifdef DEBUG
  // Checks whether we are performing a mark-compact collection.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  void VerifyMarking();
#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  void VerifyMarkbitsAreDirty(ReadOnlySpace* space);
  void VerifyMarkbitsAreClean(PagedSpace* space);
  void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyMarkbitsAreClean(LargeObjectSpace* space);
#endif

  unsigned epoch() const { return epoch_; }

  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector() override;

  // Used by wrapper tracing.
  V8_INLINE void MarkExternallyReferencedObject(HeapObject obj);
  // Used by incremental marking for objects that change their layout.
  void VisitObject(HeapObject obj);
  // Used by incremental marking for black-allocated objects.
  void RevisitObject(HeapObject obj);
  // Ensures that all descriptors in the range [0, number_of_own_descriptors)
  // are visited.
  void MarkDescriptorArrayFromWriteBarrier(HeapObject host,
                                           DescriptorArray array,
                                           int number_of_own_descriptors);

  // Drains the main thread marking worklist until the specified number of
  // bytes are processed. If the number of bytes is zero, then the worklist
  // is drained until it is empty.
  template <MarkingWorklistProcessingMode mode =
                MarkingWorklistProcessingMode::kDefault>
  size_t ProcessMarkingWorklist(size_t bytes_to_process);
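
  // Illustrative usage sketch (an editorial comment only; callers and budgets
  // are assumptions, not prescribed by this header):
  //
  //   // Mark objects until roughly 1 MB of object payload has been visited,
  //   // or until the worklist runs dry, whichever happens first.
  //   size_t processed = ProcessMarkingWorklist(1024 * 1024);
  //   // A zero budget means "drain the worklist completely".
  //   ProcessMarkingWorklist(0);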

 private:
  void ComputeEvacuationHeuristics(size_t area_size,
                                   int* target_fragmentation_percent,
                                   size_t* max_evacuated_bytes);

  void RecordObjectStats();

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // Free unmarked ArrayBufferExtensions.
  void SweepArrayBufferExtensions();

  void MarkLiveObjects() override;

  // Marks the object black and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkObject(HeapObject host, HeapObject obj);

  // Marks the object black and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkRootObject(Root root, HeapObject obj);

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootVisitor* root_visitor,
                 ObjectVisitor* custom_root_body_visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(ObjectVisitor* visitor);
  // Marks objects reachable from harmony weak maps and wrapper tracing.
  void ProcessEphemeronMarking();

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat embedded pointers in the code as strong;
  // otherwise they can die and trigger deoptimization of the underlying code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Drains the main thread marking work list. Will mark all pending objects
  // if no concurrent threads are running.
  void DrainMarkingWorklist() override;

  // Implements ephemeron semantics: Marks value if key is already reachable.
  // Returns true if value was actually marked.
  bool ProcessEphemeron(HeapObject key, HeapObject value);

  // Marks ephemerons and drains the marking worklist iteratively
  // until a fixpoint is reached.
  void ProcessEphemeronsUntilFixpoint();

  // Drains the ephemeron and marking worklists. Performs a single iteration
  // of the fixpoint iteration.
  bool ProcessEphemerons();

  // Mark ephemerons and drain marking worklist with a linear algorithm.
  // Only used if fixpoint iteration doesn't finish within a few iterations.
  void ProcessEphemeronsLinear();

  // Perform Wrapper Tracing if in use.
  void PerformWrapperTracing();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences() override;
  void MarkDependentCodeForDeoptimization();
  // Checks if the given weak cell is a simple transition from the parent map
  // of the given dead target. If so it clears the transition and trims
  // the descriptor array of the parent if needed.
  void ClearPotentialSimpleMapTransition(Map dead_target);
  void ClearPotentialSimpleMapTransition(Map map, Map dead_target);

  // Flushes a weakly held bytecode array from a shared function info.
  void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);

  // Clears bytecode arrays that have not been executed for multiple
  // collections.
  void ClearOldBytecodeCandidates();

  // Resets any JSFunctions which have had their bytecode flushed.
  void ClearFlushedJsFunctions();

  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  void TrimDescriptorArray(Map map, DescriptorArray descriptors);
  void TrimEnumCache(Map map, DescriptorArray descriptors);
  bool CompactTransitionArray(Map map, TransitionArray transitions,
                              DescriptorArray descriptors);

  // After all reachable objects have been marked those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // Goes through the list of encountered weak references and clears those with
  // dead values. If the value is a dead map and the parent map transitions to
  // the dead map via weak cell, then this function also clears the map
  // transition.
  void ClearWeakReferences();

  // Goes through the list of encountered JSWeakRefs and WeakCells and clears
  // those with dead values.
  void ClearJSWeakRefs();

  void AbortWeakObjects();

  // Starts sweeping of spaces by contributing on the main thread and setting
  // up other pages for sweeping. Does not start sweeper tasks.
  void StartSweepSpaces();
  void StartSweepSpace(PagedSpace* space);

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
                                          Address end) override;
  UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
  int CollectOldSpaceArrayBufferTrackerItems(ItemParallelJob* job);

  void ReleaseEvacuationCandidates();
  void PostProcessEvacuationCandidates();
  void ReportAbortedEvacuationCandidate(HeapObject failed_object,
                                        MemoryChunk* chunk);

  static const int kEphemeronChunkSize = 8 * KB;

  int NumberOfParallelEphemeronVisitingTasks(size_t elements);

  void RightTrimDescriptorArray(DescriptorArray array, int descriptors_to_trim);

  base::Mutex mutex_;
  base::Semaphore page_parallel_job_semaphore_;

#ifdef DEBUG
  enum CollectorState{IDLE,
                      PREPARE_GC,
                      MARK_LIVE_OBJECTS,
                      SWEEP_SPACES,
                      ENCODE_FORWARDING_ADDRESSES,
                      UPDATE_POINTERS,
                      RELOCATE_OBJECTS};

  // The current stage of the collector.
  CollectorState state_;
#endif

  bool was_marked_incrementally_;

  bool evacuation_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool black_allocation_;

  bool have_code_to_deoptimize_;

  MarkingWorklistsHolder marking_worklists_holder_;

  WeakObjects weak_objects_;
  EphemeronMarking ephemeron_marking_;

  std::unique_ptr<MarkingVisitor> marking_visitor_;
  std::unique_ptr<MarkingWorklists> marking_worklists_;
  NativeContextInferrer native_context_inferrer_;
  NativeContextStats native_context_stats_;

  // Candidates for pages that should be evacuated.
  std::vector<Page*> evacuation_candidates_;
  // Pages that are actually processed during evacuation.
  std::vector<Page*> old_space_evacuation_pages_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<std::pair<HeapObject, Page*>> aborted_evacuation_candidates_;

  Sweeper* sweeper_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  // Counts the number of major mark-compact collections. The counter is
  // incremented right after marking. This is used for:
  // - marking descriptor arrays. See NumberOfMarkedDescriptors. Only the lower
  //   two bits are used, so it is okay if this counter overflows and wraps
  //   around.
  unsigned epoch_ = 0;

  friend class FullEvacuator;
  friend class RecordMigratedSlotVisitor;
};

class EvacuationScope {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
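
// Illustrative usage sketch (assumes |collector| is the heap's
// MarkCompactCollector): EvacuationScope is an RAII helper that flips the
// evacuation() flag for the duration of an evacuation phase.
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     DCHECK(collector->evacuation());
//     // ... evacuate pages and update pointers ...
//   }
//   // collector->evacuation() is false again here.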

#ifdef ENABLE_MINOR_MC

// Collector for young-generation only.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
 public:
  using MarkingState = MinorMarkingState;
  using NonAtomicMarkingState = MinorNonAtomicMarkingState;

  explicit MinorMarkCompactCollector(Heap* heap);
  ~MinorMarkCompactCollector() override;

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  void CollectGarbage() override;

  void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
                    FreeSpaceTreatmentMode free_space_mode);
  void CleanupSweepToIteratePages();

 private:
  using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
  class RootMarkingVisitor;

  static const int kNumMarkers = 8;
  static const int kMainMarker = 0;

  inline MarkingWorklist* worklist() { return worklist_; }

  inline YoungGenerationMarkingVisitor* main_marking_visitor() {
    return main_marking_visitor_;
  }

  void MarkLiveObjects() override;
  void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
  V8_INLINE void MarkRootObject(HeapObject obj);
  void DrainMarkingWorklist() override;
  void ClearNonLiveReferences() override;

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
                                          Address end) override;
  UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);

  int NumberOfParallelMarkingTasks(int pages);

  void SweepArrayBufferExtensions();

  MarkingWorklist* worklist_;

  YoungGenerationMarkingVisitor* main_marking_visitor_;
  base::Semaphore page_parallel_job_semaphore_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<Page*> sweep_to_iterate_pages_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  friend class YoungGenerationMarkingTask;
  friend class YoungGenerationMarkingVisitor;
};

#endif  // ENABLE_MINOR_MC

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_