// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <vector>

#include "src/heap/concurrent-marking.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"

namespace v8 {
namespace internal {

// Forward declarations.
class EvacuationJobTraits;
class HeapObjectVisitor;
class ItemParallelJob;
class MigrationObserver;
class RecordMigratedSlotVisitor;
class UpdatingItem;
class YoungGenerationMarkingVisitor;

template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
 public:
  V8_INLINE MarkBit MarkBitFrom(HeapObject* obj) {
    return MarkBitFrom(MemoryChunk::FromAddress(obj->address()),
                       obj->address());
  }

  V8_INLINE MarkBit MarkBitFrom(MemoryChunk* p, Address addr) {
    return static_cast<ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
        p->AddressToMarkbitIndex(addr));
  }

  Marking::ObjectColor Color(HeapObject* obj) {
    return Marking::Color(MarkBitFrom(obj));
  }

  V8_INLINE bool IsImpossible(HeapObject* obj) {
    return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsBlack(HeapObject* obj) {
    return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsWhite(HeapObject* obj) {
    return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsGrey(HeapObject* obj) {
    return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsBlackOrGrey(HeapObject* obj) {
    return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool WhiteToGrey(HeapObject* obj) {
    return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool WhiteToBlack(HeapObject* obj) {
    return WhiteToGrey(obj) && GreyToBlack(obj);
  }

  V8_INLINE bool GreyToBlack(HeapObject* obj) {
    MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
    MarkBit markbit = MarkBitFrom(p, obj->address());
    if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
    static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj->Size());
    return true;
  }

  void ClearLiveness(MemoryChunk* chunk) {
    static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
    static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
  }
};
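
// A minimal usage sketch of the CRTP interface above (illustrative only;
// `state` and `obj` are hypothetical locals, and MajorNonAtomicMarkingState is
// just one of the concrete states declared further down): a marker typically
// performs a white-to-grey transition before publishing an object to a
// worklist, and a grey-to-black transition once the object's fields have been
// visited.
//
//   MajorNonAtomicMarkingState state;
//   if (state.WhiteToGrey(obj)) {
//     // First time |obj| is seen: push it onto a marking worklist.
//   }
//   if (state.GreyToBlack(obj)) {
//     // |obj| is now black; its size was added to the page's live bytes.
//   }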

class MarkBitCellIterator {
 public:
  MarkBitCellIterator(MemoryChunk* chunk, Bitmap* bitmap) : chunk_(chunk) {
    DCHECK(Bitmap::IsCellAligned(
        chunk_->AddressToMarkbitIndex(chunk_->area_start())));
    DCHECK(Bitmap::IsCellAligned(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    last_cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
    cell_base_ = chunk_->area_start();
    cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
    cells_ = bitmap->cells();
  }

  inline bool Done() { return cell_index_ >= last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  V8_WARN_UNUSED_RESULT inline bool Advance() {
    cell_base_ += Bitmap::kBitsPerCell * kPointerSize;
    return ++cell_index_ != last_cell_index_;
  }

  inline bool Advance(unsigned int new_cell_index) {
    if (new_cell_index != cell_index_) {
      DCHECK_GT(new_cell_index, cell_index_);
      DCHECK_LE(new_cell_index, last_cell_index_);
      unsigned int diff = new_cell_index - cell_index_;
      cell_index_ = new_cell_index;
      cell_base_ += diff * (Bitmap::kBitsPerCell * kPointerSize);
      return true;
    }
    return false;
  }

  // Returns the next mark bit cell. If there is no next cell, returns 0.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
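
// A minimal sketch of walking a chunk's mark bitmap with the iterator above
// (illustrative only; `chunk` and `bitmap` are assumed to be provided by the
// caller):
//
//   for (MarkBitCellIterator it(chunk, bitmap); !it.Done();) {
//     MarkBit::CellType cell = *it.CurrentCell();
//     Address cell_base = it.CurrentCellBase();
//     // Each set bit in |cell| marks the word at
//     // |cell_base| + bit_index * kPointerSize.
//     if (!it.Advance()) break;
//   }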

enum LiveObjectIterationMode {
  kBlackObjects,
  kGreyObjects,
  kAllLiveObjects
};

template <LiveObjectIterationMode mode>
class LiveObjectRange {
 public:
  class iterator {
   public:
    using value_type = std::pair<HeapObject*, int /* size */>;
    using pointer = const value_type*;
    using reference = const value_type&;
    using iterator_category = std::forward_iterator_tag;

    inline iterator(MemoryChunk* chunk, Bitmap* bitmap, Address start);

    inline iterator& operator++();
    inline iterator operator++(int);

    bool operator==(iterator other) const {
      return current_object_ == other.current_object_;
    }

    bool operator!=(iterator other) const { return !(*this == other); }

    value_type operator*() {
      return std::make_pair(current_object_, current_size_);
    }

   private:
    inline void AdvanceToNextValidObject();

    MemoryChunk* const chunk_;
    Map* const one_word_filler_map_;
    Map* const two_word_filler_map_;
    Map* const free_space_map_;
    MarkBitCellIterator it_;
    Address cell_base_;
    MarkBit::CellType current_cell_;
    HeapObject* current_object_;
    int current_size_;
  };

  LiveObjectRange(MemoryChunk* chunk, Bitmap* bitmap)
      : chunk_(chunk),
        bitmap_(bitmap),
        start_(chunk_->area_start()),
        end_(chunk->area_end()) {}

  inline iterator begin();
  inline iterator end();

 private:
  MemoryChunk* const chunk_;
  Bitmap* bitmap_;
  Address start_;
  Address end_;
};
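
// A minimal sketch of iterating the live objects of a page (illustrative
// only; `chunk` and `state` stand in for a MemoryChunk* and a marking state):
//
//   for (auto object_and_size :
//        LiveObjectRange<kBlackObjects>(chunk, state->bitmap(chunk))) {
//     HeapObject* const object = object_and_size.first;
//     const int size = object_and_size.second;
//     // Process the black object of |size| bytes starting at |object|.
//   }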

class LiveObjectVisitor : AllStatic {
 public:
  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  // Visits black objects on a MemoryChunk until the visitor returns |false|
  // for an object. If IterationMode::kClearMarkbits is passed, the markbits
  // and slots of each successfully visited object are cleared.
  template <class Visitor, typename MarkingState>
  static bool VisitBlackObjects(MemoryChunk* chunk, MarkingState* state,
                                Visitor* visitor, IterationMode iteration_mode,
                                HeapObject** failed_object);

  // Visits black objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitBlackObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                      Visitor* visitor,
                                      IterationMode iteration_mode);

  // Visits grey objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitGreyObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                     Visitor* visitor,
                                     IterationMode iteration_mode);

  template <typename MarkingState>
  static void RecomputeLiveBytes(MemoryChunk* chunk, MarkingState* state);
};
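
// A minimal sketch of the failure-aware variant (illustrative only; `chunk`,
// `marking_state` and `visitor` are hypothetical locals):
//
//   HeapObject* failed_object = nullptr;
//   if (!LiveObjectVisitor::VisitBlackObjects(
//           chunk, marking_state, &visitor, LiveObjectVisitor::kClearMarkbits,
//           &failed_object)) {
//     // |visitor| returned false for |failed_object|; the caller has to
//     // treat the page as an aborted evacuation candidate.
//   }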

enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum MarkingTreatmentMode { KEEP, CLEAR };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };

// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
 public:
  virtual ~MarkCompactCollectorBase() {}

  virtual void SetUp() = 0;
  virtual void TearDown() = 0;
  virtual void CollectGarbage() = 0;

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() { return heap()->isolate(); }

 protected:
  static const int kMainThread = 0;
  explicit MarkCompactCollectorBase(Heap* heap)
      : heap_(heap), old_to_new_slots_(0) {}

  // Marking operations for objects reachable from roots.
  virtual void MarkLiveObjects() = 0;
  // Mark objects reachable (transitively) from objects in the marking
  // work list.
  virtual void ProcessMarkingWorklist() = 0;
  // Clear non-live references held in side data structures.
  virtual void ClearNonLiveReferences() = 0;
  virtual void EvacuatePrologue() = 0;
  virtual void EvacuateEpilogue() = 0;
  virtual void Evacuate() = 0;
  virtual void EvacuatePagesInParallel() = 0;
  virtual void UpdatePointersAfterEvacuation() = 0;
  virtual UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk,
                                                  Address start,
                                                  Address end) = 0;
  virtual UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;

  template <class Evacuator, class Collector>
  void CreateAndExecuteEvacuationTasks(
      Collector* collector, ItemParallelJob* job,
      RecordMigratedSlotVisitor* record_visitor,
      MigrationObserver* migration_observer, const intptr_t live_bytes);

  // Returns whether this page should be moved according to heuristics.
  bool ShouldMovePage(Page* p, intptr_t live_bytes);

  int CollectToSpaceUpdatingItems(ItemParallelJob* job);
  template <typename IterateableSpace>
  int CollectRememberedSetUpdatingItems(ItemParallelJob* job,
                                        IterateableSpace* space,
                                        RememberedSetUpdatingMode mode);

  int NumberOfParallelCompactionTasks(int pages);
  int NumberOfParallelPointerUpdateTasks(int pages, int slots);
  int NumberOfParallelToSpacePointerUpdateTasks(int pages);

  Heap* heap_;
  // Number of old to new slots. Should be computed during MarkLiveObjects.
  // -1 indicates that the value couldn't be computed.
  int old_to_new_slots_;
};
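
// The pure virtual hooks above correspond to the phases of a collection
// cycle. A rough sketch of the order in which a concrete collector drives
// them (simplified; the exact sequencing, including the prologue/epilogue and
// parallel evacuation steps wrapped by Evacuate(), lives in the .cc file):
//
//   MarkLiveObjects();         // mark roots and drain the marking worklist
//   ClearNonLiveReferences();  // weak cells, transitions, dependent code
//   Evacuate();                // evacuate pages and update pointers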

class MinorMarkingState final
    : public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return chunk->young_generation_bitmap_;
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    reinterpret_cast<base::AtomicNumber<intptr_t>*>(
        &chunk->young_generation_live_byte_count_)
        ->Increment(by);
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return reinterpret_cast<base::AtomicNumber<intptr_t>*>(
               &chunk->young_generation_live_byte_count_)
        ->Value();
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    reinterpret_cast<base::AtomicNumber<intptr_t>*>(
        &chunk->young_generation_live_byte_count_)
        ->SetValue(value);
  }
};

class MinorNonAtomicMarkingState final
    : public MarkingStateBase<MinorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return chunk->young_generation_bitmap_;
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_ = value;
  }
};

// This marking state is used when concurrent marking is running.
class IncrementalMarkingState final
    : public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
  }

  // Concurrent marking uses local live bytes.
  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_ = value;
  }
};

class MajorAtomicMarkingState final
    : public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
        ->Increment(by);
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return reinterpret_cast<base::AtomicNumber<intptr_t>*>(
               &chunk->live_byte_count_)
        ->Value();
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
        ->SetValue(value);
  }
};

class MajorNonAtomicMarkingState final
    : public MarkingStateBase<MajorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_ = value;
  }
};
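
// All of the states above share the MarkingStateBase interface; they differ
// in which bitmap they hand out and in how live bytes are accounted. The
// collectors pick a state via type aliases rather than virtual dispatch, as a
// sketch mirroring the aliases declared below:
//
//   // When V8_CONCURRENT_MARKING is defined:
//   using MarkingState = IncrementalMarkingState;
//   using NonAtomicMarkingState = MajorNonAtomicMarkingState;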

// Weak objects encountered during marking.
struct WeakObjects {
  Worklist<WeakCell*, 64> weak_cells;
  Worklist<TransitionArray*, 64> transition_arrays;
  // TODO(marja): For old space, we only need the slot, not the host
  // object. Optimize this by adding a different storage for old space.
  Worklist<std::pair<HeapObject*, HeapObjectReference**>, 64> weak_references;
  Worklist<std::pair<HeapObject*, Code*>, 64> weak_objects_in_code;
};

// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
 public:
#ifdef V8_CONCURRENT_MARKING
  using MarkingState = IncrementalMarkingState;
#else
  using MarkingState = MajorNonAtomicMarkingState;
#endif  // V8_CONCURRENT_MARKING
  using NonAtomicMarkingState = MajorNonAtomicMarkingState;
  // Wrapper for the shared, bailout, and on-hold worklists.
  class MarkingWorklist {
   public:
    using ConcurrentMarkingWorklist = Worklist<HeapObject*, 64>;

    // The heap parameter is not used but needed to match the sequential case.
    explicit MarkingWorklist(Heap* heap) {}

    void Push(HeapObject* object) {
      bool success = shared_.Push(kMainThread, object);
      USE(success);
      DCHECK(success);
    }

    void PushBailout(HeapObject* object) {
      bool success = bailout_.Push(kMainThread, object);
      USE(success);
      DCHECK(success);
    }

    HeapObject* Pop() {
      HeapObject* result;
#ifdef V8_CONCURRENT_MARKING
      if (bailout_.Pop(kMainThread, &result)) return result;
#endif
      if (shared_.Pop(kMainThread, &result)) return result;
#ifdef V8_CONCURRENT_MARKING
      // The expectation is that this work list is empty almost all the time
      // and we can thus avoid the emptiness checks by putting it last.
      if (on_hold_.Pop(kMainThread, &result)) return result;
#endif
      return nullptr;
    }

    HeapObject* PopBailout() {
      HeapObject* result;
#ifdef V8_CONCURRENT_MARKING
      if (bailout_.Pop(kMainThread, &result)) return result;
#endif
      return nullptr;
    }

    void Clear() {
      bailout_.Clear();
      shared_.Clear();
      on_hold_.Clear();
    }

    bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }

    bool IsEmpty() {
      return bailout_.IsLocalEmpty(kMainThread) &&
             shared_.IsLocalEmpty(kMainThread) &&
             on_hold_.IsLocalEmpty(kMainThread) &&
             bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty() &&
             on_hold_.IsGlobalPoolEmpty();
    }

    int Size() {
      return static_cast<int>(bailout_.LocalSize(kMainThread) +
                              shared_.LocalSize(kMainThread) +
                              on_hold_.LocalSize(kMainThread));
    }

    // Calls the specified callback on each element of the worklists and
    // replaces the element with the result of the callback. If the callback
    // returns nullptr then the element is removed from the worklist.
    // The callback must accept HeapObject* and return HeapObject*.
    template <typename Callback>
    void Update(Callback callback) {
      bailout_.Update(callback);
      shared_.Update(callback);
      on_hold_.Update(callback);
    }

    ConcurrentMarkingWorklist* shared() { return &shared_; }
    ConcurrentMarkingWorklist* bailout() { return &bailout_; }
    ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }

    void Print() {
      PrintWorklist("shared", &shared_);
      PrintWorklist("bailout", &bailout_);
      PrintWorklist("on_hold", &on_hold_);
    }

   private:
    // Prints the stats about the global pool of the worklist.
    void PrintWorklist(const char* worklist_name,
                       ConcurrentMarkingWorklist* worklist);
    ConcurrentMarkingWorklist shared_;
    ConcurrentMarkingWorklist bailout_;
    ConcurrentMarkingWorklist on_hold_;
  };
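
  // A minimal sketch of the main-thread drain loop built on top of this
  // wrapper (illustrative only; the real loop in the .cc file also visits
  // each popped object and may push newly discovered grey objects back):
  //
  //   while (HeapObject* object = marking_worklist()->Pop()) {
  //     // Visit |object| and mark it black.
  //   }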

  class RootMarkingVisitor;
  class CustomRootBodyMarkingVisitor;

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  // Performs a global garbage collection.
  void CollectGarbage() override;

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Stop concurrent marking (either by preempting it right away or waiting for
  // it to complete as requested by |stop_request|).
  void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);

  bool StartCompaction();

  void AbortCompaction();

  static inline bool IsOnEvacuationCandidate(Object* obj) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  static inline bool IsOnEvacuationCandidate(MaybeObject* obj) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
  V8_INLINE static void RecordSlot(HeapObject* object, Object** slot,
                                   Object* target);
  V8_INLINE static void RecordSlot(HeapObject* object,
                                   HeapObjectReference** slot, Object* target);
  void RecordLiveSlotsOnPage(Page* page);

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  void EnsureSweepingCompleted();

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  MarkingWorklist* marking_worklist() { return &marking_worklist_; }

  WeakObjects* weak_objects() { return &weak_objects_; }

  void AddWeakCell(WeakCell* weak_cell) {
    weak_objects_.weak_cells.Push(kMainThread, weak_cell);
  }

  void AddTransitionArray(TransitionArray* array) {
    weak_objects_.transition_arrays.Push(kMainThread, array);
  }

  void AddWeakReference(HeapObject* host, HeapObjectReference** slot) {
    weak_objects_.weak_references.Push(kMainThread, std::make_pair(host, slot));
  }

  void AddWeakObjectInCode(HeapObject* object, Code* code) {
    weak_objects_.weak_objects_in_code.Push(kMainThread,
                                            std::make_pair(object, code));
  }

  Sweeper* sweeper() { return sweeper_; }

#ifdef DEBUG
  // Checks whether we are performing a mark-compact collection.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  void VerifyMarking();
#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  void VerifyMarkbitsAreDirty(PagedSpace* space);
  void VerifyMarkbitsAreClean(PagedSpace* space);
  void VerifyMarkbitsAreClean(NewSpace* space);
#endif

 private:
  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector();

  bool WillBeDeoptimized(Code* code);

  void ComputeEvacuationHeuristics(size_t area_size,
                                   int* target_fragmentation_percent,
                                   size_t* max_evacuated_bytes);

  void RecordObjectStats();

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  void MarkLiveObjects() override;

  // Marks the object black and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);

  // Marks the object black and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkRootObject(Root root, HeapObject* obj);

  // Used by wrapper tracing.
  V8_INLINE void MarkExternallyReferencedObject(HeapObject* obj);

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootVisitor* root_visitor,
                 ObjectVisitor* custom_root_body_visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(ObjectVisitor* visitor);

  // Marks objects reachable from harmony weak maps and wrapper tracing.
  void ProcessEphemeralMarking();

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat embedded pointers in the code as strong;
  // otherwise they can die and try to deoptimize the underlying code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Collects a list of dependent code from maps embedded in optimized code.
  DependentCode* DependentCodeListFromNonLiveMaps();

  // Drains the main thread marking work list. Will mark all pending objects
  // if no concurrent threads are running.
  void ProcessMarkingWorklist() override;

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences() override;
  void MarkDependentCodeForDeoptimization();
  // Checks if the given weak cell is a simple transition from the parent map
  // of the given dead target. If so it clears the transition and trims
  // the descriptor array of the parent if needed.
  void ClearPotentialSimpleMapTransition(Map* dead_target);
  void ClearPotentialSimpleMapTransition(Map* map, Map* dead_target);
  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
                              DescriptorArray* descriptors);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far.  This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  // Goes through the list of encountered weak cells and clears those with
  // dead values. If the value is a dead map and the parent map transitions to
  // the dead map via weak cell, then this function also clears the map
  // transition.
  void ClearWeakCells();
  void ClearWeakReferences();
  void AbortWeakObjects();

  // Starts sweeping of spaces by contributing on the main thread and setting
  // up other pages for sweeping. Does not start sweeper tasks.
  void StartSweepSpaces();
  void StartSweepSpace(PagedSpace* space);

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
                                          Address end) override;
  UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
  int CollectOldSpaceArrayBufferTrackerItems(ItemParallelJob* job);

  void ReleaseEvacuationCandidates();
  void PostProcessEvacuationCandidates();
  void ReportAbortedEvacuationCandidate(HeapObject* failed_object, Page* page);

  void ClearMarkbitsInPagedSpace(PagedSpace* space);
  void ClearMarkbitsInNewSpace(NewSpace* space);

  base::Mutex mutex_;
  base::Semaphore page_parallel_job_semaphore_;

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  bool was_marked_incrementally_;

  bool evacuation_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool black_allocation_;

  bool have_code_to_deoptimize_;

  MarkingWorklist marking_worklist_;
  WeakObjects weak_objects_;

  // Candidates for pages that should be evacuated.
  std::vector<Page*> evacuation_candidates_;
  // Pages that are actually processed during evacuation.
  std::vector<Page*> old_space_evacuation_pages_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<std::pair<HeapObject*, Page*>> aborted_evacuation_candidates_;

  Sweeper* sweeper_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  friend class FullEvacuator;
  friend class Heap;
  friend class RecordMigratedSlotVisitor;
};

template <FixedArrayVisitationMode fixed_array_mode,
          TraceRetainingPathMode retaining_path_mode, typename MarkingState>
class MarkingVisitor final
    : public HeapVisitor<
          int,
          MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>> {
 public:
  typedef HeapVisitor<
      int, MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>>
      Parent;

  V8_INLINE MarkingVisitor(MarkCompactCollector* collector,
                           MarkingState* marking_state);

  V8_INLINE bool ShouldVisitMapPointer() { return false; }

  V8_INLINE int VisitAllocationSite(Map* map, AllocationSite* object);
  V8_INLINE int VisitBytecodeArray(Map* map, BytecodeArray* object);
  V8_INLINE int VisitCodeDataContainer(Map* map, CodeDataContainer* object);
  V8_INLINE int VisitFixedArray(Map* map, FixedArray* object);
  V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
  V8_INLINE int VisitJSFunction(Map* map, JSFunction* object);
  V8_INLINE int VisitJSWeakCollection(Map* map, JSWeakCollection* object);
  V8_INLINE int VisitMap(Map* map, Map* object);
  V8_INLINE int VisitNativeContext(Map* map, Context* object);
  V8_INLINE int VisitTransitionArray(Map* map, TransitionArray* object);
  V8_INLINE int VisitWeakCell(Map* map, WeakCell* object);

  // ObjectVisitor implementation.
  V8_INLINE void VisitPointer(HeapObject* host, Object** p) final;
  V8_INLINE void VisitPointer(HeapObject* host, MaybeObject** p) final;
  V8_INLINE void VisitPointers(HeapObject* host, Object** start,
                               Object** end) final;
  V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
                               MaybeObject** end) final;
  V8_INLINE void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final;
  V8_INLINE void VisitCodeTarget(Code* host, RelocInfo* rinfo) final;

 private:
  // Granularity in which FixedArrays are scanned if |fixed_array_mode|
  // is true.
  static const int kProgressBarScanningChunk = 32 * 1024;

  V8_INLINE int VisitFixedArrayIncremental(Map* map, FixedArray* object);

  V8_INLINE void MarkMapContents(Map* map);

  // Marks the object black without pushing it on the marking work list. Returns
  // true if the object needed marking and false otherwise.
  V8_INLINE bool MarkObjectWithoutPush(HeapObject* host, HeapObject* object);

  // Marks the object grey and pushes it on the marking work list.
  V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);

  MarkingState* marking_state() { return marking_state_; }

  MarkCompactCollector::MarkingWorklist* marking_worklist() const {
    return collector_->marking_worklist();
  }

  Heap* const heap_;
  MarkCompactCollector* const collector_;
  MarkingState* const marking_state_;
};

class EvacuationScope {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
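
// A minimal usage sketch (illustrative only; `collector` is assumed to be a
// MarkCompactCollector*): the scope raises the evacuation flag for the
// duration of the evacuation phase and clears it again on exit.
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     // collector->evacuation() is true here.
//   }
//   // collector->evacuation() is false again.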

#ifdef ENABLE_MINOR_MC

// Collector for young-generation only.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
 public:
  using MarkingState = MinorMarkingState;
  using NonAtomicMarkingState = MinorNonAtomicMarkingState;

  explicit MinorMarkCompactCollector(Heap* heap);
  ~MinorMarkCompactCollector();

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  void CollectGarbage() override;

  void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
                    FreeSpaceTreatmentMode free_space_mode);
  void CleanupSweepToIteratePages();

 private:
  using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
  class RootMarkingVisitor;

  static const int kNumMarkers = 8;
  static const int kMainMarker = 0;

  inline MarkingWorklist* worklist() { return worklist_; }

  inline YoungGenerationMarkingVisitor* main_marking_visitor() {
    return main_marking_visitor_;
  }

  void MarkLiveObjects() override;
  void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
  V8_INLINE void MarkRootObject(HeapObject* obj);
  void ProcessMarkingWorklist() override;
  void ClearNonLiveReferences() override;

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
                                          Address end) override;
  UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);

  int NumberOfParallelMarkingTasks(int pages);

  MarkingWorklist* worklist_;

  YoungGenerationMarkingVisitor* main_marking_visitor_;
  base::Semaphore page_parallel_job_semaphore_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<Page*> sweep_to_iterate_pages_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  friend class YoungGenerationMarkingTask;
  friend class YoungGenerationMarkingVisitor;
};

#endif  // ENABLE_MINOR_MC

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_