// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/codegen/compilation-cache.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/sweeper.h"
#include "src/init/v8.h"
#include "src/numbers/conversions.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/objects/visitors.h"
#include "src/tracing/trace-event.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

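// Allocation observer hook: advances incremental marking in response to
// allocated bytes and, since stepping may start marking (and thus black
// allocation), makes sure the freshly allocated object is treated as black.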
void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
                                        size_t size) {
  Heap* heap = incremental_marking_->heap();
  VMState<GC> state(heap->isolate());
  RuntimeCallTimerScope runtime_timer(
      heap->isolate(),
      RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
  incremental_marking_->AdvanceOnAllocation();
  // AdvanceOnAllocation can start incremental marking.
  incremental_marking_->EnsureBlackAllocated(addr, size);
}

IncrementalMarking::IncrementalMarking(Heap* heap,
                                       WeakObjects* weak_objects)
    : heap_(heap),
      collector_(heap->mark_compact_collector()),
      weak_objects_(weak_objects),
      new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
      old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
  SetState(STOPPED);
}

void IncrementalMarking::RecordWriteSlow(HeapObject obj, HeapObjectSlot slot,
                                         HeapObject value) {
  if (BaseRecordWrite(obj, value) && slot.address() != kNullAddress) {
    // The object is not going to be rescanned, so we need to record the slot.
    collector_->RecordSlot(obj, slot, value);
  }
}

int IncrementalMarking::RecordWriteFromCode(Address raw_obj,
                                            Address slot_address,
                                            Isolate* isolate) {
  HeapObject obj = HeapObject::cast(Object(raw_obj));
  MaybeObjectSlot slot(slot_address);
  isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
  // Called by RecordWriteCodeStubAssembler, which doesn't accept void type.
  return 0;
}

void IncrementalMarking::RecordWriteIntoCode(Code host, RelocInfo* rinfo,
                                             HeapObject value) {
  DCHECK(IsMarking());
  if (BaseRecordWrite(host, value)) {
    // Object is not going to be rescanned.  We need to record the slot.
    collector_->RecordRelocSlot(host, rinfo, value);
  }
}

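// Marks |obj| black and visits it immediately. Used when an object's layout
// is about to change, so the marker never observes a half-transitioned object.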
void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
    HeapObject obj) {
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
  marking_state()->WhiteToGrey(obj);
  collector_->VisitObject(obj);
}

void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
  DCHECK(IsMarking());
  DCHECK(MemoryChunk::FromHeapObject(from)->SweepingDone());
  DCHECK_EQ(MemoryChunk::FromHeapObject(from), MemoryChunk::FromHeapObject(to));
  DCHECK_NE(from, to);

  MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);

  if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
    // Nothing to do if the object is in a black area.
    return;
  }
  MarkBlackAndVisitObjectDueToLayoutChange(from);
  DCHECK(marking_state()->IsBlack(from));
  // Mark the new address as black.
  if (from.address() + kTaggedSize == to.address()) {
    // The old and the new markbits overlap. The |to| object has the
    // grey color. To make it black, we need to set the second bit.
    DCHECK(new_mark_bit.Get<kAtomicity>());
    new_mark_bit.Next().Set<kAtomicity>();
  } else {
    bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
    DCHECK(success);
    USE(success);
  }
  DCHECK(marking_state()->IsBlack(to));
}

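// Root visitor used when (re)scanning the roots during incremental marking:
// every strong root pointing to a white heap object is pushed onto the
// marking worklist via WhiteToGreyAndPush.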
class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitRootPointer(Root root, const char* description,
                        FullObjectSlot p) override {
    MarkObjectByPointer(p);
  }

  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override {
    for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(FullObjectSlot p) {
    Object obj = *p;
    if (!obj.IsHeapObject()) return;

    heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
  }

  Heap* heap_;
};

void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  for (Page* p : *space) {
    p->SetOldGenerationPageFlags(false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  for (Page* p : *space) {
    p->SetYoungGenerationPageFlags(false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  for (LargePage* p : *heap_->new_lo_space()) {
    p->SetYoungGenerationPageFlags(false);
    DCHECK(p->IsLargePage());
  }

  for (LargePage* p : *heap_->lo_space()) {
    p->SetOldGenerationPageFlags(false);
  }

  for (LargePage* p : *heap_->code_lo_space()) {
    p->SetOldGenerationPageFlags(false);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  for (Page* p : *space) {
    p->SetOldGenerationPageFlags(true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  for (Page* p : *space) {
    p->SetYoungGenerationPageFlags(true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  for (LargePage* p : *heap_->new_lo_space()) {
    p->SetYoungGenerationPageFlags(true);
    DCHECK(p->IsLargePage());
  }

  for (LargePage* p : *heap_->lo_space()) {
    p->SetOldGenerationPageFlags(true);
  }

  for (LargePage* p : *heap_->code_lo_space()) {
    p->SetOldGenerationPageFlags(true);
  }
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled();
}

bool IncrementalMarking::IsBelowActivationThresholds() const {
  return heap_->OldGenerationSizeOfObjects() <= kV8ActivationThreshold &&
         heap_->GlobalSizeOfObjects() <= kGlobalActivationThreshold;
}

void IncrementalMarking::Deactivate() {
  DeactivateIncrementalWriteBarrier();
}

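// Entry point for starting incremental marking. Records tracing and counter
// state, resets the marking schedule, and either starts marking right away or
// waits for sweeping of the previous cycle to finish first (state SWEEPING).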
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
  if (FLAG_trace_incremental_marking) {
    const size_t old_generation_size_mb =
        heap()->OldGenerationSizeOfObjects() / MB;
    const size_t old_generation_limit_mb =
        heap()->old_generation_allocation_limit() / MB;
    const size_t global_size_mb = heap()->GlobalSizeOfObjects() / MB;
    const size_t global_limit_mb = heap()->global_allocation_limit() / MB;
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start (%s): (size/limit/slack) v8: %zuMB / %zuMB "
        "/ %zuMB global: %zuMB / %zuMB / %zuMB\n",
        Heap::GarbageCollectionReasonToString(gc_reason),
        old_generation_size_mb, old_generation_limit_mb,
        old_generation_size_mb > old_generation_limit_mb
            ? 0
            : old_generation_limit_mb - old_generation_size_mb,
        global_size_mb, global_limit_mb,
        global_size_mb > global_limit_mb ? 0
                                         : global_limit_mb - global_size_mb);
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  Counters* counters = heap_->isolate()->counters();

  counters->incremental_marking_reason()->AddSample(
      static_cast<int>(gc_reason));
  HistogramTimerScope incremental_marking_scope(
      counters->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_START);
  heap_->tracer()->NotifyIncrementalMarkingStart();

  start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
  time_to_force_completion_ = 0.0;
  initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
  old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
  bytes_marked_ = 0;
  scheduled_bytes_to_mark_ = 0;
  schedule_update_time_ms_ = start_time_ms_;
  bytes_marked_concurrently_ = 0;
  was_activated_ = true;

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_INCREMENTAL_SWEEP_ARRAY_BUFFERS);
    heap_->array_buffer_sweeper()->EnsureFinished();
  }

  if (!collector_->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start sweeping.\n");
    }
    SetState(SWEEPING);
  }

  heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
                                           &new_generation_observer_);
  incremental_marking_job()->Start(heap_);
}


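// Transitions into the MARKING state: starts the mark-compact collector's
// marking machinery, activates the incremental write barrier, enables black
// allocation, scans the roots, and kicks off concurrent marking and embedder
// tracing.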
void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start marking\n");
  }
  is_compacting_ = !FLAG_never_compact && collector_->StartCompaction();
  collector_->StartMarking();

  SetState(MARKING);

  ActivateIncrementalWriteBarrier();

  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  StartBlackAllocation();

  MarkRoots();

  if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
    heap_->concurrent_marking()->ScheduleTasks();
  }

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
  }

  {
    // TracePrologue may call back into V8 in corner cases, requiring that
    // marking (including write barriers) is fully set up.
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
    heap_->local_embedder_heap_tracer()->TracePrologue(
        heap_->flags_for_embedder_tracer());
  }
}

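// Black allocation: while it is enabled, objects allocated in old, map and
// code space are immediately considered marked (black), so the marker never
// has to revisit them during this cycle.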
void IncrementalMarking::StartBlackAllocation() {
  DCHECK(!black_allocation_);
  DCHECK(IsMarking());
  black_allocation_ = true;
  heap()->old_space()->MarkLinearAllocationAreaBlack();
  heap()->map_space()->MarkLinearAllocationAreaBlack();
  heap()->code_space()->MarkLinearAllocationAreaBlack();
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation started\n");
  }
}

void IncrementalMarking::PauseBlackAllocation() {
  DCHECK(IsMarking());
  heap()->old_space()->UnmarkLinearAllocationArea();
  heap()->map_space()->UnmarkLinearAllocationArea();
  heap()->code_space()->UnmarkLinearAllocationArea();
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation paused\n");
  }
  black_allocation_ = false;
}

void IncrementalMarking::FinishBlackAllocation() {
  if (black_allocation_) {
    black_allocation_ = false;
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Black allocation finished\n");
    }
  }
}

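// Makes sure an object just allocated at |allocated| is black while black
// allocation is enabled, e.g. for allocations observed right after a marking
// step started a new cycle.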
void IncrementalMarking::EnsureBlackAllocated(Address allocated, size_t size) {
  if (black_allocation() && allocated != kNullAddress) {
    HeapObject object = HeapObject::FromAddress(allocated);
    if (marking_state()->IsWhite(object) && !Heap::InYoungGeneration(object)) {
      if (heap_->IsLargeObject(object)) {
        marking_state()->WhiteToBlack(object);
      } else {
        Page::FromAddress(allocated)->CreateBlackArea(allocated,
                                                      allocated + size);
      }
    }
  }
}

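// Scans the strong roots (ignoring the stack) and pushes unmarked targets onto
// the marking worklist. Called when marking starts and again during
// finalization to pick up changes to the root set.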
void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG_IGNORE_STACK);
}

bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object constructor = map.GetConstructor();
  if (!constructor.IsHeapObject() ||
      marking_state()->IsWhite(HeapObject::cast(constructor))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if the flag disables it, or if there is
  // - memory pressure (reduce_memory_footprint_), or
  // - a GC requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  WeakArrayList retained_maps = heap()->retained_maps();
  int length = retained_maps.length();
  // The number_of_disposed_maps separates maps in the retained_maps
  // array that were created before and after context disposal.
  // We do not age and retain disposed maps to avoid memory leaks.
  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
  for (int i = 0; i < length; i += 2) {
    MaybeObject value = retained_maps.Get(i);
    HeapObject map_heap_object;
    if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
      continue;
    }
    int age = retained_maps.Get(i + 1).ToSmi().value();
    int new_age;
    Map map = Map::cast(map_heap_object);
    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
        marking_state()->IsWhite(map)) {
      if (ShouldRetainMap(map, age)) {
        WhiteToGreyAndPush(map);
      }
      Object prototype = map.prototype();
      if (age > 0 && prototype.IsHeapObject() &&
          marking_state()->IsWhite(HeapObject::cast(prototype))) {
        // The prototype is not marked, age the map.
        new_age = age - 1;
      } else {
        // The prototype and the constructor are marked, this map keeps only
        // the transition tree alive, not JSObjects. Do not age the map.
        new_age = age;
      }
    } else {
      new_age = FLAG_retain_maps_for_n_gc;
    }
    // Update the age stored next to the map.
    if (new_age != age) {
      retained_maps.Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
    }
  }
}

void IncrementalMarking::FinalizeIncrementally() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) Age and retain maps embedded in optimized code.
  MarkRoots();

  // Map retaining is needed for performance, not correctness,
  // so we can do it only once at the beginning of the finalization.
  RetainMaps();

  finalize_marking_completed_ = true;

  if (FLAG_trace_incremental_marking) {
    double end = heap_->MonotonicallyIncreasingTimeInMs();
    double delta = end - start;
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Finalize incrementally spent %.1f ms.\n", delta);
  }
}

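// After a scavenge, objects referenced from the marking worklist may have been
// moved or died. This rewrites worklist entries to their forwarding addresses
// and drops entries for dead or filler objects.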
void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
  if (!IsMarking()) return;

  Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();

#ifdef ENABLE_MINOR_MC
  MinorMarkCompactCollector::MarkingState* minor_marking_state =
      heap()->minor_mark_compact_collector()->marking_state();
#endif  // ENABLE_MINOR_MC

  collector_->marking_worklists_holder()->Update(
      [
#ifdef DEBUG
          // |this| is only referenced inside the DCHECKs below.
          this,
#endif
#ifdef ENABLE_MINOR_MC
          minor_marking_state,
#endif
          filler_map](HeapObject obj, HeapObject* out) -> bool {
        DCHECK(obj.IsHeapObject());
        // Only pointers to from space have to be updated.
        if (Heap::InFromPage(obj)) {
          MapWord map_word = obj.map_word();
          if (!map_word.IsForwardingAddress()) {
            // There may be objects on the marking deque that do not exist
            // anymore, e.g. left trimmed objects or objects from the root set
            // (frames). If these objects are dead at scavenging time, their
            // marking deque entries will not point to forwarding addresses.
            // Hence, we can discard them.
            return false;
          }
          HeapObject dest = map_word.ToForwardingAddress();
          DCHECK_IMPLIES(marking_state()->IsWhite(obj),
                         obj.IsFreeSpaceOrFiller());
          *out = dest;
          return true;
        } else if (Heap::InToPage(obj)) {
          // The object may be on a large page or on a page that was moved in
          // new space.
          DCHECK(Heap::IsLargeObject(obj) ||
                 Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
#ifdef ENABLE_MINOR_MC
          if (minor_marking_state->IsWhite(obj)) {
            return false;
          }
#endif  // ENABLE_MINOR_MC
          // Either a large object or an object marked by the minor
          // mark-compactor.
          *out = obj;
          return true;
        } else {
          // The object may be on a page that was moved from new to old space.
          // Only applicable during minor MC garbage collections.
          if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
#ifdef ENABLE_MINOR_MC
            if (minor_marking_state->IsWhite(obj)) {
              return false;
            }
#endif  // ENABLE_MINOR_MC
            *out = obj;
            return true;
          }
          DCHECK_IMPLIES(marking_state()->IsWhite(obj),
                         obj.IsFreeSpaceOrFiller());
          // Skip one-word filler objects that appear on the
          // stack when we perform in-place array shift.
          if (obj.map() != filler_map) {
            *out = obj;
            return true;
          }
          return false;
        }
      });

  UpdateWeakReferencesAfterScavenge();
}

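// Same idea as above, but for the weak object worklists: weak references, weak
// objects embedded in code, ephemeron tables and flushed JSFunctions are
// forwarded to their new locations or dropped if they died in the scavenge.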
void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
  weak_objects_->weak_references.Update(
      [](std::pair<HeapObject, HeapObjectSlot> slot_in,
         std::pair<HeapObject, HeapObjectSlot>* slot_out) -> bool {
        HeapObject heap_obj = slot_in.first;
        HeapObject forwarded = ForwardingAddress(heap_obj);

        if (!forwarded.is_null()) {
          ptrdiff_t distance_to_slot =
              slot_in.second.address() - slot_in.first.ptr();
          Address new_slot = forwarded.ptr() + distance_to_slot;
          slot_out->first = forwarded;
          slot_out->second = HeapObjectSlot(new_slot);
          return true;
        }

        return false;
      });
  weak_objects_->weak_objects_in_code.Update(
      [](std::pair<HeapObject, Code> slot_in,
         std::pair<HeapObject, Code>* slot_out) -> bool {
        HeapObject heap_obj = slot_in.first;
        HeapObject forwarded = ForwardingAddress(heap_obj);

        if (!forwarded.is_null()) {
          slot_out->first = forwarded;
          slot_out->second = slot_in.second;
          return true;
        }

        return false;
      });
  weak_objects_->ephemeron_hash_tables.Update(
      [](EphemeronHashTable slot_in, EphemeronHashTable* slot_out) -> bool {
        EphemeronHashTable forwarded = ForwardingAddress(slot_in);

        if (!forwarded.is_null()) {
          *slot_out = forwarded;
          return true;
        }

        return false;
      });

  auto ephemeron_updater = [](Ephemeron slot_in, Ephemeron* slot_out) -> bool {
    HeapObject key = slot_in.key;
    HeapObject value = slot_in.value;
    HeapObject forwarded_key = ForwardingAddress(key);
    HeapObject forwarded_value = ForwardingAddress(value);

    if (!forwarded_key.is_null() && !forwarded_value.is_null()) {
      *slot_out = Ephemeron{forwarded_key, forwarded_value};
      return true;
    }

    return false;
  };

  weak_objects_->current_ephemerons.Update(ephemeron_updater);
  weak_objects_->next_ephemerons.Update(ephemeron_updater);
  weak_objects_->discovered_ephemerons.Update(ephemeron_updater);

  weak_objects_->flushed_js_functions.Update(
      [](JSFunction slot_in, JSFunction* slot_out) -> bool {
        JSFunction forwarded = ForwardingAddress(slot_in);

        if (!forwarded.is_null()) {
          *slot_out = forwarded;
          return true;
        }

        return false;
      });
#ifdef DEBUG
  weak_objects_->bytecode_flushing_candidates.Iterate(
      [](SharedFunctionInfo candidate) {
        DCHECK(!Heap::InYoungGeneration(candidate));
      });
#endif
}

void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
    size_t dead_bytes_in_new_space) {
  if (!IsMarking()) return;
  bytes_marked_ -= Min(bytes_marked_, dead_bytes_in_new_space);
}

void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject obj) {
  if (IsMarking() && marking_state()->IsBlack(obj)) {
    collector_->RevisitObject(obj);
  }
}

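// Performs one unit of embedder (e.g. Blink/Oilpan) tracing work: drains the
// embedder worklist in chunks, checking the deadline every
// kObjectsToProcessBeforeDeadlineCheck objects, then lets the embedder tracer
// run with the remaining time budget.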
StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
                                            double* duration_ms) {
  if (!ShouldDoEmbedderStep()) {
    *duration_ms = 0.0;
    return StepResult::kNoImmediateWork;
  }

  constexpr size_t kObjectsToProcessBeforeDeadlineCheck = 500;

  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
  LocalEmbedderHeapTracer* local_tracer = heap_->local_embedder_heap_tracer();
  const double start = heap_->MonotonicallyIncreasingTimeInMs();
  const double deadline = start + expected_duration_ms;
  bool empty_worklist;
  {
    LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer);
    HeapObject object;
    size_t cnt = 0;
    empty_worklist = true;
    while (marking_worklists()->PopEmbedder(&object)) {
      scope.TracePossibleWrapper(JSObject::cast(object));
      if (++cnt == kObjectsToProcessBeforeDeadlineCheck) {
        if (deadline <= heap_->MonotonicallyIncreasingTimeInMs()) {
          empty_worklist = false;
          break;
        }
        cnt = 0;
      }
    }
  }
  bool remote_tracing_done =
      local_tracer->Trace(deadline - heap_->MonotonicallyIncreasingTimeInMs());
  double current = heap_->MonotonicallyIncreasingTimeInMs();
  local_tracer->SetEmbedderWorklistEmpty(true);
  *duration_ms = current - start;
  return (empty_worklist && remote_tracing_done)
             ? StepResult::kNoImmediateWork
             : StepResult::kMoreWorkRemaining;
}

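// Drains the entire marking worklist without a deadline. Used when incremental
// marking has to be finished immediately, e.g. right before the final
// mark-compact pause.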
void IncrementalMarking::Hurry() {
  if (!marking_worklists()->IsEmpty()) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
    }
    collector_->ProcessMarkingWorklist(0);
    SetState(COMPLETE);
    if (FLAG_trace_incremental_marking) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
          static_cast<int>(delta));
    }
  }
}


void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
        "overshoot %dMB\n",
        old_generation_size_mb, old_generation_limit_mb,
        Max(0, old_generation_size_mb - old_generation_limit_mb));
  }

  SpaceIterator it(heap_);
  while (it.HasNext()) {
    Space* space = it.Next();
    if (space == heap_->new_space()) {
      space->RemoveAllocationObserver(&new_generation_observer_);
    } else {
      space->RemoveAllocationObserver(&old_generation_observer_);
    }
  }

  heap_->isolate()->stack_guard()->ClearGC();
  SetState(STOPPED);
  is_compacting_ = false;
  FinishBlackAllocation();
}


void IncrementalMarking::Finalize() {
  Hurry();
  Stop();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

double IncrementalMarking::CurrentTimeToMarkingTask() const {
  const double recorded_time_to_marking_task =
      heap_->tracer()->AverageTimeToIncrementalMarkingTask();
  const double current_time_to_marking_task =
      incremental_marking_job_.CurrentTimeToTask(heap_);
  if (recorded_time_to_marking_task == 0.0) return 0.0;
  return Max(recorded_time_to_marking_task, current_time_to_marking_task);
}

void IncrementalMarking::MarkingComplete(CompletionAction action) {
  // Allowed overshoot percentage of the incremental marking walltime.
  constexpr double kAllowedOvershoot = 0.1;
  // Minimum overshoot in ms. This is used to allow moving away from the stack
  // when marking was fast.
  constexpr double kMinOvershootMs = 50;

  if (action == GC_VIA_STACK_GUARD) {
    if (time_to_force_completion_ == 0.0) {
      const double now = heap_->MonotonicallyIncreasingTimeInMs();
      const double overshoot_ms =
          Max(kMinOvershootMs, (now - start_time_ms_) * kAllowedOvershoot);
      const double time_to_marking_task = CurrentTimeToMarkingTask();
      if (time_to_marking_task == 0.0 || time_to_marking_task > overshoot_ms) {
        if (FLAG_trace_incremental_marking) {
          heap()->isolate()->PrintWithTimestamp(
              "[IncrementalMarking] Not delaying marking completion. time to "
              "task: %fms allowed overshoot: %fms\n",
              time_to_marking_task, overshoot_ms);
        }
      } else {
        time_to_force_completion_ = now + overshoot_ms;
        if (FLAG_trace_incremental_marking) {
          heap()->isolate()->PrintWithTimestamp(
              "[IncrementalMarking] Delaying GC via stack guard. time to task: "
              "%fms "
              "allowed overshoot: %fms\n",
              time_to_marking_task, overshoot_ms);
        }
        incremental_marking_job_.ScheduleTask(
            heap(), IncrementalMarkingJob::TaskType::kNormal);
        return;
      }
    }
    if (heap()->MonotonicallyIncreasingTimeInMs() < time_to_force_completion_) {
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp(
            "[IncrementalMarking] Delaying GC via stack guard. time left: "
            "%fms\n",
            time_to_force_completion_ -
                heap_->MonotonicallyIncreasingTimeInMs());
      }
      return;
    }
  }

  SetState(COMPLETE);
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then,
  // that should not cause a scavenge or stop us from being incremental.
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
}

bool IncrementalMarking::ShouldDoEmbedderStep() {
  return state_ == MARKING && FLAG_incremental_marking_wrappers &&
         heap_->local_embedder_heap_tracer()->InUse();
}

void IncrementalMarking::FastForwardSchedule() {
  if (scheduled_bytes_to_mark_ < bytes_marked_) {
    scheduled_bytes_to_mark_ = bytes_marked_;
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Fast-forwarded schedule\n");
    }
  }
}

void IncrementalMarking::FastForwardScheduleIfCloseToFinalization() {
  // Consider marking close to finalization if 75% of the initial old
  // generation was marked.
  if (bytes_marked_ > 3 * (initial_old_generation_size_ / 4)) {
    FastForwardSchedule();
  }
}

void IncrementalMarking::ScheduleBytesToMarkBasedOnTime(double time_ms) {
  // Time interval that should be sufficient to complete incremental marking.
  constexpr double kTargetMarkingWallTimeInMs = 500;
  constexpr double kMinTimeBetweenScheduleInMs = 10;
  if (schedule_update_time_ms_ + kMinTimeBetweenScheduleInMs > time_ms) return;
  double delta_ms =
      Min(time_ms - schedule_update_time_ms_, kTargetMarkingWallTimeInMs);
  schedule_update_time_ms_ = time_ms;

  size_t bytes_to_mark =
      (delta_ms / kTargetMarkingWallTimeInMs) * initial_old_generation_size_;
  AddScheduledBytesToMark(bytes_to_mark);

  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Scheduled %zuKB to mark based on time delta "
        "%.1fms\n",
        bytes_to_mark / KB, delta_ms);
  }
}

namespace {
StepResult CombineStepResults(StepResult a, StepResult b) {
  DCHECK_NE(StepResult::kWaitingForFinalization, a);
  DCHECK_NE(StepResult::kWaitingForFinalization, b);
  if (a == StepResult::kMoreWorkRemaining ||
      b == StepResult::kMoreWorkRemaining)
    return StepResult::kMoreWorkRemaining;
  return StepResult::kNoImmediateWork;
}
}  // anonymous namespace

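// Advances incremental marking from a task with a wall-clock deadline: updates
// the byte schedule based on elapsed time and then performs a regular step.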
StepResult IncrementalMarking::AdvanceWithDeadline(
    double deadline_in_ms, CompletionAction completion_action,
    StepOrigin step_origin) {
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  DCHECK(!IsStopped());

  ScheduleBytesToMarkBasedOnTime(heap()->MonotonicallyIncreasingTimeInMs());
  FastForwardScheduleIfCloseToFinalization();
  return Step(kStepSizeInMs, completion_action, step_origin);
}

void IncrementalMarking::FinalizeSweeping() {
  DCHECK(state_ == SWEEPING);
  if (collector_->sweeping_in_progress() &&
      (!FLAG_concurrent_sweeping ||
       !collector_->sweeper()->AreSweeperTasksRunning())) {
    collector_->EnsureSweepingCompleted();
  }
  if (!collector_->sweeping_in_progress()) {
#ifdef DEBUG
    heap_->VerifyCountersAfterSweeping();
#endif
    StartMarking();
  }
}

size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
  // Return the number of bytes allocated in the old generation since the last
  // step, based on the old generation allocation counter.
  size_t current_counter = heap_->OldGenerationAllocationCounter();
  size_t result = current_counter - old_generation_allocation_counter_;
  old_generation_allocation_counter_ = current_counter;
  return result;
}

size_t IncrementalMarking::StepSizeToMakeProgress() {
  const size_t kTargetStepCount = 256;
  const size_t kTargetStepCountAtOOM = 32;
  const size_t kMaxStepSizeInByte = 256 * KB;
  size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;

  if (!heap()->CanExpandOldGeneration(oom_slack)) {
    return heap()->OldGenerationSizeOfObjects() / kTargetStepCountAtOOM;
  }

  return Min(Max(initial_old_generation_size_ / kTargetStepCount,
                 IncrementalMarking::kMinStepSizeInBytes),
             kMaxStepSizeInByte);
}

void IncrementalMarking::AddScheduledBytesToMark(size_t bytes_to_mark) {
  if (scheduled_bytes_to_mark_ + bytes_to_mark < scheduled_bytes_to_mark_) {
    // The overflow case.
    scheduled_bytes_to_mark_ = std::numeric_limits<std::size_t>::max();
  } else {
    scheduled_bytes_to_mark_ += bytes_to_mark;
  }
}

void IncrementalMarking::ScheduleBytesToMarkBasedOnAllocation() {
  size_t progress_bytes = StepSizeToMakeProgress();
  size_t allocation_bytes = StepSizeToKeepUpWithAllocations();
  size_t bytes_to_mark = progress_bytes + allocation_bytes;
  AddScheduledBytesToMark(bytes_to_mark);

  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Scheduled %zuKB to mark based on allocation "
        "(progress=%zuKB, allocation=%zuKB)\n",
        bytes_to_mark / KB, progress_bytes / KB, allocation_bytes / KB);
  }
}

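// Folds the bytes marked by concurrent marker tasks since the last fetch into
// bytes_marked_, so the schedule accounts for concurrent progress.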
void IncrementalMarking::FetchBytesMarkedConcurrently() {
  if (FLAG_concurrent_marking) {
    size_t current_bytes_marked_concurrently =
        heap()->concurrent_marking()->TotalMarkedBytes();
    // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
    // short period of time when a concurrent marking task is finishing.
    if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
      bytes_marked_ +=
          current_bytes_marked_concurrently - bytes_marked_concurrently_;
      bytes_marked_concurrently_ = current_bytes_marked_concurrently;
    }
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marked %zuKB on background threads\n",
          heap_->concurrent_marking()->TotalMarkedBytes() / KB);
    }
  }
}

size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
  FetchBytesMarkedConcurrently();
  if (FLAG_trace_incremental_marking) {
    if (scheduled_bytes_to_mark_ > bytes_marked_) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marker is %zuKB behind schedule\n",
          (scheduled_bytes_to_mark_ - bytes_marked_) / KB);
    } else {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marker is %zuKB ahead of schedule\n",
          (bytes_marked_ - scheduled_bytes_to_mark_) / KB);
    }
  }
  // Allow steps on allocation to get behind the schedule by a small amount.
  // This gives higher priority to steps in tasks.
  size_t kScheduleMarginInBytes = step_origin == StepOrigin::kV8 ? 1 * MB : 0;
  if (bytes_marked_ + kScheduleMarginInBytes > scheduled_bytes_to_mark_)
    return 0;
  return scheduled_bytes_to_mark_ - bytes_marked_ - kScheduleMarginInBytes;
}

void IncrementalMarking::AdvanceOnAllocation() {
  // Code using an AlwaysAllocateScope assumes that the GC state does not
  // change; that implies that no marking steps must be performed.
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING) || heap_->always_allocate()) {
    return;
  }
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  ScheduleBytesToMarkBasedOnAllocation();
  Step(kMaxStepSizeInMs, GC_VIA_STACK_GUARD, StepOrigin::kV8);
}

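// The core incremental step: finalizes sweeping if needed, computes a byte
// budget, processes the V8 marking worklist and gives the embedder a time
// slice, then decides whether finalization or completion should be requested.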
StepResult IncrementalMarking::Step(double max_step_size_in_ms,
                                    CompletionAction action,
                                    StepOrigin step_origin) {
  double start = heap_->MonotonicallyIncreasingTimeInMs();

  if (state_ == SWEEPING) {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
    FinalizeSweeping();
  }

  StepResult combined_result = StepResult::kMoreWorkRemaining;
  size_t bytes_to_process = 0;
  size_t v8_bytes_processed = 0;
  double embedder_duration = 0.0;
  double embedder_deadline = 0.0;
  if (state_ == MARKING) {
    if (FLAG_concurrent_marking) {
      heap_->new_space()->ResetOriginalTop();
      heap_->new_lo_space()->ResetPendingObject();
      // It is safe to merge back all objects that were on hold to the shared
      // work list at Step because we are at a safepoint where all objects
      // are properly initialized.
      marking_worklists()->MergeOnHold();
    }

// Only print marking worklist in debug mode to save ~40KB of code size.
#ifdef DEBUG
    if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
        FLAG_trace_gc_verbose) {
      collector_->marking_worklists_holder()->Print();
    }
#endif
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marking speed %.fKB/ms\n",
          heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
    }
    // The first step after Scavenge will see many allocated bytes.
    // Cap the step size to distribute the marking work more uniformly.
    const double marking_speed =
        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond();
    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
        max_step_size_in_ms, marking_speed);
    bytes_to_process = Min(ComputeStepSizeInBytes(step_origin), max_step_size);
    bytes_to_process = Max(bytes_to_process, kMinStepSizeInBytes);

    // Perform a single V8 and a single embedder step. In case both have been
    // observed as empty back to back, we can finalize.
    //
    // This ignores the case where the embedder finds new V8-side objects. The
    // assumption is that large graphs are well connected and can mostly be
    // processed on their own. For small graphs, helping is not necessary.
    v8_bytes_processed = collector_->ProcessMarkingWorklist(bytes_to_process);
    StepResult v8_result = marking_worklists()->IsEmpty()
                               ? StepResult::kNoImmediateWork
                               : StepResult::kMoreWorkRemaining;
    StepResult embedder_result = StepResult::kNoImmediateWork;
    if (heap_->local_embedder_heap_tracer()->InUse()) {
      embedder_deadline =
          Min(max_step_size_in_ms,
              static_cast<double>(bytes_to_process) / marking_speed);
      embedder_result = EmbedderStep(embedder_deadline, &embedder_duration);
    }
    bytes_marked_ += v8_bytes_processed;
    combined_result = CombineStepResults(v8_result, embedder_result);

    if (combined_result == StepResult::kNoImmediateWork) {
      if (!finalize_marking_completed_) {
        FinalizeMarking(action);
        FastForwardSchedule();
        combined_result = StepResult::kWaitingForFinalization;
        incremental_marking_job()->Start(heap_);
      } else {
        MarkingComplete(action);
        combined_result = StepResult::kWaitingForFinalization;
      }
    }
    if (FLAG_concurrent_marking) {
      marking_worklists()->ShareWorkIfGlobalPoolIsEmpty();
      heap_->concurrent_marking()->RescheduleTasksIfNeeded();
    }
  }
  if (state_ == MARKING) {
    // Note that we do not report any marked bytes when we only finished
    // sweeping, as we did not process the marking worklist.
    const double v8_duration =
        heap_->MonotonicallyIncreasingTimeInMs() - start - embedder_duration;
    heap_->tracer()->AddIncrementalMarkingStep(v8_duration, v8_bytes_processed);
  }
  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Step %s V8: %zuKB (%zuKB), embedder: %fms (%fms) "
        "in %.1f\n",
        step_origin == StepOrigin::kV8 ? "in v8" : "in task",
        v8_bytes_processed / KB, bytes_to_process / KB, embedder_duration,
        embedder_deadline, heap_->MonotonicallyIncreasingTimeInMs() - start);
  }
  return combined_result;
}

}  // namespace internal
}  // namespace v8