// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/sweeper.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
#include "src/visitors.h"
#include "src/vm-state-inl.h"

namespace v8 {
namespace internal {

using IncrementalMarkingMarkingVisitor =
    MarkingVisitor<FixedArrayVisitationMode::kIncremental,
                   TraceRetainingPathMode::kDisabled,
                   IncrementalMarking::MarkingState>;

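// Called by the allocation observers that Start() registers on all spaces.
// Each step advances incremental marking on allocation and, once black
// allocation is active, ensures that the freshly allocated object (or the
// black area containing it) does not stay white.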
void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
                                        size_t size) {
  Heap* heap = incremental_marking_.heap();
  VMState<GC> state(heap->isolate());
  RuntimeCallTimerScope runtime_timer(
      heap->isolate(),
      RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
  incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
  if (incremental_marking_.black_allocation() && addr != kNullAddress) {
    // AdvanceIncrementalMarkingOnAllocation can start black allocation.
    // Ensure that the new object is marked black.
    HeapObject* object = HeapObject::FromAddress(addr);
    if (incremental_marking_.marking_state()->IsWhite(object) &&
        !heap->InNewSpace(object)) {
      if (heap->lo_space()->Contains(object)) {
        incremental_marking_.marking_state()->WhiteToBlack(object);
      } else {
        Page::FromAddress(addr)->CreateBlackArea(addr, addr + size);
      }
    }
  }
}

IncrementalMarking::IncrementalMarking(
    Heap* heap, MarkCompactCollector::MarkingWorklist* marking_worklist,
    WeakObjects* weak_objects)
    : heap_(heap),
      marking_worklist_(marking_worklist),
      weak_objects_(weak_objects),
      initial_old_generation_size_(0),
      bytes_marked_ahead_of_schedule_(0),
      bytes_marked_concurrently_(0),
      unscanned_bytes_of_large_object_(0),
      is_compacting_(false),
      should_hurry_(false),
      was_activated_(false),
      black_allocation_(false),
      finalize_marking_completed_(false),
      trace_wrappers_toggle_(false),
      request_type_(NONE),
      new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
      old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
  DCHECK_NOT_NULL(marking_worklist_);
  SetState(STOPPED);
}

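// Common slow path of the incremental write barrier: marks |value| grey if
// necessary and tells the caller whether the slot still has to be recorded
// for the evacuation phase (only when compaction is enabled).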
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
  HeapObject* value_heap_obj = HeapObject::cast(value);
  DCHECK(!marking_state()->IsImpossible(value_heap_obj));
  DCHECK(!marking_state()->IsImpossible(obj));
#ifdef V8_CONCURRENT_MARKING
  // The write barrier stub generated with V8_CONCURRENT_MARKING does not
  // check the color of the source object.
  const bool need_recording = true;
#else
  const bool need_recording = marking_state()->IsBlack(obj);
#endif

  if (need_recording && WhiteToGreyAndPush(value_heap_obj)) {
    RestartIfNotMarking();
  }
  return is_compacting_ && need_recording;
}

void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                         HeapObjectReference** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, value) && slot != nullptr) {
    // The object is not going to be rescanned, so we need to record the slot.
    heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
  }
}

int IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                            Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
  // Called by RecordWriteCodeStubAssembler, which doesn't accept a void
  // return type.
  return 0;
}

void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
                                                 Object* value) {
  if (BaseRecordWrite(host, value)) {
    // The object is not going to be rescanned, so we need to record the slot.
    heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
  }
}

bool IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
  if (marking_state()->WhiteToGrey(obj)) {
    marking_worklist()->Push(obj);
    return true;
  }
  return false;
}

void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
  // Color the object black and push it into the bailout deque.
  marking_state()->WhiteToGrey(obj);
  if (marking_state()->GreyToBlack(obj)) {
    if (FLAG_concurrent_marking) {
      marking_worklist()->PushBailout(obj);
    } else {
      marking_worklist()->Push(obj);
    }
  }
}

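// Left trimming moves the object start from |from| to |to| within the same
// chunk. Transfer the color of |from| to |to| so that the (possibly
// concurrent) marker does not observe an unmarked or inconsistent object.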
void IncrementalMarking::NotifyLeftTrimming(HeapObject* from, HeapObject* to) {
  DCHECK(IsMarking());
  DCHECK(MemoryChunk::FromAddress(from->address())->SweepingDone());
  DCHECK_EQ(MemoryChunk::FromAddress(from->address()),
            MemoryChunk::FromAddress(to->address()));
  DCHECK_NE(from, to);

  MarkBit old_mark_bit = marking_state()->MarkBitFrom(from);
  MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);

  if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
    // Nothing to do if the object is in a black area.
    return;
  }

  bool marked_black_due_to_left_trimming = false;
  if (FLAG_concurrent_marking) {
    // We need to mark the array black before overwriting its map and length
    // so that the concurrent marker does not observe inconsistent state.
    Marking::WhiteToGrey<kAtomicity>(old_mark_bit);
    if (Marking::GreyToBlack<kAtomicity>(old_mark_bit)) {
      // The concurrent marker will not mark the array. We need to push the
      // new array start onto the marking deque to ensure that it will be
      // marked.
      marked_black_due_to_left_trimming = true;
    }
    DCHECK(Marking::IsBlack<kAtomicity>(old_mark_bit));
  }

  if (Marking::IsBlack<kAtomicity>(old_mark_bit) &&
      !marked_black_due_to_left_trimming) {
    // The array was black before left trimming or was marked black by the
    // concurrent marker. Simply transfer the color.
    if (from->address() + kPointerSize == to->address()) {
      // The old and the new mark bits overlap. The |to| object is grey.
      // To make it black, we need to set the second bit.
      DCHECK(new_mark_bit.Get<kAtomicity>());
      new_mark_bit.Next().Set<kAtomicity>();
    } else {
      bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
      DCHECK(success);
      USE(success);
    }
  } else if (Marking::IsGrey<kAtomicity>(old_mark_bit) ||
             marked_black_due_to_left_trimming) {
    // The array was already grey or was marked black by this function.
    // Mark the new array grey and push it onto the marking deque.
    if (from->address() + kPointerSize == to->address()) {
      // The old and the new mark bits overlap. The |to| object is either white
      // or grey. Set the first bit to make sure that it is grey.
      new_mark_bit.Set<kAtomicity>();
      DCHECK(!new_mark_bit.Next().Get<kAtomicity>());
    } else {
      bool success = Marking::WhiteToGrey<kAtomicity>(new_mark_bit);
      DCHECK(success);
      USE(success);
    }
    marking_worklist()->Push(to);
    RestartIfNotMarking();
  }
}

class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitRootPointer(Root root, const char* description,
                        Object** p) override {
    MarkObjectByPointer(p);
  }

  void VisitRootPointers(Root root, const char* description, Object** start,
                         Object** end) override {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
  }

  Heap* heap_;
};

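// POINTERS_TO_HERE_ARE_INTERESTING and POINTERS_FROM_HERE_ARE_INTERESTING are
// the page flags consulted by the write barrier; the helpers below toggle
// them when incremental marking starts and stops.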
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  for (Page* p : *space) {
    SetOldSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  for (Page* p : *space) {
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  for (LargePage* lop : *heap_->lo_space()) {
    SetOldSpacePageFlags(lop, false);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  for (Page* p : *space) {
    SetOldSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  for (Page* p : *space) {
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  for (LargePage* lop : *heap_->lo_space()) {
    SetOldSpacePageFlags(lop, true);
  }
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled();
}


void IncrementalMarking::Deactivate() {
  DeactivateIncrementalWriteBarrier();
}

void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
        "slack %dMB\n",
        Heap::GarbageCollectionReasonToString(gc_reason),
        old_generation_size_mb, old_generation_limit_mb,
        Max(0, old_generation_limit_mb - old_generation_size_mb));
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  Counters* counters = heap_->isolate()->counters();

  counters->incremental_marking_reason()->AddSample(
      static_cast<int>(gc_reason));
  HistogramTimerScope incremental_marking_scope(
      counters->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_START);
  heap_->tracer()->NotifyIncrementalMarkingStart();

  start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
  initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
  old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
  bytes_allocated_ = 0;
  bytes_marked_ahead_of_schedule_ = 0;
  bytes_marked_concurrently_ = 0;
  should_hurry_ = false;
  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start sweeping.\n");
    }
    SetState(SWEEPING);
  }

  heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
                                           &new_generation_observer_);
  incremental_marking_job()->Start(heap_);
}


void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start marking\n");
  }

  is_compacting_ =
      !FLAG_never_compact && heap_->mark_compact_collector()->StartCompaction();

  SetState(MARKING);

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
    heap_->local_embedder_heap_tracer()->TracePrologue();
  }

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

#ifdef V8_CONCURRENT_MARKING
  // The write-barrier does not check the color of the source object.
  // Start black allocation earlier to ensure faster marking progress.
  if (!black_allocation_) {
    StartBlackAllocation();
  }
#endif

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
    heap_->concurrent_marking()->ScheduleTasks();
  }

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
  }
}

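// Black allocation: while it is active, objects allocated in old, map, and
// code space are considered marked (black) right away, so the marker never
// has to revisit them during this cycle.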
void IncrementalMarking::StartBlackAllocation() {
  DCHECK(FLAG_black_allocation);
  DCHECK(!black_allocation_);
  DCHECK(IsMarking());
  black_allocation_ = true;
  heap()->old_space()->MarkLinearAllocationAreaBlack();
  heap()->map_space()->MarkLinearAllocationAreaBlack();
  heap()->code_space()->MarkLinearAllocationAreaBlack();
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation started\n");
  }
}

void IncrementalMarking::PauseBlackAllocation() {
  DCHECK(FLAG_black_allocation);
  DCHECK(IsMarking());
  heap()->old_space()->UnmarkLinearAllocationArea();
  heap()->map_space()->UnmarkLinearAllocationArea();
  heap()->code_space()->UnmarkLinearAllocationArea();
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation paused\n");
  }
  black_allocation_ = false;
}

void IncrementalMarking::FinishBlackAllocation() {
  if (black_allocation_) {
    black_allocation_ = false;
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Black allocation finished\n");
    }
  }
}

void IncrementalMarking::AbortBlackAllocation() {
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation aborted\n");
  }
}

void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}

bool ShouldRetainMap(Map* map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object* constructor = map->GetConstructor();
  Heap* heap = map->GetHeap();
  if (!constructor->IsHeapObject() ||
      heap->incremental_marking()->marking_state()->IsWhite(
          HeapObject::cast(constructor))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if the flag disables it, or if there is
  // - memory pressure (reduce_memory_footprint_), or
  // - a GC requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   heap()->ShouldAbortIncrementalMarking() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  WeakArrayList* retained_maps = heap()->retained_maps();
  int length = retained_maps->length();
  // The number_of_disposed_maps separates maps in the retained_maps
  // array that were created before and after context disposal.
  // We do not age and retain disposed maps to avoid memory leaks.
  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
  for (int i = 0; i < length; i += 2) {
    MaybeObject* value = retained_maps->Get(i);
    HeapObject* map_heap_object;
    if (!value->ToWeakHeapObject(&map_heap_object)) {
      continue;
    }
    int age = Smi::ToInt(retained_maps->Get(i + 1)->ToSmi());
    int new_age;
    Map* map = Map::cast(map_heap_object);
    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
        marking_state()->IsWhite(map)) {
      if (ShouldRetainMap(map, age)) {
        WhiteToGreyAndPush(map);
      }
      Object* prototype = map->prototype();
      if (age > 0 && prototype->IsHeapObject() &&
          marking_state()->IsWhite(HeapObject::cast(prototype))) {
        // The prototype is not marked, age the map.
        new_age = age - 1;
      } else {
        // The prototype and the constructor are marked, this map keeps only
        // the transition tree alive, not JSObjects. Do not age the map.
        new_age = age;
      }
    } else {
      new_age = FLAG_retain_maps_for_n_gc;
    }
    // Compact the array and update the age.
    if (new_age != age) {
      retained_maps->Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
    }
  }
}

void IncrementalMarking::FinalizeIncrementally() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) Age and retain maps embedded in optimized code.
  MarkRoots();

  // Map retaining is needed for performance, not correctness,
  // so we can do it only once at the beginning of the finalization.
  RetainMaps();

  finalize_marking_completed_ = true;

  if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
      !black_allocation_) {
    // TODO(hpayer): Move to an earlier point as soon as we make faster marking
    // progress.
    StartBlackAllocation();
  }

  if (FLAG_trace_incremental_marking) {
    double end = heap_->MonotonicallyIncreasingTimeInMs();
    double delta = end - start;
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Finalize incrementally spent %.1f ms.\n", delta);
  }
}

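// After a scavenge, entries on the marking worklist may point to objects that
// were moved or died in new space. Rewrite every entry to its forwarding
// address and drop entries for dead or filler objects.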
void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
  if (!IsMarking()) return;

  Map* filler_map = heap_->one_pointer_filler_map();

#ifdef ENABLE_MINOR_MC
  MinorMarkCompactCollector::MarkingState* minor_marking_state =
      heap()->minor_mark_compact_collector()->marking_state();
#else
  void* minor_marking_state = nullptr;
#endif  // ENABLE_MINOR_MC

  marking_worklist()->Update([this, filler_map, minor_marking_state](
                                 HeapObject* obj, HeapObject** out) -> bool {
    DCHECK(obj->IsHeapObject());
    // Only pointers to from space have to be updated.
    if (heap_->InFromSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (!map_word.IsForwardingAddress()) {
        // There may be objects on the marking deque that do not exist anymore,
        // e.g. left-trimmed objects or objects from the root set (frames).
        // If these objects are dead at scavenging time, their marking deque
        // entries will not point to forwarding addresses. Hence, we can
        // discard them.
        return false;
      }
      HeapObject* dest = map_word.ToForwardingAddress();
      DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
      *out = dest;
      return true;
    } else if (heap_->InToSpace(obj)) {
      // The object may be on a page that was moved in new space.
      DCHECK(
          Page::FromAddress(obj->address())->IsFlagSet(Page::SWEEP_TO_ITERATE));
#ifdef ENABLE_MINOR_MC
      if (minor_marking_state->IsGrey(obj)) {
        *out = obj;
        return true;
      }
#endif  // ENABLE_MINOR_MC
      return false;
    } else {
      // The object may be on a page that was moved from new to old space. Only
      // applicable during minor MC garbage collections.
      if (Page::FromAddress(obj->address())
              ->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
#ifdef ENABLE_MINOR_MC
        if (minor_marking_state->IsGrey(obj)) {
          *out = obj;
          return true;
        }
#endif  // ENABLE_MINOR_MC
        return false;
      }
      DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
      // Skip one-word filler objects that appear on the stack when we perform
      // an in-place array shift.
      if (obj->map() != filler_map) {
        *out = obj;
        return true;
      }
      return false;
    }
  });

  UpdateWeakReferencesAfterScavenge();
}

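// Weak slots recorded during marking are raw (object, slot address) pairs.
// When the object containing the slot was evacuated by the scavenger, the
// slot address has to be rebased onto the forwarded object: the new slot is
// the forwarding address plus the original offset of the slot within the
// object (distance_to_slot below).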
void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
  weak_objects_->weak_references.Update(
      [](std::pair<HeapObject*, HeapObjectReference**> slot_in,
         std::pair<HeapObject*, HeapObjectReference**>* slot_out) -> bool {
        HeapObject* heap_obj = slot_in.first;
        MapWord map_word = heap_obj->map_word();
        if (map_word.IsForwardingAddress()) {
          ptrdiff_t distance_to_slot =
              reinterpret_cast<Address>(slot_in.second) -
              reinterpret_cast<Address>(slot_in.first);
          Address new_slot =
              reinterpret_cast<Address>(map_word.ToForwardingAddress()) +
              distance_to_slot;
          slot_out->first = map_word.ToForwardingAddress();
          slot_out->second = reinterpret_cast<HeapObjectReference**>(new_slot);
          return true;
        }
        if (heap_obj->GetHeap()->InNewSpace(heap_obj)) {
          // The new space object containing the weak reference died.
          return false;
        }
        *slot_out = slot_in;
        return true;
      });
  weak_objects_->weak_objects_in_code.Update(
      [](std::pair<HeapObject*, Code*> slot_in,
         std::pair<HeapObject*, Code*>* slot_out) -> bool {
        HeapObject* heap_obj = slot_in.first;
        MapWord map_word = heap_obj->map_word();
        if (map_word.IsForwardingAddress()) {
          slot_out->first = map_word.ToForwardingAddress();
          slot_out->second = slot_in.second;
        } else {
          *slot_out = slot_in;
        }
        return true;
      });
}

void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
    size_t dead_bytes_in_new_space) {
  if (!IsMarking()) return;
  bytes_marked_ahead_of_schedule_ -=
      Min(bytes_marked_ahead_of_schedule_, dead_bytes_in_new_space);
}

bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject* obj) {
  if (!obj->IsFixedArray()) return false;
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  return chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR);
}

int IncrementalMarking::VisitObject(Map* map, HeapObject* obj) {
  DCHECK(marking_state()->IsGrey(obj) || marking_state()->IsBlack(obj));
  if (!marking_state()->GreyToBlack(obj)) {
    // The object can already be black in these cases:
    // 1. The object is a fixed array with the progress bar.
    // 2. The object is a JSObject that was colored black before an
    //    unsafe layout change.
    // 3. The object is a string that was colored black before an
    //    unsafe layout change.
    // 4. The object is materialized by the deoptimizer.
    DCHECK(obj->IsHashTable() || obj->IsPropertyArray() ||
           obj->IsFixedArray() || obj->IsJSObject() || obj->IsString());
  }
  DCHECK(marking_state()->IsBlack(obj));
  WhiteToGreyAndPush(map);
  IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
                                           marking_state());
  return visitor.Visit(map, obj);
}

void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject* obj) {
  if (IsMarking() && marking_state()->IsBlack(obj)) {
    RevisitObject(obj);
  }
}

void IncrementalMarking::RevisitObject(HeapObject* obj) {
  DCHECK(IsMarking());
  DCHECK(FLAG_concurrent_marking || marking_state()->IsBlack(obj));
  Page* page = Page::FromAddress(obj->address());
  if (page->owner()->identity() == LO_SPACE) {
    page->ResetProgressBar();
  }
  Map* map = obj->map();
  WhiteToGreyAndPush(map);
  IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
                                           marking_state());
  visitor.Visit(map, obj);
}

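// Drains the selected marking worklist (bailout or regular) until either
// |bytes_to_process| bytes have been visited or, with FORCE_COMPLETION, the
// worklist is empty. Returns the number of bytes actually processed.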
template <WorklistToProcess worklist_to_process>
intptr_t IncrementalMarking::ProcessMarkingWorklist(
    intptr_t bytes_to_process, ForceCompletionAction completion) {
  intptr_t bytes_processed = 0;
  while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
    HeapObject* obj;
    if (worklist_to_process == WorklistToProcess::kBailout) {
      obj = marking_worklist()->PopBailout();
    } else {
      obj = marking_worklist()->Pop();
    }
    if (obj == nullptr) break;
    // Left trimming may result in white, grey, or black filler objects on the
    // marking deque. Ignore these objects.
    if (obj->IsFiller()) {
      DCHECK(!marking_state()->IsImpossible(obj));
      continue;
    }
    unscanned_bytes_of_large_object_ = 0;
    int size = VisitObject(obj->map(), obj);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  // Report all found wrappers to the embedder. This is necessary as the
  // embedder could potentially invalidate wrappers as soon as V8 is done
  // with its incremental marking processing. Any cached wrappers could
  // result in broken pointers at this point.
  heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
  return bytes_processed;
}


void IncrementalMarking::Hurry() {
  // A scavenge may have pushed new objects on the marking deque (due to black
  // allocation) even in COMPLETE state. This may happen if scavenges are
  // forced e.g. in tests. It should not happen when COMPLETE was set when
  // incremental marking finished and a regular GC was triggered after that
  // because should_hurry_ will force a full GC.
  if (!marking_worklist()->IsEmpty()) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc): Hurry can mark objects it encounters black, as the mutator
    // was stopped.
    ProcessMarkingWorklist(0, FORCE_COMPLETION);
    SetState(COMPLETE);
    if (FLAG_trace_incremental_marking) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp(
            "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
            static_cast<int>(delta));
      }
    }
  }
}


void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
        "overshoot %dMB\n",
        old_generation_size_mb, old_generation_limit_mb,
        Max(0, old_generation_size_mb - old_generation_limit_mb));
  }

  SpaceIterator it(heap_);
  while (it.has_next()) {
    Space* space = it.next();
    if (space == heap_->new_space()) {
      space->RemoveAllocationObserver(&new_generation_observer_);
    } else {
      space->RemoveAllocationObserver(&old_generation_observer_);
    }
  }

  IncrementalMarking::set_should_hurry(false);
  heap_->isolate()->stack_guard()->ClearGC();
  SetState(STOPPED);
  is_compacting_ = false;
  FinishBlackAllocation();
}


void IncrementalMarking::Finalize() {
  Hurry();
  Stop();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  SetState(COMPLETE);
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then,
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
}

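// Deadline-driven marking used by idle- and task-based steps: alternates
// between tracing embedder wrappers and processing the V8 marking worklist in
// kStepSizeInMs slices until the deadline is hit, marking completes, or the
// worklist runs dry. Returns the time remaining until |deadline_in_ms|.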
double IncrementalMarking::AdvanceIncrementalMarking(
    double deadline_in_ms, CompletionAction completion_action,
    StepOrigin step_origin) {
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  DCHECK(!IsStopped());
  DCHECK_EQ(
      0, heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());

  double remaining_time_in_ms = 0.0;
  intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
      kStepSizeInMs,
      heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());

  const bool incremental_wrapper_tracing =
      state_ == MARKING && FLAG_incremental_marking_wrappers &&
      heap_->local_embedder_heap_tracer()->InUse();
  do {
    if (incremental_wrapper_tracing && trace_wrappers_toggle_) {
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
      const double wrapper_deadline =
          heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
      if (!heap_->local_embedder_heap_tracer()
               ->ShouldFinalizeIncrementalMarking()) {
        heap_->local_embedder_heap_tracer()->Trace(
            wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
                                  EmbedderHeapTracer::ForceCompletionAction::
                                      DO_NOT_FORCE_COMPLETION));
      }
    } else {
      Step(step_size_in_bytes, completion_action, step_origin);
    }
    trace_wrappers_toggle_ = !trace_wrappers_toggle_;
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
  } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
           !marking_worklist()->IsEmpty());
  return remaining_time_in_ms;
}


void IncrementalMarking::FinalizeSweeping() {
  DCHECK(state_ == SWEEPING);
  if (heap_->mark_compact_collector()->sweeping_in_progress() &&
      (!FLAG_concurrent_sweeping ||
       !heap_->mark_compact_collector()->sweeper()->AreSweeperTasksRunning())) {
    heap_->mark_compact_collector()->EnsureSweepingCompleted();
  }
  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
#ifdef DEBUG
    heap_->VerifyCountersAfterSweeping();
#endif
    StartMarking();
  }
}

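// The marking schedule combines two budgets (see
// AdvanceIncrementalMarkingOnAllocation): one that keeps up with old
// generation allocation and one that guarantees forward progress.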
size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
  // Update bytes_allocated_ based on the allocation counter.
  size_t current_counter = heap_->OldGenerationAllocationCounter();
  bytes_allocated_ += current_counter - old_generation_allocation_counter_;
  old_generation_allocation_counter_ = current_counter;
  return bytes_allocated_;
}

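// Progress-based budget: aim to finish marking in roughly kTargetStepCount
// steps of the initial old generation size, ramped up over the first
// kRampUpIntervalMs. For illustration only: with a 512 MB old generation and
// the constants below, a fully ramped-up step is 512 MB / 256 = 2 MB, and a
// step taken 150 ms after the start is scaled by 150 / 300 = 0.5 to 1 MB.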
size_t IncrementalMarking::StepSizeToMakeProgress() {
  // We increase step size gradually based on the time passed in order to
  // leave marking work to standalone tasks. The ramp up duration and the
  // target step count are chosen based on benchmarks.
  const int kRampUpIntervalMs = 300;
  const size_t kTargetStepCount = 256;
  const size_t kTargetStepCountAtOOM = 32;
  size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;

  if (!heap()->CanExpandOldGeneration(oom_slack)) {
    return heap()->OldGenerationSizeOfObjects() / kTargetStepCountAtOOM;
  }

  size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
                         IncrementalMarking::kMinStepSizeInBytes);
  double time_passed_ms =
      heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
  double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
  return static_cast<size_t>(factor * step_size);
}

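// Allocation-driven marking: the step budget is the sum of the
// allocation-based and progress-based step sizes, capped so that the first
// step after a scavenge does not become too large. Work already done by
// concurrent marking and by tasks (bytes_marked_ahead_of_schedule_) is
// credited against the budget before the main-thread step runs.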
void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
  // Code using an AlwaysAllocateScope assumes that the GC state does not
  // change; that implies that no marking steps must be performed.
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING) || heap_->always_allocate()) {
    return;
  }

  size_t bytes_to_process =
      StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();

  if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes) {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
    // The first step after Scavenge will see many allocated bytes.
    // Cap the step size to distribute the marking work more uniformly.
    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
        kMaxStepSizeInMs,
        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
    bytes_to_process = Min(bytes_to_process, max_step_size);
    size_t bytes_processed = 0;
    if (FLAG_concurrent_marking) {
      bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
                             StepOrigin::kV8, WorklistToProcess::kBailout);
      bytes_to_process = (bytes_processed >= bytes_to_process)
                             ? 0
                             : bytes_to_process - bytes_processed;
      size_t current_bytes_marked_concurrently =
          heap()->concurrent_marking()->TotalMarkedBytes();
      // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
      // short period of time when a concurrent marking task is finishing.
      if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
        bytes_marked_ahead_of_schedule_ +=
            current_bytes_marked_concurrently - bytes_marked_concurrently_;
        bytes_marked_concurrently_ = current_bytes_marked_concurrently;
      }
    }
    if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
      // Steps performed in tasks and concurrently have put us ahead of
      // schedule. We skip processing of the marking deque here and thus shift
      // marking time from inside V8 to standalone tasks.
      bytes_marked_ahead_of_schedule_ -= bytes_to_process;
      bytes_processed += bytes_to_process;
      bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
    }
    bytes_processed += Step(bytes_to_process, GC_VIA_STACK_GUARD,
                            StepOrigin::kV8, WorklistToProcess::kAll);
    bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
  }
}

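// A single marking step: finalizes sweeping first if it is still in progress,
// merges the on-hold worklist back into the shared one (when concurrent
// marking is enabled), processes up to |bytes_to_process| bytes from the
// chosen worklist, and requests finalization or completion once the worklist
// is empty.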
size_t IncrementalMarking::Step(size_t bytes_to_process,
                                CompletionAction action, StepOrigin step_origin,
                                WorklistToProcess worklist_to_process) {
  double start = heap_->MonotonicallyIncreasingTimeInMs();

  if (state_ == SWEEPING) {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
    FinalizeSweeping();
  }

  size_t bytes_processed = 0;
  if (state_ == MARKING) {
    if (FLAG_concurrent_marking) {
      heap_->new_space()->ResetOriginalTop();
      // It is safe to merge back all objects that were on hold to the shared
      // work list at Step because we are at a safepoint where all objects
      // are properly initialized.
      marking_worklist()->shared()->MergeGlobalPool(
          marking_worklist()->on_hold());
    }

// Only print marking worklist in debug mode to save ~40KB of code size.
#ifdef DEBUG
    if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
        FLAG_trace_gc_verbose) {
      marking_worklist()->Print();
    }
#endif

    if (worklist_to_process == WorklistToProcess::kBailout) {
      bytes_processed =
          ProcessMarkingWorklist<WorklistToProcess::kBailout>(bytes_to_process);
    } else {
      bytes_processed =
          ProcessMarkingWorklist<WorklistToProcess::kAll>(bytes_to_process);
    }

    if (step_origin == StepOrigin::kTask) {
      bytes_marked_ahead_of_schedule_ += bytes_processed;
    }

    if (marking_worklist()->IsEmpty()) {
      if (heap_->local_embedder_heap_tracer()
              ->ShouldFinalizeIncrementalMarking()) {
        if (!finalize_marking_completed_) {
          FinalizeMarking(action);
        } else {
          MarkingComplete(action);
        }
      } else {
        heap_->local_embedder_heap_tracer()->NotifyV8MarkingWorklistWasEmpty();
      }
    }
  }
  if (FLAG_concurrent_marking) {
    heap_->concurrent_marking()->RescheduleTasksIfNeeded();
  }

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double duration = (end - start);
  // Note that we report zero bytes here when sweeping was in progress or
  // when we just started incremental marking. In these cases we did not
  // process the marking deque.
  heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Step %s %" PRIuS "KB (%" PRIuS "KB) in %.1f\n",
        step_origin == StepOrigin::kV8 ? "in v8" : "in task",
        bytes_processed / KB, bytes_to_process / KB, duration);
  }
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Concurrently marked %" PRIuS "KB\n",
        heap_->concurrent_marking()->TotalMarkedBytes() / KB);
  }
  return bytes_processed;
}

}  // namespace internal
}  // namespace v8