// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/marker.h"

#include <cstdint>
#include <memory>

#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h"
#include "src/base/platform/time.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/liveness-broker.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/process-heap.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/write-barrier.h"

#if defined(CPPGC_CAGED_HEAP)
#include "include/cppgc/internal/caged-heap-local-data.h"
#endif

namespace cppgc {
namespace internal {

namespace {

bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
                                     HeapBase& heap) {
  if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
      config.marking_type ==
          Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
    WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Enter();
#if defined(CPPGC_CAGED_HEAP)
    heap.caged_heap().local_data().is_incremental_marking_in_progress = true;
#endif  // defined(CPPGC_CAGED_HEAP)
    return true;
  }
  return false;
}

bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
                                    HeapBase& heap) {
  if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
      config.marking_type ==
          Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
    WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Exit();
#if defined(CPPGC_CAGED_HEAP)
    heap.caged_heap().local_data().is_incremental_marking_in_progress = false;
#endif  // defined(CPPGC_CAGED_HEAP)
    return true;
  }
  return false;
}

// Visits the remembered set that was recorded by the generational barrier.
void VisitRememberedSlots(HeapBase& heap,
                          MutatorMarkingState& mutator_marking_state) {
#if defined(CPPGC_YOUNG_GENERATION)
  StatsCollector::EnabledScope stats_scope(
      heap.stats_collector(), StatsCollector::kMarkVisitRememberedSets);
  for (void* slot : heap.remembered_slots()) {
    auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
                            ->ObjectHeaderFromInnerAddress(slot);
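    // Slots residing in young objects can be skipped: young objects are
    // traced as part of the minor collection anyway.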
    if (slot_header.IsYoung()) continue;
    // The design of the young generation requires collections to be executed
    // at the top level (with the guarantee that no objects are currently
    // under construction). This can be ensured by running young GCs from safe
    // points or by reintroducing nested allocation scopes that avoid
    // finalization.
    DCHECK(!slot_header.template IsInConstruction<AccessMode::kNonAtomic>());

    void* value = *reinterpret_cast<void**>(slot);
    mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
  }
#endif
}

// Assumes that all spaces have their LABs reset.
void ResetRememberedSet(HeapBase& heap) {
#if defined(CPPGC_YOUNG_GENERATION)
  auto& local_data = heap.caged_heap().local_data();
  local_data.age_table.Reset(&heap.caged_heap().allocator());
  heap.remembered_slots().clear();
#endif
}

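// Number of objects processed between two checks of the deadline predicate in
// DrainWorklistWithPredicate; keeps clock reads infrequent.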
static constexpr size_t kDefaultDeadlineCheckInterval = 150u;

template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
          typename WorklistLocal, typename Callback>
bool DrainWorklistWithBytesAndTimeDeadline(MarkingStateBase& marking_state,
                                           size_t marked_bytes_deadline,
                                           v8::base::TimeTicks time_deadline,
                                           WorklistLocal& worklist_local,
                                           Callback callback) {
  return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
      [&marking_state, marked_bytes_deadline, time_deadline]() {
        return (marked_bytes_deadline <= marking_state.marked_bytes()) ||
               (time_deadline <= v8::base::TimeTicks::Now());
      },
      worklist_local, callback);
}

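// Returns the marked-bytes budget for the next incremental step. Note that
// despite the name, the returned value is a byte count, not a duration.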
size_t GetNextIncrementalStepDuration(IncrementalMarkingSchedule& schedule,
                                      HeapBase& heap) {
  return schedule.GetNextIncrementalStepDuration(
      heap.stats_collector()->allocated_object_size());
}

}  // namespace

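// Out-of-line definition; required as long as the static constexpr member may
// be ODR-used under pre-C++17 semantics.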
constexpr v8::base::TimeDelta MarkerBase::kMaximumIncrementalStepDuration;

MarkerBase::IncrementalMarkingTask::IncrementalMarkingTask(
    MarkerBase* marker, MarkingConfig::StackState stack_state)
    : marker_(marker),
      stack_state_(stack_state),
      handle_(Handle::NonEmptyTag{}) {}

// static
MarkerBase::IncrementalMarkingTask::Handle
MarkerBase::IncrementalMarkingTask::Post(cppgc::TaskRunner* runner,
                                         MarkerBase* marker) {
  // Incremental GC is possible only via the GCInvoker, so getting here
  // guarantees that either non-nestable tasks or conservative stack
  // scanning are supported. This is required so that the incremental
  // task can safely finalize GC if needed.
  DCHECK_IMPLIES(marker->heap().stack_support() !=
                     HeapBase::StackSupport::kSupportsConservativeStackScan,
                 runner->NonNestableTasksEnabled());
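  // If the runner does not support non-nestable tasks, the task may run
  // nested inside other code and must therefore assume that the stack still
  // contains heap pointers.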
  MarkingConfig::StackState stack_state_for_task =
      runner->NonNestableTasksEnabled()
          ? MarkingConfig::StackState::kNoHeapPointers
          : MarkingConfig::StackState::kMayContainHeapPointers;
  auto task =
      std::make_unique<IncrementalMarkingTask>(marker, stack_state_for_task);
  auto handle = task->handle_;
  if (runner->NonNestableTasksEnabled()) {
    runner->PostNonNestableTask(std::move(task));
  } else {
    runner->PostTask(std::move(task));
  }
  return handle;
}

void MarkerBase::IncrementalMarkingTask::Run() {
  if (handle_.IsCanceled()) return;

  StatsCollector::EnabledScope stats_scope(marker_->heap().stats_collector(),
                                           StatsCollector::kIncrementalMark);

  if (marker_->IncrementalMarkingStep(stack_state_)) {
    // Incremental marking is done, so the GC should be finalized.
    marker_->heap().FinalizeIncrementalGarbageCollectionIfNeeded(stack_state_);
  }
}

MarkerBase::MarkerBase(Key, HeapBase& heap, cppgc::Platform* platform,
                       MarkingConfig config)
    : heap_(heap),
      config_(config),
      platform_(platform),
      foreground_task_runner_(platform_->GetForegroundTaskRunner()),
      mutator_marking_state_(heap, marking_worklists_,
                             heap.compactor().compaction_worklists()) {}

MarkerBase::~MarkerBase() {
  // The fixed-point iteration may have found not-fully-constructed objects.
  // However, such objects should have already been found through the stack
  // scan and should thus already be marked.
  if (!marking_worklists_.not_fully_constructed_worklist()->IsEmpty()) {
#if DEBUG
    DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state);
    std::unordered_set<HeapObjectHeader*> objects =
        mutator_marking_state_.not_fully_constructed_worklist().Extract();
    for (HeapObjectHeader* object : objects) DCHECK(object->IsMarked());
#else
    marking_worklists_.not_fully_constructed_worklist()->Clear();
#endif
  }

  // |discovered_ephemeron_pairs_worklist_| may still hold ephemeron pairs with
  // dead keys.
  if (!marking_worklists_.discovered_ephemeron_pairs_worklist()->IsEmpty()) {
#if DEBUG
    MarkingWorklists::EphemeronPairItem item;
    while (mutator_marking_state_.discovered_ephemeron_pairs_worklist().Pop(
        &item)) {
      DCHECK(!HeapObjectHeader::FromObject(item.key).IsMarked());
    }
#else
    marking_worklists_.discovered_ephemeron_pairs_worklist()->Clear();
#endif
  }

  marking_worklists_.weak_containers_worklist()->Clear();
}

void MarkerBase::StartMarking() {
  DCHECK(!is_marking_);
  StatsCollector::EnabledScope stats_scope(
      heap().stats_collector(),
      config_.marking_type == MarkingConfig::MarkingType::kAtomic
          ? StatsCollector::kAtomicMark
          : StatsCollector::kIncrementalMark);

  heap().stats_collector()->NotifyMarkingStarted(config_.collection_type,
                                                 config_.is_forced_gc);

  is_marking_ = true;
  if (EnterIncrementalMarkingIfNeeded(config_, heap())) {
    StatsCollector::EnabledScope inner_stats_scope(
        heap().stats_collector(), StatsCollector::kMarkIncrementalStart);

    // Performing incremental or concurrent marking.
    schedule_.NotifyIncrementalMarkingStart();
    // Scanning the stack is expensive, so it is only done in the atomic pause.
    VisitRoots(MarkingConfig::StackState::kNoHeapPointers);
    ScheduleIncrementalMarkingTask();
    if (config_.marking_type ==
        MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
      mutator_marking_state_.Publish();
      concurrent_marker_->Start();
    }
  }
}

void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
  StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
                                               StatsCollector::kAtomicMark);
  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                           StatsCollector::kMarkAtomicPrologue);

  if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
    // Cancel remaining concurrent/incremental tasks.
    concurrent_marker_->Cancel();
    incremental_marking_handle_.Cancel();
  }
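  // The remainder of marking runs as a single atomic pause using the given
  // stack state.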
  config_.stack_state = stack_state;
  config_.marking_type = MarkingConfig::MarkingType::kAtomic;

  {
    // VisitRoots also resets the LABs.
    VisitRoots(config_.stack_state);
    if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
      mutator_marking_state_.FlushNotFullyConstructedObjects();
      DCHECK(marking_worklists_.not_fully_constructed_worklist()->IsEmpty());
    } else {
      MarkNotFullyConstructedObjects();
    }
  }
}

void MarkerBase::LeaveAtomicPause() {
  {
    StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
                                                 StatsCollector::kAtomicMark);
    StatsCollector::EnabledScope stats_scope(
        heap().stats_collector(), StatsCollector::kMarkAtomicEpilogue);
    DCHECK(!incremental_marking_handle_);
    ResetRememberedSet(heap());
    heap().stats_collector()->NotifyMarkingCompleted(
        // GetOverallMarkedBytes also includes concurrently marked bytes.
        schedule_.GetOverallMarkedBytes());
    is_marking_ = false;
  }
  {
    // Weakness callbacks are forbidden from allocating objects.
    cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(heap_);
    ProcessWeakness();
  }
  // TODO(chromium:1056170): It would be better if the call to Unlock was
  // covered by some cppgc scope.
  g_process_mutex.Pointer()->Unlock();
  heap().SetStackStateOfPrevGC(config_.stack_state);
}

void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
  DCHECK(is_marking_);
  EnterAtomicPause(stack_state);
  {
    StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                             StatsCollector::kAtomicMark);
    CHECK(AdvanceMarkingWithLimits(v8::base::TimeDelta::Max(), SIZE_MAX));
    mutator_marking_state_.Publish();
  }
  LeaveAtomicPause();
}

void MarkerBase::ProcessWeakness() {
  DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);

  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                           StatsCollector::kAtomicWeak);

  heap().GetWeakPersistentRegion().Trace(&visitor());
  // Processing cross-thread handles requires taking the process lock.
  g_process_mutex.Get().AssertHeld();
  CHECK(visited_cross_thread_persistents_in_atomic_pause_);
  heap().GetWeakCrossThreadPersistentRegion().Trace(&visitor());

  // Call weak callbacks on objects that may now be pointing to dead objects.
  MarkingWorklists::WeakCallbackItem item;
  LivenessBroker broker = LivenessBrokerFactory::Create();
  MarkingWorklists::WeakCallbackWorklist::Local& local =
      mutator_marking_state_.weak_callback_worklist();
  while (local.Pop(&item)) {
    item.callback(broker, item.parameter);
  }

  // Weak callbacks should not add any new objects for marking.
  DCHECK(marking_worklists_.marking_worklist()->IsEmpty());
}

void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                           StatsCollector::kMarkVisitRoots);

  // Reset LABs before scanning roots. Clearing LABs allows the
  // ObjectStartBitmap to be used without special-casing LAB memory.
  heap().object_allocator().ResetLinearAllocationBuffers();

  {
    {
      StatsCollector::DisabledScope inner_stats_scope(
          heap().stats_collector(), StatsCollector::kMarkVisitPersistents);
      heap().GetStrongPersistentRegion().Trace(&visitor());
    }
  }

  if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
    StatsCollector::DisabledScope stack_stats_scope(
        heap().stats_collector(), StatsCollector::kMarkVisitStack);
    heap().stack()->IteratePointers(&stack_visitor());
  }
  if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
    VisitRememberedSlots(heap(), mutator_marking_state_);
  }
}

bool MarkerBase::VisitCrossThreadPersistentsIfNeeded() {
  if (config_.marking_type != MarkingConfig::MarkingType::kAtomic ||
      visited_cross_thread_persistents_in_atomic_pause_)
    return false;

  StatsCollector::DisabledScope inner_stats_scope(
      heap().stats_collector(),
      StatsCollector::kMarkVisitCrossThreadPersistents);
  // The lock guards against changes to {Weak}CrossThreadPersistent handles
  // that may conflict with marking. E.g., a WeakCrossThreadPersistent may be
  // converted into a CrossThreadPersistent, which requires that the handle
  // is either cleared or the object is retained.
  g_process_mutex.Pointer()->Lock();
  heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
  visited_cross_thread_persistents_in_atomic_pause_ = true;
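  // Report whether any new roots may have been found, in which case the
  // caller re-processes the marking worklists.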
  return (heap().GetStrongCrossThreadPersistentRegion().NodesInUse() > 0);
}

void MarkerBase::ScheduleIncrementalMarkingTask() {
  DCHECK(platform_);
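  // Incremental steps are driven through the foreground task runner; avoid
  // posting a new task while one is already pending.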
  if (!foreground_task_runner_ || incremental_marking_handle_) return;
  incremental_marking_handle_ =
      IncrementalMarkingTask::Post(foreground_task_runner_.get(), this);
}

bool MarkerBase::IncrementalMarkingStepForTesting(
    MarkingConfig::StackState stack_state) {
  return IncrementalMarkingStep(stack_state);
}

bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
  if (stack_state == MarkingConfig::StackState::kNoHeapPointers) {
    mutator_marking_state_.FlushNotFullyConstructedObjects();
  }
  config_.stack_state = stack_state;

  return AdvanceMarkingWithLimits();
}

void MarkerBase::AdvanceMarkingOnAllocation() {
  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                           StatsCollector::kIncrementalMark);
  StatsCollector::EnabledScope nested_scope(heap().stats_collector(),
                                            StatsCollector::kMarkOnAllocation);
  if (AdvanceMarkingWithLimits()) {
    // Schedule another incremental task to finalize the GC without a stack
    // scan.
    ScheduleIncrementalMarkingTask();
  }
}

bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
                                          size_t marked_bytes_limit) {
  bool is_done = false;
  if (!main_marking_disabled_for_testing_) {
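    // A limit of 0 means the step size is determined by the incremental
    // marking schedule.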
    if (marked_bytes_limit == 0) {
      marked_bytes_limit = mutator_marking_state_.marked_bytes() +
                           GetNextIncrementalStepDuration(schedule_, heap_);
    }
    StatsCollector::EnabledScope deadline_scope(
        heap().stats_collector(),
        StatsCollector::kMarkTransitiveClosureWithDeadline, "deadline_ms",
        max_duration.InMillisecondsF());
    const auto deadline = v8::base::TimeTicks::Now() + max_duration;
    is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
    if (is_done && VisitCrossThreadPersistentsIfNeeded()) {
      // Both limits are absolute and hence can be passed along without further
      // adjustment.
      is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
    }
    schedule_.UpdateMutatorThreadMarkedBytes(
        mutator_marking_state_.marked_bytes());
  }
  mutator_marking_state_.Publish();
  if (!is_done) {
    // If marking is atomic, |is_done| should always be true.
    DCHECK_NE(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
    ScheduleIncrementalMarkingTask();
    if (config_.marking_type ==
        MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
      concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
    }
  }
  return is_done;
}

bool MarkerBase::ProcessWorklistsWithDeadline(
    size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
  StatsCollector::EnabledScope stats_scope(
      heap().stats_collector(), StatsCollector::kMarkTransitiveClosure);
  bool saved_did_discover_new_ephemeron_pairs;
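  // Iterate until a fixed point is reached: processing ephemeron pairs may
  // discover new pairs, which in turn may make more objects reachable.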
  do {
    mutator_marking_state_.ResetDidDiscoverNewEphemeronPairs();
    if ((config_.marking_type == MarkingConfig::MarkingType::kAtomic) ||
        schedule_.ShouldFlushEphemeronPairs()) {
      mutator_marking_state_.FlushDiscoveredEphemeronPairs();
    }

    // Bailout objects may be complicated to trace and thus might take longer
    // than other objects. Therefore we reduce the interval between deadline
    // checks to guarantee the deadline is not exceeded.
    {
      StatsCollector::EnabledScope inner_scope(
          heap().stats_collector(), StatsCollector::kMarkProcessBailOutObjects);
      if (!DrainWorklistWithBytesAndTimeDeadline<kDefaultDeadlineCheckInterval /
                                                 5>(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.concurrent_marking_bailout_worklist(),
              [this](
                  const MarkingWorklists::ConcurrentMarkingBailoutItem& item) {
                mutator_marking_state_.AccountMarkedBytes(item.bailedout_size);
                item.callback(&visitor(), item.parameter);
              })) {
        return false;
      }
    }

    {
      StatsCollector::EnabledScope inner_scope(
          heap().stats_collector(),
          StatsCollector::kMarkProcessNotFullyconstructedWorklist);
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_
                  .previously_not_fully_constructed_worklist(),
              [this](HeapObjectHeader* header) {
                mutator_marking_state_.AccountMarkedBytes(*header);
                DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
                                                                     *header);
              })) {
        return false;
      }
    }

    {
      StatsCollector::EnabledScope inner_scope(
          heap().stats_collector(),
          StatsCollector::kMarkProcessMarkingWorklist);
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.marking_worklist(),
              [this](const MarkingWorklists::MarkingItem& item) {
                const HeapObjectHeader& header =
                    HeapObjectHeader::FromObject(item.base_object_payload);
                DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
                DCHECK(header.IsMarked<AccessMode::kNonAtomic>());
                mutator_marking_state_.AccountMarkedBytes(header);
                item.callback(&visitor(), item.base_object_payload);
              })) {
        return false;
      }
    }

    {
      StatsCollector::EnabledScope inner_scope(
          heap().stats_collector(),
          StatsCollector::kMarkProcessWriteBarrierWorklist);
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.write_barrier_worklist(),
              [this](HeapObjectHeader* header) {
                mutator_marking_state_.AccountMarkedBytes(*header);
                DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
                                                                     *header);
              })) {
        return false;
      }
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.retrace_marked_objects_worklist(),
              [this](HeapObjectHeader* header) {
                // Retracing does not increment marked bytes, as the object
                // has already been processed.
                DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
                                                                     *header);
              })) {
        return false;
      }
    }

    saved_did_discover_new_ephemeron_pairs =
        mutator_marking_state_.DidDiscoverNewEphemeronPairs();
    {
      StatsCollector::EnabledScope inner_stats_scope(
          heap().stats_collector(), StatsCollector::kMarkProcessEphemerons);
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.ephemeron_pairs_for_processing_worklist(),
              [this](const MarkingWorklists::EphemeronPairItem& item) {
                mutator_marking_state_.ProcessEphemeron(
                    item.key, item.value, item.value_desc, visitor());
              })) {
        return false;
      }
    }
  } while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty() ||
           saved_did_discover_new_ephemeron_pairs);
  return true;
}

void MarkerBase::MarkNotFullyConstructedObjects() {
  StatsCollector::DisabledScope stats_scope(
      heap().stats_collector(),
      StatsCollector::kMarkVisitNotFullyConstructedObjects);
  std::unordered_set<HeapObjectHeader*> objects =
      mutator_marking_state_.not_fully_constructed_worklist().Extract();
  for (HeapObjectHeader* object : objects) {
    DCHECK(object);
    // TraceConservativelyIfNeeded delegates to either in-construction or
    // fully constructed handling. Both handlers have their own marked bytes
    // accounting and markbit handling (bailout).
    conservative_visitor().TraceConservativelyIfNeeded(*object);
  }
}

void MarkerBase::ClearAllWorklistsForTesting() {
  marking_worklists_.ClearForTesting();
  auto* compaction_worklists = heap_.compactor().compaction_worklists();
  if (compaction_worklists) compaction_worklists->ClearForTesting();
}

void MarkerBase::SetMainThreadMarkingDisabledForTesting(bool value) {
  main_marking_disabled_for_testing_ = value;
}

void MarkerBase::WaitForConcurrentMarkingForTesting() {
  concurrent_marker_->JoinForTesting();
}

void MarkerBase::NotifyCompactionCancelled() {
  // Compaction cannot be cancelled while concurrent marking is active.
  DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
  DCHECK_IMPLIES(concurrent_marker_, !concurrent_marker_->IsActive());
  mutator_marking_state_.NotifyCompactionCancelled();
}

Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
               MarkingConfig config)
    : MarkerBase(key, heap, platform, config),
      marking_visitor_(heap, mutator_marking_state_),
      conservative_marking_visitor_(heap, mutator_marking_state_,
                                    marking_visitor_) {
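  // The concurrent marker shares the marking worklists and the incremental
  // marking schedule with the mutator thread.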
  concurrent_marker_ = std::make_unique<ConcurrentMarker>(
      heap_, marking_worklists_, schedule_, platform_);
}

}  // namespace internal
}  // namespace cppgc