// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/concurrent-marking.h"

#include <stack>
#include <unordered_map>

#include "include/v8config.h"
#include "src/execution/isolate.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/worklist.h"
#include "src/init/v8.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/utils/utils-inl.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

class ConcurrentMarkingState final
    : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
 public:
  explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
      : memory_chunk_data_(memory_chunk_data) {}

  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const BasicMemoryChunk* chunk) {
    return chunk->marking_bitmap<AccessMode::ATOMIC>();
  }

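  // Live bytes are accumulated in the per-task MemoryChunkDataMap and merged
  // into the page counters on the main thread (see
  // ConcurrentMarking::FlushMemoryChunkData), so concurrent tasks never touch
  // the page counters directly.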
  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    (*memory_chunk_data_)[chunk].live_bytes += by;
  }

  // The live_bytes and SetLiveBytes methods of the marking state are
  // not used by the concurrent marker.

 private:
  MemoryChunkDataMap* memory_chunk_data_;
};

// Helper class for storing in-object slot addresses and values.
class SlotSnapshot {
 public:
  SlotSnapshot() : number_of_slots_(0) {}
  int number_of_slots() const { return number_of_slots_; }
  ObjectSlot slot(int i) const { return snapshot_[i].first; }
  Object value(int i) const { return snapshot_[i].second; }
  void clear() { number_of_slots_ = 0; }
  void add(ObjectSlot slot, Object value) {
    snapshot_[number_of_slots_++] = {slot, value};
  }

 private:
  static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kTaggedSize;
  int number_of_slots_;
  std::pair<ObjectSlot, Object> snapshot_[kMaxSnapshotSize];
  DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
};

class ConcurrentMarkingVisitor final
    : public MarkingVisitorBase<ConcurrentMarkingVisitor,
                                ConcurrentMarkingState> {
 public:
  ConcurrentMarkingVisitor(int task_id,
                           MarkingWorklists::Local* local_marking_worklists,
                           WeakObjects* weak_objects, Heap* heap,
                           unsigned mark_compact_epoch,
                           BytecodeFlushMode bytecode_flush_mode,
                           bool embedder_tracing_enabled, bool is_forced_gc,
                           MemoryChunkDataMap* memory_chunk_data)
      : MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
                           mark_compact_epoch, bytecode_flush_mode,
                           embedder_tracing_enabled, is_forced_gc),
        marking_state_(memory_chunk_data),
        memory_chunk_data_(memory_chunk_data) {}

  template <typename T>
  static V8_INLINE T Cast(HeapObject object) {
    return T::cast(object);
  }

  // HeapVisitor overrides to implement the snapshotting protocol.
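  //
  // JS objects may be modified concurrently by the main thread, so instead of
  // iterating an object directly, the visitor first copies its tagged slots
  // into a local SlotSnapshot and then marks and records slots from that
  // snapshot (see VisitPartiallyWithSnapshot below).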

  bool AllowDefaultJSObjectVisit() { return false; }

  int VisitJSObject(Map map, JSObject object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSObjectFast(Map map, JSObject object) {
    return VisitJSObjectSubclassFast(map, object);
  }

  int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSWeakCollection(Map map, JSWeakCollection object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitConsString(Map map, ConsString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitSlicedString(Map map, SlicedString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitThinString(Map map, ThinString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitSeqOneByteString(Map map, SeqOneByteString object) {
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object);
    return SeqOneByteString::SizeFor(object.synchronized_length());
  }

  int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object);
    return SeqTwoByteString::SizeFor(object.synchronized_length());
  }

  // Implements ephemeron semantics: Marks value if key is already reachable.
  // Returns true if value was actually marked.
  bool ProcessEphemeron(HeapObject key, HeapObject value) {
    if (marking_state_.IsBlackOrGrey(key)) {
      if (marking_state_.WhiteToGrey(value)) {
        local_marking_worklists_->Push(value);
        return true;
      }

    } else if (marking_state_.IsWhite(value)) {
      weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
    }
    return false;
  }

  // HeapVisitor override.
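  // The grey-to-black transition below is atomic, so each live object is
  // visited by exactly one marking task.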
  bool ShouldVisit(HeapObject object) {
    return marking_state_.GreyToBlack(object);
  }

 private:
  // Helper class for collecting in-object slot addresses and values.
  class SlotSnapshottingVisitor final : public ObjectVisitor {
   public:
    explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
        : slot_snapshot_(slot_snapshot) {
      slot_snapshot_->clear();
    }

    void VisitPointers(HeapObject host, ObjectSlot start,
                       ObjectSlot end) override {
      for (ObjectSlot p = start; p < end; ++p) {
        Object object = p.Relaxed_Load();
        slot_snapshot_->add(p, object);
      }
    }

    void VisitPointers(HeapObject host, MaybeObjectSlot start,
                       MaybeObjectSlot end) override {
      // This should never happen, because we don't use snapshotting for objects
      // which contain weak references.
      UNREACHABLE();
    }

    void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
      // This should never happen, because snapshotting is performed only on
      // JSObjects (and derived classes).
      UNREACHABLE();
    }

    void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
      // This should never happen, because snapshotting is performed only on
      // JSObjects (and derived classes).
      UNREACHABLE();
    }

    void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
                                 ObjectSlot end) override {
      DCHECK(host.IsWeakCell() || host.IsJSWeakRef());
    }

   private:
    SlotSnapshot* slot_snapshot_;
  };

  template <typename T>
  int VisitJSObjectSubclassFast(Map map, T object) {
    DCHECK_IMPLIES(FLAG_unbox_double_fields, map.HasFastPointerLayout());
    using TBodyDescriptor = typename T::FastBodyDescriptor;
    return VisitJSObjectSubclass<T, TBodyDescriptor>(map, object);
  }

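  // Only the used part of the instance (excluding slack left behind by
  // in-object slack tracking) is snapshotted and visited, but the full size is
  // returned so that live bytes are accounted correctly.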
  template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
  int VisitJSObjectSubclass(Map map, T object) {
    int size = TBodyDescriptor::SizeOf(map, object);
    int used_size = map.UsedInstanceSize();
    DCHECK_LE(used_size, size);
    DCHECK_GE(used_size, JSObject::GetHeaderSize(map));
    return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object,
                                                          used_size, size);
  }

  template <typename T>
  int VisitLeftTrimmableArray(Map map, T object) {
    // The synchronized_length() function checks that the length is a Smi.
    // This is not necessarily the case if the array is being left-trimmed.
    Object length = object.unchecked_synchronized_length();
    if (!ShouldVisit(object)) return 0;
    // The cached length must be the actual length as the array is not black.
    // Left trimming marks the array black before over-writing the length.
    DCHECK(length.IsSmi());
    int size = T::SizeFor(Smi::ToInt(length));
    VisitMapPointer(object);
    T::BodyDescriptor::IterateBody(map, object, size, this);
    return size;
  }

  void VisitPointersInSnapshot(HeapObject host, const SlotSnapshot& snapshot) {
    for (int i = 0; i < snapshot.number_of_slots(); i++) {
      ObjectSlot slot = snapshot.slot(i);
      Object object = snapshot.value(i);
      DCHECK(!HasWeakHeapObjectTag(object));
      if (!object.IsHeapObject()) continue;
      HeapObject heap_object = HeapObject::cast(object);
      MarkObject(host, heap_object);
      RecordSlot(host, slot, heap_object);
    }
  }

  template <typename T>
  int VisitFullyWithSnapshot(Map map, T object) {
    using TBodyDescriptor = typename T::BodyDescriptor;
    int size = TBodyDescriptor::SizeOf(map, object);
    return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object, size,
                                                          size);
  }

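  // The snapshot is taken before attempting the grey-to-black transition; if
  // the transition fails, another task has already visited the object and the
  // snapshot is discarded.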
  template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
  int VisitPartiallyWithSnapshot(Map map, T object, int used_size, int size) {
    const SlotSnapshot& snapshot =
        MakeSlotSnapshot<T, TBodyDescriptor>(map, object, used_size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  template <typename T, typename TBodyDescriptor>
  const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
    SlotSnapshottingVisitor visitor(&slot_snapshot_);
    visitor.VisitPointer(object, object.map_slot());
    TBodyDescriptor::IterateBody(map, object, size, &visitor);
    return slot_snapshot_;
  }

  template <typename TSlot>
  void RecordSlot(HeapObject object, TSlot slot, HeapObject target) {
    MarkCompactCollector::RecordSlot(object, slot, target);
  }

  void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
    MarkCompactCollector::RecordRelocSlotInfo info =
        MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
    if (info.should_record) {
      MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
      if (!data.typed_slots) {
        data.typed_slots.reset(new TypedSlots());
      }
      data.typed_slots->Insert(info.slot_type, info.offset);
    }
  }

  void SynchronizePageAccess(HeapObject heap_object) {
#ifdef THREAD_SANITIZER
    // This is needed because TSAN does not process the memory fence
    // emitted after page initialization.
    BasicMemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
#endif
  }

  ConcurrentMarkingState* marking_state() { return &marking_state_; }

  TraceRetainingPathMode retaining_path_mode() {
    return TraceRetainingPathMode::kDisabled;
  }

  ConcurrentMarkingState marking_state_;
  MemoryChunkDataMap* memory_chunk_data_;
  SlotSnapshot slot_snapshot_;

  friend class MarkingVisitorBase<ConcurrentMarkingVisitor,
                                  ConcurrentMarkingState>;
};

// Strings can change maps due to conversion to thin or external strings.
// Use unchecked casts to avoid data races in slow DCHECKs.
template <>
ConsString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return ConsString::unchecked_cast(object);
}

template <>
SlicedString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SlicedString::unchecked_cast(object);
}

template <>
ThinString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return ThinString::unchecked_cast(object);
}

template <>
SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SeqOneByteString::unchecked_cast(object);
}

template <>
SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SeqTwoByteString::unchecked_cast(object);
}

// Fixed array can become a free space during left trimming.
template <>
FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return FixedArray::unchecked_cast(object);
}

// The Deserializer changes the map from StrongDescriptorArray to
// DescriptorArray.
template <>
StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return StrongDescriptorArray::unchecked_cast(DescriptorArray::cast(object));
}

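// Adapter between ConcurrentMarking and the platform's JobTask API. The
// JobDelegate supplies the worker's task id and the preemption signal.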
class ConcurrentMarking::JobTask : public v8::JobTask {
 public:
  JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
          bool is_forced_gc)
      : concurrent_marking_(concurrent_marking),
        mark_compact_epoch_(mark_compact_epoch),
        is_forced_gc_(is_forced_gc) {}

  ~JobTask() override = default;

  // v8::JobTask overrides.
  void Run(JobDelegate* delegate) override {
    concurrent_marking_->Run(delegate, mark_compact_epoch_, is_forced_gc_);
  }

  size_t GetMaxConcurrency(size_t worker_count) const override {
    return concurrent_marking_->GetMaxConcurrency(worker_count);
  }

 private:
  ConcurrentMarking* concurrent_marking_;
  const unsigned mark_compact_epoch_;
  const bool is_forced_gc_;
  DISALLOW_COPY_AND_ASSIGN(JobTask);
};

ConcurrentMarking::ConcurrentMarking(Heap* heap,
                                     MarkingWorklists* marking_worklists,
                                     WeakObjects* weak_objects)
    : heap_(heap),
      marking_worklists_(marking_worklists),
      weak_objects_(weak_objects) {
#ifndef V8_ATOMIC_MARKING_STATE
  // Concurrent and parallel marking require atomic marking state.
  CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
#endif
#ifndef V8_ATOMIC_OBJECT_FIELD_WRITES
  // Concurrent marking requires atomic object field writes.
  CHECK(!FLAG_concurrent_marking);
#endif
}

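// Body of a single marking worker. Drains the local marking worklists,
// checking for preemption roughly every kBytesUntilInterruptCheck bytes or
// kObjectsUntilInterruptCheck objects. Objects that may still be under
// construction in new space (between the original top and limit, or the
// pending new large object) are pushed onto the on-hold worklist instead of
// being visited.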
void ConcurrentMarking::Run(JobDelegate* delegate, unsigned mark_compact_epoch,
                            bool is_forced_gc) {
  TRACE_BACKGROUND_GC(heap_->tracer(),
                      GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
  constexpr size_t kBytesUntilInterruptCheck = 64 * KB;
  constexpr int kObjectsUntilInterruptCheck = 1000;
  uint8_t task_id = delegate->GetTaskId() + 1;
  TaskState* task_state = &task_state_[task_id];
  MarkingWorklists::Local local_marking_worklists(marking_worklists_);
  ConcurrentMarkingVisitor visitor(
      task_id, &local_marking_worklists, weak_objects_, heap_,
      mark_compact_epoch, Heap::GetBytecodeFlushMode(),
      heap_->local_embedder_heap_tracer()->InUse(), is_forced_gc,
      &task_state->memory_chunk_data);
  NativeContextInferrer& native_context_inferrer =
      task_state->native_context_inferrer;
  NativeContextStats& native_context_stats = task_state->native_context_stats;
  double time_ms;
  size_t marked_bytes = 0;
  Isolate* isolate = heap_->isolate();
  if (FLAG_trace_concurrent_marking) {
    isolate->PrintWithTimestamp("Starting concurrent marking task %d\n",
                                task_id);
  }
  bool ephemeron_marked = false;

  {
    TimedScope scope(&time_ms);

    {
      Ephemeron ephemeron;

      while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
          ephemeron_marked = true;
        }
      }
    }
    bool is_per_context_mode = local_marking_worklists.IsPerContextMode();
    bool done = false;
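    // Main marking loop: process work in small packets so that the scheduler
    // can preempt this task between packets.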
    while (!done) {
      size_t current_marked_bytes = 0;
      int objects_processed = 0;
      while (current_marked_bytes < kBytesUntilInterruptCheck &&
             objects_processed < kObjectsUntilInterruptCheck) {
        HeapObject object;
        if (!local_marking_worklists.Pop(&object)) {
          done = true;
          break;
        }
        objects_processed++;
        // The order of the two loads is important.
        Address new_space_top = heap_->new_space()->original_top_acquire();
        Address new_space_limit = heap_->new_space()->original_limit_relaxed();
        Address new_large_object = heap_->new_lo_space()->pending_object();
        Address addr = object.address();
        if ((new_space_top <= addr && addr < new_space_limit) ||
            addr == new_large_object) {
          local_marking_worklists.PushOnHold(object);
        } else {
          Map map = object.synchronized_map(isolate);
          if (is_per_context_mode) {
            Address context;
            if (native_context_inferrer.Infer(isolate, map, object, &context)) {
              local_marking_worklists.SwitchToContext(context);
            }
          }
          size_t visited_size = visitor.Visit(map, object);
          if (is_per_context_mode) {
            native_context_stats.IncrementSize(
                local_marking_worklists.Context(), map, object, visited_size);
          }
          current_marked_bytes += visited_size;
        }
      }
      marked_bytes += current_marked_bytes;
      base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                marked_bytes);
      if (delegate->ShouldYield()) {
        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                     "ConcurrentMarking::Run Preempted");
        break;
      }
    }

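    // Ephemerons discovered while marking are only retried here if this task
    // fully drained its worklists; otherwise they are merely flushed to the
    // global pool below.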
    if (done) {
      Ephemeron ephemeron;

      while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
          ephemeron_marked = true;
        }
      }
    }

    local_marking_worklists.Publish();
    weak_objects_->transition_arrays.FlushToGlobal(task_id);
    weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
    weak_objects_->current_ephemerons.FlushToGlobal(task_id);
    weak_objects_->next_ephemerons.FlushToGlobal(task_id);
    weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
    weak_objects_->weak_references.FlushToGlobal(task_id);
    weak_objects_->js_weak_refs.FlushToGlobal(task_id);
    weak_objects_->weak_cells.FlushToGlobal(task_id);
    weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
    weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
    weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
    base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
    total_marked_bytes_ += marked_bytes;

    if (ephemeron_marked) {
      set_ephemeron_marked(true);
    }
  }
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Task %d concurrently marked %dKB in %.2fms\n", task_id,
        static_cast<int>(marked_bytes / KB), time_ms);
  }
}

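// The desired number of workers is capped at kMaxTasks and otherwise scales
// with the amount of work that is currently available: shared and per-context
// marking items plus pending ephemerons.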
size_t ConcurrentMarking::GetMaxConcurrency(size_t worker_count) {
  size_t marking_items = marking_worklists_->shared()->Size();
  for (auto& worklist : marking_worklists_->context_worklists())
    marking_items += worklist.worklist->Size();
  return std::min<size_t>(
      kMaxTasks,
      worker_count + std::max<size_t>(
                         {marking_items,
                          weak_objects_->discovered_ephemerons.GlobalPoolSize(),
                          weak_objects_->current_ephemerons.GlobalPoolSize()}));
}

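// Starts a fresh marking job at the given priority. The mark-compact epoch and
// the forced-GC bit are latched here so that every worker of this job agrees
// on them.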
void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  DCHECK(!heap_->IsTearingDown());
  DCHECK(!job_handle_ || !job_handle_->IsValid());

  job_handle_ = V8::GetCurrentPlatform()->PostJob(
      priority,
      std::make_unique<JobTask>(this, heap_->mark_compact_collector()->epoch(),
                                heap_->is_current_gc_forced()));
  DCHECK(job_handle_->IsValid());
}

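// Either starts a new job or notifies the running one that more work (and
// possibly a higher priority) is available. Does nothing if all global
// worklists are empty.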
void ConcurrentMarking::RescheduleJobIfNeeded(TaskPriority priority) {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  if (heap_->IsTearingDown()) return;

  if (marking_worklists_->shared()->IsEmpty() &&
      weak_objects_->current_ephemerons.IsGlobalPoolEmpty() &&
      weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
    return;
  }
  if (!job_handle_ || !job_handle_->IsValid()) {
    ScheduleJob(priority);
  } else {
    if (priority != TaskPriority::kUserVisible)
      job_handle_->UpdatePriority(priority);
    job_handle_->NotifyConcurrencyIncrease();
  }
}

void ConcurrentMarking::Join() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  if (!job_handle_ || !job_handle_->IsValid()) return;
  job_handle_->Join();
}

bool ConcurrentMarking::Pause() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  if (!job_handle_ || !job_handle_->IsValid()) return false;

  job_handle_->Cancel();
  return true;
}

bool ConcurrentMarking::IsStopped() {
  if (!FLAG_concurrent_marking) return true;

  return !job_handle_ || !job_handle_->IsValid();
}

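// Merges the per-task native-context size statistics into the main-thread
// stats. Must only be called while no marking job is running.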
void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
  DCHECK(!job_handle_ || !job_handle_->IsValid());
  for (int i = 1; i <= kMaxTasks; i++) {
    main_stats->Merge(task_state_[i].native_context_stats);
    task_state_[i].native_context_stats.Clear();
  }
}

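// Merges the live-byte counts and typed slots recorded by concurrent tasks
// back into the main-thread marking state and the old-to-old remembered set.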
void ConcurrentMarking::FlushMemoryChunkData(
    MajorNonAtomicMarkingState* marking_state) {
  DCHECK(!job_handle_ || !job_handle_->IsValid());
  for (int i = 1; i <= kMaxTasks; i++) {
    MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
    for (auto& pair : memory_chunk_data) {
      // ClearLiveness sets the live bytes to zero.
      // Pages with zero live bytes might be already unmapped.
      MemoryChunk* memory_chunk = pair.first;
      MemoryChunkData& data = pair.second;
      if (data.live_bytes) {
        marking_state->IncrementLiveBytes(memory_chunk, data.live_bytes);
      }
      if (data.typed_slots) {
        RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
                                              std::move(data.typed_slots));
      }
    }
    memory_chunk_data.clear();
    task_state_[i].marked_bytes = 0;
  }
  total_marked_bytes_ = 0;
}

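// Drops the live-byte count and typed slots that concurrent tasks have
// recorded for the given chunk.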
void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
  DCHECK(!job_handle_ || !job_handle_->IsValid());
  for (int i = 1; i <= kMaxTasks; i++) {
    auto it = task_state_[i].memory_chunk_data.find(chunk);
    if (it != task_state_[i].memory_chunk_data.end()) {
      it->second.live_bytes = 0;
      it->second.typed_slots.reset();
    }
  }
}

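// Approximate number of bytes marked so far: bytes already flushed by finished
// task runs plus the relaxed in-progress counters of running tasks.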
size_t ConcurrentMarking::TotalMarkedBytes() {
  size_t result = 0;
  for (int i = 1; i <= kMaxTasks; i++) {
    result +=
        base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
  }
  result += total_marked_bytes_;
  return result;
}

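// Pauses concurrent marking for the lifetime of the scope and reschedules the
// job on destruction if marking was actually running when the scope was
// entered.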
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
    : concurrent_marking_(concurrent_marking),
      resume_on_exit_(FLAG_concurrent_marking && concurrent_marking_->Pause()) {
  DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}

ConcurrentMarking::PauseScope::~PauseScope() {
  if (resume_on_exit_) concurrent_marking_->RescheduleJobIfNeeded();
}

}  // namespace internal
}  // namespace v8