// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/concurrent-marking.h"

#include <stack>
#include <unordered_map>

#include "include/v8config.h"
#include "src/execution/isolate.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/worklist.h"
#include "src/init/v8.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/utils/utils-inl.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

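// Marking state used by the background marking tasks. All bookkeeping is
// task-local: live-byte counts are accumulated in a per-task
// MemoryChunkDataMap rather than written to the MemoryChunk itself, so
// background markers never contend with the main thread. The buffered
// counts are published later via ConcurrentMarking::FlushMemoryChunkData.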
class ConcurrentMarkingState final
    : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
 public:
  explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
      : memory_chunk_data_(memory_chunk_data) {}

  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) {
    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                  reinterpret_cast<intptr_t>(chunk),
              MemoryChunk::kMarkBitmapOffset);
    return chunk->marking_bitmap<AccessMode::ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    (*memory_chunk_data_)[chunk].live_bytes += by;
  }

  // The live_bytes and SetLiveBytes methods of the marking state are
  // not used by the concurrent marker.

 private:
  MemoryChunkDataMap* memory_chunk_data_;
};

// Helper class for storing in-object slot addresses and values.
class SlotSnapshot {
 public:
  SlotSnapshot() : number_of_slots_(0) {}
  int number_of_slots() const { return number_of_slots_; }
  ObjectSlot slot(int i) const { return snapshot_[i].first; }
  Object value(int i) const { return snapshot_[i].second; }
  void clear() { number_of_slots_ = 0; }
  void add(ObjectSlot slot, Object value) {
    snapshot_[number_of_slots_++] = {slot, value};
  }

 private:
  static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kTaggedSize;
  int number_of_slots_;
  std::pair<ObjectSlot, Object> snapshot_[kMaxSnapshotSize];
  DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
};

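// Visitor used by the background marking tasks. Objects whose layout can
// change while marking runs concurrently with the mutator (JSObjects and
// cons/sliced/thin strings) are not visited in place: their tagged slots
// are first copied into a SlotSnapshot using relaxed loads, and the
// snapshot is visited instead. The GreyToBlack transition in ShouldVisit
// guarantees that exactly one task visits each object.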
class ConcurrentMarkingVisitor final
    : public MarkingVisitorBase<ConcurrentMarkingVisitor,
                                ConcurrentMarkingState> {
 public:
  ConcurrentMarkingVisitor(int task_id, MarkingWorklists* marking_worklists,
                           WeakObjects* weak_objects, Heap* heap,
                           unsigned mark_compact_epoch,
                           BytecodeFlushMode bytecode_flush_mode,
                           bool embedder_tracing_enabled, bool is_forced_gc,
                           MemoryChunkDataMap* memory_chunk_data)
      : MarkingVisitorBase(task_id, marking_worklists, weak_objects, heap,
                           mark_compact_epoch, bytecode_flush_mode,
                           embedder_tracing_enabled, is_forced_gc),
        marking_state_(memory_chunk_data),
        memory_chunk_data_(memory_chunk_data) {}

  template <typename T>
  static V8_INLINE T Cast(HeapObject object) {
    return T::cast(object);
  }

  // HeapVisitor overrides to implement the snapshotting protocol.

  bool AllowDefaultJSObjectVisit() { return false; }

  int VisitJSObject(Map map, JSObject object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSObjectFast(Map map, JSObject object) {
    return VisitJSObjectSubclassFast(map, object);
  }

  int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSWeakCollection(Map map, JSWeakCollection object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitConsString(Map map, ConsString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitSlicedString(Map map, SlicedString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitThinString(Map map, ThinString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitSeqOneByteString(Map map, SeqOneByteString object) {
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object);
    return SeqOneByteString::SizeFor(object.synchronized_length());
  }

  int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object);
    return SeqTwoByteString::SizeFor(object.synchronized_length());
  }

  // Implements ephemeron semantics: marks the value if the key is already
  // reachable. Returns true if the value was actually marked.
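  // Ephemerons whose key is still white cannot be resolved here; they are
  // pushed onto next_ephemerons so that the ephemeron fixpoint iteration
  // can retry them once more of the heap has been marked.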
  bool ProcessEphemeron(HeapObject key, HeapObject value) {
    if (marking_state_.IsBlackOrGrey(key)) {
      if (marking_state_.WhiteToGrey(value)) {
        marking_worklists_->Push(value);
        return true;
      }

    } else if (marking_state_.IsWhite(value)) {
      weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
    }
    return false;
  }

  // HeapVisitor override.
  bool ShouldVisit(HeapObject object) {
    return marking_state_.GreyToBlack(object);
  }

 private:
  // Helper class for collecting in-object slot addresses and values.
  class SlotSnapshottingVisitor final : public ObjectVisitor {
   public:
    explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
        : slot_snapshot_(slot_snapshot) {
      slot_snapshot_->clear();
    }

    void VisitPointers(HeapObject host, ObjectSlot start,
                       ObjectSlot end) override {
      for (ObjectSlot p = start; p < end; ++p) {
        Object object = p.Relaxed_Load();
        slot_snapshot_->add(p, object);
      }
    }

    void VisitPointers(HeapObject host, MaybeObjectSlot start,
                       MaybeObjectSlot end) override {
      // This should never happen, because we don't use snapshotting for
      // objects which contain weak references.
      UNREACHABLE();
    }

    void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
      // This should never happen, because snapshotting is performed only on
      // JSObjects (and derived classes).
      UNREACHABLE();
    }

    void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
      // This should never happen, because snapshotting is performed only on
      // JSObjects (and derived classes).
      UNREACHABLE();
    }

    void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
                                 ObjectSlot end) override {
      DCHECK(host.IsWeakCell() || host.IsJSWeakRef());
    }

   private:
    SlotSnapshot* slot_snapshot_;
  };

  template <typename T>
  int VisitJSObjectSubclassFast(Map map, T object) {
    DCHECK_IMPLIES(FLAG_unbox_double_fields, map.HasFastPointerLayout());
    using TBodyDescriptor = typename T::FastBodyDescriptor;
    return VisitJSObjectSubclass<T, TBodyDescriptor>(map, object);
  }

  template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
  int VisitJSObjectSubclass(Map map, T object) {
    int size = TBodyDescriptor::SizeOf(map, object);
    int used_size = map.UsedInstanceSize();
    DCHECK_LE(used_size, size);
    DCHECK_GE(used_size, JSObject::GetHeaderSize(map));
    return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object,
                                                          used_size, size);
  }

  template <typename T>
  int VisitLeftTrimmableArray(Map map, T object) {
    // The synchronized_length() function checks that the length is a Smi.
    // This is not necessarily the case if the array is being left-trimmed.
    Object length = object.unchecked_synchronized_length();
    if (!ShouldVisit(object)) return 0;
    // The cached length must be the actual length as the array is not black.
    // Left trimming marks the array black before over-writing the length.
    DCHECK(length.IsSmi());
    int size = T::SizeFor(Smi::ToInt(length));
    VisitMapPointer(object);
    T::BodyDescriptor::IterateBody(map, object, size, this);
    return size;
  }

  void VisitPointersInSnapshot(HeapObject host, const SlotSnapshot& snapshot) {
    for (int i = 0; i < snapshot.number_of_slots(); i++) {
      ObjectSlot slot = snapshot.slot(i);
      Object object = snapshot.value(i);
      DCHECK(!HasWeakHeapObjectTag(object));
      if (!object.IsHeapObject()) continue;
      HeapObject heap_object = HeapObject::cast(object);
      MarkObject(host, heap_object);
      RecordSlot(host, slot, heap_object);
    }
  }

  template <typename T>
  int VisitFullyWithSnapshot(Map map, T object) {
    using TBodyDescriptor = typename T::BodyDescriptor;
    int size = TBodyDescriptor::SizeOf(map, object);
    return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object, size,
                                                          size);
  }

  template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
  int VisitPartiallyWithSnapshot(Map map, T object, int used_size, int size) {
    const SlotSnapshot& snapshot =
        MakeSlotSnapshot<T, TBodyDescriptor>(map, object, used_size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  template <typename T, typename TBodyDescriptor>
  const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
    SlotSnapshottingVisitor visitor(&slot_snapshot_);
    visitor.VisitPointer(object, object.map_slot());
    TBodyDescriptor::IterateBody(map, object, size, &visitor);
    return slot_snapshot_;
  }

  template <typename TSlot>
  void RecordSlot(HeapObject object, TSlot slot, HeapObject target) {
    MarkCompactCollector::RecordSlot(object, slot, target);
  }

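  // Typed (embedded-pointer) slots are not inserted into the chunk's
  // remembered set directly; they are buffered in the per-task
  // MemoryChunkData and merged into the OLD_TO_OLD remembered set by
  // ConcurrentMarking::FlushMemoryChunkData on the main thread.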
  void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
    MarkCompactCollector::RecordRelocSlotInfo info =
        MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
    if (info.should_record) {
      MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
      if (!data.typed_slots) {
        data.typed_slots.reset(new TypedSlots());
      }
      data.typed_slots->Insert(info.slot_type, info.offset);
    }
  }

  void SynchronizePageAccess(HeapObject heap_object) {
#ifdef THREAD_SANITIZER
    // This is needed because TSAN does not process the memory fence
    // emitted after page initialization.
    MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
#endif
  }

  ConcurrentMarkingState* marking_state() { return &marking_state_; }

  TraceRetainingPathMode retaining_path_mode() {
    return TraceRetainingPathMode::kDisabled;
  }

  ConcurrentMarkingState marking_state_;
  MemoryChunkDataMap* memory_chunk_data_;
  SlotSnapshot slot_snapshot_;

  friend class MarkingVisitorBase<ConcurrentMarkingVisitor,
                                  ConcurrentMarkingState>;
};

// Strings can change maps due to conversion to thin string or external
// strings. Use unchecked cast to avoid data race in slow dchecks.
template <>
ConsString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return ConsString::unchecked_cast(object);
}

template <>
SlicedString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SlicedString::unchecked_cast(object);
}

template <>
ThinString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return ThinString::unchecked_cast(object);
}

template <>
SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SeqOneByteString::unchecked_cast(object);
}

template <>
SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SeqTwoByteString::unchecked_cast(object);
}

// Fixed array can become a free space during left trimming.
template <>
FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return FixedArray::unchecked_cast(object);
}

class ConcurrentMarking::Task : public CancelableTask {
 public:
  Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
       TaskState* task_state, int task_id)
      : CancelableTask(isolate),
        concurrent_marking_(concurrent_marking),
        task_state_(task_state),
        task_id_(task_id) {}

  ~Task() override = default;

 private:
  // v8::internal::CancelableTask overrides.
  void RunInternal() override {
    concurrent_marking_->Run(task_id_, task_state_);
  }

  ConcurrentMarking* concurrent_marking_;
  TaskState* task_state_;
  int task_id_;
  DISALLOW_COPY_AND_ASSIGN(Task);
};

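// Overview of the lifecycle, as driven by the main thread (a sketch for
// orientation only; the exact call sites live in Heap and
// MarkCompactCollector, and the accessor names below are illustrative):
//
//   ConcurrentMarking* cm = heap->concurrent_marking();
//   cm->ScheduleTasks();                   // spawn background marking tasks
//   ...                                    // mutator and markers run together
//   cm->Stop(StopRequest::PREEMPT_TASKS);  // or pause via PauseScope
//   cm->FlushMemoryChunkData(marking_state);  // publish buffered results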
ConcurrentMarking::ConcurrentMarking(
    Heap* heap, MarkingWorklistsHolder* marking_worklists_holder,
    WeakObjects* weak_objects)
    : heap_(heap),
      marking_worklists_holder_(marking_worklists_holder),
      weak_objects_(weak_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
  CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
#endif
}

void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
  TRACE_BACKGROUND_GC(heap_->tracer(),
                      GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
  const size_t kBytesUntilInterruptCheck = 64 * KB;
  const int kObjectsUntilInterruptCheck = 1000;
  MarkingWorklists marking_worklists(task_id, marking_worklists_holder_);
  ConcurrentMarkingVisitor visitor(
      task_id, &marking_worklists, weak_objects_, heap_,
      task_state->mark_compact_epoch, Heap::GetBytecodeFlushMode(),
      heap_->local_embedder_heap_tracer()->InUse(), task_state->is_forced_gc,
      &task_state->memory_chunk_data);
  NativeContextInferrer& native_context_inferrer =
      task_state->native_context_inferrer;
  NativeContextStats& native_context_stats = task_state->native_context_stats;
  double time_ms;
  size_t marked_bytes = 0;
  Isolate* isolate = heap_->isolate();
  if (FLAG_trace_concurrent_marking) {
    isolate->PrintWithTimestamp("Starting concurrent marking task %d\n",
                                task_id);
  }
  bool ephemeron_marked = false;

  {
    TimedScope scope(&time_ms);

    {
      Ephemeron ephemeron;

      while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
          ephemeron_marked = true;
        }
      }
    }
    bool is_per_context_mode = marking_worklists.IsPerContextMode();
    bool done = false;
    while (!done) {
      size_t current_marked_bytes = 0;
      int objects_processed = 0;
      while (current_marked_bytes < kBytesUntilInterruptCheck &&
             objects_processed < kObjectsUntilInterruptCheck) {
        HeapObject object;
        if (!marking_worklists.Pop(&object)) {
          done = true;
          break;
        }
        objects_processed++;
        // The order of the two loads is important: top is loaded with
        // acquire semantics before the relaxed load of limit, so the
        // [top, limit) check below sees a consistent allocation window.
        Address new_space_top = heap_->new_space()->original_top_acquire();
        Address new_space_limit = heap_->new_space()->original_limit_relaxed();
        Address new_large_object = heap_->new_lo_space()->pending_object();
        Address addr = object.address();
        if ((new_space_top <= addr && addr < new_space_limit) ||
            addr == new_large_object) {
          // Objects in the new-space allocation area or the pending large
          // object cannot be safely visited yet; defer them to the on-hold
          // worklist.
          marking_worklists.PushOnHold(object);
        } else {
          Map map = object.synchronized_map(isolate);
          if (is_per_context_mode) {
            Address context;
            if (native_context_inferrer.Infer(isolate, map, object,
                                              &context)) {
              marking_worklists.SwitchToContext(context);
            }
          }
          size_t visited_size = visitor.Visit(map, object);
          if (is_per_context_mode) {
            native_context_stats.IncrementSize(marking_worklists.Context(),
                                               map, object, visited_size);
          }
          current_marked_bytes += visited_size;
        }
      }
      marked_bytes += current_marked_bytes;
      base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                marked_bytes);
      if (task_state->preemption_request) {
        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                     "ConcurrentMarking::Run Preempted");
        break;
      }
    }

    if (done) {
      Ephemeron ephemeron;

      while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
          ephemeron_marked = true;
        }
      }
    }

    marking_worklists.FlushToGlobal();
    weak_objects_->transition_arrays.FlushToGlobal(task_id);
    weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
    weak_objects_->current_ephemerons.FlushToGlobal(task_id);
    weak_objects_->next_ephemerons.FlushToGlobal(task_id);
    weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
    weak_objects_->weak_references.FlushToGlobal(task_id);
    weak_objects_->js_weak_refs.FlushToGlobal(task_id);
    weak_objects_->weak_cells.FlushToGlobal(task_id);
    weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
    weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
    weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
    base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
    total_marked_bytes_ += marked_bytes;

    if (ephemeron_marked) {
      set_ephemeron_marked(true);
    }

    {
      base::MutexGuard guard(&pending_lock_);
      is_pending_[task_id] = false;
      --pending_task_count_;
      pending_condition_.NotifyAll();
    }
  }
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Task %d concurrently marked %dKB in %.2fms\n", task_id,
        static_cast<int>(marked_bytes / KB), time_ms);
  }
}

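// Spawns one background marking task per worker slot. The task count is
// derived from the platform's NumberOfWorkerThreads the first time tasks
// are scheduled and stays fixed afterwards.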
void ConcurrentMarking::ScheduleTasks() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  DCHECK(!heap_->IsTearingDown());
  base::MutexGuard guard(&pending_lock_);
  if (total_task_count_ == 0) {
    static const int num_cores =
        V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
#if defined(V8_OS_MACOSX)
    // Mac OSX 10.11 and prior seem to have trouble when doing concurrent
    // marking on competing hyper-threads (regresses Octane/Splay). As such,
    // only use num_cores/2, leaving one of those for the main thread.
    // TODO(ulan): Use all cores on Mac 10.12+.
    total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
#else   // defined(V8_OS_MACOSX)
    // On other platforms use all logical cores, leaving one for the main
    // thread.
    total_task_count_ = Max(1, Min(kMaxTasks, num_cores - 2));
#endif  // defined(V8_OS_MACOSX)
    DCHECK_LE(total_task_count_, kMaxTasks);
    // One task is for the main thread.
    STATIC_ASSERT(kMaxTasks + 1 <= MarkingWorklist::kMaxNumTasks);
  }
  // Task id 0 is for the main thread.
  for (int i = 1; i <= total_task_count_; i++) {
    if (!is_pending_[i]) {
      if (FLAG_trace_concurrent_marking) {
        heap_->isolate()->PrintWithTimestamp(
            "Scheduling concurrent marking task %d\n", i);
      }
      task_state_[i].preemption_request = false;
      task_state_[i].mark_compact_epoch =
          heap_->mark_compact_collector()->epoch();
      task_state_[i].is_forced_gc = heap_->is_current_gc_forced();
      is_pending_[i] = true;
      ++pending_task_count_;
      auto task =
          std::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
      cancelable_id_[i] = task->id();
      V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
    }
  }
  DCHECK_EQ(total_task_count_, pending_task_count_);
}

void ConcurrentMarking::RescheduleTasksIfNeeded() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  if (heap_->IsTearingDown()) return;
  {
    base::MutexGuard guard(&pending_lock_);
    // The total task count is initialized in ScheduleTasks from
    // NumberOfWorkerThreads of the platform.
    if (total_task_count_ > 0 && pending_task_count_ == total_task_count_) {
      return;
    }
  }
  if (!marking_worklists_holder_->shared()->IsGlobalPoolEmpty() ||
      !weak_objects_->current_ephemerons.IsGlobalPoolEmpty() ||
      !weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
    ScheduleTasks();
  }
}

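// Stops background marking according to |stop_request|: tasks that have not
// started yet are aborted via the CancelableTaskManager; already-running
// tasks are either asked to preempt (PREEMPT_TASKS) or left to run to
// completion. In all cases this blocks until no tasks are pending. Returns
// false if nothing was running.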
bool ConcurrentMarking::Stop(StopRequest stop_request) {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  base::MutexGuard guard(&pending_lock_);

  if (pending_task_count_ == 0) return false;

  if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
    CancelableTaskManager* task_manager =
        heap_->isolate()->cancelable_task_manager();
    for (int i = 1; i <= total_task_count_; i++) {
      if (is_pending_[i]) {
        if (task_manager->TryAbort(cancelable_id_[i]) ==
            TryAbortResult::kTaskAborted) {
          is_pending_[i] = false;
          --pending_task_count_;
        } else if (stop_request == StopRequest::PREEMPT_TASKS) {
          task_state_[i].preemption_request = true;
        }
      }
    }
  }
  while (pending_task_count_ > 0) {
    pending_condition_.Wait(&pending_lock_);
  }
  for (int i = 1; i <= total_task_count_; i++) {
    DCHECK(!is_pending_[i]);
  }
  return true;
}

bool ConcurrentMarking::IsStopped() {
  if (!FLAG_concurrent_marking) return true;

  base::MutexGuard guard(&pending_lock_);
  return pending_task_count_ == 0;
}

void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
  for (int i = 1; i <= total_task_count_; i++) {
    main_stats->Merge(task_state_[i].native_context_stats);
    task_state_[i].native_context_stats.Clear();
  }
}

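// Publishes the live-byte counts and typed slots buffered by the tasks to
// the main thread's marking state and remembered sets. May only be called
// once all tasks have stopped (see the DCHECK below), since the task states
// are read here without synchronization.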
void ConcurrentMarking::FlushMemoryChunkData(
    MajorNonAtomicMarkingState* marking_state) {
  DCHECK_EQ(pending_task_count_, 0);
  for (int i = 1; i <= total_task_count_; i++) {
    MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
    for (auto& pair : memory_chunk_data) {
      // ClearLiveness sets the live bytes to zero.
      // Pages with zero live bytes might be already unmapped.
      MemoryChunk* memory_chunk = pair.first;
      MemoryChunkData& data = pair.second;
      if (data.live_bytes) {
        marking_state->IncrementLiveBytes(memory_chunk, data.live_bytes);
      }
      if (data.typed_slots) {
        RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
                                              std::move(data.typed_slots));
      }
    }
    memory_chunk_data.clear();
    task_state_[i].marked_bytes = 0;
  }
  total_marked_bytes_ = 0;
}

void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
  for (int i = 1; i <= total_task_count_; i++) {
    auto it = task_state_[i].memory_chunk_data.find(chunk);
    if (it != task_state_[i].memory_chunk_data.end()) {
      it->second.live_bytes = 0;
      it->second.typed_slots.reset();
    }
  }
}

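// Sums the bytes reported by completed tasks (total_marked_bytes_) with the
// relaxed-loaded counters of tasks that are still running; while marking is
// in progress the result is therefore only an approximation.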
size_t ConcurrentMarking::TotalMarkedBytes() {
  size_t result = 0;
  for (int i = 1; i <= total_task_count_; i++) {
    result +=
        base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
  }
  result += total_marked_bytes_;
  return result;
}

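// RAII scope that preempts the background marking tasks for its lifetime
// and reschedules them on destruction if any were actually stopped.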
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
    : concurrent_marking_(concurrent_marking),
      resume_on_exit_(FLAG_concurrent_marking &&
                      concurrent_marking_->Stop(
                          ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
  DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}

ConcurrentMarking::PauseScope::~PauseScope() {
  if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
}

}  // namespace internal
}  // namespace v8