// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/concurrent-marking.h"

#include <stack>
#include <unordered_map>

#include "include/v8config.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/worklist.h"
#include "src/init/v8.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/objects/visitors.h"
#include "src/utils/utils-inl.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

class ConcurrentMarkingState final
    : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
 public:
  explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
      : memory_chunk_data_(memory_chunk_data) {}

  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const BasicMemoryChunk* chunk) {
    return chunk->marking_bitmap<AccessMode::ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    (*memory_chunk_data_)[chunk].live_bytes += by;
  }

  // The live_bytes and SetLiveBytes methods of the marking state are
  // not used by the concurrent marker.

 private:
  MemoryChunkDataMap* memory_chunk_data_;
};

// Helper class for storing in-object slot addresses and values.
class SlotSnapshot {
 public:
  SlotSnapshot() : number_of_slots_(0) {}
  SlotSnapshot(const SlotSnapshot&) = delete;
  SlotSnapshot& operator=(const SlotSnapshot&) = delete;
  int number_of_slots() const { return number_of_slots_; }
  ObjectSlot slot(int i) const { return snapshot_[i].first; }
  Object value(int i) const { return snapshot_[i].second; }
  void clear() { number_of_slots_ = 0; }
  void add(ObjectSlot slot, Object value) {
    snapshot_[number_of_slots_++] = {slot, value};
  }

 private:
  static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kTaggedSize;
  int number_of_slots_;
  std::pair<ObjectSlot, Object> snapshot_[kMaxSnapshotSize];
};

class ConcurrentMarkingVisitor final
    : public MarkingVisitorBase<ConcurrentMarkingVisitor,
                                ConcurrentMarkingState> {
 public:
  ConcurrentMarkingVisitor(int task_id,
                           MarkingWorklists::Local* local_marking_worklists,
                           WeakObjects* weak_objects, Heap* heap,
                           unsigned mark_compact_epoch,
                           base::EnumSet<CodeFlushMode> code_flush_mode,
                           bool embedder_tracing_enabled,
                           bool should_keep_ages_unchanged,
                           MemoryChunkDataMap* memory_chunk_data)
      : MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
                           mark_compact_epoch, code_flush_mode,
                           embedder_tracing_enabled,
                           should_keep_ages_unchanged),
        marking_state_(memory_chunk_data),
        memory_chunk_data_(memory_chunk_data) {}

  template <typename T>
  static V8_INLINE T Cast(HeapObject object) {
    return T::cast(object);
  }

  // HeapVisitor overrides to implement the snapshotting protocol.

  bool AllowDefaultJSObjectVisit() { return false; }

  int VisitJSObject(Map map, JSObject object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSObjectFast(Map map, JSObject object) {
    return VisitJSObjectSubclassFast(map, object);
  }

#if V8_ENABLE_WEBASSEMBLY
  int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
    return VisitJSObjectSubclass(map, object);
  }
#endif  // V8_ENABLE_WEBASSEMBLY

  int VisitJSWeakCollection(Map map, JSWeakCollection object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitConsString(Map map, ConsString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitSlicedString(Map map, SlicedString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitThinString(Map map, ThinString object) {
    return VisitFullyWithSnapshot(map, object);
  }

  int VisitSeqOneByteString(Map map, SeqOneByteString object) {
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object);
    return SeqOneByteString::SizeFor(object.length(kAcquireLoad));
  }

  int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object);
    return SeqTwoByteString::SizeFor(object.length(kAcquireLoad));
  }

  // Implements ephemeron semantics: Marks value if key is already reachable.
  // Returns true if value was actually marked.
  bool ProcessEphemeron(HeapObject key, HeapObject value) {
    if (marking_state_.IsBlackOrGrey(key)) {
      if (marking_state_.WhiteToGrey(value)) {
        local_marking_worklists_->Push(value);
        return true;
      }

    } else if (marking_state_.IsWhite(value)) {
      weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
    }
    return false;
  }

  // HeapVisitor override.
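  // Only the task that wins the atomic grey-to-black transition below visits
  // the object, so each live object is visited by exactly one marking task.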
  bool ShouldVisit(HeapObject object) {
    return marking_state_.GreyToBlack(object);
  }

 private:
  // Helper class for collecting in-object slot addresses and values.
  class SlotSnapshottingVisitor final : public ObjectVisitorWithCageBases {
   public:
    explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot,
                                     PtrComprCageBase cage_base,
                                     PtrComprCageBase code_cage_base)
        : ObjectVisitorWithCageBases(cage_base, code_cage_base),
          slot_snapshot_(slot_snapshot) {
      slot_snapshot_->clear();
    }

    void VisitPointers(HeapObject host, ObjectSlot start,
                       ObjectSlot end) override {
      for (ObjectSlot p = start; p < end; ++p) {
        Object object = p.Relaxed_Load(cage_base());
        slot_snapshot_->add(p, object);
      }
    }

    void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
      CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
      Object code = slot.Relaxed_Load(code_cage_base());
      slot_snapshot_->add(ObjectSlot(slot.address()), code);
    }

    void VisitPointers(HeapObject host, MaybeObjectSlot start,
                       MaybeObjectSlot end) override {
      // This should never happen, because we don't use snapshotting for
      // objects which contain weak references.
      UNREACHABLE();
    }

    void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
      // This should never happen, because snapshotting is performed only on
      // JSObjects (and derived classes).
      UNREACHABLE();
    }

    void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
      // This should never happen, because snapshotting is performed only on
      // JSObjects (and derived classes).
      UNREACHABLE();
    }

    void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
                                 ObjectSlot end) override {
      DCHECK(host.IsWeakCell() || host.IsJSWeakRef());
    }

   private:
    SlotSnapshot* slot_snapshot_;
  };

  template <typename T>
  int VisitJSObjectSubclassFast(Map map, T object) {
    using TBodyDescriptor = typename T::FastBodyDescriptor;
    return VisitJSObjectSubclass<T, TBodyDescriptor>(map, object);
  }

  template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
  int VisitJSObjectSubclass(Map map, T object) {
    if (!ShouldVisit(object)) return 0;
    int size = TBodyDescriptor::SizeOf(map, object);
    int used_size = map.UsedInstanceSize();
    DCHECK_LE(used_size, size);
    DCHECK_GE(used_size, JSObject::GetHeaderSize(map));
    this->VisitMapPointer(object);
    // It is important to visit only the used field and ignore the slack fields
    // because the slack fields may be trimmed concurrently.
    TBodyDescriptor::IterateBody(map, object, used_size, this);
    return size;
  }

  template <typename T>
  int VisitLeftTrimmableArray(Map map, T object) {
    // The length() function checks that the length is a Smi.
    // This is not necessarily the case if the array is being left-trimmed.
    Object length = object.unchecked_length(kAcquireLoad);
    if (!ShouldVisit(object)) return 0;
    // The cached length must be the actual length as the array is not black.
    // Left trimming marks the array black before over-writing the length.
    DCHECK(length.IsSmi());
    int size = T::SizeFor(Smi::ToInt(length));
    VisitMapPointer(object);
    T::BodyDescriptor::IterateBody(map, object, size, this);
    return size;
  }

  void VisitPointersInSnapshot(HeapObject host, const SlotSnapshot& snapshot) {
    for (int i = 0; i < snapshot.number_of_slots(); i++) {
      ObjectSlot slot = snapshot.slot(i);
      Object object = snapshot.value(i);
      DCHECK(!HasWeakHeapObjectTag(object));
      if (!object.IsHeapObject()) continue;
      HeapObject heap_object = HeapObject::cast(object);
      MarkObject(host, heap_object);
      RecordSlot(host, slot, heap_object);
    }
  }

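  // Visits an object by first copying all of its in-object slots into a
  // snapshot and then marking only from that snapshot, so the visit is not
  // confused by the main thread concurrently rewriting the object (e.g. when
  // transitioning a string in place; see the Cast specializations below).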
  template <typename T>
  int VisitFullyWithSnapshot(Map map, T object) {
    using TBodyDescriptor = typename T::BodyDescriptor;
    int size = TBodyDescriptor::SizeOf(map, object);
    const SlotSnapshot& snapshot =
        MakeSlotSnapshot<T, TBodyDescriptor>(map, object, size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  template <typename T, typename TBodyDescriptor>
  const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
    SlotSnapshottingVisitor visitor(&slot_snapshot_, cage_base(),
                                    code_cage_base());
    visitor.VisitPointer(object, object.map_slot());
    TBodyDescriptor::IterateBody(map, object, size, &visitor);
    return slot_snapshot_;
  }

  template <typename TSlot>
  void RecordSlot(HeapObject object, TSlot slot, HeapObject target) {
    MarkCompactCollector::RecordSlot(object, slot, target);
  }

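  // Typed slots are collected per memory chunk and merged into the old-to-old
  // remembered set on the main thread in FlushMemoryChunkData().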
  void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
    MarkCompactCollector::RecordRelocSlotInfo info =
        MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
    if (info.should_record) {
      MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
      if (!data.typed_slots) {
        data.typed_slots.reset(new TypedSlots());
      }
      data.typed_slots->Insert(info.slot_type, info.offset);
    }
  }

  void SynchronizePageAccess(HeapObject heap_object) {
#ifdef THREAD_SANITIZER
    // This is needed because TSAN does not process the memory fence
    // emitted after page initialization.
    BasicMemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
#endif
  }

  ConcurrentMarkingState* marking_state() { return &marking_state_; }

  TraceRetainingPathMode retaining_path_mode() {
    return TraceRetainingPathMode::kDisabled;
  }

  ConcurrentMarkingState marking_state_;
  MemoryChunkDataMap* memory_chunk_data_;
  SlotSnapshot slot_snapshot_;

  friend class MarkingVisitorBase<ConcurrentMarkingVisitor,
                                  ConcurrentMarkingState>;
};

// Strings can change maps due to conversion to thin strings or external
// strings. Use unchecked casts to avoid data races in slow DCHECKs.
template <>
ConsString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return ConsString::unchecked_cast(object);
}

template <>
SlicedString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SlicedString::unchecked_cast(object);
}

template <>
ThinString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return ThinString::unchecked_cast(object);
}

template <>
SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SeqOneByteString::unchecked_cast(object);
}

template <>
SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return SeqTwoByteString::unchecked_cast(object);
}

// Fixed arrays can become free space during left trimming.
template <>
FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return FixedArray::unchecked_cast(object);
}

// The Deserializer changes the map from StrongDescriptorArray to
// DescriptorArray.
template <>
StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
  return StrongDescriptorArray::unchecked_cast(DescriptorArray::cast(object));
}

class ConcurrentMarking::JobTask : public v8::JobTask {
 public:
  JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
          base::EnumSet<CodeFlushMode> code_flush_mode,
          bool should_keep_ages_unchanged)
      : concurrent_marking_(concurrent_marking),
        mark_compact_epoch_(mark_compact_epoch),
        code_flush_mode_(code_flush_mode),
        should_keep_ages_unchanged_(should_keep_ages_unchanged) {}

  ~JobTask() override = default;
  JobTask(const JobTask&) = delete;
  JobTask& operator=(const JobTask&) = delete;

  // v8::JobTask overrides.
  void Run(JobDelegate* delegate) override {
    if (delegate->IsJoiningThread()) {
      // TRACE_GC is not needed here because the caller opens the right scope.
      concurrent_marking_->Run(delegate, code_flush_mode_, mark_compact_epoch_,
                               should_keep_ages_unchanged_);
    } else {
      TRACE_GC_EPOCH(concurrent_marking_->heap_->tracer(),
                     GCTracer::Scope::MC_BACKGROUND_MARKING,
                     ThreadKind::kBackground);
      concurrent_marking_->Run(delegate, code_flush_mode_, mark_compact_epoch_,
                               should_keep_ages_unchanged_);
    }
  }

  size_t GetMaxConcurrency(size_t worker_count) const override {
    return concurrent_marking_->GetMaxConcurrency(worker_count);
  }

 private:
  ConcurrentMarking* concurrent_marking_;
  const unsigned mark_compact_epoch_;
  base::EnumSet<CodeFlushMode> code_flush_mode_;
  const bool should_keep_ages_unchanged_;
};

ConcurrentMarking::ConcurrentMarking(Heap* heap,
                                     MarkingWorklists* marking_worklists,
                                     WeakObjects* weak_objects)
    : heap_(heap),
      marking_worklists_(marking_worklists),
      weak_objects_(weak_objects) {
#ifndef V8_ATOMIC_MARKING_STATE
  // Concurrent and parallel marking require atomic marking state.
  CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
#endif
#ifndef V8_ATOMIC_OBJECT_FIELD_WRITES
  // Concurrent marking requires atomic object field writes.
  CHECK(!FLAG_concurrent_marking);
#endif
}

void ConcurrentMarking::Run(JobDelegate* delegate,
                            base::EnumSet<CodeFlushMode> code_flush_mode,
                            unsigned mark_compact_epoch,
                            bool should_keep_ages_unchanged) {
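  // Bound the amount of marking work done between checks of the job
  // delegate's ShouldYield() below.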
  size_t kBytesUntilInterruptCheck = 64 * KB;
  int kObjectsUntilInterruptCheck = 1000;
  uint8_t task_id = delegate->GetTaskId() + 1;
  TaskState* task_state = &task_state_[task_id];
  MarkingWorklists::Local local_marking_worklists(marking_worklists_);
  ConcurrentMarkingVisitor visitor(
      task_id, &local_marking_worklists, weak_objects_, heap_,
      mark_compact_epoch, code_flush_mode,
      heap_->local_embedder_heap_tracer()->InUse(), should_keep_ages_unchanged,
      &task_state->memory_chunk_data);
  NativeContextInferrer& native_context_inferrer =
      task_state->native_context_inferrer;
  NativeContextStats& native_context_stats = task_state->native_context_stats;
  double time_ms;
  size_t marked_bytes = 0;
  Isolate* isolate = heap_->isolate();
  if (FLAG_trace_concurrent_marking) {
    isolate->PrintWithTimestamp("Starting concurrent marking task %d\n",
                                task_id);
  }
  bool another_ephemeron_iteration = false;

  {
    TimedScope scope(&time_ms);

    {
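      // Retry ephemerons that were deferred in an earlier iteration because
      // their key had not been marked yet.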
      Ephemeron ephemeron;

      while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
          another_ephemeron_iteration = true;
        }
      }
    }
    bool is_per_context_mode = local_marking_worklists.IsPerContextMode();
    bool done = false;
    while (!done) {
      size_t current_marked_bytes = 0;
      int objects_processed = 0;
      while (current_marked_bytes < kBytesUntilInterruptCheck &&
             objects_processed < kObjectsUntilInterruptCheck) {
        HeapObject object;
        if (!local_marking_worklists.Pop(&object)) {
          done = true;
          break;
        }
        objects_processed++;

        Address new_space_top = kNullAddress;
        Address new_space_limit = kNullAddress;
        Address new_large_object = kNullAddress;

        if (heap_->new_space()) {
          // The order of the two loads is important.
          new_space_top = heap_->new_space()->original_top_acquire();
          new_space_limit = heap_->new_space()->original_limit_relaxed();
        }

        if (heap_->new_lo_space()) {
          new_large_object = heap_->new_lo_space()->pending_object();
        }

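        // Freshly allocated new-space objects (above the original top, or the
        // pending new large object) may still be initialized by the mutator,
        // so they are deferred to the on-hold worklist instead of being
        // visited here.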
        Address addr = object.address();

        if ((new_space_top <= addr && addr < new_space_limit) ||
            addr == new_large_object) {
          local_marking_worklists.PushOnHold(object);
        } else {
          Map map = object.map(isolate, kAcquireLoad);
          if (is_per_context_mode) {
            Address context;
            if (native_context_inferrer.Infer(isolate, map, object, &context)) {
              local_marking_worklists.SwitchToContext(context);
            }
          }
          size_t visited_size = visitor.Visit(map, object);
          if (is_per_context_mode) {
            native_context_stats.IncrementSize(
                local_marking_worklists.Context(), map, object, visited_size);
          }
          current_marked_bytes += visited_size;
        }
      }
      if (objects_processed > 0) another_ephemeron_iteration = true;
      marked_bytes += current_marked_bytes;
      base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                marked_bytes);
      if (delegate->ShouldYield()) {
        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                     "ConcurrentMarking::Run Preempted");
        break;
      }
    }

    if (done) {
      Ephemeron ephemeron;

      while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
          another_ephemeron_iteration = true;
        }
      }
    }

    local_marking_worklists.Publish();
    weak_objects_->transition_arrays.FlushToGlobal(task_id);
    weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
    weak_objects_->current_ephemerons.FlushToGlobal(task_id);
    weak_objects_->next_ephemerons.FlushToGlobal(task_id);
    weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
    weak_objects_->weak_references.FlushToGlobal(task_id);
    weak_objects_->js_weak_refs.FlushToGlobal(task_id);
    weak_objects_->weak_cells.FlushToGlobal(task_id);
    weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
    weak_objects_->code_flushing_candidates.FlushToGlobal(task_id);
    weak_objects_->baseline_flushing_candidates.FlushToGlobal(task_id);
    weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
    base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
    total_marked_bytes_ += marked_bytes;

    if (another_ephemeron_iteration) {
      set_another_ephemeron_iteration(true);
    }
  }
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Task %d concurrently marked %dKB in %.2fms\n", task_id,
        static_cast<int>(marked_bytes / KB), time_ms);
  }
}

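// Estimates how many workers are useful from the amount of pending marking
// work, capped at kMaxTasks. worker_count is added so that workers which may
// still hold unpublished local work are not asked to stop.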
size_t ConcurrentMarking::GetMaxConcurrency(size_t worker_count) {
  size_t marking_items = marking_worklists_->shared()->Size();
  for (auto& worklist : marking_worklists_->context_worklists())
    marking_items += worklist.worklist->Size();
  return std::min<size_t>(
      kMaxTasks,
      worker_count + std::max<size_t>(
                         {marking_items,
                          weak_objects_->discovered_ephemerons.GlobalPoolSize(),
                          weak_objects_->current_ephemerons.GlobalPoolSize()}));
}

void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  DCHECK(!heap_->IsTearingDown());
  DCHECK(!job_handle_ || !job_handle_->IsValid());

  job_handle_ = V8::GetCurrentPlatform()->PostJob(
      priority, std::make_unique<JobTask>(
                    this, heap_->mark_compact_collector()->epoch(),
                    heap_->mark_compact_collector()->code_flush_mode(),
                    heap_->ShouldCurrentGCKeepAgesUnchanged()));
  DCHECK(job_handle_->IsValid());
}

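// Does nothing if the heap is tearing down or there is no marking work.
// Otherwise starts a new job, or notifies the running one that more work (and
// possibly a higher priority) is available.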
void ConcurrentMarking::RescheduleJobIfNeeded(TaskPriority priority) {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  if (heap_->IsTearingDown()) return;

  if (marking_worklists_->shared()->IsEmpty() &&
      weak_objects_->current_ephemerons.IsGlobalPoolEmpty() &&
      weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
    return;
  }
  if (!job_handle_ || !job_handle_->IsValid()) {
    ScheduleJob(priority);
  } else {
    if (priority != TaskPriority::kUserVisible)
      job_handle_->UpdatePriority(priority);
    job_handle_->NotifyConcurrencyIncrease();
  }
}

void ConcurrentMarking::Join() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  if (!job_handle_ || !job_handle_->IsValid()) return;
  job_handle_->Join();
}

bool ConcurrentMarking::Pause() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  if (!job_handle_ || !job_handle_->IsValid()) return false;

  job_handle_->Cancel();
  return true;
}

bool ConcurrentMarking::IsStopped() {
  if (!FLAG_concurrent_marking) return true;

  return !job_handle_ || !job_handle_->IsValid();
}

void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
  DCHECK(!job_handle_ || !job_handle_->IsValid());
  for (int i = 1; i <= kMaxTasks; i++) {
    main_stats->Merge(task_state_[i].native_context_stats);
    task_state_[i].native_context_stats.Clear();
  }
}

void ConcurrentMarking::FlushMemoryChunkData(
    MajorNonAtomicMarkingState* marking_state) {
  DCHECK(!job_handle_ || !job_handle_->IsValid());
  for (int i = 1; i <= kMaxTasks; i++) {
    MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
    for (auto& pair : memory_chunk_data) {
      // ClearLiveness sets the live bytes to zero.
      // Pages with zero live bytes might be already unmapped.
      MemoryChunk* memory_chunk = pair.first;
      MemoryChunkData& data = pair.second;
      if (data.live_bytes) {
        marking_state->IncrementLiveBytes(memory_chunk, data.live_bytes);
      }
      if (data.typed_slots) {
        RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
                                              std::move(data.typed_slots));
      }
    }
    memory_chunk_data.clear();
    task_state_[i].marked_bytes = 0;
  }
  total_marked_bytes_ = 0;
}

void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
  DCHECK(!job_handle_ || !job_handle_->IsValid());
  for (int i = 1; i <= kMaxTasks; i++) {
    auto it = task_state_[i].memory_chunk_data.find(chunk);
    if (it != task_state_[i].memory_chunk_data.end()) {
      it->second.live_bytes = 0;
      it->second.typed_slots.reset();
    }
  }
}

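// Sums the per-task byte counters (updated with relaxed atomics by running
// tasks) plus the bytes already folded into total_marked_bytes_.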
size_t ConcurrentMarking::TotalMarkedBytes() {
  size_t result = 0;
  for (int i = 1; i <= kMaxTasks; i++) {
    result +=
        base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
  }
  result += total_marked_bytes_;
  return result;
}

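// Stops concurrent marking for the lifetime of the scope and resumes it on
// destruction if it had actually been running.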
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
    : concurrent_marking_(concurrent_marking),
      resume_on_exit_(FLAG_concurrent_marking && concurrent_marking_->Pause()) {
  DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}

ConcurrentMarking::PauseScope::~PauseScope() {
  if (resume_on_exit_) concurrent_marking_->RescheduleJobIfNeeded();
}

}  // namespace internal
}  // namespace v8