/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "third_party/blink/renderer/platform/heap/heap.h"

#include <algorithm>
#include <limits>
#include <memory>

#include "base/trace_event/process_memory_dump.h"
#include "third_party/blink/public/common/features.h"
#include "third_party/blink/public/platform/platform.h"
#include "third_party/blink/renderer/platform/bindings/script_forbidden_scope.h"
#include "third_party/blink/renderer/platform/bindings/trace_wrapper_v8_reference.h"
#include "third_party/blink/renderer/platform/heap/blink_gc_memory_dump_provider.h"
#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
#include "third_party/blink/renderer/platform/heap/impl/heap_compact.h"
#include "third_party/blink/renderer/platform/heap/impl/marking_scheduling_oracle.h"
#include "third_party/blink/renderer/platform/heap/impl/marking_visitor.h"
#include "third_party/blink/renderer/platform/heap/impl/page_bloom_filter.h"
#include "third_party/blink/renderer/platform/heap/impl/page_memory.h"
#include "third_party/blink/renderer/platform/heap/impl/page_pool.h"
#include "third_party/blink/renderer/platform/heap/thread_state_scopes.h"
#include "third_party/blink/renderer/platform/heap/unified_heap_marking_visitor.h"
#include "third_party/blink/renderer/platform/instrumentation/histogram.h"
#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
#include "third_party/blink/renderer/platform/instrumentation/tracing/web_memory_allocator_dump.h"
#include "third_party/blink/renderer/platform/instrumentation/tracing/web_process_memory_dump.h"
#include "third_party/blink/renderer/platform/wtf/allocator/partitions.h"
#include "third_party/blink/renderer/platform/wtf/assertions.h"
#include "third_party/blink/renderer/platform/wtf/leak_annotations.h"

namespace blink {

HeapAllocHooks::AllocationHook* HeapAllocHooks::allocation_hook_ = nullptr;
HeapAllocHooks::FreeHook* HeapAllocHooks::free_hook_ = nullptr;

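// Forwards per-thread heap statistics to the process-wide counters in
// ProcessHeap. |prev_incremented_| remembers how much this thread has
// contributed to the process-wide allocated-object size so that
// ResetAllocatedObjectSize() can retract the previous contribution before
// recording the new one.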
class ProcessHeapReporter final : public ThreadHeapStatsObserver {
 public:
  void IncreaseAllocatedSpace(size_t bytes) final {
    ProcessHeap::IncreaseTotalAllocatedSpace(bytes);
  }

  void DecreaseAllocatedSpace(size_t bytes) final {
    ProcessHeap::DecreaseTotalAllocatedSpace(bytes);
  }

  void ResetAllocatedObjectSize(size_t bytes) final {
    ProcessHeap::DecreaseTotalAllocatedObjectSize(prev_incremented_);
    ProcessHeap::IncreaseTotalAllocatedObjectSize(bytes);
    prev_incremented_ = bytes;
  }

  void IncreaseAllocatedObjectSize(size_t bytes) final {
    ProcessHeap::IncreaseTotalAllocatedObjectSize(bytes);
    prev_incremented_ += bytes;
  }

  void DecreaseAllocatedObjectSize(size_t bytes) final {
    ProcessHeap::DecreaseTotalAllocatedObjectSize(bytes);
    prev_incremented_ -= bytes;
  }

 private:
  size_t prev_incremented_ = 0;
};

ThreadHeap::ThreadHeap(ThreadState* thread_state)
    : thread_state_(thread_state),
      heap_stats_collector_(std::make_unique<ThreadHeapStatsCollector>()),
      region_tree_(std::make_unique<RegionTree>()),
      page_bloom_filter_(std::make_unique<PageBloomFilter>()),
      free_page_pool_(std::make_unique<PagePool>()),
      process_heap_reporter_(std::make_unique<ProcessHeapReporter>()) {
  if (ThreadState::Current()->IsMainThread())
    main_thread_heap_ = this;

  for (int arena_index = 0; arena_index < BlinkGC::kLargeObjectArenaIndex;
       arena_index++) {
    arenas_[arena_index] = new NormalPageArena(thread_state_, arena_index);
  }
  arenas_[BlinkGC::kLargeObjectArenaIndex] =
      new LargeObjectArena(thread_state_, BlinkGC::kLargeObjectArenaIndex);

  stats_collector()->RegisterObserver(process_heap_reporter_.get());
}

ThreadHeap::~ThreadHeap() {
  for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i)
    delete arenas_[i];
}

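// Conservatively marks |address| if it points into a page owned by this heap.
// In non-DCHECK builds the page bloom filter is used to quickly reject
// addresses that cannot be on the heap; DCHECK builds skip that early-out,
// always perform the full page lookup, and additionally verify the filter.
// Returns |address| if a containing page was found and nullptr otherwise.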
Address ThreadHeap::CheckAndMarkPointer(MarkingVisitor* visitor,
                                        Address address) {
  DCHECK(thread_state_->InAtomicMarkingPause());

#if !DCHECK_IS_ON()
  if (!page_bloom_filter_->MayContain(address)) {
    return nullptr;
  }
#endif

  if (BasePage* page = LookupPageForAddress(address)) {
#if DCHECK_IS_ON()
    DCHECK(page->Contains(address));
#endif
    DCHECK(page_bloom_filter_->MayContain(address));
    DCHECK(&visitor->Heap() == &page->Arena()->GetThreadState()->Heap());
    visitor->ConservativelyMarkAddress(page, address);
    return address;
  }

  return nullptr;
}

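// Re-traces old (already marked) objects recorded in the remembered sets
// (card tables for normal pages, remembered pages for large objects) so that
// the references they hold into the young generation are visited during a
// young-generation collection.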
void ThreadHeap::VisitRememberedSets(MarkingVisitor* visitor) {
  static_assert(BlinkGC::kLargeObjectArenaIndex + 1 == BlinkGC::kNumberOfArenas,
                "LargeObject arena must be the last one.");
  const auto visit_header = [visitor](HeapObjectHeader* header) {
    // Process only old objects.
    if (header->IsOld<HeapObjectHeader::AccessMode::kNonAtomic>()) {
      // The design of the young generation requires collections to be
      // executed at the top level (with the guarantee that no objects are
      // currently in construction). This can be ensured by running young GCs
      // from safe points or by reintroducing nested allocation scopes that
      // avoid finalization.
      DCHECK(header->IsMarked());
      DCHECK(!header->IsInConstruction());
      const GCInfo& gc_info = GCInfo::From(header->GcInfoIndex());
      gc_info.trace(visitor, header->Payload());
    }
  };
  for (size_t i = 0; i < BlinkGC::kLargeObjectArenaIndex; ++i) {
    static_cast<NormalPageArena*>(arenas_[i])
        ->IterateAndClearCardTables(visit_header);
  }
  static_cast<LargeObjectArena*>(arenas_[BlinkGC::kLargeObjectArenaIndex])
      ->IterateAndClearRememberedPages(visit_header);
}

void ThreadHeap::SetupWorklists(bool should_initialize_compaction_worklists) {
  marking_worklist_ = std::make_unique<MarkingWorklist>();
  write_barrier_worklist_ = std::make_unique<WriteBarrierWorklist>();
  not_fully_constructed_worklist_ =
      std::make_unique<NotFullyConstructedWorklist>();
  previously_not_fully_constructed_worklist_ =
      std::make_unique<NotFullyConstructedWorklist>();
  weak_callback_worklist_ = std::make_unique<WeakCallbackWorklist>();
  discovered_ephemeron_pairs_worklist_ =
      std::make_unique<EphemeronPairsWorklist>();
  ephemeron_pairs_to_process_worklist_ =
      std::make_unique<EphemeronPairsWorklist>();
  v8_references_worklist_ = std::make_unique<V8ReferencesWorklist>();
  not_safe_to_concurrently_trace_worklist_ =
      std::make_unique<NotSafeToConcurrentlyTraceWorklist>();
  weak_containers_worklist_ = std::make_unique<WeakContainersWorklist>();
  if (should_initialize_compaction_worklists) {
    movable_reference_worklist_ = std::make_unique<MovableReferenceWorklist>();
  }
}

void ThreadHeap::DestroyMarkingWorklists(BlinkGC::StackState stack_state) {
  marking_worklist_.reset();
  write_barrier_worklist_.reset();
  previously_not_fully_constructed_worklist_.reset();
  weak_callback_worklist_.reset();
  ephemeron_pairs_to_process_worklist_.reset();
  v8_references_worklist_.reset();
  not_safe_to_concurrently_trace_worklist_.reset();
  weak_containers_worklist_.reset();
  // The fixed-point iteration may have found not-fully-constructed objects.
  // Such objects should already have been found through the stack scan,
  // though, and should thus already be marked.
  //
  // Possible reasons for encountering unmarked objects here:
  // - Object is not allocated through MakeGarbageCollected.
  // - Broken stack (roots) scanning.
  if (!not_fully_constructed_worklist_->IsGlobalEmpty()) {
#if DCHECK_IS_ON()
    const bool conservative_gc =
        BlinkGC::StackState::kHeapPointersOnStack == stack_state;
    NotFullyConstructedItem item;
    while (not_fully_constructed_worklist_->Pop(WorklistTaskId::MutatorThread,
                                                &item)) {
      HeapObjectHeader* const header = HeapObjectHeader::FromInnerAddress(
          reinterpret_cast<Address>(const_cast<void*>(item)));
      DCHECK(conservative_gc && header->IsMarked())
          << " conservative: " << (conservative_gc ? "yes" : "no")
          << " type: " << header->Name();
    }
#else
    not_fully_constructed_worklist_->Clear();
#endif
  }
  not_fully_constructed_worklist_.reset();

  // |discovered_ephemeron_pairs_worklist_| may still hold ephemeron pairs with
  // dead keys.
  if (!discovered_ephemeron_pairs_worklist_->IsGlobalEmpty()) {
#if DCHECK_IS_ON()
    EphemeronPairItem item;
    while (discovered_ephemeron_pairs_worklist_->Pop(
        WorklistTaskId::MutatorThread, &item)) {
      const HeapObjectHeader* const header = HeapObjectHeader::FromInnerAddress(
          reinterpret_cast<ConstAddress>(item.key));
      DCHECK(!header->IsMarked());
    }
#else
    discovered_ephemeron_pairs_worklist_->Clear();
#endif
  }
  discovered_ephemeron_pairs_worklist_.reset();
}

void ThreadHeap::DestroyCompactionWorklists() {
  movable_reference_worklist_.reset();
}

HeapCompact* ThreadHeap::Compaction() {
  if (!compaction_)
    compaction_ = std::make_unique<HeapCompact>(this);
  return compaction_.get();
}

bool ThreadHeap::ShouldRegisterMovingAddress() {
  return Compaction()->ShouldRegisterMovingAddress();
}

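// Publishes the mutator's local view of objects that were discovered while
// still under construction to the global pool and moves them to
// |previously_not_fully_constructed_worklist_|, from where a later marking
// step re-adds them with their proper trace callbacks.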
void ThreadHeap::FlushNotFullyConstructedObjects() {
  NotFullyConstructedWorklist::View view(not_fully_constructed_worklist_.get(),
                                         WorklistTaskId::MutatorThread);
  if (!view.IsLocalViewEmpty()) {
    view.FlushToGlobal();
    previously_not_fully_constructed_worklist_->MergeGlobalPool(
        not_fully_constructed_worklist_.get());
  }
  DCHECK(view.IsLocalViewEmpty());
}

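// Moves locally discovered ephemeron pairs to the global
// |ephemeron_pairs_to_process_worklist_|. With kPartialProcessing this is
// throttled to at most once every kStepsBeforeEphemeronPairsFlush marking
// steps.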
void ThreadHeap::FlushEphemeronPairs(EphemeronProcessing ephemeron_processing) {
  if (ephemeron_processing == EphemeronProcessing::kPartialProcessing) {
    if (steps_since_last_ephemeron_pairs_flush_ <
        kStepsBeforeEphemeronPairsFlush)
      return;
  }

  ThreadHeapStatsCollector::EnabledScope stats_scope(
      stats_collector(), ThreadHeapStatsCollector::kMarkFlushEphemeronPairs);

  EphemeronPairsWorklist::View view(discovered_ephemeron_pairs_worklist_.get(),
                                    WorklistTaskId::MutatorThread);
  if (!view.IsLocalViewEmpty()) {
    view.FlushToGlobal();
    ephemeron_pairs_to_process_worklist_->MergeGlobalPool(
        discovered_ephemeron_pairs_worklist_.get());
  }

  steps_since_last_ephemeron_pairs_flush_ = 0;
}

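// Marks objects that were still under construction when the atomic pause was
// entered. Their trace callbacks cannot be relied upon yet, so their payloads
// are scanned conservatively instead.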
void ThreadHeap::MarkNotFullyConstructedObjects(MarkingVisitor* visitor) {
  DCHECK(!thread_state_->IsIncrementalMarking());
  ThreadHeapStatsCollector::Scope stats_scope(
      stats_collector(),
      ThreadHeapStatsCollector::kMarkNotFullyConstructedObjects);

  DCHECK_EQ(WorklistTaskId::MutatorThread, visitor->task_id());
  NotFullyConstructedItem item;
  while (not_fully_constructed_worklist_->Pop(WorklistTaskId::MutatorThread,
                                              &item)) {
    BasePage* const page = PageFromObject(item);
    visitor->ConservativelyMarkAddress(page,
                                       reinterpret_cast<ConstAddress>(item));
  }
}

namespace {

static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
static constexpr size_t kDefaultConcurrentDeadlineCheckInterval =
    5 * kDefaultDeadlineCheckInterval;

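// Drains |worklist| for |task_id|, invoking |callback| on every popped item.
// To amortize the cost of the check (reading the clock or querying the job
// delegate), |should_yield| is consulted up front and then only once every
// kDeadlineCheckInterval processed items; concurrent markers use the larger
// kDefaultConcurrentDeadlineCheckInterval. Returns true if the local view was
// drained completely and false if the loop yielded early. Typical use, as
// seen below in AdvanceMarking():
//
//   DrainWorklistWithDeadline(
//       deadline, marking_worklist_.get(),
//       [visitor](const MarkingItem& item) { /* trace the item */ },
//       WorklistTaskId::MutatorThread);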
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
          typename Worklist,
          typename Callback,
          typename YieldPredicate>
bool DrainWorklist(Worklist* worklist,
                   Callback callback,
                   YieldPredicate should_yield,
                   int task_id) {
  // For concurrent markers, should_yield also reports marked bytes.
  if (worklist->IsLocalViewEmpty(task_id))
    return true;
  if (should_yield())
    return false;
  size_t processed_callback_count = kDeadlineCheckInterval;
  typename Worklist::EntryType item;
  while (worklist->Pop(task_id, &item)) {
    callback(item);
    if (--processed_callback_count == 0) {
      if (should_yield()) {
        return false;
      }
      processed_callback_count = kDeadlineCheckInterval;
    }
  }
  return true;
}

template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
          typename Worklist,
          typename Callback>
bool DrainWorklistWithDeadline(base::TimeTicks deadline,
                               Worklist* worklist,
                               Callback callback,
                               int task_id) {
  return DrainWorklist<kDeadlineCheckInterval>(
      worklist, std::move(callback),
      [deadline]() { return deadline <= base::TimeTicks::Now(); }, task_id);
}

}  // namespace

bool ThreadHeap::InvokeEphemeronCallbacks(
    EphemeronProcessing ephemeron_processing,
    MarkingVisitor* visitor,
    base::TimeTicks deadline) {
  if (ephemeron_processing == EphemeronProcessing::kPartialProcessing) {
    if (steps_since_last_ephemeron_processing_ <
        kStepsBeforeEphemeronProcessing) {
      // Returning "no more work" to avoid excessive processing. The fixed
      // point computation in the atomic pause takes care of correctness.
      return true;
    }
  }

  FlushEphemeronPairs(EphemeronProcessing::kFullProcessing);

  steps_since_last_ephemeron_processing_ = 0;

  // Mark any strong pointers that have now become reachable in ephemeron maps.
  ThreadHeapStatsCollector::EnabledScope stats_scope(
      stats_collector(),
      ThreadHeapStatsCollector::kMarkInvokeEphemeronCallbacks);

  DCHECK_EQ(WorklistTaskId::MutatorThread, visitor->task_id());

  // Then iterate over the new callbacks found by the marking visitor.
  // Callbacks found by concurrent marking will eventually be flushed and
  // then invoked by the mutator thread (in the atomic pause at the latest).
  return DrainWorklistWithDeadline(
      deadline, ephemeron_pairs_to_process_worklist_.get(),
      [visitor](EphemeronPairItem& item) {
        visitor->VisitEphemeron(item.key, item.value_desc);
      },
      WorklistTaskId::MutatorThread);
}

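// Advances marking on the mutator thread until |deadline|. Worklists are
// processed in order: bail-out objects that are not safe to trace
// concurrently, V8 references, previously-not-fully-constructed objects, the
// regular marking worklist, the write-barrier worklist, and finally ephemeron
// callbacks. The loop repeats until the marking worklist stays empty; returns
// false if the deadline was hit before all worklists were drained.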
bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
                                base::TimeTicks deadline,
                                EphemeronProcessing ephemeron_processing) {
  DCHECK_EQ(WorklistTaskId::MutatorThread, visitor->task_id());

  ++steps_since_last_ephemeron_pairs_flush_;
  ++steps_since_last_ephemeron_processing_;

  bool finished;
  bool processed_ephemerons = false;
  FlushEphemeronPairs(ephemeron_processing);
  // Ephemeron fixed point loop.
  do {
    {
      // Iteratively mark all objects that are reachable from the objects
      // currently pushed onto the marking worklist.
      ThreadHeapStatsCollector::EnabledScope stats_scope(
          stats_collector(), ThreadHeapStatsCollector::kMarkProcessWorklists);

      // Start with mutator-thread-only worklists (not fully constructed).
      // If time runs out, concurrent markers can take care of the rest.

      {
        ThreadHeapStatsCollector::EnabledScope inner_scope(
            stats_collector(), ThreadHeapStatsCollector::kMarkBailOutObjects);
        // Items in the bailout worklist are only collection backing stores.
        // These items could take a long time to process, so we should check
        // the deadline more often (backing stores and large items can also be
        // found in the regular marking worklist, but those are interleaved
        // with smaller objects).
        finished = DrainWorklistWithDeadline<kDefaultDeadlineCheckInterval / 5>(
            deadline, not_safe_to_concurrently_trace_worklist_.get(),
            [visitor](const NotSafeToConcurrentlyTraceItem& item) {
              item.desc.callback(visitor, item.desc.base_object_payload);
              visitor->AccountMarkedBytes(item.bailout_size);
            },
            WorklistTaskId::MutatorThread);
        if (!finished)
          break;
      }

      {
        ThreadHeapStatsCollector::EnabledScope inner_scope(
            stats_collector(),
            ThreadHeapStatsCollector::kMarkFlushV8References);
        finished = FlushV8References(deadline);
        if (!finished)
          break;
      }

      {
        ThreadHeapStatsCollector::EnabledScope inner_scope(
            stats_collector(),
            ThreadHeapStatsCollector::kMarkProcessNotFullyconstructeddWorklist);
        // Convert |previously_not_fully_constructed_worklist_| to
        // |marking_worklist_|. This merely re-adds items with the proper
        // callbacks.
        finished = DrainWorklistWithDeadline(
            deadline, previously_not_fully_constructed_worklist_.get(),
            [visitor](NotFullyConstructedItem& item) {
              visitor->DynamicallyMarkAddress(
                  reinterpret_cast<ConstAddress>(item));
            },
            WorklistTaskId::MutatorThread);
        if (!finished)
          break;
      }

      {
        ThreadHeapStatsCollector::EnabledScope inner_scope(
            stats_collector(),
            ThreadHeapStatsCollector::kMarkProcessMarkingWorklist);
        finished = DrainWorklistWithDeadline(
            deadline, marking_worklist_.get(),
            [visitor](const MarkingItem& item) {
              HeapObjectHeader* header =
                  HeapObjectHeader::FromPayload(item.base_object_payload);
              DCHECK(!header->IsInConstruction());
              item.callback(visitor, item.base_object_payload);
              visitor->AccountMarkedBytes(header);
            },
            WorklistTaskId::MutatorThread);
        if (!finished)
          break;
      }

      {
        ThreadHeapStatsCollector::EnabledScope inner_scope(
            stats_collector(),
            ThreadHeapStatsCollector::kMarkProcessWriteBarrierWorklist);
        finished = DrainWorklistWithDeadline(
            deadline, write_barrier_worklist_.get(),
            [visitor](HeapObjectHeader* header) {
              DCHECK(!header->IsInConstruction());
              GCInfo::From(header->GcInfoIndex())
                  .trace(visitor, header->Payload());
              visitor->AccountMarkedBytes(header);
            },
            WorklistTaskId::MutatorThread);
        if (!finished)
          break;
      }
    }

    if ((ephemeron_processing == EphemeronProcessing::kFullProcessing) ||
        !processed_ephemerons) {
      processed_ephemerons = true;
      finished =
          InvokeEphemeronCallbacks(ephemeron_processing, visitor, deadline);
      if (!finished)
        break;
    }

    // Rerun loop if ephemeron processing queued more objects for tracing.
  } while (!marking_worklist_->IsLocalViewEmpty(WorklistTaskId::MutatorThread));

  return finished;
}

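// Returns true while any of the globally shared worklists still holds work
// that concurrent markers may pick up.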
bool ThreadHeap::HasWorkForConcurrentMarking() const {
  return !marking_worklist_->IsGlobalPoolEmpty() ||
         !write_barrier_worklist_->IsGlobalPoolEmpty() ||
         !previously_not_fully_constructed_worklist_->IsGlobalPoolEmpty() ||
         !ephemeron_pairs_to_process_worklist_->IsGlobalPoolEmpty();
}

size_t ThreadHeap::ConcurrentMarkingGlobalWorkSize() const {
  return marking_worklist_->GlobalPoolSize() +
         write_barrier_worklist_->GlobalPoolSize() +
         previously_not_fully_constructed_worklist_->GlobalPoolSize() +
         ephemeron_pairs_to_process_worklist_->GlobalPoolSize();
}

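// Concurrent counterpart of AdvanceMarking(): drains only the worklists that
// are safe to process off the mutator thread. The yield callback reports the
// bytes marked so far to the scheduling oracle before asking the job delegate
// whether to yield.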
bool ThreadHeap::AdvanceConcurrentMarking(
    ConcurrentMarkingVisitor* visitor,
    base::JobDelegate* delegate,
    MarkingSchedulingOracle* marking_scheduler) {
  auto should_yield_callback = [marking_scheduler, visitor, delegate]() {
    marking_scheduler->AddConcurrentlyMarkedBytes(
        visitor->RecentlyMarkedBytes());
    return delegate->ShouldYield();
  };
  bool finished;
  do {
    // Convert |previously_not_fully_constructed_worklist_| to
    // |marking_worklist_|. This merely re-adds items with the proper
    // callbacks.
    finished = DrainWorklist<kDefaultConcurrentDeadlineCheckInterval>(
        previously_not_fully_constructed_worklist_.get(),
        [visitor](NotFullyConstructedItem& item) {
          visitor->DynamicallyMarkAddress(reinterpret_cast<ConstAddress>(item));
        },
        should_yield_callback, visitor->task_id());
    if (!finished)
      break;

    // Iteratively mark all objects that are reachable from the objects
    // currently pushed onto the marking worklist.
    finished = DrainWorklist<kDefaultConcurrentDeadlineCheckInterval>(
        marking_worklist_.get(),
        [visitor](const MarkingItem& item) {
          HeapObjectHeader* header =
              HeapObjectHeader::FromPayload(item.base_object_payload);
          PageFromObject(header)->SynchronizedLoad();
          DCHECK(
              !header
                   ->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>());
          item.callback(visitor, item.base_object_payload);
          visitor->AccountMarkedBytes(header);
        },
        should_yield_callback, visitor->task_id());
    if (!finished)
      break;

    finished = DrainWorklist<kDefaultConcurrentDeadlineCheckInterval>(
        write_barrier_worklist_.get(),
        [visitor](HeapObjectHeader* header) {
          PageFromObject(header)->SynchronizedLoad();
          DCHECK(
              !header
                   ->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>());
          GCInfo::From(header->GcInfoIndex()).trace(visitor, header->Payload());
          visitor->AccountMarkedBytes(header);
        },
        should_yield_callback, visitor->task_id());
    if (!finished)
      break;

    {
      ThreadHeapStatsCollector::ConcurrentScope stats_scope(
          stats_collector(),
          ThreadHeapStatsCollector::kConcurrentMarkInvokeEphemeronCallbacks);

      // Then iterate over the new ephemerons found by the marking visitor.
      // Callbacks found by concurrent marking will eventually be flushed by
      // the mutator thread and then invoked either concurrently or by the
      // mutator thread (in the atomic pause at the latest).
      finished = DrainWorklist<kDefaultConcurrentDeadlineCheckInterval>(
          ephemeron_pairs_to_process_worklist_.get(),
          [visitor](EphemeronPairItem& item) {
            visitor->VisitEphemeron(item.key, item.value_desc);
          },
          should_yield_callback, visitor->task_id());
      if (!finished)
        break;
    }

  } while (HasWorkForConcurrentMarking());

  return finished;
}

void ThreadHeap::WeakProcessing(MarkingVisitor* visitor) {
  ThreadHeapStatsCollector::Scope stats_scope(
      stats_collector(), ThreadHeapStatsCollector::kMarkWeakProcessing);

  // Weak processing may access unmarked objects but is forbidden from
  // resurrecting them or allocating new ones.
  ThreadState::NoAllocationScope allocation_forbidden(ThreadState::Current());

  DCHECK_EQ(WorklistTaskId::MutatorThread, visitor->task_id());

  // Call weak callbacks on objects that may now be pointing to dead objects.
  CustomCallbackItem item;
  LivenessBroker broker = internal::LivenessBrokerFactory::Create();
  while (weak_callback_worklist_->Pop(WorklistTaskId::MutatorThread, &item)) {
    item.callback(broker, item.parameter);
  }
  // Weak callbacks should not add any new objects for marking.
  DCHECK(marking_worklist_->IsGlobalEmpty());
}

void ThreadHeap::VerifyMarking() {
  for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) {
    arenas_[i]->VerifyMarking();
  }
}

size_t ThreadHeap::ObjectPayloadSizeForTesting() {
  ThreadState::AtomicPauseScope atomic_pause_scope(thread_state_);
  ScriptForbiddenScope script_forbidden_scope;
  size_t object_payload_size = 0;
  thread_state_->SetGCPhase(ThreadState::GCPhase::kMarking);
  thread_state_->Heap().MakeConsistentForGC();
  thread_state_->Heap().PrepareForSweep(BlinkGC::CollectionType::kMajor);
  for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i)
    object_payload_size += arenas_[i]->ObjectPayloadSizeForTesting();
  MakeConsistentForMutator();
  thread_state_->SetGCPhase(ThreadState::GCPhase::kSweeping);
  thread_state_->SetGCPhase(ThreadState::GCPhase::kNone);
  return object_payload_size;
}

void ThreadHeap::ResetAllocationPointForTesting() {
  for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i)
    arenas_[i]->ResetAllocationPoint();
}

BasePage* ThreadHeap::LookupPageForAddress(ConstAddress address) {
  if (PageMemoryRegion* region = region_tree_->Lookup(address)) {
    return region->PageFromAddress(address);
  }
  return nullptr;
}

void ThreadHeap::MakeConsistentForGC() {
  DCHECK(thread_state_->InAtomicMarkingPause());
  for (BaseArena* arena : arenas_) {
    arena->MakeConsistentForGC();
  }
}

void ThreadHeap::MakeConsistentForMutator() {
  DCHECK(thread_state_->InAtomicMarkingPause());
  for (BaseArena* arena : arenas_) {
    arena->MakeConsistentForMutator();
  }
}

void ThreadHeap::Unmark() {
  DCHECK(thread_state_->InAtomicMarkingPause());
  for (BaseArena* arena : arenas_) {
    arena->Unmark();
  }
}

void ThreadHeap::Compact() {
  if (!Compaction()->IsCompacting())
    return;

  ThreadHeapStatsCollector::Scope stats_scope(
      stats_collector(), ThreadHeapStatsCollector::kAtomicPauseCompaction);
  // Compaction is done eagerly, before the mutator threads get to run again.
  // Doing it lazily is problematic, as the mutator's references to live
  // objects could suddenly be invalidated by compaction of a page/heap. We do
  // know all the references to the relocating objects just after marking, but
  // won't know them later (e.g., stack references could have been created, or
  // new objects created that refer to old collection objects, and so on).

  // Compact the hash table backing store arena first; it usually has higher
  // fragmentation and is larger.
  for (int i = BlinkGC::kHashTableArenaIndex; i >= BlinkGC::kVectorArenaIndex;
       --i)
    static_cast<NormalPageArena*>(arenas_[i])->SweepAndCompact();
  Compaction()->Finish();
}

void ThreadHeap::PrepareForSweep(BlinkGC::CollectionType collection_type) {
  DCHECK(thread_state_->InAtomicMarkingPause());
  DCHECK(thread_state_->CheckThread());
  for (int i = 0; i < BlinkGC::kNumberOfArenas; i++)
    arenas_[i]->PrepareForSweep(collection_type);
}

void ThreadHeap::RemoveAllPages() {
  DCHECK(thread_state_->CheckThread());
  for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i)
    arenas_[i]->RemoveAllPages();
}

void ThreadHeap::CompleteSweep() {
  for (int i = 0; i < BlinkGC::kNumberOfArenas; i++)
    arenas_[i]->CompleteSweep();
}

void ThreadHeap::InvokeFinalizersOnSweptPages() {
  for (size_t i = BlinkGC::kNormalPage1ArenaIndex; i < BlinkGC::kNumberOfArenas;
       i++)
    arenas_[i]->InvokeFinalizersOnSweptPages();
}

#if defined(ADDRESS_SANITIZER)
void ThreadHeap::PoisonUnmarkedObjects() {
  // Poisoning all unmarked objects in the other arenas.
  for (int i = 1; i < BlinkGC::kNumberOfArenas; i++)
    arenas_[i]->PoisonUnmarkedObjects();
}
#endif

#if DCHECK_IS_ON()
BasePage* ThreadHeap::FindPageFromAddress(Address address) {
  for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) {
    if (BasePage* page = arenas_[i]->FindPageFromAddress(address))
      return page;
  }
  return nullptr;
}
#endif

void ThreadHeap::CollectStatistics(ThreadState::Statistics* stats) {
#define SNAPSHOT_ARENA(name)                                \
  arenas_[BlinkGC::k##name##ArenaIndex]->CollectStatistics( \
      BlinkGC::ToString(BlinkGC::k##name##ArenaIndex), stats);

  FOR_EACH_ARENA(SNAPSHOT_ARENA)
#undef SNAPSHOT_ARENA
}

bool ThreadHeap::AdvanceLazySweep(base::TimeTicks deadline) {
  static constexpr base::TimeDelta slack = base::TimeDelta::FromSecondsD(0.001);
  for (size_t i = 0; i < BlinkGC::kNumberOfArenas; i++) {
    // LazySweepWithDeadline() won't check the deadline until it has swept
    // 10 pages, so leave a small amount of slack for safety.
    const base::TimeDelta remaining_budget =
        deadline - slack - base::TimeTicks::Now();
    if (remaining_budget <= base::TimeDelta() ||
        !arenas_[i]->LazySweepWithDeadline(deadline)) {
      return false;
    }
  }
  return true;
}

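// Background sweeping step: sweeps one page at a time per arena, checking
// between pages whether the job should yield. Returns false if it yielded
// before all arenas were fully swept.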
bool ThreadHeap::AdvanceConcurrentSweep(base::JobDelegate* job) {
  for (size_t i = 0; i < BlinkGC::kNumberOfArenas; i++) {
    while (!arenas_[i]->ConcurrentSweepOnePage()) {
      if (job->ShouldYield())
        return false;
    }
  }
  return true;
}

// TODO(omerkatz): Temporary solution until concurrent marking is ready. See
// https://crrev.com/c/1730054 for details. Eventually this will be removed.
bool ThreadHeap::FlushV8References(base::TimeTicks deadline) {
  if (!thread_state_->IsUnifiedGCMarkingInProgress())
    return true;

  DCHECK(base::FeatureList::IsEnabled(
             blink::features::kBlinkHeapConcurrentMarking) ||
         v8_references_worklist_->IsGlobalEmpty());

  v8::EmbedderHeapTracer* controller =
      reinterpret_cast<v8::EmbedderHeapTracer*>(
          thread_state_->unified_heap_controller());
  return DrainWorklistWithDeadline(
      deadline, v8_references_worklist_.get(),
      [controller](const V8Reference& reference) {
        if (!reference->Get().IsEmpty()) {
          controller->RegisterEmbedderReference(
              reference->template Cast<v8::Data>().Get());
        }
      },
      WorklistTaskId::MutatorThread);
}

ThreadHeap* ThreadHeap::main_thread_heap_ = nullptr;

}  // namespace blink