// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/platform/heap/unified_heap_controller.h"

#include "base/macros.h"
#include "third_party/blink/public/common/features.h"
#include "third_party/blink/renderer/platform/bindings/dom_wrapper_world.h"
#include "third_party/blink/renderer/platform/bindings/script_forbidden_scope.h"
#include "third_party/blink/renderer/platform/bindings/script_wrappable.h"
#include "third_party/blink/renderer/platform/bindings/wrapper_type_info.h"
#include "third_party/blink/renderer/platform/heap/heap.h"
#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
#include "third_party/blink/renderer/platform/heap/impl/marking_visitor.h"
#include "third_party/blink/renderer/platform/heap/thread_state.h"
#include "third_party/blink/renderer/platform/runtime_enabled_features.h"

namespace blink {

namespace {

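// Translates V8's view of the embedder stack state into Blink's equivalent
// for Oilpan garbage collections.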
constexpr BlinkGC::StackState ToBlinkGCStackState(
    v8::EmbedderHeapTracer::EmbedderStackState stack_state) {
  return stack_state == v8::EmbedderHeapTracer::EmbedderStackState::kEmpty
             ? BlinkGC::kNoHeapPointersOnStack
             : BlinkGC::kHeapPointersOnStack;
}

}  // namespace

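// The controller registers itself as an observer of Blink's heap statistics
// so that changes to the allocated object size can be forwarded to V8.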
UnifiedHeapController::UnifiedHeapController(ThreadState* thread_state)
    : thread_state_(thread_state) {
  thread_state->Heap().stats_collector()->RegisterObserver(this);
}

UnifiedHeapController::~UnifiedHeapController() {
  thread_state_->Heap().stats_collector()->UnregisterObserver(this);
}

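// Called by V8 when a unified heap garbage collection cycle starts. Any
// in-flight Oilpan GC is finished conservatively before incremental marking
// is restarted with a reason derived from V8's trace flags.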
void UnifiedHeapController::TracePrologue(
    v8::EmbedderHeapTracer::TraceFlags v8_flags) {
  VLOG(2) << "UnifiedHeapController::TracePrologue";
  ThreadHeapStatsCollector::BlinkGCInV8Scope nested_scope(
      thread_state_->Heap().stats_collector());

  // Be conservative here as a new garbage collection gets started right away.
  thread_state_->FinishIncrementalMarkingIfRunning(
      BlinkGC::CollectionType::kMajor, BlinkGC::kHeapPointersOnStack,
      BlinkGC::kIncrementalAndConcurrentMarking,
      BlinkGC::kConcurrentAndLazySweeping,
      thread_state_->current_gc_data_.reason);

  thread_state_->SetGCState(ThreadState::kNoGCScheduled);
  BlinkGC::GCReason gc_reason;
  if (v8_flags & v8::EmbedderHeapTracer::TraceFlags::kForced) {
    gc_reason = BlinkGC::GCReason::kUnifiedHeapForcedForTestingGC;
  } else if (v8_flags & v8::EmbedderHeapTracer::TraceFlags::kReduceMemory) {
    gc_reason = BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC;
  } else {
    gc_reason = BlinkGC::GCReason::kUnifiedHeapGC;
  }
  thread_state_->StartIncrementalMarking(gc_reason);

  is_tracing_done_ = false;
}

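// Called by V8 when entering the atomic pause of a unified GC. Runs Oilpan's
// atomic-pause marking prologue and marks all roots.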
void UnifiedHeapController::EnterFinalPause(EmbedderStackState stack_state) {
  VLOG(2) << "UnifiedHeapController::EnterFinalPause";
  ThreadHeapStatsCollector::BlinkGCInV8Scope nested_scope(
      thread_state_->Heap().stats_collector());
  thread_state_->AtomicPauseMarkPrologue(
      BlinkGC::CollectionType::kMajor, ToBlinkGCStackState(stack_state),
      BlinkGC::kIncrementalAndConcurrentMarking,
      thread_state_->current_gc_data_.reason);
  thread_state_->AtomicPauseMarkRoots(ToBlinkGCStackState(stack_state),
                                      BlinkGC::kIncrementalAndConcurrentMarking,
                                      thread_state_->current_gc_data_.reason);
}

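// Called by V8 after unified marking has finished. Completes Oilpan's atomic
// pause, sweeps (eagerly for forced GCs), and reports marked bytes and
// marking time back to V8 through |summary|.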
void UnifiedHeapController::TraceEpilogue(
    v8::EmbedderHeapTracer::TraceSummary* summary) {
  VLOG(2) << "UnifiedHeapController::TraceEpilogue";
  {
    ThreadHeapStatsCollector::BlinkGCInV8Scope nested_scope(
        thread_state_->Heap().stats_collector());
    thread_state_->AtomicPauseMarkEpilogue(
        BlinkGC::kIncrementalAndConcurrentMarking);
    const BlinkGC::SweepingType sweeping_type =
        thread_state_->IsForcedGC() ? BlinkGC::kEagerSweeping
                                    : BlinkGC::kConcurrentAndLazySweeping;
    thread_state_->AtomicPauseSweepAndCompact(
        BlinkGC::CollectionType::kMajor,
        BlinkGC::kIncrementalAndConcurrentMarking, sweeping_type);

    ThreadHeapStatsCollector* const stats_collector =
        thread_state_->Heap().stats_collector();
    summary->allocated_size =
        static_cast<size_t>(stats_collector->marked_bytes());
    summary->time = stats_collector->marking_time_so_far().InMillisecondsF();
    buffered_allocated_size_ = 0;
  }
  thread_state_->AtomicPauseEpilogue();
}

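// Invoked by V8 with the internal fields of wrappers discovered during its
// own marking. Fields belonging to Blink are traced through their
// WrapperTypeInfo; fields of other gin embedders are skipped.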
void UnifiedHeapController::RegisterV8References(
    const std::vector<std::pair<void*, void*>>&
        internal_fields_of_potential_wrappers) {
  VLOG(2) << "UnifiedHeapController::RegisterV8References";
  DCHECK(thread_state()->IsMarkingInProgress());

  const bool was_in_atomic_pause = thread_state()->in_atomic_pause();
  if (!was_in_atomic_pause)
    ThreadState::Current()->EnterAtomicPause();
  for (const auto& internal_fields : internal_fields_of_potential_wrappers) {
    const WrapperTypeInfo* wrapper_type_info =
        reinterpret_cast<const WrapperTypeInfo*>(internal_fields.first);
    if (wrapper_type_info->gin_embedder != gin::GinEmbedder::kEmbedderBlink) {
      continue;
    }
    is_tracing_done_ = false;
    wrapper_type_info->Trace(thread_state_->CurrentVisitor(),
                             internal_fields.second);
  }
  if (!was_in_atomic_pause)
    ThreadState::Current()->LeaveAtomicPause();
}

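// Called by V8 to let the embedder advance marking within the given
// deadline. Returns true if embedder-side tracing is done.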
bool UnifiedHeapController::AdvanceTracing(double deadline_in_ms) {
  VLOG(2) << "UnifiedHeapController::AdvanceTracing";
  ThreadHeapStatsCollector::BlinkGCInV8Scope nested_scope(
      thread_state_->Heap().stats_collector());
  if (!thread_state_->in_atomic_pause()) {
    ThreadHeapStatsCollector::EnabledScope advance_tracing_scope(
        thread_state_->Heap().stats_collector(),
        ThreadHeapStatsCollector::kUnifiedMarkingStep);
    // V8 calls into embedder tracing from its own marking to ensure
    // progress. Oilpan will additionally schedule marking steps.
    ThreadState::AtomicPauseScope atomic_pause_scope(thread_state_);
    ScriptForbiddenScope script_forbidden_scope;
    is_tracing_done_ = thread_state_->MarkPhaseAdvanceMarkingBasedOnSchedule(
        base::TimeDelta::FromMillisecondsD(deadline_in_ms),
        ThreadState::EphemeronProcessing::kPartialProcessing);
    if (!is_tracing_done_) {
      if (base::FeatureList::IsEnabled(
              blink::features::kBlinkHeapConcurrentMarking)) {
        thread_state_->ConcurrentMarkingStep();
      }
      thread_state_->RestartIncrementalMarkingIfPaused();
    }
    return is_tracing_done_;
  }
  thread_state_->AtomicPauseMarkTransitiveClosure();
  is_tracing_done_ = true;
  return true;
}

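// Reports to V8 whether embedder-side marking has finished.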
bool UnifiedHeapController::IsTracingDone() {
  return is_tracing_done_;
}

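// Decides for V8's non-tracing GCs whether |handle| has to be treated as a
// root and thus kept alive.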
bool UnifiedHeapController::IsRootForNonTracingGC(
    const v8::TracedReference<v8::Value>& handle) {
  if (thread_state()->IsIncrementalMarking()) {
    // We have a non-tracing GC while unified GC is in progress. Treat all
    // objects as roots to avoid stale pointers in the marking worklists.
    return true;
  }
  const uint16_t class_id = handle.WrapperClassId();
  // Stand-alone reference or kCustomWrappableId: keep the handle as a root
  // as we don't know better.
  if (class_id != WrapperTypeInfo::kNodeClassId &&
      class_id != WrapperTypeInfo::kObjectClassId)
    return true;

  const v8::TracedReference<v8::Object>& traced =
      handle.template As<v8::Object>();
  if (ToWrapperTypeInfo(traced)->IsActiveScriptWrappable() &&
      ToScriptWrappable(traced)->HasPendingActivity()) {
    return true;
  }

  if (ToScriptWrappable(traced)->HasEventListeners()) {
    return true;
  }

  return false;
}

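// Invoked for handles that died in a non-tracing GC; clears the
// corresponding wrapper from Blink's DOM wrapper store.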
void UnifiedHeapController::ResetHandleInNonTracingGC(
    const v8::TracedReference<v8::Value>& handle) {
  const uint16_t class_id = handle.WrapperClassId();
  // Only consider handles that have not been treated as roots, see
  // IsRootForNonTracingGC.
  if (class_id != WrapperTypeInfo::kNodeClassId &&
      class_id != WrapperTypeInfo::kObjectClassId)
    return;

  // We should not reset any handles during an already running tracing
  // collection. Resetting a handle could re-allocate a backing or trigger
  // potential in-place rehashing. Both operations may trigger write barriers
  // by moving references. Such references may already be dead but not yet
  // cleared, which would result in reporting dead objects to V8.
  DCHECK(!thread_state()->IsIncrementalMarking());
  // Clearing the wrapper below adjusts the DOM wrapper store, which may
  // re-allocate its backing. We have to avoid reporting memory to V8 as that
  // may trigger a GC during GC.
  ThreadState::GCForbiddenScope gc_forbidden(thread_state());
  const v8::TracedReference<v8::Object>& traced = handle.As<v8::Object>();
  bool success = DOMWrapperWorld::UnsetSpecificWrapperIfSet(
      ToScriptWrappable(traced), traced);
  // Since V8 found a handle, Blink needs to find it as well when trying to
  // remove it.
  CHECK(success);
}

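// Blink only uses v8::TracedReference, so this overload must never be
// reached.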
bool UnifiedHeapController::IsRootForNonTracingGC(
    const v8::TracedGlobal<v8::Value>& handle) {
  CHECK(false) << "Blink does not use v8::TracedGlobal.";
  return false;
}

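// Flushes the buffered allocated-size delta to V8 when it is currently safe
// to do so.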
void UnifiedHeapController::ReportBufferedAllocatedSizeIfPossible() {
  // Avoid reporting to V8 in the following conditions as that may trigger GC
  // finalizations where not allowed.
  // - Recursive sweeping.
  // - GC forbidden scope.
  if ((thread_state()->IsSweepingInProgress() &&
       thread_state()->SweepForbidden()) ||
      thread_state()->IsGCForbidden()) {
    return;
  }

  if (buffered_allocated_size_ < 0) {
    DecreaseAllocatedSize(static_cast<size_t>(-buffered_allocated_size_));
  } else {
    IncreaseAllocatedSize(static_cast<size_t>(buffered_allocated_size_));
  }
  buffered_allocated_size_ = 0;
}

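// Callbacks from the heap's stats collector (this controller is registered
// as an observer in the constructor). Deltas are buffered and only reported
// to V8 when safe, see ReportBufferedAllocatedSizeIfPossible().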
void UnifiedHeapController::IncreaseAllocatedObjectSize(size_t delta_bytes) {
  buffered_allocated_size_ += static_cast<int64_t>(delta_bytes);
  ReportBufferedAllocatedSizeIfPossible();
}

void UnifiedHeapController::DecreaseAllocatedObjectSize(size_t delta_bytes) {
  buffered_allocated_size_ -= static_cast<int64_t>(delta_bytes);
  ReportBufferedAllocatedSizeIfPossible();
}

}  // namespace blink