// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"

#include "src/base/atomicops.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/execution/isolate.h"
#include "src/execution/local-isolate.h"
#include "src/heap/local-heap.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
#include "src/tasks/cancelable-task.h"
#include "src/tracing/trace-event.h"

namespace v8 {
namespace internal {

namespace {

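// Destroys a compilation job. If |restore_function_code| is set (i.e. the job
// is being discarded rather than installed), the function's code is reset to
// its SharedFunctionInfo's code and any optimization marker is cleared, so
// the function can be optimized again later.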
void DisposeCompilationJob(OptimizedCompilationJob* job,
                           bool restore_function_code) {
  if (restore_function_code) {
    Handle<JSFunction> function = job->compilation_info()->closure();
    function->set_code(function->shared().GetCode());
    if (function->IsInOptimizationQueue()) {
      function->ClearOptimizationMarker();
    }
    // TODO(mvstanton): We can't call EnsureFeedbackVector here due to
    // allocation, but we probably shouldn't call set_code either, as this
    // sometimes runs on the worker thread!
    // JSFunction::EnsureFeedbackVector(function);
  }
  delete job;
}

}  // namespace

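// Background task that compiles a single job from the dispatcher's input
// queue. Each live task holds a reference on the dispatcher (ref_count_),
// which Flush() and Stop() use to wait until all in-flight tasks have
// finished.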
class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
 public:
  explicit CompileTask(Isolate* isolate,
                       OptimizingCompileDispatcher* dispatcher)
      : CancelableTask(isolate),
        isolate_(isolate),
        worker_thread_runtime_call_stats_(
            isolate->counters()->worker_thread_runtime_call_stats()),
        dispatcher_(dispatcher) {
    base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
    ++dispatcher_->ref_count_;
  }

  ~CompileTask() override = default;

 private:
  // v8::Task overrides.
  void RunInternal() override {
    LocalIsolate local_isolate(isolate_, ThreadKind::kBackground);
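    // The scopes below assert (in debug builds) that this background thread
    // neither allocates on the heap nor creates or dereferences handles into
    // the main-thread heap.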
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    {
      WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope(
          worker_thread_runtime_call_stats_);
      RuntimeCallTimerScope runtimeTimer(
          runtime_call_stats_scope.Get(),
          RuntimeCallCounterId::kOptimizeBackgroundDispatcherJob);

      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                   "V8.OptimizeBackground");

      if (dispatcher_->recompilation_delay_ != 0) {
        base::OS::Sleep(base::TimeDelta::FromMilliseconds(
            dispatcher_->recompilation_delay_));
      }

      dispatcher_->CompileNext(dispatcher_->NextInput(&local_isolate, true),
                               runtime_call_stats_scope.Get(), &local_isolate);
    }
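    // Drop this task's reference on the dispatcher; the last task to finish
    // wakes up any Flush() or Stop() call waiting on ref_count_zero_.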
    {
      base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
      if (--dispatcher_->ref_count_ == 0) {
        dispatcher_->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;
  WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
  OptimizingCompileDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};

OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    base::MutexGuard lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
}

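// The input queue is a fixed-capacity ring buffer: input_queue_shift_ marks
// the logical front and InputQueueIndex() maps logical positions to physical
// slots. NextInput() pops the front job; while the dispatcher is flushing,
// the job is disposed (restoring the function's code) instead of returned.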
OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
    LocalIsolate* local_isolate, bool check_if_flushing) {
  base::MutexGuard access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return nullptr;
  OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (check_if_flushing) {
    if (mode_ == FLUSH) {
      UnparkedScope scope(local_isolate->heap());
      AllowHandleDereference allow_handle_dereference;
      DisposeCompilationJob(job, true);
      return nullptr;
    }
  }
  return job;
}

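// Runs on a background thread: executes the job's parallel phase, then moves
// it to the output queue and interrupts the main thread so the finished code
// gets installed at the next opportunity.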
void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job,
                                              RuntimeCallStats* stats,
                                              LocalIsolate* local_isolate) {
  if (!job) return;

  // The function may have already been optimized by OSR.  Simply continue.
  CompilationJob::Status status = job->ExecuteJob(stats, local_isolate);
  USE(status);  // Prevent an unused-variable error.

  {
    // Use a mutex to make sure that functions marked for install
    // are always also queued.
    base::MutexGuard access_output_queue_(&output_queue_mutex_);
    output_queue_.push(job);
  }

  isolate_->stack_guard()->RequestInstallCode();
}

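// Disposes every job that finished compiling but was never installed. Called
// with |restore_function_code| = true when flushing and false from Stop().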
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
  for (;;) {
    OptimizedCompilationJob* job = nullptr;
    {
      base::MutexGuard access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }

    DisposeCompilationJob(job, restore_function_code);
  }
}

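// Discards all queued and in-flight jobs. The non-blocking variant drains the
// input queue directly on the caller's thread; the blocking variant switches
// the dispatcher into FLUSH mode so that background tasks dispose their jobs,
// then waits for ref_count_ to drop to zero before draining the output queue.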
void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
  if (blocking_behavior == BlockingBehavior::kDontBlock) {
    if (FLAG_block_concurrent_recompilation) Unblock();
    base::MutexGuard access_input_queue_(&input_queue_mutex_);
    while (input_queue_length_ > 0) {
      OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
      DCHECK_NOT_NULL(job);
      input_queue_shift_ = InputQueueIndex(1);
      input_queue_length_--;
      DisposeCompilationJob(job, true);
    }
    FlushOutputQueue(true);
    if (FLAG_trace_concurrent_recompilation) {
      PrintF("  ** Flushed concurrent recompilation queues (not blocking).\n");
    }
    return;
  }
  mode_ = FLUSH;
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::MutexGuard lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    mode_ = COMPILE;
  }
  FlushOutputQueue(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}

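// Like the blocking Flush(), except that the remaining output jobs are
// discarded without restoring the functions' code.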
void OptimizingCompileDispatcher::Stop() {
  mode_ = FLUSH;
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::MutexGuard lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    mode_ = COMPILE;
  }

  // At this point the optimizing compiler thread's event loop has stopped.
  // There is no need for a mutex when reading input_queue_length_.
  DCHECK_EQ(input_queue_length_, 0);
  FlushOutputQueue(false);
}

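// Runs on the main thread in response to the install-code interrupt. Drains
// the output queue, finalizing each job unless its function has already
// received code of the requested kind (e.g. via on-stack replacement).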
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    OptimizedCompilationJob* job = nullptr;
    {
      base::MutexGuard access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }
    OptimizedCompilationInfo* info = job->compilation_info();
    Handle<JSFunction> function(*info->closure(), isolate_);
    if (function->HasAvailableCodeKind(info->code_kind())) {
      if (FLAG_trace_concurrent_recompilation) {
        PrintF("  ** Aborting compilation for ");
        function->ShortPrint();
        PrintF(" as it has already been optimized.\n");
      }
      DisposeCompilationJob(job, false);
    } else {
      Compiler::FinalizeOptimizedCompilationJob(job, isolate_);
    }
  }
}

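// Appends the job to the back of the input queue and, unless
// --block-concurrent-recompilation defers it, immediately posts a background
// CompileTask to compile it.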
void OptimizingCompileDispatcher::QueueForOptimization(
    OptimizedCompilationJob* job) {
  DCHECK(IsQueueAvailable());
  {
    // Add job to the back of the input queue.
    base::MutexGuard access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        std::make_unique<CompileTask>(isolate_, this));
  }
}

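// Posts one CompileTask per job previously deferred by
// --block-concurrent-recompilation.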
void OptimizingCompileDispatcher::Unblock() {
  while (blocked_jobs_ > 0) {
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        std::make_unique<CompileTask>(isolate_, this));
    blocked_jobs_--;
  }
}

}  // namespace internal
}  // namespace v8