// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"

#include "src/base/atomicops.h"
#include "src/base/template-utils.h"
#include "src/cancelable-task.h"
#include "src/compiler.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/optimized-compilation-info.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

namespace {

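// Tears down a compilation job that will not be finalized. When
// restore_function_code is true, the function's code is reset to the
// SharedFunctionInfo's code so the next invocation re-enters the
// unoptimized path, and any stale optimization marker is cleared.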
void DisposeCompilationJob(OptimizedCompilationJob* job,
                           bool restore_function_code) {
  if (restore_function_code) {
    Handle<JSFunction> function = job->compilation_info()->closure();
    function->set_code(function->shared()->GetCode());
    if (function->IsInOptimizationQueue()) {
      function->ClearOptimizationMarker();
    }
    // TODO(mvstanton): We can't call EnsureFeedbackVector here due to
    // allocation, but we probably shouldn't call set_code either, as this
    // sometimes runs on the worker thread!
    // JSFunction::EnsureFeedbackVector(function);
  }
  delete job;
}

}  // namespace

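// A CompileTask runs on a worker thread: it pops one job off the dispatcher's
// input queue, executes it, and queues the result for finalization on the
// main thread. The dispatcher's ref_count_ tracks tasks in flight so that
// Flush() and Stop() can wait for all of them to drain.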
class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
 public:
  explicit CompileTask(Isolate* isolate,
                       OptimizingCompileDispatcher* dispatcher)
      : CancelableTask(isolate), isolate_(isolate), dispatcher_(dispatcher) {
    base::LockGuard<base::Mutex> lock_guard(&dispatcher_->ref_count_mutex_);
    ++dispatcher_->ref_count_;
  }

  virtual ~CompileTask() {}

 private:
  // v8::Task overrides.
  void RunInternal() override {
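    // Optimized compilation runs off the main thread, so it must not allocate
    // on the V8 heap or create/dereference handles; these scopes enforce that
    // in debug builds.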
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    {
      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);

      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                   "V8.RecompileConcurrent");

      if (dispatcher_->recompilation_delay_ != 0) {
        base::OS::Sleep(base::TimeDelta::FromMilliseconds(
            dispatcher_->recompilation_delay_));
      }

      dispatcher_->CompileNext(dispatcher_->NextInput(true));
    }
    {
      base::LockGuard<base::Mutex> lock_guard(&dispatcher_->ref_count_mutex_);
      if (--dispatcher_->ref_count_ == 0) {
        dispatcher_->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;
  OptimizingCompileDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};

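// The dispatcher may only be destroyed once all tasks have finished and both
// queues have been drained (see Stop()); the checks below verify this in
// debug builds.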
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
}

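// Pops the oldest job off the input queue, a fixed-capacity ring buffer:
// input_queue_shift_ marks the head, and InputQueueIndex() wraps logical
// positions around the capacity. If the dispatcher is in FLUSH mode, the
// popped job is disposed of instead of being returned.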
OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
    bool check_if_flushing) {
  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return nullptr;
  OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (check_if_flushing) {
    if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
      AllowHandleDereference allow_handle_dereference;
      DisposeCompilationJob(job, true);
      return nullptr;
    }
  }
  return job;
}

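// Executes one compilation job and pushes it onto the output queue for
// finalization, then requests an interrupt so that the main thread runs
// InstallOptimizedFunctions() at the next stack guard check.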
void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job) {
  if (!job) return;

  // The function may have already been optimized by OSR.  Simply continue.
  CompilationJob::Status status = job->ExecuteJob();
  USE(status);  // Prevent an unused-variable error.

  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
  output_queue_.push(job);
  isolate_->stack_guard()->RequestInstallCode();
}

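// Drains the output queue without finalizing any jobs. Each job is popped
// under the lock but disposed of outside it, since disposal may restore the
// function's unoptimized code.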
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
  for (;;) {
    OptimizedCompilationJob* job = nullptr;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }

    DisposeCompilationJob(job, restore_function_code);
  }
}

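// Discards all pending work. In the non-blocking case the main thread empties
// both queues itself; otherwise mode_ is set to FLUSH so that worker threads
// dispose of the jobs they pop, and the main thread waits for ref_count_ to
// drop to zero before draining the output queue.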
void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
  if (blocking_behavior == BlockingBehavior::kDontBlock) {
    if (FLAG_block_concurrent_recompilation) Unblock();
    base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
    while (input_queue_length_ > 0) {
      OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
      DCHECK_NOT_NULL(job);
      input_queue_shift_ = InputQueueIndex(1);
      input_queue_length_--;
      DisposeCompilationJob(job, true);
    }
    FlushOutputQueue(true);
    if (FLAG_trace_concurrent_recompilation) {
      PrintF("  ** Flushed concurrent recompilation queues (not blocking).\n");
    }
    return;
  }
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }
  FlushOutputQueue(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}

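// Shuts the dispatcher down: waits for all in-flight tasks to finish, then
// either compiles and installs the remaining queued jobs (when an artificial
// recompilation delay is configured) or discards them.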
void OptimizingCompileDispatcher::Stop() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    FlushOutputQueue(false);
  }
}

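// Runs on the main thread in response to the install-code interrupt:
// finalizes every job in the output queue, skipping functions that have
// already received optimized code in the meantime (e.g. via on-stack
// replacement).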
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    OptimizedCompilationJob* job = nullptr;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }
    OptimizedCompilationInfo* info = job->compilation_info();
    Handle<JSFunction> function(*info->closure());
    if (function->HasOptimizedCode()) {
      if (FLAG_trace_concurrent_recompilation) {
        PrintF("  ** Aborting compilation for ");
        function->ShortPrint();
        PrintF(" as it has already been optimized.\n");
      }
      DisposeCompilationJob(job, false);
    } else {
      Compiler::FinalizeCompilationJob(job, isolate_);
    }
  }
}

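// Appends a job to the back of the input queue and, unless recompilation is
// held back by --block-concurrent-recompilation, immediately posts a
// CompileTask to a worker thread to process it.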
void OptimizingCompileDispatcher::QueueForOptimization(
    OptimizedCompilationJob* job) {
  DCHECK(IsQueueAvailable());
  {
    // Add job to the back of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        base::make_unique<CompileTask>(isolate_, this));
  }
}

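// Posts one CompileTask per job that was held back while
// --block-concurrent-recompilation was in effect.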
void OptimizingCompileDispatcher::Unblock() {
  while (blocked_jobs_ > 0) {
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        base::make_unique<CompileTask>(isolate_, this));
    blocked_jobs_--;
  }
}

}  // namespace internal
}  // namespace v8