1 // Copyright 2018 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/wasm/wasm-engine.h"
6 
7 #include "src/base/functional.h"
8 #include "src/base/platform/time.h"
9 #include "src/common/globals.h"
10 #include "src/diagnostics/code-tracer.h"
11 #include "src/diagnostics/compilation-statistics.h"
12 #include "src/execution/frames.h"
13 #include "src/execution/v8threads.h"
14 #include "src/logging/counters.h"
15 #include "src/objects/heap-number.h"
16 #include "src/objects/js-promise.h"
17 #include "src/objects/objects-inl.h"
18 #include "src/strings/string-hasher-inl.h"
19 #include "src/utils/ostreams.h"
20 #include "src/wasm/function-compiler.h"
21 #include "src/wasm/module-compiler.h"
22 #include "src/wasm/module-decoder.h"
23 #include "src/wasm/module-instantiate.h"
24 #include "src/wasm/streaming-decoder.h"
25 #include "src/wasm/wasm-limits.h"
26 #include "src/wasm/wasm-objects-inl.h"
27 
28 #ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
29 #include "src/debug/wasm/gdb-server/gdb-server.h"
30 #endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
31 
32 namespace v8 {
33 namespace internal {
34 namespace wasm {
35 
36 #define TRACE_CODE_GC(...)                                         \
37   do {                                                             \
38     if (FLAG_trace_wasm_code_gc) PrintF("[wasm-gc] " __VA_ARGS__); \
39   } while (false)
40 
41 namespace {
42 // A task to log a set of {WasmCode} objects in an isolate. It does not own
43 // any data itself; the task is owned by the platform, so its lifetime is not
44 // bound to the wasm engine.
45 class LogCodesTask : public Task {
46  public:
47   LogCodesTask(base::Mutex* mutex, LogCodesTask** task_slot, Isolate* isolate,
48                WasmEngine* engine)
49       : mutex_(mutex),
50         task_slot_(task_slot),
51         isolate_(isolate),
52         engine_(engine) {
53     DCHECK_NOT_NULL(task_slot);
54     DCHECK_NOT_NULL(isolate);
55   }
56 
57   ~LogCodesTask() {
58     // If the platform deletes this task before executing it, we also deregister
59     // it to avoid use-after-free from still-running background threads.
60     if (!cancelled()) DeregisterTask();
61   }
62 
63   void Run() override {
64     if (cancelled()) return;
65     DeregisterTask();
66     engine_->LogOutstandingCodesForIsolate(isolate_);
67   }
68 
69   void Cancel() {
70     // Cancel will only be called on Isolate shutdown, which happens on the
71     // Isolate's foreground thread. Thus no synchronization needed.
72     isolate_ = nullptr;
73   }
74 
75   bool cancelled() const { return isolate_ == nullptr; }
76 
77   void DeregisterTask() {
78     // The task will only be deregistered from the foreground thread (executing
79     // this task or calling its destructor), thus we do not need synchronization
80     // on this field access.
81     if (task_slot_ == nullptr) return;  // already deregistered.
82     // Remove this task from the {IsolateInfo} in the engine. The next
83     // logging request will allocate and schedule a new task.
84     base::MutexGuard guard(mutex_);
85     DCHECK_EQ(this, *task_slot_);
86     *task_slot_ = nullptr;
87     task_slot_ = nullptr;
88   }
89 
90  private:
91   // The mutex of the WasmEngine.
92   base::Mutex* const mutex_;
93   // The slot in the WasmEngine where this LogCodesTask is stored. This is
94   // cleared by this task before execution or on task destruction.
95   LogCodesTask** task_slot_;
96   Isolate* isolate_;
97   WasmEngine* const engine_;
98 };
99 
100 void CheckNoArchivedThreads(Isolate* isolate) {
101   class ArchivedThreadsVisitor : public ThreadVisitor {
102     void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
103       // Archived threads are rarely used, and not combined with Wasm at the
104       // moment. Implement this and test it properly once we have a use case for
105       // that.
106       FATAL("archived threads in combination with wasm not supported");
107     }
108   } archived_threads_visitor;
109   isolate->thread_manager()->IterateArchivedThreads(&archived_threads_visitor);
110 }
111 
112 class WasmGCForegroundTask : public CancelableTask {
113  public:
114   explicit WasmGCForegroundTask(Isolate* isolate)
115       : CancelableTask(isolate->cancelable_task_manager()), isolate_(isolate) {}
116 
117   void RunInternal() final {
118     WasmEngine* engine = isolate_->wasm_engine();
119     // If the foreground task is executing, there is no wasm code active. Just
120     // report an empty set of live wasm code.
121 #ifdef ENABLE_SLOW_DCHECKS
122     for (StackFrameIterator it(isolate_); !it.done(); it.Advance()) {
123       DCHECK_NE(StackFrame::WASM_COMPILED, it.frame()->type());
124     }
125 #endif
126     CheckNoArchivedThreads(isolate_);
127     engine->ReportLiveCodeForGC(isolate_, Vector<WasmCode*>{});
128   }
129 
130  private:
131   Isolate* isolate_;
132 };
133 
134 }  // namespace
135 
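// Return a cached {NativeModule} with identical wire bytes, if one exists.
// Only actual wasm modules (not asm.js) are cached. If no entry exists, an
// empty marker is inserted and {nullptr} is returned so the caller compiles
// the module itself; if another thread is currently compiling the same bytes,
// block until it finishes or fails, then retry.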
136 std::shared_ptr<NativeModule> NativeModuleCache::MaybeGetNativeModule(
137     ModuleOrigin origin, Vector<const uint8_t> wire_bytes) {
138   if (origin != kWasmOrigin) return nullptr;
139   base::MutexGuard lock(&mutex_);
140   size_t prefix_hash = PrefixHash(wire_bytes);
141   NativeModuleCache::Key key{prefix_hash, wire_bytes};
142   while (true) {
143     auto it = map_.find(key);
144     if (it == map_.end()) {
145       // Even though this exact key is not in the cache, there might be a
146       // matching prefix hash indicating that a streaming compilation is
147       // currently compiling a module with the same prefix. {OnFinishedStream}
148       // happens on the main thread too, so waiting for streaming compilation to
149       // finish would create a deadlock. Instead, compile the module twice and
150       // handle the conflict in {UpdateNativeModuleCache}.
151 
152       // Insert a {nullopt} entry to let other threads know that this
153       // {NativeModule} is already being created on another thread.
154       auto p = map_.emplace(key, base::nullopt);
155       USE(p);
156       DCHECK(p.second);
157       return nullptr;
158     }
159     if (it->second.has_value()) {
160       if (auto shared_native_module = it->second.value().lock()) {
161         DCHECK_EQ(shared_native_module->wire_bytes(), wire_bytes);
162         return shared_native_module;
163       }
164     }
165     cache_cv_.Wait(&mutex_);
166   }
167 }
168 
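// Try to reserve the given prefix hash for a streaming compilation. Returns
// true if the caller now owns compilation of this prefix; returns false if a
// module with the same prefix hash is already cached or being compiled.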
169 bool NativeModuleCache::GetStreamingCompilationOwnership(size_t prefix_hash) {
170   base::MutexGuard lock(&mutex_);
171   auto it = map_.lower_bound(Key{prefix_hash, {}});
172   if (it != map_.end() && it->first.prefix_hash == prefix_hash) {
173     DCHECK_IMPLIES(!it->first.bytes.empty(),
174                    PrefixHash(it->first.bytes) == prefix_hash);
175     return false;
176   }
177   Key key{prefix_hash, {}};
178   DCHECK_EQ(0, map_.count(key));
179   map_.emplace(key, base::nullopt);
180   return true;
181 }
182 
183 void NativeModuleCache::StreamingCompilationFailed(size_t prefix_hash) {
184   base::MutexGuard lock(&mutex_);
185   Key key{prefix_hash, {}};
186   DCHECK_EQ(1, map_.count(key));
187   map_.erase(key);
188   cache_cv_.NotifyAll();
189 }
190 
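// Publish a finished {NativeModule} in the cache unless compilation failed.
// If another thread published a module with the same wire bytes in the
// meantime, that module is returned instead, so callers must use the returned
// pointer. Waiting threads are notified in either case.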
191 std::shared_ptr<NativeModule> NativeModuleCache::Update(
192     std::shared_ptr<NativeModule> native_module, bool error) {
193   DCHECK_NOT_NULL(native_module);
194   if (native_module->module()->origin != kWasmOrigin) return native_module;
195   Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
196   DCHECK(!wire_bytes.empty());
197   size_t prefix_hash = PrefixHash(native_module->wire_bytes());
198   base::MutexGuard lock(&mutex_);
199   map_.erase(Key{prefix_hash, {}});
200   const Key key{prefix_hash, wire_bytes};
201   auto it = map_.find(key);
202   if (it != map_.end()) {
203     if (it->second.has_value()) {
204       auto conflicting_module = it->second.value().lock();
205       if (conflicting_module != nullptr) {
206         DCHECK_EQ(conflicting_module->wire_bytes(), wire_bytes);
207         return conflicting_module;
208       }
209     }
210     map_.erase(it);
211   }
212   if (!error) {
213     // The key now points to the new native module's owned copy of the bytes,
214     // so that it stays valid until the native module is freed and erased from
215     // the map.
216     auto p = map_.emplace(
217         key, base::Optional<std::weak_ptr<NativeModule>>(native_module));
218     USE(p);
219     DCHECK(p.second);
220   }
221   cache_cv_.NotifyAll();
222   return native_module;
223 }
224 
225 void NativeModuleCache::Erase(NativeModule* native_module) {
226   if (native_module->module()->origin != kWasmOrigin) return;
227   // Happens in some tests where bytes are set directly.
228   if (native_module->wire_bytes().empty()) return;
229   base::MutexGuard lock(&mutex_);
230   size_t prefix_hash = PrefixHash(native_module->wire_bytes());
231   map_.erase(Key{prefix_hash, native_module->wire_bytes()});
232   cache_cv_.NotifyAll();
233 }
234 
235 // static
236 size_t NativeModuleCache::WireBytesHash(Vector<const uint8_t> bytes) {
237   return StringHasher::HashSequentialString(
238       reinterpret_cast<const char*>(bytes.begin()), bytes.length(),
239       kZeroHashSeed);
240 }
241 
242 // static
243 size_t NativeModuleCache::PrefixHash(Vector<const uint8_t> wire_bytes) {
244   // Compute the hash as a combined hash of the sections up to the code section
245   // header, to mirror the way streaming compilation does it.
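  // In short, the prefix hash is:
  //   hash = H(first 8 header bytes)
  //   for each section before the code section: hash = combine(hash, H(payload))
  //   at a non-empty code section: hash = combine(hash, section size)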
246   Decoder decoder(wire_bytes.begin(), wire_bytes.end());
247   decoder.consume_bytes(8, "module header");
248   size_t hash = NativeModuleCache::WireBytesHash(wire_bytes.SubVector(0, 8));
249   SectionCode section_id = SectionCode::kUnknownSectionCode;
250   while (decoder.ok() && decoder.more()) {
251     section_id = static_cast<SectionCode>(decoder.consume_u8());
252     uint32_t section_size = decoder.consume_u32v("section size");
253     if (section_id == SectionCode::kCodeSectionCode) {
254       uint32_t num_functions = decoder.consume_u32v("num functions");
255       // If {num_functions} is 0, the streaming decoder skips the section. Do
256       // the same here to ensure hashes are consistent.
257       if (num_functions != 0) {
258         hash = base::hash_combine(hash, section_size);
259       }
260       break;
261     }
262     const uint8_t* payload_start = decoder.pc();
263     decoder.consume_bytes(section_size, "section payload");
264     size_t section_hash = NativeModuleCache::WireBytesHash(
265         Vector<const uint8_t>(payload_start, section_size));
266     hash = base::hash_combine(hash, section_hash);
267   }
268   return hash;
269 }
270 
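// Overview of a code GC cycle (implemented by {TriggerGC},
// {ReportLiveCodeForGC} and {PotentiallyFinishCurrentGC} below):
//  1. {TriggerGC} snapshots all potentially dead code into {dead_code} and
//     requests a live-code report from every isolate using an affected module.
//  2. Each reporting isolate removes its live code from {dead_code} and is
//     erased from {outstanding_isolates}.
//  3. Once no isolates are outstanding, the code remaining in {dead_code} is
//     really dead: its ref count is dropped and, if it reaches zero, freed.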
271 struct WasmEngine::CurrentGCInfo {
272   explicit CurrentGCInfo(int8_t gc_sequence_index)
273       : gc_sequence_index(gc_sequence_index) {
274     DCHECK_NE(0, gc_sequence_index);
275   }
276 
277   // Set of isolates that did not scan their stack yet for used WasmCode, and
278   // their scheduled foreground task.
279   std::unordered_map<Isolate*, WasmGCForegroundTask*> outstanding_isolates;
280 
281   // Set of dead code. Filled with all potentially dead code on initialization.
282   // Code that is still in-use is removed by the individual isolates.
283   std::unordered_set<WasmCode*> dead_code;
284 
285   // The number of GCs triggered in the native module that triggered this GC.
286   // This is stored in the histogram for each participating isolate during
287   // execution of that isolate's foreground task.
288   const int8_t gc_sequence_index;
289 
290   // If during this GC, another GC was requested, we skipped that other GC (we
291   // only run one GC at a time). Remember though to trigger another one once
292   // this one finishes. {next_gc_sequence_index} is 0 if no next GC is needed,
293   // and >0 otherwise. It stores the {num_code_gcs_triggered} of the native
294   // module which triggered the next GC.
295   int8_t next_gc_sequence_index = 0;
296 
297   // The start time of this GC; used for tracing and sampled via {Counters}.
298   // Can be null ({TimeTicks::IsNull()}) if timer is not high resolution.
299   base::TimeTicks start_time;
300 };
301 
302 struct WasmEngine::IsolateInfo {
303   explicit IsolateInfo(Isolate* isolate)
304       : log_codes(WasmCode::ShouldBeLogged(isolate)),
305         async_counters(isolate->async_counters()) {
306     v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
307     v8::Platform* platform = V8::GetCurrentPlatform();
308     foreground_task_runner = platform->GetForegroundTaskRunner(v8_isolate);
309   }
310 
311 #ifdef DEBUG
312   ~IsolateInfo() {
313     // Before destructing, the {WasmEngine} must have cleared outstanding code
314     // to log.
315     DCHECK_EQ(0, code_to_log.size());
316   }
317 #endif
318 
319   // All native modules that are being used by this Isolate (currently only
320   // grows, never shrinks).
321   std::set<NativeModule*> native_modules;
322 
323   // Caches whether code needs to be logged on this isolate.
324   bool log_codes;
325 
326   // The currently scheduled LogCodesTask.
327   LogCodesTask* log_codes_task = nullptr;
328 
329   // The vector of code objects that still need to be logged in this isolate.
330   std::vector<WasmCode*> code_to_log;
331 
332   // The foreground task runner of the isolate (can be called from background).
333   std::shared_ptr<v8::TaskRunner> foreground_task_runner;
334 
335   const std::shared_ptr<Counters> async_counters;
336 
337   // Keep new modules in tiered down state.
338   bool keep_tiered_down = false;
339 };
340 
341 struct WasmEngine::NativeModuleInfo {
342   // Set of isolates using this NativeModule.
343   std::unordered_set<Isolate*> isolates;
344 
345   // Set of potentially dead code. This set holds one ref for each code object,
346   // until code is detected to be really dead. At that point, the ref count is
347   // decremented and code is moved to the {dead_code} set. If the code is finally
348   // deleted, it is also removed from {dead_code}.
349   std::unordered_set<WasmCode*> potentially_dead_code;
350 
351   // Code that is not being executed in any isolate any more, but the ref count
352   // did not drop to zero yet.
353   std::unordered_set<WasmCode*> dead_code;
354 
355   // Number of code GCs triggered because code in this native module became
356   // potentially dead.
357   int8_t num_code_gcs_triggered = 0;
358 };
359 
360 WasmEngine::WasmEngine() : code_manager_(FLAG_wasm_max_code_space * MB) {}
361 
362 WasmEngine::~WasmEngine() {
363 #ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
364   // Synchronize on the GDB-remote thread, if running.
365   gdb_server_ = nullptr;
366 #endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
367 
368   // Synchronize on all background compile tasks.
369   background_compile_task_manager_.CancelAndWait();
370   // All AsyncCompileJobs have been canceled.
371   DCHECK(async_compile_jobs_.empty());
372   // All Isolates have been deregistered.
373   DCHECK(isolates_.empty());
374   // All NativeModules have died.
375   DCHECK(native_modules_.empty());
376   // Native module cache does not leak.
377   DCHECK(native_module_cache_.empty());
378 }
379 
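// Validate the given wire bytes by fully decoding (and verifying) the module.
// Null or empty input is reported as invalid.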
380 bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
381                               const ModuleWireBytes& bytes) {
382   // TODO(titzer): remove dependency on the isolate.
383   if (bytes.start() == nullptr || bytes.length() == 0) return false;
384   ModuleResult result =
385       DecodeWasmModule(enabled, bytes.start(), bytes.end(), true, kWasmOrigin,
386                        isolate->counters(), allocator());
387   return result.ok();
388 }
389 
390 MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
391     Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
392     Vector<const byte> asm_js_offset_table_bytes,
393     Handle<HeapNumber> uses_bitset, LanguageMode language_mode) {
394   ModuleOrigin origin = language_mode == LanguageMode::kSloppy
395                             ? kAsmJsSloppyOrigin
396                             : kAsmJsStrictOrigin;
397   ModuleResult result =
398       DecodeWasmModule(WasmFeatures::ForAsmjs(), bytes.start(), bytes.end(),
399                        false, origin, isolate->counters(), allocator());
400   if (result.failed()) {
401     // This happens once in a while when we have missed some limit check
402     // in the asm parser. Output an error message to help diagnose, but crash.
403     std::cout << result.error().message();
404     UNREACHABLE();
405   }
406 
407   result.value()->asm_js_offset_information =
408       std::make_unique<AsmJsOffsetInformation>(asm_js_offset_table_bytes);
409 
410   // Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
411   // in {CompileToNativeModule}.
412   Handle<FixedArray> export_wrappers;
413   std::shared_ptr<NativeModule> native_module =
414       CompileToNativeModule(isolate, WasmFeatures::ForAsmjs(), thrower,
415                             std::move(result).value(), bytes, &export_wrappers);
416   if (!native_module) return {};
417 
418   return AsmWasmData::New(isolate, std::move(native_module), export_wrappers,
419                           uses_bitset);
420 }
421 
422 Handle<WasmModuleObject> WasmEngine::FinalizeTranslatedAsmJs(
423     Isolate* isolate, Handle<AsmWasmData> asm_wasm_data,
424     Handle<Script> script) {
425   std::shared_ptr<NativeModule> native_module =
426       asm_wasm_data->managed_native_module().get();
427   Handle<FixedArray> export_wrappers =
428       handle(asm_wasm_data->export_wrappers(), isolate);
429   Handle<WasmModuleObject> module_object = WasmModuleObject::New(
430       isolate, std::move(native_module), script, export_wrappers);
431   return module_object;
432 }
433 
434 MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
435     Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
436     const ModuleWireBytes& bytes) {
437   ModuleResult result =
438       DecodeWasmModule(enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
439                        isolate->counters(), allocator());
440   if (result.failed()) {
441     thrower->CompileFailed(result.error());
442     return {};
443   }
444 
445   // Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
446   // in {CompileToModuleObject}.
447   Handle<FixedArray> export_wrappers;
448   std::shared_ptr<NativeModule> native_module =
449       CompileToNativeModule(isolate, enabled, thrower,
450                             std::move(result).value(), bytes, &export_wrappers);
451   if (!native_module) return {};
452 
453 #ifdef DEBUG
454   // Ensure that code GC will check this isolate for live code.
455   {
456     base::MutexGuard lock(&mutex_);
457     DCHECK_EQ(1, isolates_.count(isolate));
458     DCHECK_EQ(1, isolates_[isolate]->native_modules.count(native_module.get()));
459     DCHECK_EQ(1, native_modules_.count(native_module.get()));
460     DCHECK_EQ(1, native_modules_[native_module.get()]->isolates.count(isolate));
461   }
462 #endif
463 
464   Handle<Script> script =
465       CreateWasmScript(isolate, bytes.module_bytes(),
466                        VectorOf(native_module->module()->source_map_url),
467                        native_module->module()->name);
468 
469   // Create the compiled module object and populate with compiled functions
470   // and information needed at instantiation time. This object needs to be
471   // serializable. Instantiation may occur off a deserialized version of this
472   // object.
473   Handle<WasmModuleObject> module_object = WasmModuleObject::New(
474       isolate, std::move(native_module), script, export_wrappers);
475 
476   // Finish the Wasm script now and make it public to the debugger.
477   isolate->debug()->OnAfterCompile(script);
478   return module_object;
479 }
480 
481 MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
482     Isolate* isolate, ErrorThrower* thrower,
483     Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
484     MaybeHandle<JSArrayBuffer> memory) {
485   return InstantiateToInstanceObject(isolate, thrower, module_object, imports,
486                                      memory);
487 }
488 
489 void WasmEngine::AsyncInstantiate(
490     Isolate* isolate, std::unique_ptr<InstantiationResultResolver> resolver,
491     Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports) {
492   ErrorThrower thrower(isolate, "WebAssembly.instantiate()");
493   // Instantiate a TryCatch so that caught exceptions won't propagate out.
494   // They will still be set as pending exceptions on the isolate.
495   // TODO(clemensb): Avoid TryCatch, use Execution::TryCall internally to invoke
496   // start function and report thrown exception explicitly via out argument.
497   v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
498   catcher.SetVerbose(false);
499   catcher.SetCaptureMessage(false);
500 
501   MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
502       isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
503 
504   if (!instance_object.is_null()) {
505     resolver->OnInstantiationSucceeded(instance_object.ToHandleChecked());
506     return;
507   }
508 
509   if (isolate->has_pending_exception()) {
510     // The JS code executed during instantiation has thrown an exception.
511     // We have to move the exception to the promise chain.
512     Handle<Object> exception(isolate->pending_exception(), isolate);
513     isolate->clear_pending_exception();
514     *isolate->external_caught_exception_address() = false;
515     resolver->OnInstantiationFailed(exception);
516     thrower.Reset();
517   } else {
518     DCHECK(thrower.error());
519     resolver->OnInstantiationFailed(thrower.Reify());
520   }
521 }
522 
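// Compile the given bytes asynchronously. Without --wasm-async-compilation
// this falls back to synchronous compilation; with --wasm-test-streaming the
// bytes are fed through the streaming pipeline instead. Otherwise the bytes
// are copied and an {AsyncCompileJob} is started.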
523 void WasmEngine::AsyncCompile(
524     Isolate* isolate, const WasmFeatures& enabled,
525     std::shared_ptr<CompilationResultResolver> resolver,
526     const ModuleWireBytes& bytes, bool is_shared,
527     const char* api_method_name_for_errors) {
528   if (!FLAG_wasm_async_compilation) {
529     // Asynchronous compilation disabled; fall back on synchronous compilation.
530     ErrorThrower thrower(isolate, api_method_name_for_errors);
531     MaybeHandle<WasmModuleObject> module_object;
532     if (is_shared) {
533       // Make a copy of the wire bytes to avoid concurrent modification.
534       std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
535       memcpy(copy.get(), bytes.start(), bytes.length());
536       ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
537       module_object = SyncCompile(isolate, enabled, &thrower, bytes_copy);
538     } else {
539       // The wire bytes are not shared, OK to use them directly.
540       module_object = SyncCompile(isolate, enabled, &thrower, bytes);
541     }
542     if (thrower.error()) {
543       resolver->OnCompilationFailed(thrower.Reify());
544       return;
545     }
546     Handle<WasmModuleObject> module = module_object.ToHandleChecked();
547     resolver->OnCompilationSucceeded(module);
548     return;
549   }
550 
551   if (FLAG_wasm_test_streaming) {
552     std::shared_ptr<StreamingDecoder> streaming_decoder =
553         StartStreamingCompilation(
554             isolate, enabled, handle(isolate->context(), isolate),
555             api_method_name_for_errors, std::move(resolver));
556     streaming_decoder->OnBytesReceived(bytes.module_bytes());
557     streaming_decoder->Finish();
558     return;
559   }
560   // Make a copy of the wire bytes in case the user program changes them
561   // during asynchronous compilation.
562   std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
563   memcpy(copy.get(), bytes.start(), bytes.length());
564 
565   AsyncCompileJob* job =
566       CreateAsyncCompileJob(isolate, enabled, std::move(copy), bytes.length(),
567                             handle(isolate->context(), isolate),
568                             api_method_name_for_errors, std::move(resolver));
569   job->Start();
570 }
571 
572 std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation(
573     Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
574     const char* api_method_name,
575     std::shared_ptr<CompilationResultResolver> resolver) {
576   AsyncCompileJob* job =
577       CreateAsyncCompileJob(isolate, enabled, std::unique_ptr<byte[]>(nullptr),
578                             0, context, api_method_name, std::move(resolver));
579   return job->CreateStreamingDecoder();
580 }
581 
582 void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
583                                  uint32_t function_index, ExecutionTier tier) {
584   // Note we assume that "one-off" compilations can discard detected features.
585   WasmFeatures detected = WasmFeatures::None();
586   WasmCompilationUnit::CompileWasmFunction(
587       isolate, native_module, &detected,
588       &native_module->module()->functions[function_index], tier);
589 }
590 
591 void WasmEngine::RecompileAllFunctions(Isolate* isolate,
592                                        NativeModule* native_module,
593                                        ExecutionTier tier) {
594   RecompileNativeModule(isolate, native_module, tier);
595 }
596 
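// Tier down all modules currently used by the given isolate and remember to
// keep new modules tiered down as well. The set of modules is collected under
// the engine mutex, but the actual recompilation happens without holding it.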
597 void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
598   std::vector<NativeModule*> native_modules;
599   {
600     base::MutexGuard lock(&mutex_);
601     if (isolates_[isolate]->keep_tiered_down) return;
602     isolates_[isolate]->keep_tiered_down = true;
603     for (auto* native_module : isolates_[isolate]->native_modules) {
604       native_modules.push_back(native_module);
605     }
606   }
607   for (auto* native_module : native_modules) {
608     native_module->TierDown(isolate);
609   }
610 }
611 
612 void WasmEngine::TierUpAllModulesPerIsolate(Isolate* isolate) {
613   std::vector<NativeModule*> native_modules;
614   {
615     base::MutexGuard lock(&mutex_);
616     isolates_[isolate]->keep_tiered_down = false;
617     for (auto* native_module : isolates_[isolate]->native_modules) {
618       native_modules.push_back(native_module);
619     }
620   }
621   for (auto* native_module : native_modules) {
622     native_module->TierUp(isolate);
623   }
624 }
625 
626 std::shared_ptr<NativeModule> WasmEngine::ExportNativeModule(
627     Handle<WasmModuleObject> module_object) {
628   return module_object->shared_native_module();
629 }
630 
631 Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
632     Isolate* isolate, std::shared_ptr<NativeModule> shared_native_module) {
633   NativeModule* native_module = shared_native_module.get();
634   ModuleWireBytes wire_bytes(native_module->wire_bytes());
635   Handle<Script> script =
636       CreateWasmScript(isolate, wire_bytes.module_bytes(),
637                        VectorOf(native_module->module()->source_map_url),
638                        native_module->module()->name);
639   Handle<FixedArray> export_wrappers;
640   CompileJsToWasmWrappers(isolate, native_module->module(), &export_wrappers);
641   Handle<WasmModuleObject> module_object = WasmModuleObject::New(
642       isolate, std::move(shared_native_module), script, export_wrappers,
643       native_module->committed_code_space());
644   {
645     base::MutexGuard lock(&mutex_);
646     DCHECK_EQ(1, isolates_.count(isolate));
647     isolates_[isolate]->native_modules.insert(native_module);
648     DCHECK_EQ(1, native_modules_.count(native_module));
649     native_modules_[native_module]->isolates.insert(isolate);
650   }
651 
652   // Finish the Wasm script now and make it public to the debugger.
653   isolate->debug()->OnAfterCompile(script);
654   return module_object;
655 }
656 
657 CompilationStatistics* WasmEngine::GetOrCreateTurboStatistics() {
658   base::MutexGuard guard(&mutex_);
659   if (compilation_stats_ == nullptr) {
660     compilation_stats_.reset(new CompilationStatistics());
661   }
662   return compilation_stats_.get();
663 }
664 
665 void WasmEngine::DumpAndResetTurboStatistics() {
666   base::MutexGuard guard(&mutex_);
667   if (compilation_stats_ != nullptr) {
668     StdoutStream os;
669     os << AsPrintableStatistics{*compilation_stats_.get(), false} << std::endl;
670   }
671   compilation_stats_.reset();
672 }
673 
674 CodeTracer* WasmEngine::GetCodeTracer() {
675   base::MutexGuard guard(&mutex_);
676   if (code_tracer_ == nullptr) code_tracer_.reset(new CodeTracer(-1));
677   return code_tracer_.get();
678 }
679 
680 AsyncCompileJob* WasmEngine::CreateAsyncCompileJob(
681     Isolate* isolate, const WasmFeatures& enabled,
682     std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
683     const char* api_method_name,
684     std::shared_ptr<CompilationResultResolver> resolver) {
685   AsyncCompileJob* job =
686       new AsyncCompileJob(isolate, enabled, std::move(bytes_copy), length,
687                           context, api_method_name, std::move(resolver));
688   // Pass ownership to the unique_ptr in {async_compile_jobs_}.
689   base::MutexGuard guard(&mutex_);
690   async_compile_jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
691   return job;
692 }
693 
694 std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob(
695     AsyncCompileJob* job) {
696   base::MutexGuard guard(&mutex_);
697   auto item = async_compile_jobs_.find(job);
698   DCHECK(item != async_compile_jobs_.end());
699   std::unique_ptr<AsyncCompileJob> result = std::move(item->second);
700   async_compile_jobs_.erase(item);
701   return result;
702 }
703 
704 bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
705   base::MutexGuard guard(&mutex_);
706   DCHECK_EQ(1, isolates_.count(isolate));
707   for (auto& entry : async_compile_jobs_) {
708     if (entry.first->isolate() == isolate) return true;
709   }
710   return false;
711 }
712 
713 void WasmEngine::DeleteCompileJobsOnContext(Handle<Context> context) {
714   // Under the mutex get all jobs to delete. Then delete them without holding
715   // the mutex, such that deletion can reenter the WasmEngine.
716   std::vector<std::unique_ptr<AsyncCompileJob>> jobs_to_delete;
717   {
718     base::MutexGuard guard(&mutex_);
719     for (auto it = async_compile_jobs_.begin();
720          it != async_compile_jobs_.end();) {
721       if (!it->first->context().is_identical_to(context)) {
722         ++it;
723         continue;
724       }
725       jobs_to_delete.push_back(std::move(it->second));
726       it = async_compile_jobs_.erase(it);
727     }
728   }
729 }
730 
731 void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
732   // Under the mutex get all jobs to delete. Then delete them without holding
733   // the mutex, such that deletion can reenter the WasmEngine.
734   std::vector<std::unique_ptr<AsyncCompileJob>> jobs_to_delete;
735   {
736     base::MutexGuard guard(&mutex_);
737     DCHECK_EQ(1, isolates_.count(isolate));
738     for (auto it = async_compile_jobs_.begin();
739          it != async_compile_jobs_.end();) {
740       if (it->first->isolate() != isolate) {
741         ++it;
742         continue;
743       }
744       jobs_to_delete.push_back(std::move(it->second));
745       it = async_compile_jobs_.erase(it);
746     }
747   }
748 }
749 
750 void WasmEngine::AddIsolate(Isolate* isolate) {
751   base::MutexGuard guard(&mutex_);
752   DCHECK_EQ(0, isolates_.count(isolate));
753   isolates_.emplace(isolate, std::make_unique<IsolateInfo>(isolate));
754 
755   // Install sampling GC callback.
756   // TODO(v8:7424): For now we sample module sizes in a GC callback. This will
757   // bias samples towards apps with high memory pressure. We should switch to
758   // using sampling based on regular intervals independent of the GC.
759   auto callback = [](v8::Isolate* v8_isolate, v8::GCType type,
760                      v8::GCCallbackFlags flags, void* data) {
761     Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
762     Counters* counters = isolate->counters();
763     WasmEngine* engine = isolate->wasm_engine();
764     base::MutexGuard lock(&engine->mutex_);
765     DCHECK_EQ(1, engine->isolates_.count(isolate));
766     for (auto* native_module : engine->isolates_[isolate]->native_modules) {
767       native_module->SampleCodeSize(counters, NativeModule::kSampling);
768     }
769   };
770   isolate->heap()->AddGCEpilogueCallback(callback, v8::kGCTypeMarkSweepCompact,
771                                          nullptr);
772 }
773 
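// Deregister the given isolate: detach it from every native module it uses,
// take it out of a currently running code GC, cancel its pending
// {LogCodesTask} and drop the ref counts of any code that was still to be
// logged.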
774 void WasmEngine::RemoveIsolate(Isolate* isolate) {
775   base::MutexGuard guard(&mutex_);
776   auto it = isolates_.find(isolate);
777   DCHECK_NE(isolates_.end(), it);
778   std::unique_ptr<IsolateInfo> info = std::move(it->second);
779   isolates_.erase(it);
780   for (NativeModule* native_module : info->native_modules) {
781     DCHECK_EQ(1, native_modules_.count(native_module));
782     DCHECK_EQ(1, native_modules_[native_module]->isolates.count(isolate));
783     auto* info = native_modules_[native_module].get();
784     info->isolates.erase(isolate);
785     if (current_gc_info_) {
786       for (WasmCode* code : info->potentially_dead_code) {
787         current_gc_info_->dead_code.erase(code);
788       }
789     }
790   }
791   if (current_gc_info_) {
792     if (RemoveIsolateFromCurrentGC(isolate)) PotentiallyFinishCurrentGC();
793   }
794   if (auto* task = info->log_codes_task) task->Cancel();
795   if (!info->code_to_log.empty()) {
796     WasmCode::DecrementRefCount(VectorOf(info->code_to_log));
797     info->code_to_log.clear();
798   }
799 }
800 
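// Add {code_vec} to the code that still needs to be logged in every isolate
// that uses the owning module. Schedules a {LogCodesTask} and a stack-guard
// interrupt per isolate if necessary, and takes a ref on each code object
// until it has been logged.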
801 void WasmEngine::LogCode(Vector<WasmCode*> code_vec) {
802   if (code_vec.empty()) return;
803   base::MutexGuard guard(&mutex_);
804   NativeModule* native_module = code_vec[0]->native_module();
805   DCHECK_EQ(1, native_modules_.count(native_module));
806   for (Isolate* isolate : native_modules_[native_module]->isolates) {
807     DCHECK_EQ(1, isolates_.count(isolate));
808     IsolateInfo* info = isolates_[isolate].get();
809     if (!info->log_codes) continue;
810     if (info->log_codes_task == nullptr) {
811       auto new_task = std::make_unique<LogCodesTask>(
812           &mutex_, &info->log_codes_task, isolate, this);
813       info->log_codes_task = new_task.get();
814       info->foreground_task_runner->PostTask(std::move(new_task));
815     }
816     if (info->code_to_log.empty()) {
817       isolate->stack_guard()->RequestLogWasmCode();
818     }
819     info->code_to_log.insert(info->code_to_log.end(), code_vec.begin(),
820                              code_vec.end());
821     for (WasmCode* code : code_vec) {
822       DCHECK_EQ(native_module, code->native_module());
823       code->IncRef();
824     }
825   }
826 }
827 
828 void WasmEngine::EnableCodeLogging(Isolate* isolate) {
829   base::MutexGuard guard(&mutex_);
830   auto it = isolates_.find(isolate);
831   DCHECK_NE(isolates_.end(), it);
832   it->second->log_codes = true;
833 }
834 
835 void WasmEngine::LogOutstandingCodesForIsolate(Isolate* isolate) {
836   // If by now we should not log code any more, do not log it.
837   if (!WasmCode::ShouldBeLogged(isolate)) return;
838 
839   // Under the mutex, get the vector of wasm code to log. Then log and decrement
840   // the ref count without holding the mutex.
841   std::vector<WasmCode*> code_to_log;
842   {
843     base::MutexGuard guard(&mutex_);
844     DCHECK_EQ(1, isolates_.count(isolate));
845     code_to_log.swap(isolates_[isolate]->code_to_log);
846   }
847   if (code_to_log.empty()) return;
848   for (WasmCode* code : code_to_log) {
849     code->LogCode(isolate);
850   }
851   WasmCode::DecrementRefCount(VectorOf(code_to_log));
852 }
853 
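// Allocate a new {NativeModule} through the code manager and register it with
// the given isolate. Newly created modules start tiered down if the isolate
// requested that (see {TierDownAllModulesPerIsolate}). With --wasm-gdb-remote,
// the GDB-remote server is started lazily here.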
854 std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
855     Isolate* isolate, const WasmFeatures& enabled,
856     std::shared_ptr<const WasmModule> module, size_t code_size_estimate) {
857 #ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
858   if (FLAG_wasm_gdb_remote && !gdb_server_) {
859     gdb_server_ = std::make_unique<gdb_server::GdbServer>();
860   }
861 #endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
862 
863   std::shared_ptr<NativeModule> native_module = code_manager_.NewNativeModule(
864       this, isolate, enabled, code_size_estimate, std::move(module));
865   base::MutexGuard lock(&mutex_);
866   auto pair = native_modules_.insert(std::make_pair(
867       native_module.get(), std::make_unique<NativeModuleInfo>()));
868   DCHECK(pair.second);  // inserted new entry.
869   pair.first->second.get()->isolates.insert(isolate);
870   isolates_[isolate]->native_modules.insert(native_module.get());
871   if (isolates_[isolate]->keep_tiered_down) {
872     native_module->SetTieredDown();
873   }
874   return native_module;
875 }
876 
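// Look up the given wire bytes in the native module cache. On a cache hit,
// the module is also registered with the given isolate before it is returned.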
877 std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
878     ModuleOrigin origin, Vector<const uint8_t> wire_bytes, Isolate* isolate) {
879   std::shared_ptr<NativeModule> native_module =
880       native_module_cache_.MaybeGetNativeModule(origin, wire_bytes);
881   if (native_module) {
882     base::MutexGuard guard(&mutex_);
883     auto& native_module_info = native_modules_[native_module.get()];
884     if (!native_module_info) {
885       native_module_info = std::make_unique<NativeModuleInfo>();
886     }
887     native_module_info->isolates.insert(isolate);
888     isolates_[isolate]->native_modules.insert(native_module.get());
889   }
890   return native_module;
891 }
892 
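// Publish a freshly compiled module in the cache. Returns true if the module
// in {native_module} was kept; returns false if it was replaced by a
// conflicting cached module, which is then registered with the isolate.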
893 bool WasmEngine::UpdateNativeModuleCache(
894     bool error, std::shared_ptr<NativeModule>* native_module,
895     Isolate* isolate) {
896   // Pass {native_module} by value here to keep it alive until at least after
897   // we returned from {Update}. Otherwise, we might {Erase} it inside {Update}
898   // which would lock the mutex twice.
899   auto prev = native_module->get();
900   *native_module = native_module_cache_.Update(*native_module, error);
901 
902   if (prev == native_module->get()) return true;
903 
904   base::MutexGuard guard(&mutex_);
905   auto& native_module_info = native_modules_[native_module->get()];
906   if (!native_module_info) {
907     native_module_info = std::make_unique<NativeModuleInfo>();
908   }
909   native_module_info->isolates.insert(isolate);
910   isolates_[isolate]->native_modules.insert((*native_module).get());
911   return false;
912 }
913 
914 bool WasmEngine::GetStreamingCompilationOwnership(size_t prefix_hash) {
915   return native_module_cache_.GetStreamingCompilationOwnership(prefix_hash);
916 }
917 
918 void WasmEngine::StreamingCompilationFailed(size_t prefix_hash) {
919   native_module_cache_.StreamingCompilationFailed(prefix_hash);
920 }
921 
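// Remove all traces of the given {NativeModule} from the engine: detach it
// from every isolate that used it, drop its not-yet-logged code, remove its
// code objects from a currently running code GC, and erase it from the
// native module cache.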
922 void WasmEngine::FreeNativeModule(NativeModule* native_module) {
923   base::MutexGuard guard(&mutex_);
924   auto it = native_modules_.find(native_module);
925   DCHECK_NE(native_modules_.end(), it);
926   for (Isolate* isolate : it->second->isolates) {
927     DCHECK_EQ(1, isolates_.count(isolate));
928     IsolateInfo* info = isolates_[isolate].get();
929     DCHECK_EQ(1, info->native_modules.count(native_module));
930     info->native_modules.erase(native_module);
931     // If there are {WasmCode} objects of the deleted {NativeModule}
932     // outstanding to be logged in this isolate, remove them. Decrementing the
933     // ref count is not needed, since the {NativeModule} dies anyway.
934     size_t remaining = info->code_to_log.size();
935     if (remaining > 0) {
936       for (size_t i = 0; i < remaining; ++i) {
937         while (i < remaining &&
938                info->code_to_log[i]->native_module() == native_module) {
939           // Move the last remaining item to this slot (this can be the same
940           // as {i}, which is OK).
941           info->code_to_log[i] = info->code_to_log[--remaining];
942         }
943       }
944       info->code_to_log.resize(remaining);
945     }
946   }
947   // If there is a GC running which has references to code contained in the
948   // deleted {NativeModule}, remove those references.
949   if (current_gc_info_) {
950     for (auto it = current_gc_info_->dead_code.begin(),
951               end = current_gc_info_->dead_code.end();
952          it != end;) {
953       if ((*it)->native_module() == native_module) {
954         it = current_gc_info_->dead_code.erase(it);
955       } else {
956         ++it;
957       }
958     }
959     TRACE_CODE_GC("Native module %p died, reducing dead code objects to %zu.\n",
960                   native_module, current_gc_info_->dead_code.size());
961   }
962   native_module_cache_.Erase(native_module);
963   native_modules_.erase(it);
964 }
965 
966 namespace {
967 class SampleTopTierCodeSizeTask : public CancelableTask {
968  public:
969   SampleTopTierCodeSizeTask(Isolate* isolate,
970                             std::weak_ptr<NativeModule> native_module)
971       : CancelableTask(isolate),
972         isolate_(isolate),
973         native_module_(std::move(native_module)) {}
974 
975   void RunInternal() override {
976     if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
977       native_module->SampleCodeSize(isolate_->counters(),
978                                     NativeModule::kAfterTopTier);
979     }
980   }
981 
982  private:
983   Isolate* const isolate_;
984   const std::weak_ptr<NativeModule> native_module_;
985 };
986 }  // namespace
987 
988 void WasmEngine::SampleTopTierCodeSizeInAllIsolates(
989     const std::shared_ptr<NativeModule>& native_module) {
990   base::MutexGuard lock(&mutex_);
991   DCHECK_EQ(1, native_modules_.count(native_module.get()));
992   for (Isolate* isolate : native_modules_[native_module.get()]->isolates) {
993     DCHECK_EQ(1, isolates_.count(isolate));
994     IsolateInfo* info = isolates_[isolate].get();
995     info->foreground_task_runner->PostTask(
996         std::make_unique<SampleTopTierCodeSizeTask>(isolate, native_module));
997   }
998 }
999 
1000 void WasmEngine::ReportLiveCodeForGC(Isolate* isolate,
1001                                      Vector<WasmCode*> live_code) {
1002   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "ReportLiveCodeForGC");
1003   TRACE_CODE_GC("Isolate %d reporting %zu live code objects.\n", isolate->id(),
1004                 live_code.size());
1005   base::MutexGuard guard(&mutex_);
1006   // This report might come in late (note that we trigger both a stack guard and
1007   // a foreground task). In that case, ignore it.
1008   if (current_gc_info_ == nullptr) return;
1009   if (!RemoveIsolateFromCurrentGC(isolate)) return;
1010   isolate->counters()->wasm_module_num_triggered_code_gcs()->AddSample(
1011       current_gc_info_->gc_sequence_index);
1012   for (WasmCode* code : live_code) current_gc_info_->dead_code.erase(code);
1013   PotentiallyFinishCurrentGC();
1014 }
1015 
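// Scan the isolate's stack and report all wasm code found on it as live.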
1016 void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
1017   wasm::WasmCodeRefScope code_ref_scope;
1018   std::unordered_set<wasm::WasmCode*> live_wasm_code;
1019   for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
1020     StackFrame* const frame = it.frame();
1021     if (frame->type() != StackFrame::WASM_COMPILED) continue;
1022     live_wasm_code.insert(WasmCompiledFrame::cast(frame)->wasm_code());
1023   }
1024 
1025   CheckNoArchivedThreads(isolate);
1026 
1027   ReportLiveCodeForGC(isolate,
1028                       OwnedVector<WasmCode*>::Of(live_wasm_code).as_vector());
1029 }
1030 
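// Mark {code} as potentially dead. Returns false if it was already marked or
// is fully dead. With --wasm-code-gc, a GC is triggered (or a follow-up GC is
// scheduled) once the accumulated potentially dead code exceeds the limit
// computed below.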
1031 bool WasmEngine::AddPotentiallyDeadCode(WasmCode* code) {
1032   base::MutexGuard guard(&mutex_);
1033   auto it = native_modules_.find(code->native_module());
1034   DCHECK_NE(native_modules_.end(), it);
1035   NativeModuleInfo* info = it->second.get();
1036   if (info->dead_code.count(code)) return false;  // Code is already dead.
1037   auto added = info->potentially_dead_code.insert(code);
1038   if (!added.second) return false;  // An entry already existed.
1039   new_potentially_dead_code_size_ += code->instructions().size();
1040   if (FLAG_wasm_code_gc) {
1041     // Trigger a GC if 64kB plus 10% of committed code are potentially dead.
1042     size_t dead_code_limit =
1043         FLAG_stress_wasm_code_gc
1044             ? 0
1045             : 64 * KB + code_manager_.committed_code_space() / 10;
1046     if (new_potentially_dead_code_size_ > dead_code_limit) {
1047       bool inc_gc_count =
1048           info->num_code_gcs_triggered < std::numeric_limits<int8_t>::max();
1049       if (current_gc_info_ == nullptr) {
1050         if (inc_gc_count) ++info->num_code_gcs_triggered;
1051         TRACE_CODE_GC(
1052             "Triggering GC (potentially dead: %zu bytes; limit: %zu bytes).\n",
1053             new_potentially_dead_code_size_, dead_code_limit);
1054         TriggerGC(info->num_code_gcs_triggered);
1055       } else if (current_gc_info_->next_gc_sequence_index == 0) {
1056         if (inc_gc_count) ++info->num_code_gcs_triggered;
1057         TRACE_CODE_GC(
1058             "Scheduling another GC after the current one (potentially dead: "
1059             "%zu bytes; limit: %zu bytes).\n",
1060             new_potentially_dead_code_size_, dead_code_limit);
1061         current_gc_info_->next_gc_sequence_index = info->num_code_gcs_triggered;
1062         DCHECK_NE(0, current_gc_info_->next_gc_sequence_index);
1063       }
1064     }
1065   }
1066   return true;
1067 }
1068 
1069 void WasmEngine::FreeDeadCode(const DeadCodeMap& dead_code) {
1070   base::MutexGuard guard(&mutex_);
1071   FreeDeadCodeLocked(dead_code);
1072 }
1073 
1074 void WasmEngine::FreeDeadCodeLocked(const DeadCodeMap& dead_code) {
1075   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "FreeDeadCode");
1076   DCHECK(!mutex_.TryLock());
1077   for (auto& dead_code_entry : dead_code) {
1078     NativeModule* native_module = dead_code_entry.first;
1079     const std::vector<WasmCode*>& code_vec = dead_code_entry.second;
1080     DCHECK_EQ(1, native_modules_.count(native_module));
1081     auto* info = native_modules_[native_module].get();
1082     TRACE_CODE_GC("Freeing %zu code object%s of module %p.\n", code_vec.size(),
1083                   code_vec.size() == 1 ? "" : "s", native_module);
1084     for (WasmCode* code : code_vec) {
1085       DCHECK_EQ(1, info->dead_code.count(code));
1086       info->dead_code.erase(code);
1087     }
1088     native_module->FreeCode(VectorOf(code_vec));
1089   }
1090 }
1091 
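// Start a new code GC with the given sequence index: snapshot all potentially
// dead code into {dead_code} and request a live-code report from every
// isolate that uses an affected module, via a foreground task plus a
// stack-guard interrupt.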
1092 void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
1093   DCHECK_NULL(current_gc_info_);
1094   DCHECK(FLAG_wasm_code_gc);
1095   new_potentially_dead_code_size_ = 0;
1096   current_gc_info_.reset(new CurrentGCInfo(gc_sequence_index));
1097   // Add all potentially dead code to this GC, and trigger a GC task in each
1098   // isolate.
1099   for (auto& entry : native_modules_) {
1100     NativeModuleInfo* info = entry.second.get();
1101     if (info->potentially_dead_code.empty()) continue;
1102     for (auto* isolate : native_modules_[entry.first]->isolates) {
1103       auto& gc_task = current_gc_info_->outstanding_isolates[isolate];
1104       if (!gc_task) {
1105         auto new_task = std::make_unique<WasmGCForegroundTask>(isolate);
1106         gc_task = new_task.get();
1107         DCHECK_EQ(1, isolates_.count(isolate));
1108         isolates_[isolate]->foreground_task_runner->PostTask(
1109             std::move(new_task));
1110       }
1111       isolate->stack_guard()->RequestWasmCodeGC();
1112     }
1113     for (WasmCode* code : info->potentially_dead_code) {
1114       current_gc_info_->dead_code.insert(code);
1115     }
1116   }
1117   TRACE_CODE_GC(
1118       "Starting GC. Total number of potentially dead code objects: %zu\n",
1119       current_gc_info_->dead_code.size());
1120 }
1121 
1122 bool WasmEngine::RemoveIsolateFromCurrentGC(Isolate* isolate) {
1123   DCHECK(!mutex_.TryLock());
1124   DCHECK_NOT_NULL(current_gc_info_);
1125   return current_gc_info_->outstanding_isolates.erase(isolate) != 0;
1126 }
1127 
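// Finish the current code GC if no isolates are outstanding any more: all code
// still in {dead_code} is moved to the per-module dead sets and its ref count
// dropped; code whose count reaches zero is freed. A queued follow-up GC is
// started afterwards if one was requested.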
1128 void WasmEngine::PotentiallyFinishCurrentGC() {
1129   DCHECK(!mutex_.TryLock());
1130   TRACE_CODE_GC(
1131       "Remaining dead code objects: %zu; outstanding isolates: %zu.\n",
1132       current_gc_info_->dead_code.size(),
1133       current_gc_info_->outstanding_isolates.size());
1134 
1135   // If there are more outstanding isolates, return immediately.
1136   if (!current_gc_info_->outstanding_isolates.empty()) return;
1137 
1138   // All remaining code in {current_gc_info->dead_code} is really dead.
1139   // Move it from the set of potentially dead code to the set of dead code,
1140   // and decrement its ref count.
1141   size_t num_freed = 0;
1142   DeadCodeMap dead_code;
1143   for (WasmCode* code : current_gc_info_->dead_code) {
1144     DCHECK_EQ(1, native_modules_.count(code->native_module()));
1145     auto* native_module_info = native_modules_[code->native_module()].get();
1146     DCHECK_EQ(1, native_module_info->potentially_dead_code.count(code));
1147     native_module_info->potentially_dead_code.erase(code);
1148     DCHECK_EQ(0, native_module_info->dead_code.count(code));
1149     native_module_info->dead_code.insert(code);
1150     if (code->DecRefOnDeadCode()) {
1151       dead_code[code->native_module()].push_back(code);
1152       ++num_freed;
1153     }
1154   }
1155 
1156   FreeDeadCodeLocked(dead_code);
1157 
1158   TRACE_CODE_GC("Found %zu dead code objects, freed %zu.\n",
1159                 current_gc_info_->dead_code.size(), num_freed);
1160   USE(num_freed);
1161 
1162   int8_t next_gc_sequence_index = current_gc_info_->next_gc_sequence_index;
1163   current_gc_info_.reset();
1164   if (next_gc_sequence_index != 0) TriggerGC(next_gc_sequence_index);
1165 }
1166 
1167 namespace {
1168 
1169 DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<WasmEngine>,
1170                                 GetSharedWasmEngine)
1171 
1172 }  // namespace
1173 
1174 // static
1175 void WasmEngine::InitializeOncePerProcess() {
1176   *GetSharedWasmEngine() = std::make_shared<WasmEngine>();
1177 }
1178 
1179 // static
1180 void WasmEngine::GlobalTearDown() {
1181   GetSharedWasmEngine()->reset();
1182 }
1183 
1184 // static
1185 std::shared_ptr<WasmEngine> WasmEngine::GetWasmEngine() {
1186   return *GetSharedWasmEngine();
1187 }
1188 
1189 // {max_initial_mem_pages} is declared in wasm-limits.h.
1190 uint32_t max_initial_mem_pages() {
1191   STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
1192   return std::min(uint32_t{kV8MaxWasmMemoryPages}, FLAG_wasm_max_mem_pages);
1193 }
1194 
1195 uint32_t max_maximum_mem_pages() {
1196   STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
1197   return std::min(uint32_t{kV8MaxWasmMemoryPages},
1198                   FLAG_wasm_max_mem_pages_growth);
1199 }
1200 
1201 // {max_table_init_entries} is declared in wasm-limits.h.
1202 uint32_t max_table_init_entries() {
1203   return std::min(uint32_t{kV8MaxWasmTableInitEntries},
1204                   FLAG_wasm_max_table_size);
1205 }
1206 
1207 #undef TRACE_CODE_GC
1208 
1209 }  // namespace wasm
1210 }  // namespace internal
1211 }  // namespace v8
1212