// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/wasm-code-manager.h"

#include <iomanip>

#include "src/assembler-inl.h"
#include "src/base/atomic-utils.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/disassembler.h"
#include "src/globals.h"
#include "src/macro-assembler-inl.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"

#define TRACE_HEAP(...)                                   \
  do {                                                    \
    if (FLAG_wasm_trace_native_heap) PrintF(__VA_ARGS__); \
  } while (false)

namespace v8 {
namespace internal {
namespace wasm {

namespace {

// Binary predicate to perform lookups in {NativeModule::owned_code_} with a
// given address into a code object. Use with {std::upper_bound} for example.
struct WasmCodeUniquePtrComparator {
  bool operator()(Address pc, const std::unique_ptr<WasmCode>& code) const {
    DCHECK_NE(kNullAddress, pc);
    DCHECK_NOT_NULL(code);
    return pc < code->instruction_start();
  }
};

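// On 64-bit platforms, calls from wasm code to heap {Code} objects go through
// per-module trampolines (see {NativeModule::CreateTrampolineTo} below): each
// trampoline loads the absolute target address into a scratch register and
// performs an indirect jump, so the target may live anywhere in the address
// space. These platforms also fix the module's code reservation up front,
// hence {kModuleCanAllocateMoreMemory} is false.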
#if V8_TARGET_ARCH_X64
#define __ masm->
constexpr bool kModuleCanAllocateMoreMemory = false;

void GenerateJumpTrampoline(MacroAssembler* masm, Address target) {
  __ movq(kScratchRegister, static_cast<uint64_t>(target));
  __ jmp(kScratchRegister);
}
#undef __
#elif V8_TARGET_ARCH_S390X
#define __ masm->
constexpr bool kModuleCanAllocateMoreMemory = false;

void GenerateJumpTrampoline(MacroAssembler* masm, Address target) {
  __ mov(ip, Operand(bit_cast<intptr_t, Address>(target)));
  __ b(ip);
}
#undef __
#elif V8_TARGET_ARCH_ARM64
#define __ masm->
constexpr bool kModuleCanAllocateMoreMemory = false;

void GenerateJumpTrampoline(MacroAssembler* masm, Address target) {
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.AcquireX();
  __ Mov(scratch, reinterpret_cast<uint64_t>(target));
  __ Br(scratch);
}
#undef __
#else
const bool kModuleCanAllocateMoreMemory = true;
#endif

void RelocateCode(WasmCode* code, const WasmCode* orig,
                  WasmCode::FlushICache flush_icache) {
  intptr_t delta = code->instruction_start() - orig->instruction_start();
  for (RelocIterator it(code->instructions(), code->reloc_info(),
                        code->constant_pool(), RelocInfo::kApplyMask);
       !it.done(); it.next()) {
    it.rinfo()->apply(delta);
  }
  if (flush_icache) {
    Assembler::FlushICache(code->instructions().start(),
                           code->instructions().size());
  }
}

}  // namespace

DisjointAllocationPool::DisjointAllocationPool(Address start, Address end) {
  ranges_.push_back({start, end});
}

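// Merges the (sorted, disjoint) ranges of {other} into this pool, coalescing
// ranges that become adjacent. A small worked example with illustrative
// addresses: merging {[0x2000, 0x3000)} into {[0x1000, 0x2000),
// [0x4000, 0x5000)} yields {[0x1000, 0x3000), [0x4000, 0x5000)}.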
void DisjointAllocationPool::Merge(DisjointAllocationPool&& other) {
  auto dest_it = ranges_.begin();
  auto dest_end = ranges_.end();

  for (auto src_it = other.ranges_.begin(), src_end = other.ranges_.end();
       src_it != src_end;) {
    if (dest_it == dest_end) {
      // Everything else coming from src will be inserted
      // at the back of ranges_ from now on.
      ranges_.push_back(*src_it);
      ++src_it;
      continue;
    }
    // Src is before or adjacent (from below) to dest. Insert or merge, and
    // advance just src.
    if (dest_it->first >= src_it->second) {
      if (dest_it->first == src_it->second) {
        dest_it->first = src_it->first;
      } else {
        ranges_.insert(dest_it, {src_it->first, src_it->second});
      }
      ++src_it;
      continue;
    }
    // Src is strictly after dest. Skip over this dest.
    if (dest_it->second < src_it->first) {
      ++dest_it;
      continue;
    }
    // Src is adjacent from above. Merge and advance
    // just src, because the next src, if any, is bound to be
    // strictly above the newly-formed range.
    DCHECK_EQ(dest_it->second, src_it->first);
    dest_it->second = src_it->second;
    ++src_it;
    // Now that we merged, maybe this new range is adjacent to
    // the next. Since we assume src to have come from the
    // same original memory pool, it follows that the next src
    // must be above or adjacent to the new bubble.
    auto next_dest = dest_it;
    ++next_dest;
    if (next_dest != dest_end && dest_it->second == next_dest->first) {
      dest_it->second = next_dest->second;
      ranges_.erase(next_dest);
    }

    // src_it now points at the next src range, if any.
    DCHECK_IMPLIES(src_it != src_end, src_it->first >= dest_it->second);
  }
}

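// Extracts {size} bytes from the front of the pool and returns them as a new
// pool. In {kContiguous} mode the bytes must come from a single range; in the
// other mode they may be stitched together from several ranges. On failure,
// anything already carved out is merged back and an empty pool is returned.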
DisjointAllocationPool DisjointAllocationPool::Extract(size_t size,
                                                       ExtractionMode mode) {
  DisjointAllocationPool ret;
  for (auto it = ranges_.begin(), end = ranges_.end(); it != end;) {
    auto current = it;
    ++it;
    DCHECK_LT(current->first, current->second);
    size_t current_size = static_cast<size_t>(current->second - current->first);
    if (size == current_size) {
      ret.ranges_.push_back(*current);
      ranges_.erase(current);
      return ret;
    }
    if (size < current_size) {
      ret.ranges_.push_back({current->first, current->first + size});
      current->first += size;
      DCHECK(current->first < current->second);
      return ret;
    }
    if (mode != kContiguous) {
      size -= current_size;
      ret.ranges_.push_back(*current);
      ranges_.erase(current);
    }
  }
  if (size > 0) {
    Merge(std::move(ret));
    return {};
  }
  return ret;
}

Address WasmCode::constant_pool() const {
  if (FLAG_enable_embedded_constant_pool) {
    if (constant_pool_offset_ < instructions().size()) {
      return instruction_start() + constant_pool_offset_;
    }
  }
  return kNullAddress;
}

size_t WasmCode::trap_handler_index() const {
  CHECK(HasTrapHandlerIndex());
  return static_cast<size_t>(trap_handler_index_);
}

void WasmCode::set_trap_handler_index(size_t value) {
  trap_handler_index_ = value;
}

void WasmCode::RegisterTrapHandlerData() {
  if (kind() != wasm::WasmCode::kFunction) return;
  if (HasTrapHandlerIndex()) return;

  Address base = instruction_start();

  size_t size = instructions().size();
  const int index =
      RegisterHandlerData(base, size, protected_instructions().size(),
                          protected_instructions().data());

  // TODO(eholk): if index is negative, fail.
  CHECK_LE(0, index);
  set_trap_handler_index(static_cast<size_t>(index));
}

bool WasmCode::HasTrapHandlerIndex() const { return trap_handler_index_ >= 0; }

void WasmCode::ResetTrapHandlerIndex() { trap_handler_index_ = -1; }

bool WasmCode::ShouldBeLogged(Isolate* isolate) {
  return isolate->logger()->is_listening_to_code_events() ||
         isolate->is_profiling() || FLAG_print_wasm_code || FLAG_print_code;
}

void WasmCode::LogCode(Isolate* isolate) const {
  DCHECK(ShouldBeLogged(isolate));
  if (native_module()->shared_module_data() && index_.IsJust()) {
    uint32_t index = this->index();
    Handle<WasmSharedModuleData> shared_handle(
        native_module()->shared_module_data(), isolate);
    int name_length;
    Handle<String> name(
        WasmSharedModuleData::GetFunctionName(isolate, shared_handle, index));
    auto cname =
        name->ToCString(AllowNullsFlag::DISALLOW_NULLS,
                        RobustnessFlag::ROBUST_STRING_TRAVERSAL, &name_length);
    PROFILE(isolate,
            CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this,
                            {cname.get(), static_cast<size_t>(name_length)}));

#ifdef ENABLE_DISASSEMBLER
    if (FLAG_print_code || FLAG_print_wasm_code) {
      // TODO(wasm): Use proper log files, here and elsewhere.
      OFStream os(stdout);
      os << "--- Wasm " << (is_liftoff() ? "liftoff" : "turbofan")
         << " code ---\n";
      this->Disassemble(cname.get(), isolate, os);
      os << "--- End code ---\n";
    }
#endif

    if (!source_positions().is_empty()) {
      LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
                                                         source_positions()));
    }
  }
}

void WasmCode::Print(Isolate* isolate) const {
  OFStream os(stdout);
  Disassemble(nullptr, isolate, os);
}

void WasmCode::Disassemble(const char* name, Isolate* isolate, std::ostream& os,
                           Address current_pc) const {
  if (name) os << "name: " << name << "\n";
  if (index_.IsJust()) os << "index: " << index_.FromJust() << "\n";
  os << "kind: " << GetWasmCodeKindAsString(kind_) << "\n";
  os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
  size_t body_size = instructions().size();
  os << "Body (size = " << body_size << ")\n";

#ifdef ENABLE_DISASSEMBLER

  size_t instruction_size = body_size;
  if (constant_pool_offset_ && constant_pool_offset_ < instruction_size) {
    instruction_size = constant_pool_offset_;
  }
  if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
    instruction_size = safepoint_table_offset_;
  }
  DCHECK_LT(0, instruction_size);
  os << "Instructions (size = " << instruction_size << ")\n";
  // TODO(mtrofin): rework the dependency on isolate and code in
  // Disassembler::Decode.
  Disassembler::Decode(isolate, &os, instructions().start(),
                       instructions().start() + instruction_size,
                       CodeReference(this), current_pc);
  os << "\n";

  if (!source_positions().is_empty()) {
    os << "Source positions:\n pc offset position\n";
    for (SourcePositionTableIterator it(source_positions()); !it.done();
         it.Advance()) {
      os << std::setw(10) << std::hex << it.code_offset() << std::dec
         << std::setw(10) << it.source_position().ScriptOffset()
         << (it.is_statement() ? " statement" : "") << "\n";
    }
    os << "\n";
  }

  os << "RelocInfo (size = " << reloc_size_ << ")\n";
  for (RelocIterator it(instructions(), reloc_info(), constant_pool());
       !it.done(); it.next()) {
    it.rinfo()->Print(isolate, os);
  }
  os << "\n";
#endif  // ENABLE_DISASSEMBLER
}

const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
  switch (kind) {
    case WasmCode::kFunction:
      return "wasm function";
    case WasmCode::kWasmToJsWrapper:
      return "wasm-to-js";
    case WasmCode::kLazyStub:
      return "lazy-compile";
    case WasmCode::kInterpreterEntry:
      return "interpreter entry";
    case WasmCode::kTrampoline:
      return "trampoline";
  }
  return "unknown kind";
}

WasmCode::~WasmCode() {
  // Depending on finalizer order, the WasmCompiledModule finalizer may be
  // called first, in which case we release the trap handler data here. If the
  // InstanceFinalizer is called first, the handlers will be cleared in Reset,
  // as if the NativeModule may be used again later (which would be the case
  // if the WasmCompiledModule were still held by a WasmModuleObject).
  if (HasTrapHandlerIndex()) {
    CHECK_LT(trap_handler_index(),
             static_cast<size_t>(std::numeric_limits<int>::max()));
    trap_handler::ReleaseHandlerData(static_cast<int>(trap_handler_index()));
  }
}

base::AtomicNumber<size_t> NativeModule::next_id_;

NativeModule::NativeModule(uint32_t num_functions, uint32_t num_imports,
                           bool can_request_more, VirtualMemory* code_space,
                           WasmCodeManager* code_manager, ModuleEnv& env)
    : instance_id(next_id_.Increment(1)),
      code_table_(num_functions),
      num_imported_functions_(num_imports),
      compilation_state_(NewCompilationState(
          reinterpret_cast<Isolate*>(code_manager->isolate_), env)),
      free_code_space_(code_space->address(), code_space->end()),
      wasm_code_manager_(code_manager),
      can_request_more_memory_(can_request_more),
      use_trap_handler_(env.use_trap_handler) {
  VirtualMemory my_mem;
  owned_code_space_.push_back(my_mem);
  owned_code_space_.back().TakeControl(code_space);
  owned_code_.reserve(num_functions);
}

void NativeModule::ResizeCodeTableForTesting(size_t num_functions,
                                             size_t max_functions) {
  DCHECK_LE(num_functions, max_functions);
  if (num_imported_functions_ == num_functions) {
    // For some tests, the code table might have been initialized to store
    // a number of imported functions on creation. If that is the case,
    // we need to retroactively reserve the space.
    DCHECK_EQ(code_table_.capacity(), num_imported_functions_);
    DCHECK_EQ(code_table_.size(), num_imported_functions_);
    DCHECK_EQ(num_functions, 1);
    code_table_.reserve(max_functions);
  } else {
    DCHECK_GT(num_functions, function_count());
    if (code_table_.capacity() == 0) {
      code_table_.reserve(max_functions);
    }
    DCHECK_EQ(code_table_.capacity(), max_functions);
    code_table_.resize(num_functions);
  }
}

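// Copies {orig_instructions} into freshly allocated space in the module's
// code region, wraps the copy in a new {WasmCode} object, and records it in
// {owned_code_}, which is kept sorted by instruction start address so that
// {Lookup} can binary-search it.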
WasmCode* NativeModule::AddOwnedCode(
    Vector<const byte> orig_instructions,
    std::unique_ptr<const byte[]> reloc_info, size_t reloc_size,
    std::unique_ptr<const byte[]> source_pos, size_t source_pos_size,
    Maybe<uint32_t> index, WasmCode::Kind kind, size_t constant_pool_offset,
    uint32_t stack_slots, size_t safepoint_table_offset,
    size_t handler_table_offset,
    std::unique_ptr<ProtectedInstructions> protected_instructions,
    WasmCode::Tier tier, WasmCode::FlushICache flush_icache) {
  // Both allocation and insertion in owned_code_ happen in the same critical
  // section, thus ensuring owned_code_'s elements are rarely if ever moved.
  base::LockGuard<base::Mutex> lock(&allocation_mutex_);
  Address executable_buffer = AllocateForCode(orig_instructions.size());
  if (executable_buffer == kNullAddress) {
    V8::FatalProcessOutOfMemory(nullptr, "NativeModule::AddOwnedCode");
    UNREACHABLE();
  }
  memcpy(reinterpret_cast<void*>(executable_buffer), orig_instructions.start(),
         orig_instructions.size());
  std::unique_ptr<WasmCode> code(new WasmCode(
      {reinterpret_cast<byte*>(executable_buffer), orig_instructions.size()},
      std::move(reloc_info), reloc_size, std::move(source_pos), source_pos_size,
      this, index, kind, constant_pool_offset, stack_slots,
      safepoint_table_offset, handler_table_offset,
      std::move(protected_instructions), tier));
  WasmCode* ret = code.get();

  // TODO(mtrofin): We allocate in increasing address order, and
  // even if we end up with segmented memory, we may end up only with a few
  // large moves - if, for example, a new segment is below the current ones.
  auto insert_before =
      std::upper_bound(owned_code_.begin(), owned_code_.end(),
                       ret->instruction_start(), WasmCodeUniquePtrComparator());
  owned_code_.insert(insert_before, std::move(code));
  if (flush_icache) {
    Assembler::FlushICache(ret->instructions().start(),
                           ret->instructions().size());
  }
  return ret;
}

WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
                                    uint32_t index) {
  WasmCode* ret = AddAnonymousCode(code, kind);
  code_table_[index] = ret;
  ret->index_ = Just(index);
  return ret;
}

WasmCode* NativeModule::AddInterpreterEntry(Handle<Code> code, uint32_t index) {
  WasmCode* ret = AddAnonymousCode(code, WasmCode::kInterpreterEntry);
  ret->index_ = Just(index);
  return ret;
}

void NativeModule::SetLazyBuiltin(Handle<Code> code) {
  WasmCode* lazy_builtin = AddAnonymousCode(code, WasmCode::kLazyStub);
  for (uint32_t i = num_imported_functions(), e = function_count(); i < e;
       ++i) {
    code_table_[i] = lazy_builtin;
  }
}

WasmSharedModuleData* NativeModule::shared_module_data() const {
  DCHECK_NOT_NULL(shared_module_data_);
  return *shared_module_data_;
}

void NativeModule::SetSharedModuleData(Handle<WasmSharedModuleData> shared) {
  DCHECK_NULL(shared_module_data_);
  shared_module_data_ =
      shared->GetIsolate()->global_handles()->Create(*shared).location();
  GlobalHandles::MakeWeak(reinterpret_cast<Object***>(&shared_module_data_));
}

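// Copies a heap {Code} object (e.g. a builtin or stub) into the module's code
// space. Code targets in the copy are redirected to local trampolines via
// {GetLocalAddressFor}, embedded objects must be immovable, and all other
// relocatable entries are shifted by the distance between the old and new
// instruction start.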
WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
                                         WasmCode::Kind kind) {
  std::unique_ptr<byte[]> reloc_info;
  if (code->relocation_size() > 0) {
    reloc_info.reset(new byte[code->relocation_size()]);
    memcpy(reloc_info.get(), code->relocation_start(), code->relocation_size());
  }
  std::unique_ptr<byte[]> source_pos;
  Handle<ByteArray> source_pos_table(code->SourcePositionTable());
  if (source_pos_table->length() > 0) {
    source_pos.reset(new byte[source_pos_table->length()]);
    source_pos_table->copy_out(0, source_pos.get(), source_pos_table->length());
  }
  std::unique_ptr<ProtectedInstructions> protected_instructions(
      new ProtectedInstructions(0));
  Vector<const byte> orig_instructions(
      reinterpret_cast<byte*>(code->InstructionStart()),
      static_cast<size_t>(code->InstructionSize()));
  int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
  int safepoint_table_offset =
      code->has_safepoint_info() ? code->safepoint_table_offset() : 0;
  WasmCode* ret =
      AddOwnedCode(orig_instructions,      // instructions
                   std::move(reloc_info),  // reloc_info
                   static_cast<size_t>(code->relocation_size()),  // reloc_size
                   std::move(source_pos),  // source positions
                   static_cast<size_t>(source_pos_table->length()),
                   Nothing<uint32_t>(),           // index
                   kind,                          // kind
                   code->constant_pool_offset(),  // constant_pool_offset
                   stack_slots,                   // stack_slots
                   safepoint_table_offset,        // safepoint_table_offset
                   code->handler_table_offset(),  // handler_table_offset
                   std::move(protected_instructions),  // protected_instructions
                   WasmCode::kOther,                   // tier
                   WasmCode::kNoFlushICache);          // flush_icache
  intptr_t delta = ret->instruction_start() - code->InstructionStart();
  int mask = RelocInfo::kApplyMask | RelocInfo::kCodeTargetMask |
             RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);

  RelocIterator orig_it(*code, mask);
  for (RelocIterator it(ret->instructions(), ret->reloc_info(),
                        ret->constant_pool(), mask);
       !it.done(); it.next(), orig_it.next()) {
    if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
      Code* call_target =
          Code::GetCodeFromTargetAddress(orig_it.rinfo()->target_address());
      it.rinfo()->set_target_address(GetLocalAddressFor(handle(call_target)),
                                     SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
    } else {
      if (RelocInfo::IsEmbeddedObject(it.rinfo()->rmode())) {
        DCHECK(Heap::IsImmovable(it.rinfo()->target_object()));
      } else {
        it.rinfo()->apply(delta);
      }
    }
  }
  // Flush the i-cache here instead of in AddOwnedCode, to include the changes
  // made while iterating over the RelocInfo above.
  Assembler::FlushICache(ret->instructions().start(),
                         ret->instructions().size());
  if (FLAG_print_wasm_code) {
    // TODO(mstarzinger): don't need the isolate here.
    ret->Print(code->GetIsolate());
  }
  return ret;
}

WasmCode* NativeModule::AddCode(
    const CodeDesc& desc, uint32_t frame_slots, uint32_t index,
    size_t safepoint_table_offset, size_t handler_table_offset,
    std::unique_ptr<ProtectedInstructions> protected_instructions,
    Handle<ByteArray> source_pos_table, WasmCode::Tier tier) {
  std::unique_ptr<byte[]> reloc_info;
  if (desc.reloc_size) {
    reloc_info.reset(new byte[desc.reloc_size]);
    memcpy(reloc_info.get(), desc.buffer + desc.buffer_size - desc.reloc_size,
           desc.reloc_size);
  }
  std::unique_ptr<byte[]> source_pos;
  if (source_pos_table->length() > 0) {
    source_pos.reset(new byte[source_pos_table->length()]);
    source_pos_table->copy_out(0, source_pos.get(), source_pos_table->length());
  }
  TurboAssembler* origin = reinterpret_cast<TurboAssembler*>(desc.origin);
  WasmCode* ret = AddOwnedCode(
      {desc.buffer, static_cast<size_t>(desc.instr_size)},
      std::move(reloc_info), static_cast<size_t>(desc.reloc_size),
      std::move(source_pos), static_cast<size_t>(source_pos_table->length()),
      Just(index), WasmCode::kFunction,
      desc.instr_size - desc.constant_pool_size, frame_slots,
      safepoint_table_offset, handler_table_offset,
      std::move(protected_instructions), tier, WasmCode::kNoFlushICache);

  code_table_[index] = ret;
  // TODO(mtrofin): this is a copy and paste from Code::CopyFrom.
  int mode_mask = RelocInfo::kCodeTargetMask |
                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
                  RelocInfo::kApplyMask;
  // Needed to find target_object and runtime_entry on X64.

  AllowDeferredHandleDereference embedding_raw_address;
  for (RelocIterator it(ret->instructions(), ret->reloc_info(),
                        ret->constant_pool(), mode_mask);
       !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (mode == RelocInfo::EMBEDDED_OBJECT) {
      Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
      DCHECK(p->IsUndefined(p->GetIsolate()) || p->IsNull(p->GetIsolate()));
      it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
    } else if (RelocInfo::IsCodeTarget(mode)) {
      // Rewrite code handles to direct pointers to the first instruction in
      // the code object.
      Handle<Object> p = it.rinfo()->target_object_handle(origin);
      Code* code = Code::cast(*p);
      it.rinfo()->set_target_address(GetLocalAddressFor(handle(code)),
                                     SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
    } else if (RelocInfo::IsRuntimeEntry(mode)) {
      Address p = it.rinfo()->target_runtime_entry(origin);
      it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER,
                                           SKIP_ICACHE_FLUSH);
    } else {
      intptr_t delta = ret->instructions().start() - desc.buffer;
      it.rinfo()->apply(delta);
    }
  }
  // Flush the i-cache here instead of in AddOwnedCode, to include the changes
  // made while iterating over the RelocInfo above.
  Assembler::FlushICache(ret->instructions().start(),
                         ret->instructions().size());
  return ret;
}

#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
  MacroAssembler masm(code->GetIsolate(), nullptr, 0, CodeObjectRequired::kNo);
  Address dest = code->raw_instruction_start();
  GenerateJumpTrampoline(&masm, dest);
  CodeDesc code_desc;
  masm.GetCode(nullptr, &code_desc);
  Vector<const byte> instructions(code_desc.buffer,
                                  static_cast<size_t>(code_desc.instr_size));
  WasmCode* wasm_code = AddOwnedCode(instructions,           // instructions
                                     nullptr,                // reloc_info
                                     0,                      // reloc_size
                                     nullptr,                // source_pos
                                     0,                      // source_pos_size
                                     Nothing<uint32_t>(),    // index
                                     WasmCode::kTrampoline,  // kind
                                     0,    // constant_pool_offset
                                     0,    // stack_slots
                                     0,    // safepoint_table_offset
                                     0,    // handler_table_offset
                                     {},   // protected_instructions
                                     WasmCode::kOther,         // tier
                                     WasmCode::kFlushICache);  // flush_icache
  Address ret = wasm_code->instruction_start();
  trampolines_.emplace(std::make_pair(dest, ret));
  return ret;
}
#else
Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
  Address ret = code->raw_instruction_start();
  trampolines_.insert(std::make_pair(ret, ret));
  return ret;
}
#endif

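// Returns an address, inside this module's code space, that can be used to
// call the given (immovable) heap {Code} object. On the 64-bit platforms
// above this is a jump trampoline, created lazily and memoized in
// {trampolines_} keyed by the code object's instruction start; elsewhere the
// instruction start is used directly.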
Address NativeModule::GetLocalAddressFor(Handle<Code> code) {
  DCHECK(Heap::IsImmovable(*code));

  Address index = code->raw_instruction_start();
  auto trampoline_iter = trampolines_.find(index);
  if (trampoline_iter == trampolines_.end()) {
    return CreateTrampolineTo(code);
  } else {
    return trampoline_iter->second;
  }
}

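// Carves {size} bytes (rounded up to kCodeAlignment) out of the free code
// space. If the pool is exhausted and the module may grow, another chunk of
// virtual memory is reserved and merged in. Pages backing the returned region
// are committed lazily here; only the not-yet-committed tail needs a Commit
// call.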
Address NativeModule::AllocateForCode(size_t size) {
  // This happens under a lock assumed by the caller.
  size = RoundUp(size, kCodeAlignment);
  DisjointAllocationPool mem = free_code_space_.Allocate(size);
  if (mem.IsEmpty()) {
    if (!can_request_more_memory_) return kNullAddress;

    Address hint = owned_code_space_.empty() ? kNullAddress
                                             : owned_code_space_.back().end();
    VirtualMemory empty_mem;
    owned_code_space_.push_back(empty_mem);
    VirtualMemory& new_mem = owned_code_space_.back();
    wasm_code_manager_->TryAllocate(size, &new_mem,
                                    reinterpret_cast<void*>(hint));
    if (!new_mem.IsReserved()) return kNullAddress;
    DisjointAllocationPool mem_pool(new_mem.address(), new_mem.end());
    wasm_code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);

    free_code_space_.Merge(std::move(mem_pool));
    mem = free_code_space_.Allocate(size);
    if (mem.IsEmpty()) return kNullAddress;
  }
  Address ret = mem.ranges().front().first;
  Address end = ret + size;
  Address commit_start = RoundUp(ret, AllocatePageSize());
  Address commit_end = RoundUp(end, AllocatePageSize());
  // {commit_start} will be either ret or the start of the next page.
  // {commit_end} will be the start of the page after the one in which
  // the allocation ends.
  // We start from an aligned start, and we know we allocated vmem in
  // page multiples.
  // We just need to commit what's not committed. The page in which we
  // start is already committed (or we start at the beginning of a page).
  // The end needs to be committed all through the end of the page.
  if (commit_start < commit_end) {
#if V8_OS_WIN
    // On Windows, we cannot commit a range that straddles different
    // reservations of virtual memory. Because we bump-allocate, and because,
    // if we need more memory, we append that memory at the end of the
    // owned_code_space_ list, we traverse that list in reverse order to find
    // the reservation(s) that guide how to chunk the region to commit.
    for (auto it = owned_code_space_.crbegin(),
              rend = owned_code_space_.crend();
         it != rend && commit_start < commit_end; ++it) {
      if (commit_end > it->end() || it->address() >= commit_end) continue;
      Address start = std::max(commit_start, it->address());
      size_t commit_size = static_cast<size_t>(commit_end - start);
      DCHECK(IsAligned(commit_size, AllocatePageSize()));
      if (!wasm_code_manager_->Commit(start, commit_size)) {
        return kNullAddress;
      }
      committed_code_space_ += commit_size;
      commit_end = start;
    }
#else
    size_t commit_size = static_cast<size_t>(commit_end - commit_start);
    DCHECK(IsAligned(commit_size, AllocatePageSize()));
    if (!wasm_code_manager_->Commit(commit_start, commit_size)) {
      return kNullAddress;
    }
    committed_code_space_ += commit_size;
#endif
  }
  DCHECK(IsAligned(ret, kCodeAlignment));
  allocated_code_space_.Merge(std::move(mem));
  TRACE_HEAP("ID: %zu. Code alloc: %p,+%zu\n", instance_id,
             reinterpret_cast<void*>(ret), size);
  return ret;
}

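// Finds the WasmCode object containing {pc} by binary-searching the
// {owned_code_} vector, which is kept sorted by instruction start address.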
WasmCode* NativeModule::Lookup(Address pc) {
  if (owned_code_.empty()) return nullptr;
  auto iter = std::upper_bound(owned_code_.begin(), owned_code_.end(), pc,
                               WasmCodeUniquePtrComparator());
  if (iter == owned_code_.begin()) return nullptr;
  --iter;
  WasmCode* candidate = iter->get();
  DCHECK_NOT_NULL(candidate);
  return candidate->contains(pc) ? candidate : nullptr;
}

Address NativeModule::GetCallTargetForFunction(uint32_t func_index) {
  // TODO(clemensh): Introduce a jump table and return a slot of it here.
  WasmCode* wasm_code = code(func_index);
  if (!wasm_code) return kNullAddress;
  if (wasm_code->kind() != WasmCode::kLazyStub) {
    return wasm_code->instruction_start();
  }

#if DEBUG
  auto num_imported_functions =
      shared_module_data()->module()->num_imported_functions;
  if (func_index < num_imported_functions) {
    DCHECK(!wasm_code->IsAnonymous());
  }
#endif
  if (!wasm_code->IsAnonymous()) {
    // If the function wasn't imported, its index should match.
    DCHECK_IMPLIES(func_index >= num_imported_functions,
                   func_index == wasm_code->index());
    return wasm_code->instruction_start();
  }
  if (!lazy_compile_stubs_.get()) {
    lazy_compile_stubs_ =
        base::make_unique<std::vector<WasmCode*>>(function_count());
  }
  WasmCode* cloned_code = lazy_compile_stubs_.get()->at(func_index);
  if (cloned_code == nullptr) {
    cloned_code = CloneCode(wasm_code, WasmCode::kNoFlushICache);
    RelocateCode(cloned_code, wasm_code, WasmCode::kFlushICache);
    cloned_code->index_ = Just(func_index);
    lazy_compile_stubs_.get()->at(func_index) = cloned_code;
  }
  DCHECK_EQ(func_index, cloned_code->index());
  return cloned_code->instruction_start();
}

WasmCode* NativeModule::CloneCode(const WasmCode* original_code,
                                  WasmCode::FlushICache flush_icache) {
  std::unique_ptr<byte[]> reloc_info;
  if (original_code->reloc_info().size() > 0) {
    reloc_info.reset(new byte[original_code->reloc_info().size()]);
    memcpy(reloc_info.get(), original_code->reloc_info().start(),
           original_code->reloc_info().size());
  }
  std::unique_ptr<byte[]> source_pos;
  if (original_code->source_positions().size() > 0) {
    source_pos.reset(new byte[original_code->source_positions().size()]);
    memcpy(source_pos.get(), original_code->source_positions().start(),
           original_code->source_positions().size());
  }
  DCHECK_EQ(0, original_code->protected_instructions().size());
  std::unique_ptr<ProtectedInstructions> protected_instructions(
      new ProtectedInstructions(0));
  WasmCode* ret = AddOwnedCode(
      original_code->instructions(), std::move(reloc_info),
      original_code->reloc_info().size(), std::move(source_pos),
      original_code->source_positions().size(), original_code->index_,
      original_code->kind(), original_code->constant_pool_offset_,
      original_code->stack_slots(), original_code->safepoint_table_offset_,
      original_code->handler_table_offset_, std::move(protected_instructions),
      original_code->tier(), flush_icache);
  if (!ret->IsAnonymous()) {
    code_table_[ret->index()] = ret;
  }
  return ret;
}

void NativeModule::UnpackAndRegisterProtectedInstructions() {
  for (uint32_t i = num_imported_functions(), e = function_count(); i < e;
       ++i) {
    WasmCode* wasm_code = code(i);
    if (wasm_code == nullptr) continue;
    wasm_code->RegisterTrapHandlerData();
  }
}

void NativeModule::ReleaseProtectedInstructions() {
  for (uint32_t i = num_imported_functions(), e = function_count(); i < e;
       ++i) {
    WasmCode* wasm_code = code(i);
    if (wasm_code->HasTrapHandlerIndex()) {
      CHECK_LT(wasm_code->trap_handler_index(),
               static_cast<size_t>(std::numeric_limits<int>::max()));
      trap_handler::ReleaseHandlerData(
          static_cast<int>(wasm_code->trap_handler_index()));
      wasm_code->ResetTrapHandlerIndex();
    }
  }
}

NativeModule::~NativeModule() {
  TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
  // Clear the handle at the beginning of the destructor to make it robust
  // against potential GCs in the rest of the destructor.
  if (shared_module_data_ != nullptr) {
    Isolate* isolate = shared_module_data()->GetIsolate();
    isolate->global_handles()->Destroy(
        reinterpret_cast<Object**>(shared_module_data_));
    shared_module_data_ = nullptr;
  }
  wasm_code_manager_->FreeNativeModule(this);
}

WasmCodeManager::WasmCodeManager(v8::Isolate* isolate, size_t max_committed)
    : isolate_(isolate) {
  DCHECK_LE(max_committed, kMaxWasmCodeMemory);
  remaining_uncommitted_code_space_.store(max_committed);
}

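// Commits {size} bytes of reserved code space at {start}, after charging the
// global uncommitted-space budget. Note that with
// --wasm-write-protect-code-memory the fresh pages start out read-write only;
// {NativeModule::SetExecutable} flips them to read-execute later.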
bool WasmCodeManager::Commit(Address start, size_t size) {
  DCHECK(IsAligned(start, AllocatePageSize()));
  DCHECK(IsAligned(size, AllocatePageSize()));
  // Reserve the size. Use a CAS loop to avoid underflow on
  // {remaining_uncommitted_code_space_}. Temporary underflow would allow
  // concurrent threads to over-commit.
  while (true) {
    size_t old_value = remaining_uncommitted_code_space_.load();
    if (old_value < size) return false;
    if (remaining_uncommitted_code_space_.compare_exchange_weak(
            old_value, old_value - size)) {
      break;
    }
  }
  PageAllocator::Permission permission = FLAG_wasm_write_protect_code_memory
                                             ? PageAllocator::kReadWrite
                                             : PageAllocator::kReadWriteExecute;

  bool ret = SetPermissions(start, size, permission);
  TRACE_HEAP("Setting rw permissions for %p:%p\n",
             reinterpret_cast<void*>(start),
             reinterpret_cast<void*>(start + size));
  if (!ret) {
    // Highly unlikely.
    remaining_uncommitted_code_space_.fetch_add(size);
    return false;
  }
  // This API assumes being called on the main thread.
  isolate_->AdjustAmountOfExternalAllocatedMemory(size);
  if (WouldGCHelp()) {
    // This API does not assume the main thread, and would schedule
    // a GC if called from a different thread, instead of synchronously
    // doing one.
    isolate_->MemoryPressureNotification(MemoryPressureLevel::kCritical);
  }
  return ret;
}

bool WasmCodeManager::WouldGCHelp() const {
  // If all we have is one module, or none, a GC would not help.
  // A GC would only help if there are other native modules that could be
  // collected.
  if (active_ <= 1) return false;
  // We assume an upper bound on the code size a single native function
  // may have.
  constexpr size_t kMaxNativeFunction = 32 * MB;
  size_t remaining = remaining_uncommitted_code_space_.load();
  return remaining < kMaxNativeFunction;
}

void WasmCodeManager::AssignRanges(Address start, Address end,
                                   NativeModule* native_module) {
  lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
}

void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) {
  DCHECK_GT(size, 0);
  size = RoundUp(size, AllocatePageSize());
  if (hint == nullptr) hint = GetRandomMmapAddr();

  if (!AlignedAllocVirtualMemory(size, static_cast<size_t>(AllocatePageSize()),
                                 hint, ret)) {
    DCHECK(!ret->IsReserved());
  }
  TRACE_HEAP("VMem alloc: %p:%p (%zu)\n",
             reinterpret_cast<void*>(ret->address()),
             reinterpret_cast<void*>(ret->end()), ret->size());
}

size_t WasmCodeManager::GetAllocationChunk(const WasmModule& module) {
  // TODO(mtrofin): this should pick up its 'maximal code range size'
  // from something embedder-provided.
  if (kRequiresCodeRange) return kMaxWasmCodeMemory;
  DCHECK(kModuleCanAllocateMoreMemory);
  size_t ret = AllocatePageSize();
  // A ballpark estimate of the native code inflation factor.
  constexpr size_t kMultiplier = 4;

  for (auto& function : module.functions) {
    ret += kMultiplier * function.code.length();
  }
  return ret;
}

std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
    const WasmModule& module, ModuleEnv& env) {
  size_t code_size = GetAllocationChunk(module);
  return NewNativeModule(
      code_size, static_cast<uint32_t>(module.functions.size()),
      module.num_imported_functions, kModuleCanAllocateMoreMemory, env);
}

std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
    size_t size_estimate, uint32_t num_functions,
    uint32_t num_imported_functions, bool can_request_more, ModuleEnv& env) {
  VirtualMemory mem;
  TryAllocate(size_estimate, &mem);
  if (mem.IsReserved()) {
    Address start = mem.address();
    size_t size = mem.size();
    Address end = mem.end();
    std::unique_ptr<NativeModule> ret(
        new NativeModule(num_functions, num_imported_functions,
                         can_request_more, &mem, this, env));
    TRACE_HEAP("New Module: ID:%zu. Mem: %p,+%zu\n", ret->instance_id,
               reinterpret_cast<void*>(start), size);
    AssignRanges(start, end, ret.get());
    ++active_;
    return ret;
  }

  V8::FatalProcessOutOfMemory(reinterpret_cast<Isolate*>(isolate_),
                              "WasmCodeManager::NewNativeModule");
  return nullptr;
}

bool NativeModule::SetExecutable(bool executable) {
  if (is_executable_ == executable) return true;
  TRACE_HEAP("Setting module %zu as executable: %d.\n", instance_id,
             executable);
  PageAllocator::Permission permission =
      executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;

  if (FLAG_wasm_write_protect_code_memory) {
#if V8_OS_WIN
    // On Windows, we need to switch permissions per separate virtual memory
    // reservation. This is really just a problem when the NativeModule is
    // growable (meaning can_request_more_memory_). That happens on 32-bit
    // platforms in production, or in unittests.
    // For now, in that case, we switch permissions at reserved memory
    // granularity. Technically, that may be a waste, because we may reserve
    // more than we use. On 32-bit though, the scarce resource is the address
    // space - committed or not.
    if (can_request_more_memory_) {
      for (auto& vmem : owned_code_space_) {
        if (!SetPermissions(vmem.address(), vmem.size(), permission)) {
          return false;
        }
        TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
                   executable);
      }
      is_executable_ = executable;
      return true;
    }
#endif
    for (auto& range : allocated_code_space_.ranges()) {
      // allocated_code_space_ is fine-grained, so we need to
      // page-align it.
      size_t range_size = RoundUp(
          static_cast<size_t>(range.second - range.first), AllocatePageSize());
      if (!SetPermissions(range.first, range_size, permission)) {
        return false;
      }
      TRACE_HEAP("Set %p:%p to executable:%d\n",
                 reinterpret_cast<void*>(range.first),
                 reinterpret_cast<void*>(range.second), executable);
    }
  }
  is_executable_ = executable;
  return true;
}

void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
  DCHECK_GE(active_, 1);
  --active_;
  TRACE_HEAP("Freeing %zu\n", native_module->instance_id);
  for (auto& vmem : native_module->owned_code_space_) {
    lookup_map_.erase(vmem.address());
    Free(&vmem);
    DCHECK(!vmem.IsReserved());
  }
  native_module->owned_code_space_.clear();

  size_t code_size = native_module->committed_code_space_;
  DCHECK(IsAligned(code_size, AllocatePageSize()));

  if (module_code_size_mb_) {
    module_code_size_mb_->AddSample(static_cast<int>(code_size / MB));
  }

  // No need to tell the GC anything if we're destroying the heap, which we
  // currently indicate by having the isolate_ as null.
  if (isolate_ == nullptr) return;
  remaining_uncommitted_code_space_.fetch_add(code_size);
  isolate_->AdjustAmountOfExternalAllocatedMemory(
      -static_cast<int64_t>(code_size));
}

// TODO(wasm): We can make this more efficient if needed. For
// example, we can preface the first instruction with a pointer to
// the WasmCode. In the meantime, we have a separate API so we can
// easily identify those places where we know we have the first
// instruction PC.
WasmCode* WasmCodeManager::GetCodeFromStartAddress(Address pc) const {
  WasmCode* code = LookupCode(pc);
  // This method can only be called for valid instruction start addresses.
  DCHECK_NOT_NULL(code);
  DCHECK_EQ(pc, code->instruction_start());
  return code;
}

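// Finds the WasmCode containing {pc} across all registered native modules.
// {lookup_map_} maps each reserved region's start address to (end, module);
// the region containing {pc} is found via upper_bound, and the module's own
// {Lookup} then narrows it down to the individual code object.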
WasmCode* WasmCodeManager::LookupCode(Address pc) const {
  if (lookup_map_.empty()) return nullptr;

  auto iter = lookup_map_.upper_bound(pc);
  if (iter == lookup_map_.begin()) return nullptr;
  --iter;
  Address range_start = iter->first;
  Address range_end = iter->second.first;
  NativeModule* candidate = iter->second.second;

  DCHECK_NOT_NULL(candidate);
  if (range_start <= pc && pc < range_end) return candidate->Lookup(pc);
  return nullptr;
}

void WasmCodeManager::Free(VirtualMemory* mem) {
  DCHECK(mem->IsReserved());
  void* start = reinterpret_cast<void*>(mem->address());
  void* end = reinterpret_cast<void*>(mem->end());
  size_t size = mem->size();
  mem->Free();
  TRACE_HEAP("VMem Release: %p:%p (%zu)\n", start, end, size);
}

size_t WasmCodeManager::remaining_uncommitted_code_space() const {
  return remaining_uncommitted_code_space_.load();
}

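// RAII helper around {NativeModule::SetExecutable}: entering the outermost
// scope makes the module's code writable (non-executable), and leaving it
// makes the code executable again. Nesting is tracked via
// {modification_scope_depth_}.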
NativeModuleModificationScope::NativeModuleModificationScope(
    NativeModule* native_module)
    : native_module_(native_module) {
  if (native_module_ && (native_module_->modification_scope_depth_++) == 0) {
    bool success = native_module_->SetExecutable(false);
    CHECK(success);
  }
}

NativeModuleModificationScope::~NativeModuleModificationScope() {
  if (native_module_ && (native_module_->modification_scope_depth_--) == 1) {
    bool success = native_module_->SetExecutable(true);
    CHECK(success);
  }
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8
#undef TRACE_HEAP