1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 *
4 * Copyright 2018 Mozilla Foundation
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19 #include "wasm/WasmCraneliftCompile.h"
20
21 #include "mozilla/CheckedInt.h"
22 #include "mozilla/ScopeExit.h"
23
24 #include "jit/Disassemble.h"
25 #include "js/Printf.h"
26 #include "vm/JSContext.h"
27
28 #include "wasm/cranelift/baldrapi.h"
29 #include "wasm/cranelift/clifapi.h"
30 #include "wasm/WasmFrameIter.h" // js::wasm::GenerateFunction{Pro,Epi}logue
31 #include "wasm/WasmGC.h"
32 #include "wasm/WasmGenerator.h"
33 #include "wasm/WasmStubs.h"
34
35 #include "jit/MacroAssembler-inl.h"
36
37 using namespace js;
38 using namespace js::jit;
39 using namespace js::wasm;
40
41 using mozilla::CheckedInt;
42
// Reports whether the Cranelift backend can generate code for the current
// platform; the answer comes from the Rust side.
bool wasm::CraneliftPlatformSupport() { return cranelift_supports_platform(); }
44
// Maps a backend-independent BD_SymbolicAddress reported by Cranelift to the
// corresponding SpiderMonkey SymbolicAddress. Note the memory and atomics
// callouts all map to their 32-bit-memory (M32) variants. Crashes on
// BD_SymbolicAddress::Limit or any unknown value.
static inline SymbolicAddress ToSymbolicAddress(BD_SymbolicAddress bd) {
  switch (bd) {
    case BD_SymbolicAddress::RefFunc:
      return SymbolicAddress::RefFunc;
    case BD_SymbolicAddress::MemoryGrow:
      return SymbolicAddress::MemoryGrowM32;
    case BD_SymbolicAddress::MemorySize:
      return SymbolicAddress::MemorySizeM32;
    case BD_SymbolicAddress::MemoryCopy:
      return SymbolicAddress::MemCopyM32;
    case BD_SymbolicAddress::MemoryCopyShared:
      return SymbolicAddress::MemCopySharedM32;
    case BD_SymbolicAddress::DataDrop:
      return SymbolicAddress::DataDrop;
    case BD_SymbolicAddress::MemoryFill:
      return SymbolicAddress::MemFillM32;
    case BD_SymbolicAddress::MemoryFillShared:
      return SymbolicAddress::MemFillSharedM32;
    case BD_SymbolicAddress::MemoryInit:
      return SymbolicAddress::MemInitM32;
    case BD_SymbolicAddress::TableCopy:
      return SymbolicAddress::TableCopy;
    case BD_SymbolicAddress::ElemDrop:
      return SymbolicAddress::ElemDrop;
    case BD_SymbolicAddress::TableFill:
      return SymbolicAddress::TableFill;
    case BD_SymbolicAddress::TableGet:
      return SymbolicAddress::TableGet;
    case BD_SymbolicAddress::TableGrow:
      return SymbolicAddress::TableGrow;
    case BD_SymbolicAddress::TableInit:
      return SymbolicAddress::TableInit;
    case BD_SymbolicAddress::TableSet:
      return SymbolicAddress::TableSet;
    case BD_SymbolicAddress::TableSize:
      return SymbolicAddress::TableSize;
    case BD_SymbolicAddress::FloorF32:
      return SymbolicAddress::FloorF;
    case BD_SymbolicAddress::FloorF64:
      return SymbolicAddress::FloorD;
    case BD_SymbolicAddress::CeilF32:
      return SymbolicAddress::CeilF;
    case BD_SymbolicAddress::CeilF64:
      return SymbolicAddress::CeilD;
    case BD_SymbolicAddress::NearestF32:
      return SymbolicAddress::NearbyIntF;
    case BD_SymbolicAddress::NearestF64:
      return SymbolicAddress::NearbyIntD;
    case BD_SymbolicAddress::TruncF32:
      return SymbolicAddress::TruncF;
    case BD_SymbolicAddress::TruncF64:
      return SymbolicAddress::TruncD;
    case BD_SymbolicAddress::PreBarrier:
      return SymbolicAddress::PreBarrierFiltering;
    case BD_SymbolicAddress::PostBarrier:
      return SymbolicAddress::PostBarrierFiltering;
    case BD_SymbolicAddress::WaitI32:
      return SymbolicAddress::WaitI32M32;
    case BD_SymbolicAddress::WaitI64:
      return SymbolicAddress::WaitI64M32;
    case BD_SymbolicAddress::Wake:
      return SymbolicAddress::WakeM32;
    case BD_SymbolicAddress::Limit:
      break;
  }
  MOZ_CRASH("unknown baldrdash symbolic address");
}
112
// Lowers one Cranelift-compiled function into |masm|: emits the standard wasm
// prologue/epilogue around the raw machine code produced by Cranelift, patches
// jump tables and read-only data (x86/x64 only), rebases the stackmaps
// Cranelift emitted for this function, and registers call sites, traps and
// symbolic accesses from the Cranelift metadata.
//
// |stackMapsOffset|/|stackMapsCount| delimit the stackmaps belonging to this
// function within |stackMaps|. On success, |offsets| describes the function's
// extent in |masm|. Returns false on OOM.
static bool GenerateCraneliftCode(
    WasmMacroAssembler& masm, const CraneliftCompiledFunc& func,
    const FuncType& funcType, const TypeIdDesc& funcTypeId,
    uint32_t lineOrBytecode, uint32_t funcBytecodeSize, StackMaps* stackMaps,
    size_t stackMapsOffset, size_t stackMapsCount, FuncOffsets* offsets) {
  wasm::GenerateFunctionPrologue(masm, funcTypeId, mozilla::Nothing(), offsets);

  // Omit the check when framePushed is small and we know there's no
  // recursion.
  if (func.frame_pushed < MAX_UNCHECKED_LEAF_FRAME_SIZE &&
      !func.contains_calls) {
    masm.reserveStack(func.frame_pushed);
  } else {
    // The stack-limit check can trap, so the trapping instruction needs a
    // stackmap describing the function-entry state.
    std::pair<CodeOffset, uint32_t> pair = masm.wasmReserveStackChecked(
        func.frame_pushed, BytecodeOffset(lineOrBytecode));
    CodeOffset trapInsnOffset = pair.first;
    size_t nBytesReservedBeforeTrap = pair.second;

    MachineState trapExitLayout;
    size_t trapExitLayoutNumWords;
    GenerateTrapExitMachineState(&trapExitLayout, &trapExitLayoutNumWords);

    size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(funcType.args());

    ArgTypeVector args(funcType);
    wasm::StackMap* functionEntryStackMap = nullptr;
    if (!CreateStackMapForFunctionEntryTrap(
            args, trapExitLayout, trapExitLayoutNumWords,
            nBytesReservedBeforeTrap, nInboundStackArgBytes,
            &functionEntryStackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stackmap, even if there are no
    // refs to track.
    MOZ_ASSERT(functionEntryStackMap);

    if (functionEntryStackMap &&
        !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
                        functionEntryStackMap)) {
      // On failure we still own the map, so free it ourselves.
      functionEntryStackMap->destroy();
      return false;
    }
  }
  MOZ_ASSERT(masm.framePushed() == func.frame_pushed);

  // Copy the machine code; handle jump tables and other read-only data below.
  uint32_t funcBase = masm.currentOffset();
  if (func.code_size && !masm.appendRawCode(func.code, func.code_size)) {
    return false;
  }
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
  uint32_t codeEnd = masm.currentOffset();
#endif

  wasm::GenerateFunctionEpilogue(masm, func.frame_pushed, offsets);

  if (func.num_rodata_relocs > 0) {
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
    constexpr size_t jumptableElementSize = 4;

    MOZ_ASSERT(func.jumptables_size % jumptableElementSize == 0);

    // Align the jump tables properly.
    masm.haltingAlign(jumptableElementSize);

    // Copy over the tables and read-only data.
    uint32_t rodataBase = masm.currentOffset();
    if (!masm.appendRawCode(func.code + func.code_size,
                            func.total_size - func.code_size)) {
      return false;
    }

    uint32_t numElem = func.jumptables_size / jumptableElementSize;
    uint32_t bias = rodataBase - codeEnd;

    // Bias the jump table(s). The table values are negative values
    // representing backward jumps. By shifting the table down we increase the
    // distance and so we add a negative value to reflect the larger distance.
    //
    // Note addToPCRel4() works from the end of the instruction, hence the loop
    // bounds.
    for (uint32_t i = 1; i <= numElem; i++) {
      masm.addToPCRel4(rodataBase + (i * jumptableElementSize), -bias);
    }

    // Patch up the code locations. These represent forward distances that also
    // become greater, so we add a positive value.
    for (uint32_t i = 0; i < func.num_rodata_relocs; i++) {
      MOZ_ASSERT(func.rodata_relocs[i] < func.code_size);
      masm.addToPCRel4(funcBase + func.rodata_relocs[i], bias);
    }
#else
    MOZ_CRASH("No jump table support on this platform");
#endif
  }

  masm.flush();
  if (masm.oom()) {
    return false;
  }
  offsets->end = masm.currentOffset();

  // The stackmaps added during cranelift_compile_function() carry offsets
  // relative to the start of this function; rebase them onto the module's
  // code segment.
  for (size_t i = 0; i < stackMapsCount; i++) {
    auto* maplet = stackMaps->getRef(stackMapsOffset + i);
    maplet->offsetBy(funcBase);
  }

  // Translate each Cranelift metadata record into the corresponding
  // MacroAssembler-side bookkeeping (call sites, trap sites, symbolic
  // accesses), with all code offsets rebased by |funcBase|.
  for (size_t i = 0; i < func.num_metadata; i++) {
    const CraneliftMetadataEntry& metadata = func.metadatas[i];

    CheckedInt<size_t> offset = funcBase;
    offset += metadata.code_offset;
    if (!offset.isValid()) {
      return false;
    }

#ifdef DEBUG
    // Check code offsets.
    MOZ_ASSERT(offset.value() >= offsets->uncheckedCallEntry);
    MOZ_ASSERT(offset.value() < offsets->ret);
    MOZ_ASSERT(metadata.module_bytecode_offset != 0);

    // Check bytecode offsets.
    if (lineOrBytecode > 0) {
      MOZ_ASSERT(metadata.module_bytecode_offset >= lineOrBytecode);
      MOZ_ASSERT(metadata.module_bytecode_offset <
                 lineOrBytecode + funcBytecodeSize);
    }
#endif
    uint32_t bytecodeOffset = metadata.module_bytecode_offset;

    switch (metadata.which) {
      case CraneliftMetadataEntry::Which::DirectCall: {
        // |extra| is the callee function index for a direct call.
        CallSiteDesc desc(bytecodeOffset, CallSiteDesc::Func);
        masm.append(desc, CodeOffset(offset.value()), metadata.extra);
        break;
      }
      case CraneliftMetadataEntry::Which::IndirectCall: {
        CallSiteDesc desc(bytecodeOffset, CallSiteDesc::Indirect);
        masm.append(desc, CodeOffset(offset.value()));
        break;
      }
      case CraneliftMetadataEntry::Which::Trap: {
        // |extra| encodes the Trap enum value.
        Trap trap = (Trap)metadata.extra;
        BytecodeOffset trapOffset(bytecodeOffset);
        masm.append(trap, wasm::TrapSite(offset.value(), trapOffset));
        break;
      }
      case CraneliftMetadataEntry::Which::SymbolicAccess: {
        // |extra| is a BD_SymbolicAddress identifying the runtime callee;
        // record both the call site and the patchable access.
        CodeOffset raOffset(offset.value());
        CallSiteDesc desc(bytecodeOffset, CallSiteDesc::Symbolic);
        masm.append(desc, raOffset);

        SymbolicAddress sym =
            ToSymbolicAddress(BD_SymbolicAddress(metadata.extra));
        masm.append(SymbolicAccess(raOffset, sym));
        break;
      }
      default: {
        MOZ_CRASH("unknown cranelift metadata kind");
      }
    }
  }

  return true;
}
280
281 // In Rust, a BatchCompiler variable has a lifetime constrained by those of its
282 // associated StaticEnvironment and ModuleEnvironment. This RAII class ties
283 // them together, as well as makes sure that the compiler is properly destroyed
284 // when it exits scope.
285
class CraneliftContext {
  CraneliftStaticEnvironment staticEnv_;
  CraneliftModuleEnvironment moduleEnv_;
  // Owned Rust-side compiler; created by init(), destroyed in the destructor.
  CraneliftCompiler* compiler_;

 public:
  explicit CraneliftContext(const ModuleEnvironment& moduleEnv)
      : moduleEnv_(moduleEnv), compiler_(nullptr) {
    staticEnv_.ref_types_enabled = true;
    staticEnv_.threads_enabled = true;
    staticEnv_.v128_enabled = moduleEnv.v128Enabled();
#ifdef WASM_SUPPORTS_HUGE_MEMORY
    if (moduleEnv.hugeMemoryEnabled()) {
      // In the huge memory configuration, we always reserve the full 4 GB
      // index space for a heap.
      staticEnv_.static_memory_bound = HugeIndexRange;
    }
#endif
    staticEnv_.memory_guard_size =
        GetMaxOffsetGuardLimit(moduleEnv.hugeMemoryEnabled());
    // Otherwise, heap bounds are stored in the `boundsCheckLimit` field
    // of TlsData.
  }
  // Creates the Rust-side compiler. Must be called, and must succeed, before
  // the conversion operator below is used. Returns false on failure.
  bool init() {
    compiler_ = cranelift_compiler_create(&staticEnv_, &moduleEnv_);
    return !!compiler_;
  }
  ~CraneliftContext() {
    if (compiler_) {
      cranelift_compiler_destroy(compiler_);
    }
  }
  // Implicit view of the owned compiler, for passing to cranelift_* calls.
  operator CraneliftCompiler*() { return compiler_; }
};
320
// Flattens a FuncCompileInput into the POD view shared with the Rust side.
// Borrows the bytecode range; does not copy it, so |func| must outlive this
// object.
CraneliftFuncCompileInput::CraneliftFuncCompileInput(
    const FuncCompileInput& func)
    : bytecode(func.begin),
      bytecode_size(func.end - func.begin),
      index(func.index),
      offset_in_module(func.lineOrBytecode) {}
327
// Layout assumption shared with the Rust side: make_heap() in wasm2clif.rs
// hard-codes the position of boundsCheckLimit within TlsData.
static_assert(offsetof(TlsData, boundsCheckLimit) == sizeof(void*),
              "fix make_heap() in wasm2clif.rs");
330
// Snapshot of process-wide configuration handed to the Rust compiler:
// detected CPU features, host platform, and TlsData/FuncImportTls field
// offsets. Module-dependent fields (ref_types_enabled, v128_enabled,
// static_memory_bound, memory_guard_size, ...) are initialized to safe
// defaults here and filled in by CraneliftContext's constructor.
CraneliftStaticEnvironment::CraneliftStaticEnvironment()
    :
#ifdef JS_CODEGEN_X64
      has_sse2(Assembler::HasSSE2()),
      has_sse3(Assembler::HasSSE3()),
      has_sse41(Assembler::HasSSE41()),
      has_sse42(Assembler::HasSSE42()),
      has_popcnt(Assembler::HasPOPCNT()),
      has_avx(Assembler::HasAVX()),
      has_bmi1(Assembler::HasBMI1()),
      has_bmi2(Assembler::HasBMI2()),
      has_lzcnt(Assembler::HasLZCNT()),
#else
      // Non-x64 targets report no x86 feature bits.
      has_sse2(false),
      has_sse3(false),
      has_sse41(false),
      has_sse42(false),
      has_popcnt(false),
      has_avx(false),
      has_bmi1(false),
      has_bmi2(false),
      has_lzcnt(false),
#endif
#if defined(XP_WIN)
      platform_is_windows(true),
#else
      platform_is_windows(false),
#endif
      ref_types_enabled(false),
      threads_enabled(false),
      v128_enabled(false),
      static_memory_bound(0),
      memory_guard_size(0),
      // Offsets the Rust code generator needs to address TlsData fields.
      memory_base_tls_offset(offsetof(TlsData, memoryBase)),
      instance_tls_offset(offsetof(TlsData, instance)),
      interrupt_tls_offset(offsetof(TlsData, interrupt)),
      cx_tls_offset(offsetof(TlsData, cx)),
      realm_cx_offset(JSContext::offsetOfRealm()),
      realm_tls_offset(offsetof(TlsData, realm)),
      realm_func_import_tls_offset(offsetof(FuncImportTls, realm)),
      size_of_wasm_frame(sizeof(wasm::Frame)) {
}
373
374 // Most of BaldrMonkey's data structures refer to a "global offset" which is a
375 // byte offset into the `globalArea` field of the `TlsData` struct.
376 //
377 // Cranelift represents global variables with their byte offset from the "VM
378 // context pointer" which is the `WasmTlsReg` pointing to the `TlsData`
379 // struct.
380 //
381 // This function translates between the two.
382
// See the comment block above: translates a global-area offset into an offset
// from the start of TlsData.
static size_t globalToTlsOffset(size_t globalOffset) {
  return offsetof(wasm::TlsData, globalArea) + globalOffset;
}
386
// Wraps a ModuleEnvironment (borrowed; must outlive this object) for the Rust
// side and caches the initial memory size in pages. Note min_memory_length is
// only assigned when the module declares a memory.
CraneliftModuleEnvironment::CraneliftModuleEnvironment(
    const ModuleEnvironment& env)
    : env(&env) {
  if (env.memory.isSome()) {
    // We use |auto| here rather than |uint64_t| so that the static_assert will
    // fail if |pages| is changed to some other size.
    auto pages = env.memory->initialPages().value();
    static_assert(sizeof(pages) == 8);
    MOZ_RELEASE_ASSERT(pages <= MaxMemory32LimitField);
    min_memory_length = uint32_t(pages);
  }
}
399
// Callback from Rust: decodes a BD_ValType (a PackedTypeCode carried as raw
// bits) into its TypeCode.
TypeCode env_unpack(BD_ValType valType) {
  return PackedTypeCode::fromBits(valType.packed).typeCode();
}
403
// Number of data segments, or 0 if the module has no DataCount section.
size_t env_num_datas(const CraneliftModuleEnvironment* env) {
  return env->env->dataCount.valueOr(0);
}

// Number of element segments declared by the module.
size_t env_num_elems(const CraneliftModuleEnvironment* env) {
  return env->env->elemSegments.length();
}
// Packed type code of the element type of segment |index|.
TypeCode env_elem_typecode(const CraneliftModuleEnvironment* env,
                           uint32_t index) {
  return env->env->elemSegments[index]->elemType.packed().typeCode();
}
415
416 // Returns a number of pages in the range [0..65536], or UINT32_MAX to signal
417 // that no maximum has been set.
env_max_memory(const CraneliftModuleEnvironment * env)418 uint32_t env_max_memory(const CraneliftModuleEnvironment* env) {
419 const ModuleEnvironment& moduleEnv = *env->env;
420 if (moduleEnv.memory.isNothing()) {
421 return UINT32_MAX;
422 }
423
424 Maybe<Pages> maxPages = moduleEnv.memory->maximumPages();
425 if (maxPages.isNothing()) {
426 return UINT32_MAX;
427 }
428
429 // We use |auto| here rather than |uint64_t| so that the static_assert will
430 // fail if |maxPages| is changed to some other size.
431 auto pages = maxPages->value();
432 static_assert(sizeof(pages) == 8);
433 MOZ_RELEASE_ASSERT(pages <= MaxMemory32LimitField);
434 return pages;
435 }
436
// True if the module's memory is shared.
bool env_uses_shared_memory(const CraneliftModuleEnvironment* env) {
  return env->env->usesSharedMemory();
}

// True if the module declares or imports a memory.
bool env_has_memory(const CraneliftModuleEnvironment* env) {
  return env->env->usesMemory();
}
444
// Number of entries in the module's type section.
size_t env_num_types(const CraneliftModuleEnvironment* env) {
  return env->env->types->length();
}
// The function type at |typeIndex|; the entry must be a function type.
const FuncType* env_type(const CraneliftModuleEnvironment* env,
                         size_t typeIndex) {
  return &(*env->env->types)[typeIndex].funcType();
}
452
// Number of functions (imported and defined) in the module.
size_t env_num_funcs(const CraneliftModuleEnvironment* env) {
  return env->env->funcs.length();
}
// Signature of the function at |funcIndex|.
const FuncType* env_func_sig(const CraneliftModuleEnvironment* env,
                             size_t funcIndex) {
  return env->env->funcs[funcIndex].type;
}
// Type-id descriptor of the function at |funcIndex|.
const TypeIdDesc* env_func_sig_id(const CraneliftModuleEnvironment* env,
                                  size_t funcIndex) {
  return env->env->funcs[funcIndex].typeId;
}
// Index into the type section of the signature of the function at
// |funcIndex|.
size_t env_func_sig_index(const CraneliftModuleEnvironment* env,
                          size_t funcIndex) {
  return env->env->funcs[funcIndex].typeIndex;
}
// True if the function at |index| may be referenced via ref.func.
bool env_is_func_valid_for_ref(const CraneliftModuleEnvironment* env,
                               uint32_t index) {
  return env->env->funcs[index].canRefFunc();
}
472
// TlsData offset of the import data for the imported function |funcIndex|.
size_t env_func_import_tls_offset(const CraneliftModuleEnvironment* env,
                                  size_t funcIndex) {
  return globalToTlsOffset(env->env->funcImportGlobalDataOffsets[funcIndex]);
}

// True if |funcIndex| refers to an imported (rather than defined) function.
bool env_func_is_import(const CraneliftModuleEnvironment* env,
                        size_t funcIndex) {
  return env->env->funcIsImport(funcIndex);
}
482
// The function type at |funcTypeIndex| in the type section.
const FuncType* env_signature(const CraneliftModuleEnvironment* env,
                              size_t funcTypeIndex) {
  return &(*env->env->types)[funcTypeIndex].funcType();
}

// The type-id descriptor for the type at |funcTypeIndex|.
const TypeIdDesc* env_signature_id(const CraneliftModuleEnvironment* env,
                                   size_t funcTypeIndex) {
  return &env->env->typeIds[funcTypeIndex];
}
492
// Number of tables (imported and defined) in the module.
size_t env_num_tables(const CraneliftModuleEnvironment* env) {
  return env->env->tables.length();
}
// Descriptor of the table at |tableIndex|.
const TableDesc* env_table(const CraneliftModuleEnvironment* env,
                           size_t tableIndex) {
  return &env->env->tables[tableIndex];
}

// Number of globals (imported and defined) in the module.
size_t env_num_globals(const CraneliftModuleEnvironment* env) {
  return env->env->globals.length();
}
// Descriptor of the global at |globalIndex|.
const GlobalDesc* env_global(const CraneliftModuleEnvironment* env,
                             size_t globalIndex) {
  return &env->env->globals[globalIndex];
}
508
// Compiles a batch of functions with Cranelift into |code|, sharing one
// macro-assembler across the batch and reusing a cached CraneliftContext
// stored in |code|'s reusable-data slot. On failure, returns false; if the
// failure came from Cranelift itself, |*error| carries its message.
bool wasm::CraneliftCompileFunctions(const ModuleEnvironment& moduleEnv,
                                     const CompilerEnvironment& compilerEnv,
                                     LifoAlloc& lifo,
                                     const FuncCompileInputVector& inputs,
                                     CompiledCode* code, UniqueChars* error) {
  MOZ_RELEASE_ASSERT(CraneliftPlatformSupport());

  MOZ_ASSERT(compilerEnv.tier() == Tier::Optimized);
  MOZ_ASSERT(compilerEnv.debug() == DebugEnabled::False);
  MOZ_ASSERT(compilerEnv.optimizedBackend() == OptimizedBackend::Cranelift);
  MOZ_ASSERT(!moduleEnv.isAsmJS());

  TempAllocator alloc(&lifo);
  JitContext jitContext(&alloc);
  WasmMacroAssembler masm(alloc, moduleEnv);
  AutoCreatedBy acb(masm, "wasm::CraneliftCompileFunctions");

  MOZ_ASSERT(IsCompilingWasm());

  // Swap in already-allocated empty vectors to avoid malloc/free.
  MOZ_ASSERT(code->empty());

  CraneliftReusableData reusableContext;
  if (!code->swapCranelift(masm, reusableContext)) {
    return false;
  }

  // Lazily create the CraneliftContext on first use; it is swapped back into
  // |code| at the end so later batches can reuse it.
  if (!reusableContext) {
    auto context = MakeUnique<CraneliftContext>(moduleEnv);
    if (!context || !context->init()) {
      return false;
    }
    reusableContext.reset((void**)context.release());
  }

  CraneliftContext* compiler = (CraneliftContext*)reusableContext.get();

  // Disable instruction spew if we're going to disassemble after code
  // generation, or the output will be a mess.

  bool jitSpew = JitSpewEnabled(js::jit::JitSpew_Codegen);
  if (jitSpew) {
    DisableChannel(js::jit::JitSpew_Codegen);
  }
  auto reenableSpew = mozilla::MakeScopeExit([&] {
    if (jitSpew) {
      EnableChannel(js::jit::JitSpew_Codegen);
    }
  });

  for (const FuncCompileInput& func : inputs) {
    Decoder d(func.begin, func.end, func.lineOrBytecode, error);

    size_t funcBytecodeSize = func.end - func.begin;

    // Remember how many stackmaps were present before compiling this function
    // so GenerateCraneliftCode can rebase exactly the ones Cranelift adds.
    size_t previousStackmapCount = code->stackMaps.length();

    CraneliftFuncCompileInput clifInput(func);
    clifInput.stackmaps = (BD_Stackmaps*)&code->stackMaps;

    CraneliftCompiledFunc clifFunc;

    char* clifError = nullptr;
    if (!cranelift_compile_function(*compiler, &clifInput, &clifFunc,
                                    &clifError)) {
      // Copy the Rust-allocated error message before handing it back.
      *error = JS_smprintf("%s", clifError);
      cranelift_compiler_free_error(clifError);
      return false;
    }

    uint32_t lineOrBytecode = func.lineOrBytecode;
    const FuncType& funcType = *moduleEnv.funcs[clifInput.index].type;
    const TypeIdDesc& funcTypeId = *moduleEnv.funcs[clifInput.index].typeId;

    FuncOffsets offsets;
    if (!GenerateCraneliftCode(
            masm, clifFunc, funcType, funcTypeId, lineOrBytecode,
            funcBytecodeSize, &code->stackMaps, previousStackmapCount,
            code->stackMaps.length() - previousStackmapCount, &offsets)) {
      return false;
    }

    if (!code->codeRanges.emplaceBack(func.index, lineOrBytecode, offsets)) {
      return false;
    }
  }

  masm.finish();
  if (masm.oom()) {
    return false;
  }

  if (jitSpew) {
    // The disassembler uses the jitspew for output, so re-enable now.
    EnableChannel(js::jit::JitSpew_Codegen);

    uint32_t totalCodeSize = masm.currentOffset();
    uint8_t* codeBuf = (uint8_t*)js_malloc(totalCodeSize);
    // Disassembly is best-effort: on allocation failure we silently skip it.
    if (codeBuf) {
      masm.executableCopy(codeBuf);

      const CodeRangeVector& codeRanges = code->codeRanges;
      MOZ_ASSERT(codeRanges.length() >= inputs.length());

      // Within the current batch, functions' code ranges have been added in
      // the same order as the inputs.
      size_t firstCodeRangeIndex = codeRanges.length() - inputs.length();

      for (size_t i = 0; i < inputs.length(); i++) {
        int funcIndex = inputs[i].index;

        JitSpew(JitSpew_Codegen, "# ========================================");
        JitSpew(JitSpew_Codegen, "# Start of wasm cranelift code for index %d",
                funcIndex);

        size_t codeRangeIndex = firstCodeRangeIndex + i;
        uint32_t codeStart = codeRanges[codeRangeIndex].begin();
        uint32_t codeEnd = codeRanges[codeRangeIndex].end();

        jit::Disassemble(
            codeBuf + codeStart, codeEnd - codeStart,
            [](const char* text) { JitSpew(JitSpew_Codegen, "%s", text); });

        JitSpew(JitSpew_Codegen, "# End of wasm cranelift code for index %d",
                funcIndex);
      }
      js_free(codeBuf);
    }
  }

  return code->swapCranelift(masm, reusableContext);
}
641
CraneliftFreeReusableData(void * ptr)642 void wasm::CraneliftFreeReusableData(void* ptr) {
643 CraneliftContext* compiler = (CraneliftContext*)ptr;
644 if (compiler) {
645 js_delete(compiler);
646 }
647 }
648
649 ////////////////////////////////////////////////////////////////////////////////
650 //
651 // Callbacks from Rust to C++.
652
// Offsets assumed by the `make_heap()` function.
// NOTE: the Rust side cannot check these C++ struct layouts itself, so keep
// these asserts in sync with wasm2clif.rs.
static_assert(offsetof(wasm::TlsData, memoryBase) == 0, "memory base moved");

// The translate_call() function in wasm2clif.rs depends on these offsets.
static_assert(offsetof(wasm::FuncImportTls, code) == 0,
              "Import code field moved");
static_assert(offsetof(wasm::FuncImportTls, tls) == sizeof(void*),
              "Import tls moved");
661
662 // Global
663
// True if the global's value is a compile-time constant.
bool global_isConstant(const GlobalDesc* global) {
  return global->isConstant();
}

// True if the global is mutable.
bool global_isMutable(const GlobalDesc* global) { return global->isMutable(); }

// True if the global is accessed indirectly rather than stored inline in the
// global area.
bool global_isIndirect(const GlobalDesc* global) {
  return global->isIndirect();
}
673
// Converts a constant global's value into the POD BD_ConstantValue shared
// with the Rust side. Only meaningful when global_isConstant() is true;
// crashes on an unrecognized value kind.
BD_ConstantValue global_constantValue(const GlobalDesc* global) {
  Val value(global->constantValue());
  BD_ConstantValue v;
  v.t = TypeCode(value.type().kind());
  switch (v.t) {
    case TypeCode::I32:
      v.u.i32 = value.i32();
      break;
    case TypeCode::I64:
      v.u.i64 = value.i64();
      break;
    case TypeCode::F32:
      v.u.f32 = value.f32();
      break;
    case TypeCode::F64:
      v.u.f64 = value.f64();
      break;
    case TypeCode::V128:
      // memcpy, not assignment: v128 has no direct member access here.
      memcpy(&v.u.v128, &value.v128(), sizeof(v.u.v128));
      break;
    case AbstractReferenceTypeCode:
      v.u.r = value.ref().forCompiledCode();
      break;
    default:
      MOZ_CRASH("Bad type");
  }
  return v;
}
702
// Packed type code of the global's type.
TypeCode global_type(const GlobalDesc* global) {
  return global->type().packed().typeCode();
}

// Offset of the global's storage from the start of TlsData.
size_t global_tlsOffset(const GlobalDesc* global) {
  return globalToTlsOffset(global->offset());
}
710
711 // TableDesc
712
// Offset of the table's instance data from the start of TlsData.
size_t table_tlsOffset(const TableDesc* table) {
  return globalToTlsOffset(table->globalDataOffset);
}

// Initial table length, in elements.
uint32_t table_initialLimit(const TableDesc* table) {
  return table->initialLength;
}
// Maximum table length in elements, or UINT32_MAX if no maximum was declared.
uint32_t table_maximumLimit(const TableDesc* table) {
  return table->maximumLength.valueOr(UINT32_MAX);
}
// Packed type code of the table's element type.
TypeCode table_elementTypeCode(const TableDesc* table) {
  return table->elemType.packed().typeCode();
}
726
727 // Sig
728
// Number of arguments of |funcType|.
size_t funcType_numArgs(const FuncType* funcType) {
  return funcType->args().length();
}

// Argument types of |funcType|, reinterpreted as the layout-compatible
// BD_ValType (guaranteed by the static_assert).
const BD_ValType* funcType_args(const FuncType* funcType) {
  static_assert(sizeof(BD_ValType) == sizeof(ValType), "update BD_ValType");
  return (const BD_ValType*)funcType->args().begin();
}

// Number of results of |funcType|.
size_t funcType_numResults(const FuncType* funcType) {
  return funcType->results().length();
}

// Result types of |funcType|, reinterpreted as the layout-compatible
// BD_ValType (guaranteed by the static_assert).
const BD_ValType* funcType_results(const FuncType* funcType) {
  static_assert(sizeof(BD_ValType) == sizeof(ValType), "update BD_ValType");
  return (const BD_ValType*)funcType->results().begin();
}

// Kind of the signature's type-id check.
TypeIdDescKind funcType_idKind(const TypeIdDesc* funcTypeId) {
  return funcTypeId->kind();
}

// Immediate form of the type id (presumably only meaningful for the
// immediate kind — see TypeIdDesc).
size_t funcType_idImmediate(const TypeIdDesc* funcTypeId) {
  return funcTypeId->immediate();
}

// TlsData offset of the type id's global-area storage.
size_t funcType_idTlsOffset(const TypeIdDesc* funcTypeId) {
  return globalToTlsOffset(funcTypeId->globalDataOffset());
}
758
stackmaps_add(BD_Stackmaps * sink,const uint32_t * bitMap,size_t mappedWords,size_t argsSize,size_t codeOffset)759 void stackmaps_add(BD_Stackmaps* sink, const uint32_t* bitMap,
760 size_t mappedWords, size_t argsSize, size_t codeOffset) {
761 const uint32_t BitElemSize = sizeof(uint32_t) * 8;
762
763 StackMaps* maps = (StackMaps*)sink;
764 StackMap* map = StackMap::create(mappedWords);
765 MOZ_ALWAYS_TRUE(map);
766
767 // Copy the cranelift stackmap into our spidermonkey one
768 // TODO: Take ownership of the cranelift stackmap and avoid a copy
769 for (uint32_t i = 0; i < mappedWords; i++) {
770 uint32_t bit = (bitMap[i / BitElemSize] >> (i % BitElemSize)) & 0x1;
771 if (bit) {
772 map->setBit(i);
773 }
774 }
775
776 map->setFrameOffsetFromTop((argsSize + sizeof(wasm::Frame)) /
777 sizeof(uintptr_t));
778 MOZ_ALWAYS_TRUE(maps->add((uint8_t*)codeOffset, map));
779 }
780