1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 *
4 * Copyright 2018 Mozilla Foundation
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19 #include "wasm/WasmCraneliftCompile.h"
20
21 #include "mozilla/CheckedInt.h"
22 #include "mozilla/ScopeExit.h"
23
24 #include "jit/Disassemble.h"
25 #include "js/Printf.h"
26 #include "vm/JSContext.h"
27
28 #include "wasm/cranelift/baldrapi.h"
29 #include "wasm/cranelift/clifapi.h"
30 #include "wasm/WasmFrameIter.h" // js::wasm::GenerateFunction{Pro,Epi}logue
31 #include "wasm/WasmGC.h"
32 #include "wasm/WasmGenerator.h"
33 #include "wasm/WasmStubs.h"
34
35 #include "jit/MacroAssembler-inl.h"
36
37 using namespace js;
38 using namespace js::jit;
39 using namespace js::wasm;
40
41 using mozilla::CheckedInt;
42
// Returns true if the Cranelift backend can generate code for the current
// platform (CPU architecture / OS combination), as reported by the Rust side.
bool wasm::CraneliftPlatformSupport() { return cranelift_supports_platform(); }
44
// Maps a Cranelift-side (baldrdash) symbolic-address tag onto SpiderMonkey's
// SymbolicAddress enumeration. The mapping must stay exhaustive: every
// BD_SymbolicAddress except the Limit sentinel has a SpiderMonkey
// counterpart, and an unknown value crashes.
static inline SymbolicAddress ToSymbolicAddress(BD_SymbolicAddress bd) {
  switch (bd) {
    case BD_SymbolicAddress::RefFunc:
      return SymbolicAddress::RefFunc;
    case BD_SymbolicAddress::MemoryGrow:
      return SymbolicAddress::MemoryGrow;
    case BD_SymbolicAddress::MemorySize:
      return SymbolicAddress::MemorySize;
    case BD_SymbolicAddress::MemoryCopy:
      return SymbolicAddress::MemCopy32;
    case BD_SymbolicAddress::MemoryCopyShared:
      return SymbolicAddress::MemCopyShared32;
    case BD_SymbolicAddress::DataDrop:
      return SymbolicAddress::DataDrop;
    case BD_SymbolicAddress::MemoryFill:
      return SymbolicAddress::MemFill32;
    case BD_SymbolicAddress::MemoryFillShared:
      return SymbolicAddress::MemFillShared32;
    case BD_SymbolicAddress::MemoryInit:
      return SymbolicAddress::MemInit32;
    case BD_SymbolicAddress::TableCopy:
      return SymbolicAddress::TableCopy;
    case BD_SymbolicAddress::ElemDrop:
      return SymbolicAddress::ElemDrop;
    case BD_SymbolicAddress::TableFill:
      return SymbolicAddress::TableFill;
    case BD_SymbolicAddress::TableGet:
      return SymbolicAddress::TableGet;
    case BD_SymbolicAddress::TableGrow:
      return SymbolicAddress::TableGrow;
    case BD_SymbolicAddress::TableInit:
      return SymbolicAddress::TableInit;
    case BD_SymbolicAddress::TableSet:
      return SymbolicAddress::TableSet;
    case BD_SymbolicAddress::TableSize:
      return SymbolicAddress::TableSize;
    case BD_SymbolicAddress::FloorF32:
      return SymbolicAddress::FloorF;
    case BD_SymbolicAddress::FloorF64:
      return SymbolicAddress::FloorD;
    case BD_SymbolicAddress::CeilF32:
      return SymbolicAddress::CeilF;
    case BD_SymbolicAddress::CeilF64:
      return SymbolicAddress::CeilD;
    case BD_SymbolicAddress::NearestF32:
      return SymbolicAddress::NearbyIntF;
    case BD_SymbolicAddress::NearestF64:
      return SymbolicAddress::NearbyIntD;
    case BD_SymbolicAddress::TruncF32:
      return SymbolicAddress::TruncF;
    case BD_SymbolicAddress::TruncF64:
      return SymbolicAddress::TruncD;
    case BD_SymbolicAddress::PreBarrier:
      return SymbolicAddress::PreBarrierFiltering;
    case BD_SymbolicAddress::PostBarrier:
      return SymbolicAddress::PostBarrierFiltering;
    case BD_SymbolicAddress::WaitI32:
      return SymbolicAddress::WaitI32;
    case BD_SymbolicAddress::WaitI64:
      return SymbolicAddress::WaitI64;
    case BD_SymbolicAddress::Wake:
      return SymbolicAddress::Wake;
    case BD_SymbolicAddress::Limit:
      break;
  }
  MOZ_CRASH("unknown baldrdash symbolic address");
}
112
// Assembles the final machine code for one Cranelift-compiled function:
// emits the standard wasm prologue/epilogue around the raw code produced by
// the Rust side, performs the stack-overflow check (recording a stackmap for
// its trap site), appends and relocates jump tables / read-only data,
// rebases this function's stackmaps to their final buffer offsets, and
// translates Cranelift metadata entries (direct/indirect calls, traps,
// symbolic accesses) into the masm's bookkeeping structures.
//
// `stackMapsOffset`/`stackMapsCount` delimit the stackmaps already appended
// for this function. Returns false on OOM; on success, *offsets describes
// the function's extent in the masm buffer.
static bool GenerateCraneliftCode(
    WasmMacroAssembler& masm, const CraneliftCompiledFunc& func,
    const FuncType& funcType, const TypeIdDesc& funcTypeId,
    uint32_t lineOrBytecode, uint32_t funcBytecodeSize, StackMaps* stackMaps,
    size_t stackMapsOffset, size_t stackMapsCount, FuncOffsets* offsets) {
  wasm::GenerateFunctionPrologue(masm, funcTypeId, mozilla::Nothing(), offsets);

  // Omit the check when framePushed is small and we know there's no
  // recursion.
  if (func.frame_pushed < MAX_UNCHECKED_LEAF_FRAME_SIZE &&
      !func.contains_calls) {
    masm.reserveStack(func.frame_pushed);
  } else {
    std::pair<CodeOffset, uint32_t> pair = masm.wasmReserveStackChecked(
        func.frame_pushed, BytecodeOffset(lineOrBytecode));
    CodeOffset trapInsnOffset = pair.first;
    size_t nBytesReservedBeforeTrap = pair.second;

    MachineState trapExitLayout;
    size_t trapExitLayoutNumWords;
    GenerateTrapExitMachineState(&trapExitLayout, &trapExitLayoutNumWords);

    size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(funcType.args());

    ArgTypeVector args(funcType);
    wasm::StackMap* functionEntryStackMap = nullptr;
    if (!CreateStackMapForFunctionEntryTrap(
            args, trapExitLayout, trapExitLayoutNumWords,
            nBytesReservedBeforeTrap, nInboundStackArgBytes,
            &functionEntryStackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stackmap, even if there are no
    // refs to track.
    MOZ_ASSERT(functionEntryStackMap);

    if (functionEntryStackMap &&
        !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
                        functionEntryStackMap)) {
      // On add() failure we still own the stackmap; free it before bailing.
      functionEntryStackMap->destroy();
      return false;
    }
  }
  MOZ_ASSERT(masm.framePushed() == func.frame_pushed);

  // Copy the machine code; handle jump tables and other read-only data below.
  uint32_t funcBase = masm.currentOffset();
  if (func.code_size && !masm.appendRawCode(func.code, func.code_size)) {
    return false;
  }
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
  uint32_t codeEnd = masm.currentOffset();
#endif

  wasm::GenerateFunctionEpilogue(masm, func.frame_pushed, offsets);

  if (func.num_rodata_relocs > 0) {
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
    constexpr size_t jumptableElementSize = 4;

    MOZ_ASSERT(func.jumptables_size % jumptableElementSize == 0);

    // Align the jump tables properly.
    masm.haltingAlign(jumptableElementSize);

    // Copy over the tables and read-only data.
    uint32_t rodataBase = masm.currentOffset();
    if (!masm.appendRawCode(func.code + func.code_size,
                            func.total_size - func.code_size)) {
      return false;
    }

    uint32_t numElem = func.jumptables_size / jumptableElementSize;
    uint32_t bias = rodataBase - codeEnd;

    // Bias the jump table(s). The table values are negative values
    // representing backward jumps. By shifting the table down we increase the
    // distance and so we add a negative value to reflect the larger distance.
    //
    // Note addToPCRel4() works from the end of the instruction, hence the loop
    // bounds.
    for (uint32_t i = 1; i <= numElem; i++) {
      masm.addToPCRel4(rodataBase + (i * jumptableElementSize), -bias);
    }

    // Patch up the code locations. These represent forward distances that also
    // become greater, so we add a positive value.
    for (uint32_t i = 0; i < func.num_rodata_relocs; i++) {
      MOZ_ASSERT(func.rodata_relocs[i] < func.code_size);
      masm.addToPCRel4(funcBase + func.rodata_relocs[i], bias);
    }
#else
    MOZ_CRASH("No jump table support on this platform");
#endif
  }

  masm.flush();
  if (masm.oom()) {
    return false;
  }
  offsets->end = masm.currentOffset();

  // Now that the function's base offset in the masm buffer is known, shift
  // this function's stackmaps from function-relative to buffer-relative
  // offsets.
  for (size_t i = 0; i < stackMapsCount; i++) {
    auto* maplet = stackMaps->getRef(stackMapsOffset + i);
    maplet->offsetBy(funcBase);
  }

  for (size_t i = 0; i < func.num_metadata; i++) {
    const CraneliftMetadataEntry& metadata = func.metadatas[i];

    // Guard against overflow when rebasing the metadata's code offset.
    CheckedInt<size_t> offset = funcBase;
    offset += metadata.code_offset;
    if (!offset.isValid()) {
      return false;
    }

#ifdef DEBUG
    // Check code offsets.
    MOZ_ASSERT(offset.value() >= offsets->uncheckedCallEntry);
    MOZ_ASSERT(offset.value() < offsets->ret);
    MOZ_ASSERT(metadata.module_bytecode_offset != 0);

    // Check bytecode offsets.
    if (lineOrBytecode > 0) {
      MOZ_ASSERT(metadata.module_bytecode_offset >= lineOrBytecode);
      MOZ_ASSERT(metadata.module_bytecode_offset <
                 lineOrBytecode + funcBytecodeSize);
    }
#endif
    uint32_t bytecodeOffset = metadata.module_bytecode_offset;

    switch (metadata.which) {
      case CraneliftMetadataEntry::Which::DirectCall: {
        CallSiteDesc desc(bytecodeOffset, CallSiteDesc::Func);
        masm.append(desc, CodeOffset(offset.value()), metadata.extra);
        break;
      }
      case CraneliftMetadataEntry::Which::IndirectCall: {
        CallSiteDesc desc(bytecodeOffset, CallSiteDesc::Dynamic);
        masm.append(desc, CodeOffset(offset.value()));
        break;
      }
      case CraneliftMetadataEntry::Which::Trap: {
        Trap trap = (Trap)metadata.extra;
        BytecodeOffset trapOffset(bytecodeOffset);
        masm.append(trap, wasm::TrapSite(offset.value(), trapOffset));
        break;
      }
      case CraneliftMetadataEntry::Which::SymbolicAccess: {
        CodeOffset raOffset(offset.value());
        CallSiteDesc desc(bytecodeOffset, CallSiteDesc::Symbolic);
        masm.append(desc, raOffset);

        SymbolicAddress sym =
            ToSymbolicAddress(BD_SymbolicAddress(metadata.extra));
        masm.append(SymbolicAccess(raOffset, sym));
        break;
      }
      default: {
        MOZ_CRASH("unknown cranelift metadata kind");
      }
    }
  }

  return true;
}
280
281 // In Rust, a BatchCompiler variable has a lifetime constrained by those of its
282 // associated StaticEnvironment and ModuleEnvironment. This RAII class ties
283 // them together, as well as makes sure that the compiler is properly destroyed
284 // when it exits scope.
285
286 class CraneliftContext {
287 CraneliftStaticEnvironment staticEnv_;
288 CraneliftModuleEnvironment moduleEnv_;
289 CraneliftCompiler* compiler_;
290
291 public:
CraneliftContext(const ModuleEnvironment & moduleEnv)292 explicit CraneliftContext(const ModuleEnvironment& moduleEnv)
293 : moduleEnv_(moduleEnv), compiler_(nullptr) {
294 staticEnv_.ref_types_enabled = true;
295 staticEnv_.threads_enabled = true;
296 staticEnv_.v128_enabled = moduleEnv.v128Enabled();
297 #ifdef WASM_SUPPORTS_HUGE_MEMORY
298 if (moduleEnv.hugeMemoryEnabled()) {
299 // In the huge memory configuration, we always reserve the full 4 GB
300 // index space for a heap.
301 staticEnv_.static_memory_bound = HugeIndexRange;
302 staticEnv_.memory_guard_size = HugeOffsetGuardLimit;
303 } else {
304 staticEnv_.memory_guard_size = OffsetGuardLimit;
305 }
306 #endif
307 // Otherwise, heap bounds are stored in the `boundsCheckLimit` field
308 // of TlsData.
309 }
init()310 bool init() {
311 compiler_ = cranelift_compiler_create(&staticEnv_, &moduleEnv_);
312 return !!compiler_;
313 }
~CraneliftContext()314 ~CraneliftContext() {
315 if (compiler_) {
316 cranelift_compiler_destroy(compiler_);
317 }
318 }
operator CraneliftCompiler*()319 operator CraneliftCompiler*() { return compiler_; }
320 };
321
// Translates a BaldrMonkey FuncCompileInput into the FFI-friendly struct
// handed to the Rust compiler. The bytecode pointer borrows from `func`, so
// the FuncCompileInput must outlive this object.
CraneliftFuncCompileInput::CraneliftFuncCompileInput(
    const FuncCompileInput& func)
    : bytecode(func.begin),
      bytecode_size(func.end - func.begin),
      index(func.index),
      offset_in_module(func.lineOrBytecode) {}
328
// make_heap() in wasm2clif.rs assumes the bounds-check limit is located
// immediately after the memory base pointer in TlsData.
static_assert(offsetof(TlsData, boundsCheckLimit) == sizeof(void*),
              "fix make_heap() in wasm2clif.rs");
331
// Gathers the host CPU feature flags (only meaningful on x64; all false on
// other architectures) and the TlsData/FuncImportTls field offsets that the
// Rust-side compiler bakes into generated code. The feature-toggle and
// memory-layout fields left at 0/false here are filled in later by
// CraneliftContext's constructor.
CraneliftStaticEnvironment::CraneliftStaticEnvironment()
    :
#ifdef JS_CODEGEN_X64
      has_sse2(Assembler::HasSSE2()),
      has_sse3(Assembler::HasSSE3()),
      has_sse41(Assembler::HasSSE41()),
      has_sse42(Assembler::HasSSE42()),
      has_popcnt(Assembler::HasPOPCNT()),
      has_avx(Assembler::HasAVX()),
      has_bmi1(Assembler::HasBMI1()),
      has_bmi2(Assembler::HasBMI2()),
      has_lzcnt(Assembler::HasLZCNT()),
#else
      has_sse2(false),
      has_sse3(false),
      has_sse41(false),
      has_sse42(false),
      has_popcnt(false),
      has_avx(false),
      has_bmi1(false),
      has_bmi2(false),
      has_lzcnt(false),
#endif
#if defined(XP_WIN)
      platform_is_windows(true),
#else
      platform_is_windows(false),
#endif
      ref_types_enabled(false),
      threads_enabled(false),
      v128_enabled(false),
      static_memory_bound(0),
      memory_guard_size(0),
      memory_base_tls_offset(offsetof(TlsData, memoryBase)),
      instance_tls_offset(offsetof(TlsData, instance)),
      interrupt_tls_offset(offsetof(TlsData, interrupt)),
      cx_tls_offset(offsetof(TlsData, cx)),
      realm_cx_offset(JSContext::offsetOfRealm()),
      realm_tls_offset(offsetof(TlsData, realm)),
      realm_func_import_tls_offset(offsetof(FuncImportTls, realm)),
      size_of_wasm_frame(sizeof(wasm::Frame)) {
}
374
375 // Most of BaldrMonkey's data structures refer to a "global offset" which is a
376 // byte offset into the `globalArea` field of the `TlsData` struct.
377 //
378 // Cranelift represents global variables with their byte offset from the "VM
379 // context pointer" which is the `WasmTlsReg` pointing to the `TlsData`
380 // struct.
381 //
382 // This function translates between the two.
383
globalToTlsOffset(size_t globalOffset)384 static size_t globalToTlsOffset(size_t globalOffset) {
385 return offsetof(wasm::TlsData, globalArea) + globalOffset;
386 }
387
// Wraps a ModuleEnvironment for the Rust side, caching the module's minimum
// memory size converted from bytes to wasm pages (with release-mode checks
// that the byte count fits in 4 GB and is page-aligned).
CraneliftModuleEnvironment::CraneliftModuleEnvironment(
    const ModuleEnvironment& env)
    : env(&env) {
  // env.minMemoryLength is in bytes. Convert it to wasm pages.
  static_assert(sizeof(env.minMemoryLength) == 8);
  MOZ_RELEASE_ASSERT(env.minMemoryLength <= (((uint64_t)1) << 32));
  MOZ_RELEASE_ASSERT((env.minMemoryLength & wasm::PageMask) == 0);
  min_memory_length = (uint32_t)(env.minMemoryLength >> wasm::PageBits);
}
397
env_unpack(BD_ValType valType)398 TypeCode env_unpack(BD_ValType valType) {
399 return PackedTypeCode::fromBits(valType.packed).typeCode();
400 }
401
env_num_datas(const CraneliftModuleEnvironment * env)402 size_t env_num_datas(const CraneliftModuleEnvironment* env) {
403 return env->env->dataCount.valueOr(0);
404 }
405
env_num_elems(const CraneliftModuleEnvironment * env)406 size_t env_num_elems(const CraneliftModuleEnvironment* env) {
407 return env->env->elemSegments.length();
408 }
env_elem_typecode(const CraneliftModuleEnvironment * env,uint32_t index)409 TypeCode env_elem_typecode(const CraneliftModuleEnvironment* env,
410 uint32_t index) {
411 return env->env->elemSegments[index]->elemType.packed().typeCode();
412 }
413
414 // Returns a number of pages in the range [0..65536], or UINT32_MAX to signal
415 // that no maximum has been set.
env_max_memory(const CraneliftModuleEnvironment * env)416 uint32_t env_max_memory(const CraneliftModuleEnvironment* env) {
417 // env.maxMemoryLength is in bytes. Convert it to wasm pages.
418 if (env->env->maxMemoryLength.isSome()) {
419 // We use |auto| here rather than |uint64_t| so that the static_assert will
420 // fail if |maxMemoryLength| is changed to some other size.
421 auto inBytes = *(env->env->maxMemoryLength);
422 static_assert(sizeof(inBytes) == 8);
423 MOZ_RELEASE_ASSERT(inBytes <= (((uint64_t)1) << 32));
424 MOZ_RELEASE_ASSERT((inBytes & wasm::PageMask) == 0);
425 return (uint32_t)(inBytes >> wasm::PageBits);
426 }
427 return UINT32_MAX;
428 }
429
env_uses_shared_memory(const CraneliftModuleEnvironment * env)430 bool env_uses_shared_memory(const CraneliftModuleEnvironment* env) {
431 return env->env->usesSharedMemory();
432 }
433
env_has_memory(const CraneliftModuleEnvironment * env)434 bool env_has_memory(const CraneliftModuleEnvironment* env) {
435 return env->env->usesMemory();
436 }
437
env_num_types(const CraneliftModuleEnvironment * env)438 size_t env_num_types(const CraneliftModuleEnvironment* env) {
439 return env->env->types.length();
440 }
env_type(const CraneliftModuleEnvironment * env,size_t typeIndex)441 const FuncType* env_type(const CraneliftModuleEnvironment* env,
442 size_t typeIndex) {
443 return &env->env->types[typeIndex].funcType();
444 }
445
env_num_funcs(const CraneliftModuleEnvironment * env)446 size_t env_num_funcs(const CraneliftModuleEnvironment* env) {
447 return env->env->funcs.length();
448 }
env_func_sig(const CraneliftModuleEnvironment * env,size_t funcIndex)449 const FuncType* env_func_sig(const CraneliftModuleEnvironment* env,
450 size_t funcIndex) {
451 return env->env->funcs[funcIndex].type;
452 }
env_func_sig_id(const CraneliftModuleEnvironment * env,size_t funcIndex)453 const TypeIdDesc* env_func_sig_id(const CraneliftModuleEnvironment* env,
454 size_t funcIndex) {
455 return env->env->funcs[funcIndex].typeId;
456 }
env_func_sig_index(const CraneliftModuleEnvironment * env,size_t funcIndex)457 size_t env_func_sig_index(const CraneliftModuleEnvironment* env,
458 size_t funcIndex) {
459 return env->env->funcs[funcIndex].typeIndex;
460 }
env_is_func_valid_for_ref(const CraneliftModuleEnvironment * env,uint32_t index)461 bool env_is_func_valid_for_ref(const CraneliftModuleEnvironment* env,
462 uint32_t index) {
463 return env->env->funcs[index].canRefFunc();
464 }
465
env_func_import_tls_offset(const CraneliftModuleEnvironment * env,size_t funcIndex)466 size_t env_func_import_tls_offset(const CraneliftModuleEnvironment* env,
467 size_t funcIndex) {
468 return globalToTlsOffset(env->env->funcImportGlobalDataOffsets[funcIndex]);
469 }
470
env_func_is_import(const CraneliftModuleEnvironment * env,size_t funcIndex)471 bool env_func_is_import(const CraneliftModuleEnvironment* env,
472 size_t funcIndex) {
473 return env->env->funcIsImport(funcIndex);
474 }
475
env_signature(const CraneliftModuleEnvironment * env,size_t funcTypeIndex)476 const FuncType* env_signature(const CraneliftModuleEnvironment* env,
477 size_t funcTypeIndex) {
478 return &env->env->types[funcTypeIndex].funcType();
479 }
480
env_signature_id(const CraneliftModuleEnvironment * env,size_t funcTypeIndex)481 const TypeIdDesc* env_signature_id(const CraneliftModuleEnvironment* env,
482 size_t funcTypeIndex) {
483 return &env->env->typeIds[funcTypeIndex];
484 }
485
env_num_tables(const CraneliftModuleEnvironment * env)486 size_t env_num_tables(const CraneliftModuleEnvironment* env) {
487 return env->env->tables.length();
488 }
env_table(const CraneliftModuleEnvironment * env,size_t tableIndex)489 const TableDesc* env_table(const CraneliftModuleEnvironment* env,
490 size_t tableIndex) {
491 return &env->env->tables[tableIndex];
492 }
493
env_num_globals(const CraneliftModuleEnvironment * env)494 size_t env_num_globals(const CraneliftModuleEnvironment* env) {
495 return env->env->globals.length();
496 }
env_global(const CraneliftModuleEnvironment * env,size_t globalIndex)497 const GlobalDesc* env_global(const CraneliftModuleEnvironment* env,
498 size_t globalIndex) {
499 return &env->env->globals[globalIndex];
500 }
501
// Compiles a batch of functions with Cranelift, appending the generated
// machine code and per-function bookkeeping (code ranges, stackmaps) to
// `code`. The Rust-side compiler context is cached across batches through
// code->swapCranelift()/CraneliftReusableData. Returns false on OOM or
// compile failure; in the latter case an error message is stored in *error.
bool wasm::CraneliftCompileFunctions(const ModuleEnvironment& moduleEnv,
                                     const CompilerEnvironment& compilerEnv,
                                     LifoAlloc& lifo,
                                     const FuncCompileInputVector& inputs,
                                     CompiledCode* code, UniqueChars* error) {
  MOZ_RELEASE_ASSERT(CraneliftPlatformSupport());

  MOZ_ASSERT(compilerEnv.tier() == Tier::Optimized);
  MOZ_ASSERT(compilerEnv.debug() == DebugEnabled::False);
  MOZ_ASSERT(compilerEnv.optimizedBackend() == OptimizedBackend::Cranelift);
  MOZ_ASSERT(!moduleEnv.isAsmJS());

  TempAllocator alloc(&lifo);
  JitContext jitContext(&alloc);
  WasmMacroAssembler masm(alloc, moduleEnv);
  MOZ_ASSERT(IsCompilingWasm());

  // Swap in already-allocated empty vectors to avoid malloc/free.
  MOZ_ASSERT(code->empty());

  CraneliftReusableData reusableContext;
  if (!code->swapCranelift(masm, reusableContext)) {
    return false;
  }

  // Lazily create the compiler context on the first batch; subsequent
  // batches reuse the one stashed in reusableContext.
  if (!reusableContext) {
    auto context = MakeUnique<CraneliftContext>(moduleEnv);
    if (!context || !context->init()) {
      return false;
    }
    reusableContext.reset((void**)context.release());
  }

  CraneliftContext* compiler = (CraneliftContext*)reusableContext.get();

  // Disable instruction spew if we're going to disassemble after code
  // generation, or the output will be a mess.

  bool jitSpew = JitSpewEnabled(js::jit::JitSpew_Codegen);
  if (jitSpew) {
    DisableChannel(js::jit::JitSpew_Codegen);
  }
  auto reenableSpew = mozilla::MakeScopeExit([&] {
    if (jitSpew) {
      EnableChannel(js::jit::JitSpew_Codegen);
    }
  });

  for (const FuncCompileInput& func : inputs) {
    Decoder d(func.begin, func.end, func.lineOrBytecode, error);

    size_t funcBytecodeSize = func.end - func.begin;

    // Remember how many stackmaps existed before this function so that
    // GenerateCraneliftCode can rebase only the new ones.
    size_t previousStackmapCount = code->stackMaps.length();

    CraneliftFuncCompileInput clifInput(func);
    clifInput.stackmaps = (BD_Stackmaps*)&code->stackMaps;

    CraneliftCompiledFunc clifFunc;

    char* clifError = nullptr;
    if (!cranelift_compile_function(*compiler, &clifInput, &clifFunc,
                                    &clifError)) {
      *error = JS_smprintf("%s", clifError);
      cranelift_compiler_free_error(clifError);
      return false;
    }

    uint32_t lineOrBytecode = func.lineOrBytecode;
    const FuncType& funcType = *moduleEnv.funcs[clifInput.index].type;
    const TypeIdDesc& funcTypeId = *moduleEnv.funcs[clifInput.index].typeId;

    FuncOffsets offsets;
    if (!GenerateCraneliftCode(
            masm, clifFunc, funcType, funcTypeId, lineOrBytecode,
            funcBytecodeSize, &code->stackMaps, previousStackmapCount,
            code->stackMaps.length() - previousStackmapCount, &offsets)) {
      return false;
    }

    if (!code->codeRanges.emplaceBack(func.index, lineOrBytecode, offsets)) {
      return false;
    }
  }

  masm.finish();
  if (masm.oom()) {
    return false;
  }

  if (jitSpew) {
    // The disassembler uses the jitspew for output, so re-enable now.
    EnableChannel(js::jit::JitSpew_Codegen);

    uint32_t totalCodeSize = masm.currentOffset();
    uint8_t* codeBuf = (uint8_t*)js_malloc(totalCodeSize);
    if (codeBuf) {
      masm.executableCopy(codeBuf);

      const CodeRangeVector& codeRanges = code->codeRanges;
      MOZ_ASSERT(codeRanges.length() >= inputs.length());

      // Within the current batch, functions' code ranges have been added in
      // the same order as the inputs.
      size_t firstCodeRangeIndex = codeRanges.length() - inputs.length();

      for (size_t i = 0; i < inputs.length(); i++) {
        int funcIndex = inputs[i].index;

        JitSpew(JitSpew_Codegen, "# ========================================");
        JitSpew(JitSpew_Codegen, "# Start of wasm cranelift code for index %d",
                funcIndex);

        size_t codeRangeIndex = firstCodeRangeIndex + i;
        uint32_t codeStart = codeRanges[codeRangeIndex].begin();
        uint32_t codeEnd = codeRanges[codeRangeIndex].end();

        jit::Disassemble(
            codeBuf + codeStart, codeEnd - codeStart,
            [](const char* text) { JitSpew(JitSpew_Codegen, "%s", text); });

        JitSpew(JitSpew_Codegen, "# End of wasm cranelift code for index %d",
                funcIndex);
      }
      js_free(codeBuf);
    }
  }

  // Hand the masm buffer and compiler context back for reuse by the next
  // batch.
  return code->swapCranelift(masm, reusableContext);
}
632
CraneliftFreeReusableData(void * ptr)633 void wasm::CraneliftFreeReusableData(void* ptr) {
634 CraneliftContext* compiler = (CraneliftContext*)ptr;
635 if (compiler) {
636 js_delete(compiler);
637 }
638 }
639
640 ////////////////////////////////////////////////////////////////////////////////
641 //
642 // Callbacks from Rust to C++.
643
// Offsets assumed by the `make_heap()` function; if the TlsData layout
// changes, update the Rust side to match.
static_assert(offsetof(wasm::TlsData, memoryBase) == 0, "memory base moved");

// The translate_call() function in wasm2clif.rs depends on these offsets.
static_assert(offsetof(wasm::FuncImportTls, code) == 0,
              "Import code field moved");
static_assert(offsetof(wasm::FuncImportTls, tls) == sizeof(void*),
              "Import tls moved");
652
653 // Global
654
global_isConstant(const GlobalDesc * global)655 bool global_isConstant(const GlobalDesc* global) {
656 return global->isConstant();
657 }
658
global_isMutable(const GlobalDesc * global)659 bool global_isMutable(const GlobalDesc* global) { return global->isMutable(); }
660
global_isIndirect(const GlobalDesc * global)661 bool global_isIndirect(const GlobalDesc* global) {
662 return global->isIndirect();
663 }
664
// Extracts a global's constant initial value into the FFI BD_ConstantValue
// union, tagged with its type code. Crashes on an unrecognized type.
BD_ConstantValue global_constantValue(const GlobalDesc* global) {
  Val value(global->constantValue());
  BD_ConstantValue v;
  v.t = TypeCode(value.type().kind());
  switch (v.t) {
    case TypeCode::I32:
      v.u.i32 = value.i32();
      break;
    case TypeCode::I64:
      v.u.i64 = value.i64();
      break;
    case TypeCode::F32:
      v.u.f32 = value.f32();
      break;
    case TypeCode::F64:
      v.u.f64 = value.f64();
      break;
    case TypeCode::V128:
      memcpy(&v.u.v128, &value.v128(), sizeof(v.u.v128));
      break;
    case AbstractReferenceTypeCode:
      v.u.r = value.ref().forCompiledCode();
      break;
    default:
      MOZ_CRASH("Bad type");
  }
  return v;
}
693
global_type(const GlobalDesc * global)694 TypeCode global_type(const GlobalDesc* global) {
695 return global->type().packed().typeCode();
696 }
697
global_tlsOffset(const GlobalDesc * global)698 size_t global_tlsOffset(const GlobalDesc* global) {
699 return globalToTlsOffset(global->offset());
700 }
701
702 // TableDesc
703
table_tlsOffset(const TableDesc * table)704 size_t table_tlsOffset(const TableDesc* table) {
705 return globalToTlsOffset(table->globalDataOffset);
706 }
707
table_initialLimit(const TableDesc * table)708 uint32_t table_initialLimit(const TableDesc* table) {
709 return table->initialLength;
710 }
table_maximumLimit(const TableDesc * table)711 uint32_t table_maximumLimit(const TableDesc* table) {
712 return table->maximumLength.valueOr(UINT32_MAX);
713 }
table_elementTypeCode(const TableDesc * table)714 TypeCode table_elementTypeCode(const TableDesc* table) {
715 return table->elemType.packed().typeCode();
716 }
717
718 // Sig
719
funcType_numArgs(const FuncType * funcType)720 size_t funcType_numArgs(const FuncType* funcType) {
721 return funcType->args().length();
722 }
723
funcType_args(const FuncType * funcType)724 const BD_ValType* funcType_args(const FuncType* funcType) {
725 static_assert(sizeof(BD_ValType) == sizeof(ValType), "update BD_ValType");
726 return (const BD_ValType*)funcType->args().begin();
727 }
728
funcType_numResults(const FuncType * funcType)729 size_t funcType_numResults(const FuncType* funcType) {
730 return funcType->results().length();
731 }
732
funcType_results(const FuncType * funcType)733 const BD_ValType* funcType_results(const FuncType* funcType) {
734 static_assert(sizeof(BD_ValType) == sizeof(ValType), "update BD_ValType");
735 return (const BD_ValType*)funcType->results().begin();
736 }
737
funcType_idKind(const TypeIdDesc * funcTypeId)738 TypeIdDescKind funcType_idKind(const TypeIdDesc* funcTypeId) {
739 return funcTypeId->kind();
740 }
741
funcType_idImmediate(const TypeIdDesc * funcTypeId)742 size_t funcType_idImmediate(const TypeIdDesc* funcTypeId) {
743 return funcTypeId->immediate();
744 }
745
funcType_idTlsOffset(const TypeIdDesc * funcTypeId)746 size_t funcType_idTlsOffset(const TypeIdDesc* funcTypeId) {
747 return globalToTlsOffset(funcTypeId->globalDataOffset());
748 }
749
stackmaps_add(BD_Stackmaps * sink,const uint32_t * bitMap,size_t mappedWords,size_t argsSize,size_t codeOffset)750 void stackmaps_add(BD_Stackmaps* sink, const uint32_t* bitMap,
751 size_t mappedWords, size_t argsSize, size_t codeOffset) {
752 const uint32_t BitElemSize = sizeof(uint32_t) * 8;
753
754 StackMaps* maps = (StackMaps*)sink;
755 StackMap* map = StackMap::create(mappedWords);
756 MOZ_ALWAYS_TRUE(map);
757
758 // Copy the cranelift stackmap into our spidermonkey one
759 // TODO: Take ownership of the cranelift stackmap and avoid a copy
760 for (uint32_t i = 0; i < mappedWords; i++) {
761 uint32_t bit = (bitMap[i / BitElemSize] >> (i % BitElemSize)) & 0x1;
762 if (bit) {
763 map->setBit(i);
764 }
765 }
766
767 map->setFrameOffsetFromTop((argsSize + sizeof(wasm::Frame)) /
768 sizeof(uintptr_t));
769 MOZ_ALWAYS_TRUE(maps->add((uint8_t*)codeOffset, map));
770 }
771