1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 *
4 * Copyright 2015 Mozilla Foundation
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19 #include "wasm/WasmStubs.h"
20
21 #include "mozilla/ArrayUtils.h"
22 #include "mozilla/EnumeratedRange.h"
23
24 #include "wasm/WasmCode.h"
25 #include "wasm/WasmGenerator.h"
26 #include "wasm/WasmInstance.h"
27
28 #include "jit/MacroAssembler-inl.h"
29
30 using namespace js;
31 using namespace js::jit;
32 using namespace js::wasm;
33
34 using mozilla::ArrayLength;
35 using mozilla::MakeEnumeratedRange;
36
37 typedef Vector<jit::MIRType, 8, SystemAllocPolicy> MIRTypeVector;
38 typedef jit::ABIArgIter<MIRTypeVector> ABIArgMIRTypeIter;
39 typedef jit::ABIArgIter<ValTypeVector> ABIArgValTypeIter;
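// ABIArgIter walks a vector of argument types and assigns each argument to a
// register or stack slot according to the native ABI; the instantiations
// above are used by the entry and exit stubs below to locate incoming and
// outgoing arguments.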
40
41 static bool FinishOffsets(MacroAssembler& masm, Offsets* offsets) {
42 // On old ARM hardware, constant pools could be inserted and they need to
43 // be flushed before considering the size of the masm.
44 masm.flushBuffer();
45 offsets->end = masm.size();
46 return !masm.oom();
47 }
48
49 static void AssertStackAlignment(MacroAssembler& masm, uint32_t alignment,
50 uint32_t addBeforeAssert = 0) {
51 MOZ_ASSERT(
52 (sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
53 masm.assertStackAlignment(alignment, addBeforeAssert);
54 }
55
56 static unsigned StackDecrementForCall(MacroAssembler& masm, uint32_t alignment,
57 unsigned bytesToPush) {
58 return StackDecrementForCall(alignment, sizeof(Frame) + masm.framePushed(),
59 bytesToPush);
60 }
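// The overload above folds the fixed wasm Frame and the bytes already pushed
// on the current masm frame into the alignment computation before adding the
// requested outgoing argument bytes.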
61
62 template <class VectorT>
63 static unsigned StackArgBytes(const VectorT& args) {
64 ABIArgIter<VectorT> iter(args);
65 while (!iter.done()) iter++;
66 return iter.stackBytesConsumedSoFar();
67 }
68
69 template <class VectorT>
70 static unsigned StackDecrementForCall(MacroAssembler& masm, uint32_t alignment,
71 const VectorT& args,
72 unsigned extraBytes = 0) {
73 return StackDecrementForCall(masm, alignment,
74 StackArgBytes(args) + extraBytes);
75 }
76
77 static void SetupABIArguments(MacroAssembler& masm, const FuncExport& fe,
78 Register argv, Register scratch) {
79 // Copy parameters out of argv and into the registers/stack-slots specified by
80 // the system ABI.
81 for (ABIArgValTypeIter iter(fe.sig().args()); !iter.done(); iter++) {
82 unsigned argOffset = iter.index() * sizeof(ExportArg);
83 Address src(argv, argOffset);
84 MIRType type = iter.mirType();
85 switch (iter->kind()) {
86 case ABIArg::GPR:
87 if (type == MIRType::Int32)
88 masm.load32(src, iter->gpr());
89 else if (type == MIRType::Int64)
90 masm.load64(src, iter->gpr64());
91 break;
92 #ifdef JS_CODEGEN_REGISTER_PAIR
93 case ABIArg::GPR_PAIR:
94 if (type == MIRType::Int64)
95 masm.load64(src, iter->gpr64());
96 else
97 MOZ_CRASH("wasm uses hardfp for function calls.");
98 break;
99 #endif
100 case ABIArg::FPU: {
101 static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
102 "ExportArg must be big enough to store SIMD values");
103 switch (type) {
104 case MIRType::Int8x16:
105 case MIRType::Int16x8:
106 case MIRType::Int32x4:
107 case MIRType::Bool8x16:
108 case MIRType::Bool16x8:
109 case MIRType::Bool32x4:
110 masm.loadUnalignedSimd128Int(src, iter->fpu());
111 break;
112 case MIRType::Float32x4:
113 masm.loadUnalignedSimd128Float(src, iter->fpu());
114 break;
115 case MIRType::Double:
116 masm.loadDouble(src, iter->fpu());
117 break;
118 case MIRType::Float32:
119 masm.loadFloat32(src, iter->fpu());
120 break;
121 default:
122 MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
123 break;
124 }
125 break;
126 }
127 case ABIArg::Stack:
128 switch (type) {
129 case MIRType::Int32:
130 masm.load32(src, scratch);
131 masm.storePtr(scratch, Address(masm.getStackPointer(),
132 iter->offsetFromArgBase()));
133 break;
134 case MIRType::Int64: {
135 RegisterOrSP sp = masm.getStackPointer();
136 #if JS_BITS_PER_WORD == 32
137 masm.load32(LowWord(src), scratch);
138 masm.store32(scratch,
139 LowWord(Address(sp, iter->offsetFromArgBase())));
140 masm.load32(HighWord(src), scratch);
141 masm.store32(scratch,
142 HighWord(Address(sp, iter->offsetFromArgBase())));
143 #else
144 Register64 scratch64(scratch);
145 masm.load64(src, scratch64);
146 masm.store64(scratch64, Address(sp, iter->offsetFromArgBase()));
147 #endif
148 break;
149 }
150 case MIRType::Double:
151 masm.loadDouble(src, ScratchDoubleReg);
152 masm.storeDouble(
153 ScratchDoubleReg,
154 Address(masm.getStackPointer(), iter->offsetFromArgBase()));
155 break;
156 case MIRType::Float32:
157 masm.loadFloat32(src, ScratchFloat32Reg);
158 masm.storeFloat32(
159 ScratchFloat32Reg,
160 Address(masm.getStackPointer(), iter->offsetFromArgBase()));
161 break;
162 case MIRType::Int8x16:
163 case MIRType::Int16x8:
164 case MIRType::Int32x4:
165 case MIRType::Bool8x16:
166 case MIRType::Bool16x8:
167 case MIRType::Bool32x4:
168 masm.loadUnalignedSimd128Int(src, ScratchSimd128Reg);
169 masm.storeAlignedSimd128Int(
170 ScratchSimd128Reg,
171 Address(masm.getStackPointer(), iter->offsetFromArgBase()));
172 break;
173 case MIRType::Float32x4:
174 masm.loadUnalignedSimd128Float(src, ScratchSimd128Reg);
175 masm.storeAlignedSimd128Float(
176 ScratchSimd128Reg,
177 Address(masm.getStackPointer(), iter->offsetFromArgBase()));
178 break;
179 default:
180 MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE(
181 "unexpected stack arg type");
182 }
183 break;
184 case ABIArg::Uninitialized:
185 MOZ_CRASH("Uninitialized ABIArg kind");
186 }
187 }
188 }
189
190 static void StoreABIReturn(MacroAssembler& masm, const FuncExport& fe,
191 Register argv) {
192 // Store the return value in argv[0].
193 switch (fe.sig().ret()) {
194 case ExprType::Void:
195 break;
196 case ExprType::I32:
197 masm.store32(ReturnReg, Address(argv, 0));
198 break;
199 case ExprType::I64:
200 masm.store64(ReturnReg64, Address(argv, 0));
201 break;
202 case ExprType::F32:
203 masm.canonicalizeFloat(ReturnFloat32Reg);
204 masm.storeFloat32(ReturnFloat32Reg, Address(argv, 0));
205 break;
206 case ExprType::F64:
207 masm.canonicalizeDouble(ReturnDoubleReg);
208 masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
209 break;
210 case ExprType::I8x16:
211 case ExprType::I16x8:
212 case ExprType::I32x4:
213 case ExprType::B8x16:
214 case ExprType::B16x8:
215 case ExprType::B32x4:
216       // We don't have control over argv alignment, so do an unaligned access.
217 masm.storeUnalignedSimd128Int(ReturnSimd128Reg, Address(argv, 0));
218 break;
219 case ExprType::F32x4:
220       // We don't have control over argv alignment, so do an unaligned access.
221 masm.storeUnalignedSimd128Float(ReturnSimd128Reg, Address(argv, 0));
222 break;
223 case ExprType::Limit:
224 MOZ_CRASH("Limit");
225 }
226 }
227
228 #if defined(JS_CODEGEN_ARM)
229 // The ARM system ABI also includes d15 & s31 in the non-volatile float
230 // registers. Also exclude lr (a.k.a. r14), which we preserve manually.
231 static const LiveRegisterSet NonVolatileRegs =
232 LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask &
233 ~(uint32_t(1) << Registers::lr)),
234 FloatRegisterSet(FloatRegisters::NonVolatileMask |
235 (1ULL << FloatRegisters::d15) |
236 (1ULL << FloatRegisters::s31)));
237 #else
238 static const LiveRegisterSet NonVolatileRegs =
239 LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
240 FloatRegisterSet(FloatRegisters::NonVolatileMask));
241 #endif
242
243 #if defined(JS_CODEGEN_NONE)
244 static const unsigned NonVolatileRegsPushSize = 0;
245 #else
246 static const unsigned NonVolatileRegsPushSize =
247 NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
248 NonVolatileRegs.fpus().getPushSizeInBytes();
249 #endif
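// FramePushedBeforeAlign is the frame depth reached in GenerateInterpEntry
// after the caller's non-volatile registers and the saved 'argv' pointer have
// been pushed, immediately before the stack is dynamically realigned.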
250 static const unsigned FramePushedBeforeAlign =
251 NonVolatileRegsPushSize + sizeof(void*);
252
253 static void CallFuncExport(MacroAssembler& masm, const FuncExport& fe,
254 const Maybe<ImmPtr>& funcPtr) {
255 MOZ_ASSERT(fe.hasEagerStubs() == !funcPtr);
256 if (funcPtr)
257 masm.call(*funcPtr);
258 else
259 masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
260 }
261
262 // Generate a stub that enters wasm from a C++ caller via the native ABI. The
263 // signature of the entry point is Module::ExportFuncPtr. The exported wasm
264 // function has an ABI derived from its specific signature, so this function
265 // must map from the ABI of ExportFuncPtr to the export's signature's ABI.
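// (ExportFuncPtr receives a pointer to the ExportArg array and the callee's
// TlsData* and returns a boolean success flag; both arguments are read out
// below and the flag is derived from FramePointer before returning.)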
266 static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
267 const Maybe<ImmPtr>& funcPtr,
268 Offsets* offsets) {
269 masm.haltingAlign(CodeAlignment);
270
271 offsets->begin = masm.currentOffset();
272
273 // Save the return address if it wasn't already saved by the call insn.
274 #ifdef JS_USE_LINK_REGISTER
275 masm.pushReturnAddress();
276 #endif
277
278 // Save all caller non-volatile registers before we clobber them here and in
279 // the wasm callee (which does not preserve non-volatile registers).
280 masm.setFramePushed(0);
281 masm.PushRegsInMask(NonVolatileRegs);
282 MOZ_ASSERT(masm.framePushed() == NonVolatileRegsPushSize);
283
284 // Put the 'argv' argument into a non-argument/return/TLS register so that
285 // we can use 'argv' while we fill in the arguments for the wasm callee.
286 // Use a second non-argument/return register as temporary scratch.
287 Register argv = ABINonArgReturnReg0;
288 Register scratch = ABINonArgReturnReg1;
289
290 // Read the arguments of wasm::ExportFuncPtr according to the native ABI.
291 // The entry stub's frame is 1 word.
292 const unsigned argBase = sizeof(void*) + masm.framePushed();
293 ABIArgGenerator abi;
294 ABIArg arg;
295
296 // arg 1: ExportArg*
297 arg = abi.next(MIRType::Pointer);
298 if (arg.kind() == ABIArg::GPR)
299 masm.movePtr(arg.gpr(), argv);
300 else
301 masm.loadPtr(
302 Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
303 argv);
304
305 // Arg 2: TlsData*
306 arg = abi.next(MIRType::Pointer);
307 if (arg.kind() == ABIArg::GPR)
308 masm.movePtr(arg.gpr(), WasmTlsReg);
309 else
310 masm.loadPtr(
311 Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
312 WasmTlsReg);
313
314 // Save 'argv' on the stack so that we can recover it after the call.
315 masm.Push(argv);
316
317 // Since we're about to dynamically align the stack, reset the frame depth
318 // so we can still assert static stack depth balancing.
319 MOZ_ASSERT(masm.framePushed() == FramePushedBeforeAlign);
320 masm.setFramePushed(0);
321
322 // Dynamically align the stack since ABIStackAlignment is not necessarily
323 // WasmStackAlignment. Preserve SP so it can be restored after the call.
324 masm.moveStackPtrTo(scratch);
325 masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
326 masm.Push(scratch);
327
328 // Reserve stack space for the call.
329 unsigned argDecrement = StackDecrementForCall(
330 WasmStackAlignment, masm.framePushed(), StackArgBytes(fe.sig().args()));
331 masm.reserveStack(argDecrement);
332
333 // Copy parameters out of argv and into the wasm ABI registers/stack-slots.
334 SetupABIArguments(masm, fe, argv, scratch);
335
336 // Setup wasm register state. The nullness of the frame pointer is used to
337 // determine whether the call ended in success or failure.
338 masm.movePtr(ImmWord(0), FramePointer);
339 masm.loadWasmPinnedRegsFromTls();
340
341 // Call into the real function. Note that, due to the throw stub, fp, tls
342 // and pinned registers may be clobbered.
343 masm.assertStackAlignment(WasmStackAlignment);
344 CallFuncExport(masm, fe, funcPtr);
345 masm.assertStackAlignment(WasmStackAlignment);
346
347 // Pop the arguments pushed after the dynamic alignment.
348 masm.freeStack(argDecrement);
349
350 // Pop the stack pointer to its value right before dynamic alignment.
351 masm.PopStackPtr();
352 MOZ_ASSERT(masm.framePushed() == 0);
353 masm.setFramePushed(FramePushedBeforeAlign);
354
355 // Recover the 'argv' pointer which was saved before aligning the stack.
356 masm.Pop(argv);
357
358 // Store the return value in argv[0].
359 StoreABIReturn(masm, fe, argv);
360
361 // After the ReturnReg is stored into argv[0] but before fp is clobbered by
362 // the PopRegsInMask(NonVolatileRegs) below, set the return value based on
363 // whether fp is null (which is the case for successful returns) or the
364   // FailFP magic value (set by the throw stub).
365 Label success, join;
366 masm.branchTestPtr(Assembler::Zero, FramePointer, FramePointer, &success);
367 #ifdef DEBUG
368 Label ok;
369 masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &ok);
370 masm.breakpoint();
371 masm.bind(&ok);
372 #endif
373 masm.move32(Imm32(false), ReturnReg);
374 masm.jump(&join);
375 masm.bind(&success);
376 masm.move32(Imm32(true), ReturnReg);
377 masm.bind(&join);
378
379 // Restore clobbered non-volatile registers of the caller.
380 masm.PopRegsInMask(NonVolatileRegs);
381 MOZ_ASSERT(masm.framePushed() == 0);
382
383 masm.ret();
384
385 return FinishOffsets(masm, offsets);
386 }
387
388 #ifdef JS_PUNBOX64
389 static const ValueOperand ScratchValIonEntry = ValueOperand(ABINonArgReg0);
390 #else
391 static const ValueOperand ScratchValIonEntry =
392 ValueOperand(ABINonArgReg0, ABINonArgReg1);
393 #endif
394 static const Register ScratchIonEntry = ABINonArgReg2;
395
396 static void CallSymbolicAddress(MacroAssembler& masm, bool isAbsolute,
397 SymbolicAddress sym) {
398 if (isAbsolute)
399 masm.call(ImmPtr(SymbolicAddressTarget(sym), ImmPtr::NoCheckToken()));
400 else
401 masm.call(sym);
402 }
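// 'isAbsolute' is true at the !fe.hasEagerStubs() call sites below, i.e. for
// lazily-generated stubs, which call the builtin via its absolute address
// rather than via a linked symbolic address.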
403
404 // Load instance's TLS from the callee.
405 static void GenerateJitEntryLoadTls(MacroAssembler& masm, unsigned frameSize) {
406 // ScratchIonEntry := callee => JSFunction*
407 unsigned offset = frameSize + JitFrameLayout::offsetOfCalleeToken();
408 masm.loadFunctionFromCalleeToken(Address(masm.getStackPointer(), offset),
409 ScratchIonEntry);
410
411 // ScratchValIonEntry := callee->getExtendedSlot(WASM_TLSDATA_SLOT)
412 // => Private(TlsData*)
413 offset = FunctionExtended::offsetOfExtendedSlot(
414 FunctionExtended::WASM_TLSDATA_SLOT);
415 masm.loadValue(Address(ScratchIonEntry, offset), ScratchValIonEntry);
416
417 // ScratchIonEntry := ScratchIonEntry->toPrivate() => TlsData*
418 masm.unboxPrivate(ScratchValIonEntry, WasmTlsReg);
419 // \o/
420 }
421
422 // Creates a JS fake exit frame for wasm, so the frame iterators just use
423 // JSJit frame iteration.
424 static void GenerateJitEntryThrow(MacroAssembler& masm, unsigned frameSize) {
425 MOZ_ASSERT(masm.framePushed() == frameSize);
426
427 GenerateJitEntryLoadTls(masm, frameSize);
428
429 masm.freeStack(frameSize);
430
431 masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, cx)), ScratchIonEntry);
432 masm.enterFakeExitFrame(ScratchIonEntry, ScratchIonEntry,
433 ExitFrameType::WasmJitEntry);
434
435 masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)),
436 ScratchIonEntry);
437 masm.loadPtr(
438 Address(ScratchIonEntry, Instance::offsetOfJSJitExceptionHandler()),
439 ScratchIonEntry);
440 masm.jump(ScratchIonEntry);
441 }
442
443 // Generate a stub that enters wasm from a jit code caller via the jit ABI.
444 static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
445 const FuncExport& fe, const Maybe<ImmPtr>& funcPtr,
446 Offsets* offsets) {
447 RegisterOrSP sp = masm.getStackPointer();
448
449 GenerateJitEntryPrologue(masm, offsets);
450
451 // The jit caller has set up the following stack layout (sp grows to the
452 // left):
453 // <-- retAddr | descriptor | callee | argc | this | arg1..N
454
455 unsigned normalBytesNeeded = StackArgBytes(fe.sig().args());
456
457 MIRTypeVector coerceArgTypes;
458 MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Int32));
459 MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
460 MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
461 unsigned oolBytesNeeded = StackArgBytes(coerceArgTypes);
462
463 unsigned bytesNeeded = Max(normalBytesNeeded, oolBytesNeeded);
464
465 // Note the jit caller ensures the stack is aligned *after* the call
466 // instruction.
467 unsigned frameSize =
468 StackDecrementForCall(WasmStackAlignment, 0, bytesNeeded);
469
470 // Reserve stack space for wasm ABI arguments, set up like this:
471 // <-- ABI args | padding
472 masm.reserveStack(frameSize);
473
474 GenerateJitEntryLoadTls(masm, frameSize);
475
476 if (fe.sig().hasI64ArgOrRet()) {
477 CallSymbolicAddress(masm, !fe.hasEagerStubs(),
478 SymbolicAddress::ReportInt64JSCall);
479 GenerateJitEntryThrow(masm, frameSize);
480 return FinishOffsets(masm, offsets);
481 }
482
483 FloatRegister scratchF = ABINonArgDoubleReg;
484 Register scratchG = ScratchIonEntry;
485 ValueOperand scratchV = ScratchValIonEntry;
486
487 // We do two loops:
488 // - one loop up-front will make sure that all the Value tags fit the
489 // expected signature argument types. If at least one inline conversion
490 // fails, we just jump to the OOL path which will call into C++. Inline
491 // conversions are ordered in the way we expect them to happen the most.
492 // - the second loop will unbox the arguments into the right registers.
493 Label oolCall;
494 for (size_t i = 0; i < fe.sig().args().length(); i++) {
495 unsigned jitArgOffset = frameSize + JitFrameLayout::offsetOfActualArg(i);
496 Address jitArgAddr(sp, jitArgOffset);
497 masm.loadValue(jitArgAddr, scratchV);
498
499 Label next;
500 switch (fe.sig().args()[i]) {
501 case ValType::I32: {
502 ScratchTagScope tag(masm, scratchV);
503 masm.splitTagForTest(scratchV, tag);
504
505 // For int32 inputs, just skip.
506 masm.branchTestInt32(Assembler::Equal, tag, &next);
507
508 // For double inputs, unbox, truncate and store back.
509 Label storeBack, notDouble;
510         masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
511 {
512 ScratchTagScopeRelease _(&tag);
513 masm.unboxDouble(scratchV, scratchF);
514 masm.branchTruncateDoubleMaybeModUint32(scratchF, scratchG, &oolCall);
515 masm.jump(&storeBack);
516 }
517         masm.bind(&notDouble);
518
519 // For null or undefined, store 0.
520 Label nullOrUndefined, notNullOrUndefined;
521 masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);
522         masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
523 masm.bind(&nullOrUndefined);
524 {
525 ScratchTagScopeRelease _(&tag);
526 masm.storeValue(Int32Value(0), jitArgAddr);
527 }
528 masm.jump(&next);
529         masm.bind(&notNullOrUndefined);
530
531 // For booleans, store the number value back. Other types (symbol,
532 // object, strings) go to the C++ call.
533 masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
534 masm.unboxBoolean(scratchV, scratchG);
535 // fallthrough:
536
537 masm.bind(&storeBack);
538 {
539 ScratchTagScopeRelease _(&tag);
540 masm.storeValue(JSVAL_TYPE_INT32, scratchG, jitArgAddr);
541 }
542 break;
543 }
544 case ValType::F32:
545 case ValType::F64: {
546 // Note we can reuse the same code for f32/f64 here, since for the
547 // case of f32, the conversion of f64 to f32 will happen in the
548 // second loop.
549 ScratchTagScope tag(masm, scratchV);
550 masm.splitTagForTest(scratchV, tag);
551
552 // For double inputs, just skip.
553 masm.branchTestDouble(Assembler::Equal, tag, &next);
554
555 // For int32 inputs, convert and rebox.
556 Label storeBack, notInt32;
557 {
558 ScratchTagScopeRelease _(&tag);
559           masm.branchTestInt32(Assembler::NotEqual, scratchV, &notInt32);
560 masm.int32ValueToDouble(scratchV, scratchF);
561 masm.jump(&storeBack);
562 }
563         masm.bind(&notInt32);
564
565 // For undefined (missing argument), store NaN.
566 Label notUndefined;
567         masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
568 {
569 ScratchTagScopeRelease _(&tag);
570 masm.storeValue(DoubleValue(JS::GenericNaN()), jitArgAddr);
571 masm.jump(&next);
572 }
573         masm.bind(&notUndefined);
574
575 // +null is 0.
576 Label notNull;
577         masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
578 {
579 ScratchTagScopeRelease _(&tag);
580 masm.storeValue(DoubleValue(0.), jitArgAddr);
581 }
582 masm.jump(&next);
583         masm.bind(&notNull);
584
585 // For booleans, store the number value back. Other types (symbol,
586 // object, strings) go to the C++ call.
587 masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
588 masm.boolValueToDouble(scratchV, scratchF);
589 // fallthrough:
590
591 masm.bind(&storeBack);
592 masm.boxDouble(scratchF, jitArgAddr);
593 break;
594 }
595 default: {
596 MOZ_CRASH("unexpected argument type when calling from the jit");
597 }
598 }
599 masm.nopAlign(CodeAlignment);
600 masm.bind(&next);
601 }
602
603 Label rejoinBeforeCall;
604 masm.bind(&rejoinBeforeCall);
605
606 // Convert all the expected values to unboxed values on the stack.
607 for (ABIArgValTypeIter iter(fe.sig().args()); !iter.done(); iter++) {
608 unsigned jitArgOffset =
609 frameSize + JitFrameLayout::offsetOfActualArg(iter.index());
610 Address argv(sp, jitArgOffset);
611 bool isStackArg = iter->kind() == ABIArg::Stack;
612 switch (iter.mirType()) {
613 case MIRType::Int32: {
614 Register target = isStackArg ? ScratchIonEntry : iter->gpr();
615 masm.unboxInt32(argv, target);
616 if (isStackArg)
617 masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
618 break;
619 }
620 case MIRType::Float32: {
621 FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
622 masm.unboxDouble(argv, ABINonArgDoubleReg);
623 masm.convertDoubleToFloat32(ABINonArgDoubleReg, target);
624 if (isStackArg)
625 masm.storeFloat32(target, Address(sp, iter->offsetFromArgBase()));
626 break;
627 }
628 case MIRType::Double: {
629 FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
630 masm.unboxDouble(argv, target);
631 if (isStackArg)
632 masm.storeDouble(target, Address(sp, iter->offsetFromArgBase()));
633 break;
634 }
635 default: { MOZ_CRASH("unexpected input argument when calling from jit"); }
636 }
637 }
638
639 // Setup wasm register state.
640 masm.loadWasmPinnedRegsFromTls();
641
642 // Call into the real function. Note that, due to the throw stub, fp, tls
643 // and pinned registers may be clobbered.
644 masm.assertStackAlignment(WasmStackAlignment);
645 CallFuncExport(masm, fe, funcPtr);
646 masm.assertStackAlignment(WasmStackAlignment);
647
648 // If fp is equal to the FailFP magic value (set by the throw stub), then
649 // report the exception to the JIT caller by jumping into the exception
650 // stub; otherwise the FP value is still set to the parent ion frame value.
651 Label exception;
652 masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &exception);
653
654 // Pop arguments.
655 masm.freeStack(frameSize);
656
657 // Store the return value in the JSReturnOperand.
658 switch (fe.sig().ret()) {
659 case ExprType::Void:
660 masm.moveValue(UndefinedValue(), JSReturnOperand);
661 break;
662 case ExprType::I32:
663 masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
664 break;
665 case ExprType::F32:
666 masm.canonicalizeFloat(ReturnFloat32Reg);
667 masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
668 masm.boxDouble(ReturnDoubleReg, JSReturnOperand, ScratchDoubleReg);
669 break;
670 case ExprType::F64:
671 masm.canonicalizeDouble(ReturnDoubleReg);
672 masm.boxDouble(ReturnDoubleReg, JSReturnOperand, ScratchDoubleReg);
673 break;
674 case ExprType::I64:
675 case ExprType::I8x16:
676 case ExprType::I16x8:
677 case ExprType::I32x4:
678 case ExprType::B8x16:
679 case ExprType::B16x8:
680 case ExprType::B32x4:
681 case ExprType::F32x4:
682 MOZ_CRASH("unexpected return type when calling from ion to wasm");
683 case ExprType::Limit:
684 MOZ_CRASH("Limit");
685 }
686
687 MOZ_ASSERT(masm.framePushed() == 0);
688 masm.ret();
689
690 // Generate an OOL call to the C++ conversion path.
691 if (fe.sig().args().length()) {
692 masm.bind(&oolCall);
693 masm.setFramePushed(frameSize);
694
695 ABIArgMIRTypeIter argsIter(coerceArgTypes);
696
697 // argument 0: function export index.
698 if (argsIter->kind() == ABIArg::GPR)
699 masm.movePtr(ImmWord(funcExportIndex), argsIter->gpr());
700 else
701 masm.storePtr(ImmWord(funcExportIndex),
702 Address(sp, argsIter->offsetFromArgBase()));
703 argsIter++;
704
705 // argument 1: tlsData
706 if (argsIter->kind() == ABIArg::GPR)
707 masm.movePtr(WasmTlsReg, argsIter->gpr());
708 else
709 masm.storePtr(WasmTlsReg, Address(sp, argsIter->offsetFromArgBase()));
710 argsIter++;
711
712 // argument 2: effective address of start of argv
713 Address argv(sp, masm.framePushed() + JitFrameLayout::offsetOfActualArg(0));
714 if (argsIter->kind() == ABIArg::GPR) {
715 masm.computeEffectiveAddress(argv, argsIter->gpr());
716 } else {
717 masm.computeEffectiveAddress(argv, ScratchIonEntry);
718 masm.storePtr(ScratchIonEntry,
719 Address(sp, argsIter->offsetFromArgBase()));
720 }
721 argsIter++;
722 MOZ_ASSERT(argsIter.done());
723
724 masm.assertStackAlignment(ABIStackAlignment);
725 CallSymbolicAddress(masm, !fe.hasEagerStubs(),
726 SymbolicAddress::CoerceInPlace_JitEntry);
727 masm.assertStackAlignment(ABIStackAlignment);
728
729 masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg,
730 &rejoinBeforeCall);
731 }
732
733 // Prepare to throw: reload WasmTlsReg from the frame.
734 masm.bind(&exception);
735 masm.setFramePushed(frameSize);
736 GenerateJitEntryThrow(masm, frameSize);
737
738 return FinishOffsets(masm, offsets);
739 }
740
741 static void StackCopy(MacroAssembler& masm, MIRType type, Register scratch,
742 Address src, Address dst) {
743 if (type == MIRType::Int32) {
744 masm.load32(src, scratch);
745 masm.store32(scratch, dst);
746 } else if (type == MIRType::Int64) {
747 #if JS_BITS_PER_WORD == 32
748 masm.load32(LowWord(src), scratch);
749 masm.store32(scratch, LowWord(dst));
750 masm.load32(HighWord(src), scratch);
751 masm.store32(scratch, HighWord(dst));
752 #else
753 Register64 scratch64(scratch);
754 masm.load64(src, scratch64);
755 masm.store64(scratch64, dst);
756 #endif
757 } else if (type == MIRType::Float32) {
758 masm.loadFloat32(src, ScratchFloat32Reg);
759 masm.storeFloat32(ScratchFloat32Reg, dst);
760 } else {
761 MOZ_ASSERT(type == MIRType::Double);
762 masm.loadDouble(src, ScratchDoubleReg);
763 masm.storeDouble(ScratchDoubleReg, dst);
764 }
765 }
766
767 typedef bool ToValue;
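// 'ToValue' is a readability alias for the boolean passed to
// FillArgumentArray: when true, arguments are boxed as JS Values in the
// destination array; when false, they are copied in their raw ABI
// representation.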
768
769 static void FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args,
770 unsigned argOffset,
771 unsigned offsetToCallerStackArgs,
772 Register scratch, ToValue toValue) {
773 for (ABIArgValTypeIter i(args); !i.done(); i++) {
774 Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
775
776 MIRType type = i.mirType();
777 switch (i->kind()) {
778 case ABIArg::GPR:
779 if (type == MIRType::Int32) {
780 if (toValue)
781 masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
782 else
783 masm.store32(i->gpr(), dst);
784 } else if (type == MIRType::Int64) {
785 // We can't box int64 into Values (yet).
786 if (toValue)
787 masm.breakpoint();
788 else
789 masm.store64(i->gpr64(), dst);
790 } else {
791 MOZ_CRASH("unexpected input type?");
792 }
793 break;
794 #ifdef JS_CODEGEN_REGISTER_PAIR
795 case ABIArg::GPR_PAIR:
796 if (type == MIRType::Int64)
797 masm.store64(i->gpr64(), dst);
798 else
799 MOZ_CRASH("wasm uses hardfp for function calls.");
800 break;
801 #endif
802 case ABIArg::FPU: {
803 MOZ_ASSERT(IsFloatingPointType(type));
804 FloatRegister srcReg = i->fpu();
805 if (type == MIRType::Double) {
806 if (toValue) {
807 // Preserve the NaN pattern in the input.
808 masm.moveDouble(srcReg, ScratchDoubleReg);
809 srcReg = ScratchDoubleReg;
810 masm.canonicalizeDouble(srcReg);
811 }
812 masm.storeDouble(srcReg, dst);
813 } else {
814 MOZ_ASSERT(type == MIRType::Float32);
815 if (toValue) {
816 // JS::Values can't store Float32, so convert to a Double.
817 masm.convertFloat32ToDouble(srcReg, ScratchDoubleReg);
818 masm.canonicalizeDouble(ScratchDoubleReg);
819 masm.storeDouble(ScratchDoubleReg, dst);
820 } else {
821 // Preserve the NaN pattern in the input.
822 masm.moveFloat32(srcReg, ScratchFloat32Reg);
823 masm.canonicalizeFloat(ScratchFloat32Reg);
824 masm.storeFloat32(ScratchFloat32Reg, dst);
825 }
826 }
827 break;
828 }
829 case ABIArg::Stack: {
830 Address src(masm.getStackPointer(),
831 offsetToCallerStackArgs + i->offsetFromArgBase());
832 if (toValue) {
833 if (type == MIRType::Int32) {
834 masm.load32(src, scratch);
835 masm.storeValue(JSVAL_TYPE_INT32, scratch, dst);
836 } else if (type == MIRType::Int64) {
837 // We can't box int64 into Values (yet).
838 masm.breakpoint();
839 } else {
840 MOZ_ASSERT(IsFloatingPointType(type));
841 if (type == MIRType::Float32) {
842 masm.loadFloat32(src, ScratchFloat32Reg);
843 masm.convertFloat32ToDouble(ScratchFloat32Reg, ScratchDoubleReg);
844 } else {
845 masm.loadDouble(src, ScratchDoubleReg);
846 }
847 masm.canonicalizeDouble(ScratchDoubleReg);
848 masm.storeDouble(ScratchDoubleReg, dst);
849 }
850 } else {
851 StackCopy(masm, type, scratch, src, dst);
852 }
853 break;
854 }
855 case ABIArg::Uninitialized:
856 MOZ_CRASH("Uninitialized ABIArg kind");
857 }
858 }
859 }
860
861 // Generate a wrapper function with the standard intra-wasm call ABI which
862 // simply calls an import. This wrapper function allows any import to be treated
863 // like a normal wasm function for the purposes of exports and table calls. In
864 // particular, the wrapper function provides:
865 // - a table entry, so JS imports can be put into tables
866 // - normal entries, so that, if the import is re-exported, an entry stub can
867 // be generated and called without any special cases
868 static bool GenerateImportFunction(jit::MacroAssembler& masm,
869 const FuncImport& fi, SigIdDesc sigId,
870 FuncOffsets* offsets) {
871 masm.setFramePushed(0);
872
873 unsigned framePushed =
874 StackDecrementForCall(masm, WasmStackAlignment, fi.sig().args());
875
876 GenerateFunctionPrologue(masm, framePushed, IsLeaf(false), sigId,
877 BytecodeOffset(0), offsets);
878
879 // The argument register state is already setup by our caller. We just need
880 // to be sure not to clobber it before the call.
881 Register scratch = ABINonArgReg0;
882
883   // Copy our frame's stack arguments to the callee frame's stack arguments.
884 unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
885 ABIArgValTypeIter i(fi.sig().args());
886 for (; !i.done(); i++) {
887 if (i->kind() != ABIArg::Stack) continue;
888
889 Address src(masm.getStackPointer(),
890 offsetToCallerStackArgs + i->offsetFromArgBase());
891 Address dst(masm.getStackPointer(), i->offsetFromArgBase());
892 StackCopy(masm, i.mirType(), scratch, src, dst);
893 }
894
895 // Call the import exit stub.
896 CallSiteDesc desc(CallSiteDesc::Dynamic);
897 masm.wasmCallImport(desc, CalleeDesc::import(fi.tlsDataOffset()));
898
899 // Restore the TLS register and pinned regs, per wasm function ABI.
900 masm.loadWasmTlsRegFromFrame();
901 masm.loadWasmPinnedRegsFromTls();
902
903 GenerateFunctionEpilogue(masm, framePushed, offsets);
904
905 masm.wasmEmitOldTrapOutOfLineCode();
906
907 return FinishOffsets(masm, offsets);
908 }
909
910 static const unsigned STUBS_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
911
912 bool wasm::GenerateImportFunctions(const ModuleEnvironment& env,
913 const FuncImportVector& imports,
914 CompiledCode* code) {
915 LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
916 TempAllocator alloc(&lifo);
917 MacroAssembler masm(MacroAssembler::WasmToken(), alloc);
918
919 for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
920 const FuncImport& fi = imports[funcIndex];
921
922 FuncOffsets offsets;
923 if (!GenerateImportFunction(masm, fi, env.funcSigs[funcIndex]->id,
924 &offsets))
925 return false;
926 if (!code->codeRanges.emplaceBack(funcIndex, /* bytecodeOffset = */ 0,
927 offsets))
928 return false;
929 }
930
931 masm.finish();
932 if (masm.oom()) return false;
933
934 return code->swap(masm);
935 }
936
937 // Generate a stub that is called via the internal ABI derived from the
938 // signature of the import and calls into an appropriate callImport C++
939 // function, having boxed all the ABI arguments into a homogeneous Value array.
940 static bool GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi,
941 uint32_t funcImportIndex,
942 Label* throwLabel,
943 CallableOffsets* offsets) {
944 masm.setFramePushed(0);
945
946 // Argument types for Instance::callImport_*:
947 static const MIRType typeArray[] = {MIRType::Pointer, // Instance*
948 MIRType::Pointer, // funcImportIndex
949 MIRType::Int32, // argc
950 MIRType::Pointer}; // argv
951 MIRTypeVector invokeArgTypes;
952 MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, ArrayLength(typeArray)));
953
954 // At the point of the call, the stack layout shall be (sp grows to the left):
955 // | stack args | padding | argv[] | padding | retaddr | caller stack args |
956 // The padding between stack args and argv ensures that argv is aligned. The
957 // padding between argv and retaddr ensures that sp is aligned.
958 unsigned argOffset =
959 AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
960 unsigned argBytes = Max<size_t>(1, fi.sig().args().length()) * sizeof(Value);
961 unsigned framePushed =
962 StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);
963
964 GenerateExitPrologue(masm, framePushed, ExitReason::Fixed::ImportInterp,
965 offsets);
966
967 // Fill the argument array.
968 unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
969 Register scratch = ABINonArgReturnReg0;
970 FillArgumentArray(masm, fi.sig().args(), argOffset, offsetToCallerStackArgs,
971 scratch, ToValue(false));
972
973 // Prepare the arguments for the call to Instance::callImport_*.
974 ABIArgMIRTypeIter i(invokeArgTypes);
975
976 // argument 0: Instance*
977 Address instancePtr(WasmTlsReg, offsetof(TlsData, instance));
978 if (i->kind() == ABIArg::GPR) {
979 masm.loadPtr(instancePtr, i->gpr());
980 } else {
981 masm.loadPtr(instancePtr, scratch);
982 masm.storePtr(scratch,
983 Address(masm.getStackPointer(), i->offsetFromArgBase()));
984 }
985 i++;
986
987 // argument 1: funcImportIndex
988 if (i->kind() == ABIArg::GPR)
989 masm.mov(ImmWord(funcImportIndex), i->gpr());
990 else
991 masm.store32(Imm32(funcImportIndex),
992 Address(masm.getStackPointer(), i->offsetFromArgBase()));
993 i++;
994
995 // argument 2: argc
996 unsigned argc = fi.sig().args().length();
997 if (i->kind() == ABIArg::GPR)
998 masm.mov(ImmWord(argc), i->gpr());
999 else
1000 masm.store32(Imm32(argc),
1001 Address(masm.getStackPointer(), i->offsetFromArgBase()));
1002 i++;
1003
1004 // argument 3: argv
1005 Address argv(masm.getStackPointer(), argOffset);
1006 if (i->kind() == ABIArg::GPR) {
1007 masm.computeEffectiveAddress(argv, i->gpr());
1008 } else {
1009 masm.computeEffectiveAddress(argv, scratch);
1010 masm.storePtr(scratch,
1011 Address(masm.getStackPointer(), i->offsetFromArgBase()));
1012 }
1013 i++;
1014 MOZ_ASSERT(i.done());
1015
1016 // Make the call, test whether it succeeded, and extract the return value.
1017 AssertStackAlignment(masm, ABIStackAlignment);
1018 switch (fi.sig().ret()) {
1019 case ExprType::Void:
1020 masm.call(SymbolicAddress::CallImport_Void);
1021 masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
1022 break;
1023 case ExprType::I32:
1024 masm.call(SymbolicAddress::CallImport_I32);
1025 masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
1026 masm.load32(argv, ReturnReg);
1027 break;
1028 case ExprType::I64:
1029 masm.call(SymbolicAddress::CallImport_I64);
1030 masm.jump(throwLabel);
1031 break;
1032 case ExprType::F32:
1033 masm.call(SymbolicAddress::CallImport_F64);
1034 masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
1035 masm.loadDouble(argv, ReturnDoubleReg);
1036 masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
1037 break;
1038 case ExprType::F64:
1039 masm.call(SymbolicAddress::CallImport_F64);
1040 masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
1041 masm.loadDouble(argv, ReturnDoubleReg);
1042 break;
1043 case ExprType::I8x16:
1044 case ExprType::I16x8:
1045 case ExprType::I32x4:
1046 case ExprType::F32x4:
1047 case ExprType::B8x16:
1048 case ExprType::B16x8:
1049 case ExprType::B32x4:
1050 MOZ_CRASH("SIMD types shouldn't be returned from a FFI");
1051 case ExprType::Limit:
1052 MOZ_CRASH("Limit");
1053 }
1054
1055 // The native ABI preserves the TLS, heap and global registers since they
1056 // are non-volatile.
1057 MOZ_ASSERT(NonVolatileRegs.has(WasmTlsReg));
1058 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
1059 defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
1060 defined(JS_CODEGEN_MIPS64)
1061 MOZ_ASSERT(NonVolatileRegs.has(HeapReg));
1062 #endif
1063
1064 GenerateExitEpilogue(masm, framePushed, ExitReason::Fixed::ImportInterp,
1065 offsets);
1066
1067 return FinishOffsets(masm, offsets);
1068 }
1069
1070 // Generate a stub that is called via the internal ABI derived from the
1071 // signature of the import and calls into a compatible JIT function,
1072 // having boxed all the ABI arguments into the JIT stack frame layout.
1073 static bool GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi,
1074 Label* throwLabel, JitExitOffsets* offsets) {
1075 masm.setFramePushed(0);
1076
1077 // JIT calls use the following stack layout (sp grows to the left):
1078 // | retaddr | descriptor | callee | argc | this | arg1..N |
1079 // After the JIT frame, the global register (if present) is saved since the
1080 // JIT's ABI does not preserve non-volatile regs. Also, unlike most ABIs,
1081 // the JIT ABI requires that sp be JitStackAlignment-aligned *after* pushing
1082 // the return address.
1083 static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
1084 unsigned sizeOfRetAddr = sizeof(void*);
1085 unsigned sizeOfPreFrame = WasmToJSJitFrameLayout::Size() - sizeOfRetAddr;
1086 unsigned sizeOfThisAndArgs = (1 + fi.sig().args().length()) * sizeof(Value);
1087 unsigned totalJitFrameBytes =
1088 sizeOfRetAddr + sizeOfPreFrame + sizeOfThisAndArgs;
1089 unsigned jitFramePushed =
1090 StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) -
1091 sizeOfRetAddr;
1092 unsigned sizeOfThisAndArgsAndPadding = jitFramePushed - sizeOfPreFrame;
1093
1094 GenerateJitExitPrologue(masm, jitFramePushed, offsets);
1095
1096 // 1. Descriptor
1097 size_t argOffset = 0;
1098 uint32_t descriptor =
1099 MakeFrameDescriptor(sizeOfThisAndArgsAndPadding, JitFrame_WasmToJSJit,
1100 WasmToJSJitFrameLayout::Size());
1101 masm.storePtr(ImmWord(uintptr_t(descriptor)),
1102 Address(masm.getStackPointer(), argOffset));
1103 argOffset += sizeof(size_t);
1104
1105 // 2. Callee
1106 Register callee = ABINonArgReturnReg0; // live until call
1107 Register scratch = ABINonArgReturnReg1; // repeatedly clobbered
1108
1109 // 2.1. Get JSFunction callee
1110 masm.loadWasmGlobalPtr(fi.tlsDataOffset() + offsetof(FuncImportTls, obj),
1111 callee);
1112
1113 // 2.2. Save callee
1114 masm.storePtr(callee, Address(masm.getStackPointer(), argOffset));
1115 argOffset += sizeof(size_t);
1116
1117 // 3. Argc
1118 unsigned argc = fi.sig().args().length();
1119 masm.storePtr(ImmWord(uintptr_t(argc)),
1120 Address(masm.getStackPointer(), argOffset));
1121 argOffset += sizeof(size_t);
1122 MOZ_ASSERT(argOffset == sizeOfPreFrame);
1123
1124 // 4. |this| value
1125 masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
1126 argOffset += sizeof(Value);
1127
1128 // 5. Fill the arguments
1129 unsigned offsetToCallerStackArgs = jitFramePushed + sizeof(Frame);
1130 FillArgumentArray(masm, fi.sig().args(), argOffset, offsetToCallerStackArgs,
1131 scratch, ToValue(true));
1132 argOffset += fi.sig().args().length() * sizeof(Value);
1133 MOZ_ASSERT(argOffset == sizeOfThisAndArgs + sizeOfPreFrame);
1134
1135 // 6. Check if we need to rectify arguments
1136 masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);
1137
1138 Label rectify;
1139 masm.branch32(Assembler::Above, scratch, Imm32(fi.sig().args().length()),
1140 &rectify);
1141
1142 // 7. If we haven't rectified arguments, load callee executable entry point
1143 masm.loadJitCodeNoArgCheck(callee, callee);
1144
1145 Label rejoinBeforeCall;
1146 masm.bind(&rejoinBeforeCall);
1147
1148 AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);
1149 masm.callJitNoProfiler(callee);
1150
1151 // Note that there might be a GC thing in the JSReturnOperand now.
1152 // In all the code paths from here:
1153 // - either the value is unboxed because it was a primitive and we don't
1154 // need to worry about rooting anymore.
1155 // - or the value needs to be rooted, but nothing can cause a GC between
1156 // here and CoerceInPlace, which roots before coercing to a primitive.
1157 // In particular, this is true because wasm::InInterruptibleCode will
1158 // return false when PC is in the jit exit.
1159
1160 // The JIT callee clobbers all registers, including WasmTlsReg and
1161 // FramePointer, so restore those here. During this sequence of
1162 // instructions, FP can't be trusted by the profiling frame iterator.
1163 offsets->untrustedFPStart = masm.currentOffset();
1164 AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);
1165
1166 masm.loadWasmTlsRegFromFrame();
1167 masm.moveStackPtrTo(FramePointer);
1168 masm.addPtr(Imm32(masm.framePushed()), FramePointer);
1169 offsets->untrustedFPEnd = masm.currentOffset();
1170
1171 // As explained above, the frame was aligned for the JIT ABI such that
1172 // (sp + sizeof(void*)) % JitStackAlignment == 0
1173 // But now we possibly want to call one of several different C++ functions,
1174 // so subtract the sizeof(void*) so that sp is aligned for an ABI call.
1175 static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
1176 masm.reserveStack(sizeOfRetAddr);
1177 unsigned nativeFramePushed = masm.framePushed();
1178 AssertStackAlignment(masm, ABIStackAlignment);
1179
1180 #ifdef DEBUG
1181 {
1182 Label ok;
1183 masm.branchTestMagic(Assembler::NotEqual, JSReturnOperand, &ok);
1184 masm.breakpoint();
1185 masm.bind(&ok);
1186 }
1187 #endif
1188
1189 Label oolConvert;
1190 switch (fi.sig().ret()) {
1191 case ExprType::Void:
1192 break;
1193 case ExprType::I32:
1194 masm.truncateValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg,
1195 &oolConvert);
1196 break;
1197 case ExprType::I64:
1198 masm.breakpoint();
1199 break;
1200 case ExprType::F32:
1201 masm.convertValueToFloat(JSReturnOperand, ReturnFloat32Reg, &oolConvert);
1202 break;
1203 case ExprType::F64:
1204 masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert);
1205 break;
1206 case ExprType::I8x16:
1207 case ExprType::I16x8:
1208 case ExprType::I32x4:
1209 case ExprType::F32x4:
1210 case ExprType::B8x16:
1211 case ExprType::B16x8:
1212 case ExprType::B32x4:
1213 MOZ_CRASH("SIMD types shouldn't be returned from an import");
1214 case ExprType::Limit:
1215 MOZ_CRASH("Limit");
1216 }
1217
1218 Label done;
1219 masm.bind(&done);
1220
1221 GenerateJitExitEpilogue(masm, masm.framePushed(), offsets);
1222
1223 {
1224 // Call the arguments rectifier.
1225 masm.bind(&rectify);
1226 masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)), callee);
1227 masm.loadPtr(Address(callee, Instance::offsetOfJSJitArgsRectifier()),
1228 callee);
1229 masm.jump(&rejoinBeforeCall);
1230 }
1231
1232 if (oolConvert.used()) {
1233 masm.bind(&oolConvert);
1234 masm.setFramePushed(nativeFramePushed);
1235
1236 // Coercion calls use the following stack layout (sp grows to the left):
1237 // | args | padding | Value argv[1] | padding | exit Frame |
1238 MIRTypeVector coerceArgTypes;
1239 MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
1240 unsigned offsetToCoerceArgv =
1241 AlignBytes(StackArgBytes(coerceArgTypes), sizeof(Value));
1242 MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
1243 AssertStackAlignment(masm, ABIStackAlignment);
1244
1245 // Store return value into argv[0]
1246 masm.storeValue(JSReturnOperand,
1247 Address(masm.getStackPointer(), offsetToCoerceArgv));
1248
1249 // From this point, it's safe to reuse the scratch register (which
1250 // might be part of the JSReturnOperand).
1251
1252 // The JIT might have clobbered exitFP at this point. Since there's
1253 // going to be a CoerceInPlace call, pretend we're still doing the JIT
1254 // call by restoring our tagged exitFP.
1255 SetExitFP(masm, ExitReason::Fixed::ImportJit, scratch);
1256
1257 // argument 0: argv
1258 ABIArgMIRTypeIter i(coerceArgTypes);
1259 Address argv(masm.getStackPointer(), offsetToCoerceArgv);
1260 if (i->kind() == ABIArg::GPR) {
1261 masm.computeEffectiveAddress(argv, i->gpr());
1262 } else {
1263 masm.computeEffectiveAddress(argv, scratch);
1264 masm.storePtr(scratch,
1265 Address(masm.getStackPointer(), i->offsetFromArgBase()));
1266 }
1267 i++;
1268 MOZ_ASSERT(i.done());
1269
1270 // Call coercion function. Note that right after the call, the value of
1271 // FP is correct because FP is non-volatile in the native ABI.
1272 AssertStackAlignment(masm, ABIStackAlignment);
1273 switch (fi.sig().ret()) {
1274 case ExprType::I32:
1275 masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
1276 masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
1277 masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv),
1278 ReturnReg);
1279 break;
1280 case ExprType::F64:
1281 case ExprType::F32:
1282 masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
1283 masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
1284 masm.loadDouble(Address(masm.getStackPointer(), offsetToCoerceArgv),
1285 ReturnDoubleReg);
1286 if (fi.sig().ret() == ExprType::F32)
1287 masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
1288 break;
1289 default:
1290 MOZ_CRASH("Unsupported convert type");
1291 }
1292
1293 // Maintain the invariant that exitFP is either unset or not set to a
1294 // wasm tagged exitFP, per the jit exit contract.
1295 ClearExitFP(masm, scratch);
1296
1297 masm.jump(&done);
1298 masm.setFramePushed(0);
1299 }
1300
1301 MOZ_ASSERT(masm.framePushed() == 0);
1302
1303 return FinishOffsets(masm, offsets);
1304 }
1305
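// An ABIFunctionType packs the return type in its low ArgType_Shift bits and
// each successive argument type in the next group of bits; ABIFunctionArgs
// decodes that packing so GenerateBuiltinThunk can iterate argument MIRTypes.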
1306 struct ABIFunctionArgs {
1307 ABIFunctionType abiType;
1308 size_t len;
1309
1310   explicit ABIFunctionArgs(ABIFunctionType sig)
1311 : abiType(ABIFunctionType(sig >> ArgType_Shift)) {
1312 len = 0;
1313 uint32_t i = uint32_t(abiType);
1314 while (i) {
1315 i = i >> ArgType_Shift;
1316 len++;
1317 }
1318 }
1319
1320   size_t length() const { return len; }
1321
1322   MIRType operator[](size_t i) const {
1323 MOZ_ASSERT(i < len);
1324 uint32_t abi = uint32_t(abiType);
1325 while (i--) abi = abi >> ArgType_Shift;
1326 return ToMIRType(ABIArgType(abi & ArgType_Mask));
1327 }
1328 };
1329
1330 bool wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType,
1331 ExitReason exitReason, void* funcPtr,
1332 CallableOffsets* offsets) {
1333 masm.setFramePushed(0);
1334
1335 ABIFunctionArgs args(abiType);
1336 uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, args);
1337
1338 GenerateExitPrologue(masm, framePushed, exitReason, offsets);
1339
1340 // Copy out and convert caller arguments, if needed.
1341 unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
1342 Register scratch = ABINonArgReturnReg0;
1343 for (ABIArgIter<ABIFunctionArgs> i(args); !i.done(); i++) {
1344 if (i->argInRegister()) {
1345 #ifdef JS_CODEGEN_ARM
1346 // Non hard-fp passes the args values in GPRs.
1347 if (!UseHardFpABI() && IsFloatingPointType(i.mirType())) {
1348 FloatRegister input = i->fpu();
1349 if (i.mirType() == MIRType::Float32) {
1350 masm.ma_vxfer(input, Register::FromCode(input.id()));
1351 } else if (i.mirType() == MIRType::Double) {
1352 uint32_t regId = input.singleOverlay().id();
1353 masm.ma_vxfer(input, Register::FromCode(regId),
1354 Register::FromCode(regId + 1));
1355 }
1356 }
1357 #endif
1358 continue;
1359 }
1360
1361 Address src(masm.getStackPointer(),
1362 offsetToCallerStackArgs + i->offsetFromArgBase());
1363 Address dst(masm.getStackPointer(), i->offsetFromArgBase());
1364 StackCopy(masm, i.mirType(), scratch, src, dst);
1365 }
1366
1367 AssertStackAlignment(masm, ABIStackAlignment);
1368 masm.call(ImmPtr(funcPtr, ImmPtr::NoCheckToken()));
1369
1370 #if defined(JS_CODEGEN_X86)
1371 // x86 passes the return value on the x87 FP stack.
1372 Operand op(esp, 0);
1373 MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
1374 if (retType == MIRType::Float32) {
1375 masm.fstp32(op);
1376 masm.loadFloat32(op, ReturnFloat32Reg);
1377 } else if (retType == MIRType::Double) {
1378 masm.fstp(op);
1379 masm.loadDouble(op, ReturnDoubleReg);
1380 }
1381 #elif defined(JS_CODEGEN_ARM)
1382 // Non hard-fp passes the return values in GPRs.
1383 MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
1384 if (!UseHardFpABI() && IsFloatingPointType(retType))
1385 masm.ma_vxfer(r0, r1, d0);
1386 #endif
1387
1388 GenerateExitEpilogue(masm, framePushed, exitReason, offsets);
1389 return FinishOffsets(masm, offsets);
1390 }
1391
1392 // Generate a stub which calls WasmReportTrap() and can be executed by having
1393 // the signal handler redirect PC from any trapping instruction.
1394 static bool GenerateTrapExit(MacroAssembler& masm, Label* throwLabel,
1395 Offsets* offsets) {
1396 masm.haltingAlign(CodeAlignment);
1397
1398 offsets->begin = masm.currentOffset();
1399
1400 // We know that StackPointer is word-aligned, but not necessarily
1401 // stack-aligned, so we need to align it dynamically.
1402 masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
1403 if (ShadowStackSpace) masm.subFromStackPtr(Imm32(ShadowStackSpace));
1404
1405 masm.assertStackAlignment(ABIStackAlignment);
1406 masm.call(SymbolicAddress::ReportTrap);
1407
1408 masm.jump(throwLabel);
1409
1410 return FinishOffsets(masm, offsets);
1411 }
1412
1413 // Generate a stub that calls into WasmOldReportTrap with the right trap reason.
1414 // This stub is called with ABIStackAlignment by a trap out-of-line path. An
1415 // exit prologue/epilogue is used so that stack unwinding picks up the
1416 // current JitActivation. Unwinding will begin at the caller of this trap exit.
1417 static bool GenerateOldTrapExit(MacroAssembler& masm, Trap trap,
1418 Label* throwLabel, CallableOffsets* offsets) {
1419 masm.haltingAlign(CodeAlignment);
1420
1421 masm.setFramePushed(0);
1422
1423 MIRTypeVector args;
1424 MOZ_ALWAYS_TRUE(args.append(MIRType::Int32));
1425
1426 uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, args);
1427
1428 GenerateExitPrologue(masm, framePushed, ExitReason::Fixed::Trap, offsets);
1429
1430 ABIArgMIRTypeIter i(args);
1431 if (i->kind() == ABIArg::GPR)
1432 masm.move32(Imm32(int32_t(trap)), i->gpr());
1433 else
1434 masm.store32(Imm32(int32_t(trap)),
1435 Address(masm.getStackPointer(), i->offsetFromArgBase()));
1436 i++;
1437 MOZ_ASSERT(i.done());
1438
1439 masm.assertStackAlignment(ABIStackAlignment);
1440 masm.call(SymbolicAddress::OldReportTrap);
1441
1442 masm.jump(throwLabel);
1443
1444 GenerateExitEpilogue(masm, framePushed, ExitReason::Fixed::Trap, offsets);
1445
1446 return FinishOffsets(masm, offsets);
1447 }
1448
1449 // Generate a stub which is only used by the signal handlers to handle out of
1450 // bounds accesses by experimental SIMD.js and Atomics, and unaligned accesses on
1451 // ARM. This stub is executed by direct PC transfer from the faulting memory
1452 // access and thus the stack depth is unknown. Since
1453 // JitActivation::packedExitFP() is not set before calling the error reporter,
1454 // the current wasm activation will be lost. This stub should be removed when
1455 // SIMD.js and Atomics are moved to wasm and given proper traps and when we use
1456 // a non-faulting strategy for unaligned ARM access.
1457 static bool GenerateGenericMemoryAccessTrap(MacroAssembler& masm,
1458 SymbolicAddress reporter,
1459 Label* throwLabel,
1460 Offsets* offsets) {
1461 masm.haltingAlign(CodeAlignment);
1462
1463 offsets->begin = masm.currentOffset();
1464
1465 // sp can be anything at this point, so ensure it is aligned when calling
1466 // into C++. We unconditionally jump to throw so don't worry about
1467 // restoring sp.
1468 masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
1469 if (ShadowStackSpace) masm.subFromStackPtr(Imm32(ShadowStackSpace));
1470
1471 masm.call(reporter);
1472 masm.jump(throwLabel);
1473
1474 return FinishOffsets(masm, offsets);
1475 }
1476
1477 static bool GenerateOutOfBoundsExit(MacroAssembler& masm, Label* throwLabel,
1478 Offsets* offsets) {
1479 return GenerateGenericMemoryAccessTrap(
1480 masm, SymbolicAddress::ReportOutOfBounds, throwLabel, offsets);
1481 }
1482
1483 static bool GenerateUnalignedExit(MacroAssembler& masm, Label* throwLabel,
1484 Offsets* offsets) {
1485 return GenerateGenericMemoryAccessTrap(
1486 masm, SymbolicAddress::ReportUnalignedAccess, throwLabel, offsets);
1487 }
1488
1489 #if defined(JS_CODEGEN_ARM)
1490 static const LiveRegisterSet AllRegsExceptPCSP(
1491 GeneralRegisterSet(Registers::AllMask & ~((uint32_t(1) << Registers::sp) |
1492 (uint32_t(1) << Registers::pc))),
1493 FloatRegisterSet(FloatRegisters::AllDoubleMask));
1494 static_assert(!SupportsSimd,
1495 "high lanes of SIMD registers need to be saved too.");
1496 #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
1497 static const LiveRegisterSet AllUserRegsExceptSP(
1498 GeneralRegisterSet(Registers::AllMask &
1499 ~((uint32_t(1) << Registers::k0) |
1500 (uint32_t(1) << Registers::k1) |
1501 (uint32_t(1) << Registers::sp) |
1502 (uint32_t(1) << Registers::zero))),
1503 FloatRegisterSet(FloatRegisters::AllDoubleMask));
1504 static_assert(!SupportsSimd,
1505 "high lanes of SIMD registers need to be saved too.");
1506 #else
1507 static const LiveRegisterSet AllRegsExceptSP(
1508 GeneralRegisterSet(Registers::AllMask &
1509 ~(uint32_t(1) << Registers::StackPointer)),
1510 FloatRegisterSet(FloatRegisters::AllMask));
1511 #endif
1512
1513 // The async interrupt-callback exit is called from arbitrarily-interrupted wasm
1514 // code. It calls into WasmHandleExecutionInterrupt to determine whether we
1515 // must really halt execution, which can reenter the VM (e.g., to display the
1516 // slow script dialog). If execution is not interrupted, this stub must
1517 // carefully preserve *all* register state. If execution is interrupted, the
1518 // entire activation will be popped by the throw stub, so register state does
1519 // not need to be restored.
1520 static bool GenerateInterruptExit(MacroAssembler& masm, Label* throwLabel,
1521 Offsets* offsets) {
1522 masm.haltingAlign(CodeAlignment);
1523
1524 offsets->begin = masm.currentOffset();
1525
1526 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
1527 // Be very careful here not to perturb the machine state before saving it
1528 // to the stack. In particular, add/sub instructions may set conditions in
1529 // the flags register.
1530 masm.push(Imm32(0)); // space used as return address, updated below
1531 masm.setFramePushed(
1532 0); // set to 0 now so that framePushed is offset of return address
1533 masm.PushFlags(); // after this we are safe to use sub
1534 masm.PushRegsInMask(AllRegsExceptSP); // save all GP/FP registers (except SP)
1535
1536 // We know that StackPointer is word-aligned, but not necessarily
1537 // stack-aligned, so we need to align it dynamically.
1538 masm.moveStackPtrTo(ABINonVolatileReg);
1539 masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
1540 if (ShadowStackSpace) masm.subFromStackPtr(Imm32(ShadowStackSpace));
1541
1542 // Make the call to C++, which preserves ABINonVolatileReg.
1543 masm.assertStackAlignment(ABIStackAlignment);
1544 masm.call(SymbolicAddress::HandleExecutionInterrupt);
1545
1546 // HandleExecutionInterrupt returns null if execution is interrupted and
1547 // the resumption pc otherwise.
1548 masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
1549
1550 // Restore the stack pointer then store resumePC into the stack slot that
1551 // will be popped by the 'ret' below.
1552 masm.moveToStackPtr(ABINonVolatileReg);
1553 masm.storePtr(ReturnReg, Address(StackPointer, masm.framePushed()));
1554
1555 // Restore the machine state to before the interrupt. After popping flags,
1556 // no instructions can be executed which set flags.
1557 masm.PopRegsInMask(AllRegsExceptSP);
1558 masm.PopFlags();
1559
1560 // Return to the resumePC stored into this stack slot above.
1561 MOZ_ASSERT(masm.framePushed() == 0);
1562 masm.ret();
1563 #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
1564 // Reserve space to store resumePC and HeapReg.
1565 masm.subFromStackPtr(Imm32(2 * sizeof(intptr_t)));
1566 // Set to zero so we can use masm.framePushed() below.
1567 masm.setFramePushed(0);
1568
1569 // Save all registers, except sp.
1570 masm.PushRegsInMask(AllUserRegsExceptSP);
1571
1572 // Save the stack pointer and FCSR in non-volatile registers.
1573 masm.moveStackPtrTo(s0);
1574 masm.as_cfc1(s1, Assembler::FCSR);
1575
1576 // Align the stack.
1577 masm.ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
1578
1579 // Store HeapReg into the reserved space.
1580 masm.storePtr(HeapReg, Address(s0, masm.framePushed() + sizeof(intptr_t)));
1581
1582 #ifdef USES_O32_ABI
1583 // The MIPS O32 ABI requires reserving stack space for registers $a0 to $a3.
1584 masm.subFromStackPtr(Imm32(4 * sizeof(intptr_t)));
1585 #endif
1586
1587 masm.assertStackAlignment(ABIStackAlignment);
1588 masm.call(SymbolicAddress::HandleExecutionInterrupt);
1589
1590 masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
1591
1592 // This restores the stack pointer to its value from before the call.
1593 masm.moveToStackPtr(s0);
1594
1595 // Restore FCSR.
1596 masm.as_ctc1(s1, Assembler::FCSR);
1597
1598 // Store resumePC into the reserved space.
1599 masm.storePtr(ReturnReg, Address(s0, masm.framePushed()));
1600
1601 masm.PopRegsInMask(AllUserRegsExceptSP);
1602
1603 // Pop resumePC into PC. Clobber HeapReg to make the jump and restore it
1604 // in the jump delay slot.
1605 masm.loadPtr(Address(StackPointer, 0), HeapReg);
1606 // Reclaim the reserved space.
1607 masm.addToStackPtr(Imm32(2 * sizeof(intptr_t)));
1608 masm.as_jr(HeapReg);
1609 masm.loadPtr(Address(StackPointer, -int32_t(sizeof(intptr_t))), HeapReg);
1610 #elif defined(JS_CODEGEN_ARM)
1611 {
1612 // Be careful not to clobber scratch registers before they are saved.
1613 ScratchRegisterScope scratch(masm);
1614 SecondScratchRegisterScope secondScratch(masm);
1615
1616 // Reserve a word to receive the return address.
1617 masm.as_alu(StackPointer, StackPointer, Imm8(4), OpSub);
1618
1619 // Set framePushed to 0 now so that framePushed can be used later as the
1620 // stack offset to the return-address space reserved above.
1621 masm.setFramePushed(0);
1622
1623 // Save all GP/FP registers (except PC and SP).
1624 masm.PushRegsInMask(AllRegsExceptPCSP);
1625 }
1626
1627 // Save SP, APSR and FPSCR in non-volatile registers.
1628 masm.as_mrs(r4);
1629 masm.as_vmrs(r5);
1630 masm.mov(sp, r6);
1631
1632 // We know that StackPointer is word-aligned, but not necessarily
1633 // stack-aligned, so we need to align it dynamically.
1634 masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
1635
1636 // Make the call to C++, which preserves the non-volatile registers.
1637 masm.assertStackAlignment(ABIStackAlignment);
1638 masm.call(SymbolicAddress::HandleExecutionInterrupt);
1639
1640 // HandleExecutionInterrupt returns null if execution is interrupted and
1641 // the resumption pc otherwise.
1642 masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
1643
1644 // Restore the stack pointer then store resumePC into the stack slot that
1645 // will be popped by the 'ret' below.
1646 masm.mov(r6, sp);
1647 masm.storePtr(ReturnReg, Address(sp, masm.framePushed()));
1648
1649 // Restore the machine state to before the interrupt. After the status
1650 // registers are restored, no instructions can be executed which set flags.
1651 masm.as_vmsr(r5);
1652 masm.as_msr(r4);
1653 masm.PopRegsInMask(AllRegsExceptPCSP);
1654
1655 // Return to the resumePC stored into this stack slot above.
1656 MOZ_ASSERT(masm.framePushed() == 0);
1657 masm.ret();
1658 #elif defined(JS_CODEGEN_ARM64)
1659 MOZ_CRASH();
1660 #elif defined(JS_CODEGEN_NONE)
1661 MOZ_CRASH();
1662 #else
1663 #error "Unknown architecture!"
1664 #endif
1665
1666 return FinishOffsets(masm, offsets);
1667 }
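
// A minimal sketch of the contract this interrupt exit relies on, assuming
// the C++ callee behind SymbolicAddress::HandleExecutionInterrupt returns
// null to request a throw and the resumption pc otherwise. The function name
// and shape below are hypothetical stand-ins used only to document that
// contract, not the VM's actual handler.
static constexpr const void* InterruptHandlerResultSketch(const void* resumePC,
                                                          bool mustInterrupt) {
  // null -> the stub branches to throwLabel; non-null -> the stub returns to
  // this pc via the return-address slot it patched on the stack.
  return mustInterrupt ? nullptr : resumePC;
}
static_assert(InterruptHandlerResultSketch(nullptr, true) == nullptr,
              "a null result routes the stub to throwLabel");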
1668
1669 // Generate a stub that restores the stack pointer to what it was on entry to
1670 // the wasm activation, sets the return register to 'false' and then executes a
1671 // return which will return from this wasm activation to the caller. This stub
1672 // should only be called after the caller has reported an error (or, in the case
1673 // of the interrupt stub, intends to interrupt execution).
1674 static bool GenerateThrowStub(MacroAssembler& masm, Label* throwLabel,
1675 Offsets* offsets) {
1676 masm.haltingAlign(CodeAlignment);
1677
1678 masm.bind(throwLabel);
1679
1680 offsets->begin = masm.currentOffset();
1681
1682 // The throw stub can be jumped to from an async interrupt that is halting
1683 // execution. Thus the stack pointer can be unaligned and we must align it
1684 // dynamically.
1685 masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
1686 if (ShadowStackSpace) masm.subFromStackPtr(Imm32(ShadowStackSpace));
1687
1688 // WasmHandleThrow unwinds JitActivation::wasmExitFP() and returns the
1689 // address of the return address on the stack that this stub should return to.
1690 // Set the FramePointer to a magic value to indicate a return by throw.
1691 masm.call(SymbolicAddress::HandleThrow);
1692 masm.moveToStackPtr(ReturnReg);
1693 masm.move32(Imm32(FailFP), FramePointer);
1694 masm.ret();
1695
1696 return FinishOffsets(masm, offsets);
1697 }
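
// A minimal sketch of how the caller side can recognize the throw return set
// up above, assuming only that FailFP is a sentinel the frame pointer never
// holds for a real frame. The concrete numbers below are placeholder example
// values chosen for the checks, not constants taken from this codebase.
static constexpr bool ReturnedViaThrowSketch(uint32_t fpAfterReturn,
                                             uint32_t sentinel) {
  // Entry stubs compare the frame pointer register against the sentinel to
  // tell a return-by-throw apart from a normal return.
  return fpAfterReturn == sentinel;
}
static_assert(ReturnedViaThrowSketch(0xbad, 0xbad),
              "a sentinel frame pointer marks a return by throw");
static_assert(!ReturnedViaThrowSketch(0x1000, 0xbad),
              "an ordinary frame pointer marks a normal return");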
1698
1699 static const LiveRegisterSet AllAllocatableRegs =
1700 LiveRegisterSet(GeneralRegisterSet(Registers::AllocatableMask),
1701 FloatRegisterSet(FloatRegisters::AllMask));
1702
1703 // Generate a stub that handles toggleable enter/leave frame traps or breakpoints.
1704 // The stub records the frame pointer (via GenerateExitPrologue) and saves most
1705 // registers so as not to affect the code generated by WasmBaselineCompile.
1706 static bool GenerateDebugTrapStub(MacroAssembler& masm, Label* throwLabel,
1707 CallableOffsets* offsets) {
1708 masm.haltingAlign(CodeAlignment);
1709
1710 masm.setFramePushed(0);
1711
1712 GenerateExitPrologue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);
1713
1714 // Save all registers used between baseline compiler operations.
1715 masm.PushRegsInMask(AllAllocatableRegs);
1716
1717 uint32_t framePushed = masm.framePushed();
1718
1719 // This stub might be called with an unaligned stack, so align it and save
1720 // the old stack pointer at the new top of the stack.
1721 Register scratch = ABINonArgReturnReg0;
1722 masm.moveStackPtrTo(scratch);
1723 masm.subFromStackPtr(Imm32(sizeof(intptr_t)));
1724 masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
1725 masm.storePtr(scratch, Address(masm.getStackPointer(), 0));
1726
1727 if (ShadowStackSpace) masm.subFromStackPtr(Imm32(ShadowStackSpace));
1728 masm.assertStackAlignment(ABIStackAlignment);
1729 masm.call(SymbolicAddress::HandleDebugTrap);
1730
1731 masm.branchIfFalseBool(ReturnReg, throwLabel);
1732
1733 if (ShadowStackSpace) masm.addToStackPtr(Imm32(ShadowStackSpace));
1734 masm.Pop(scratch);
1735 masm.moveToStackPtr(scratch);
1736
1737 masm.setFramePushed(framePushed);
1738 masm.PopRegsInMask(AllAllocatableRegs);
1739
1740 GenerateExitEpilogue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);
1741
1742 return FinishOffsets(masm, offsets);
1743 }
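
// A minimal sketch of the realignment trick used above, assuming a
// power-of-two ABI alignment: reserve one word, round the stack pointer down,
// and stash the original value at the new top so a single load restores it.
// The 16-byte alignment and 0x1007 starting sp are example values, not
// constants from this file.
static constexpr uintptr_t RealignWithSavedSpSketch(uintptr_t sp,
                                                    uintptr_t alignment) {
  // The old sp is stored at the address this returns; restoring it is just
  // "load the word at the new sp and move it back into sp".
  return (sp - sizeof(uintptr_t)) & ~(alignment - 1);
}
static_assert(RealignWithSavedSpSketch(0x1007, 16) % 16 == 0,
              "the new stack pointer is ABI-aligned");
static_assert(RealignWithSavedSpSketch(0x1007, 16) + sizeof(uintptr_t) <=
                  0x1007,
              "the saved-sp slot lies within the newly reserved space");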
1744
1745 bool wasm::GenerateEntryStubs(MacroAssembler& masm, size_t funcExportIndex,
1746 const FuncExport& fe, const Maybe<ImmPtr>& callee,
1747 bool isAsmJS, CodeRangeVector* codeRanges) {
1748 MOZ_ASSERT(!callee == fe.hasEagerStubs());
1749 MOZ_ASSERT_IF(isAsmJS, fe.hasEagerStubs());
1750
1751 Offsets offsets;
1752 if (!GenerateInterpEntry(masm, fe, callee, &offsets)) return false;
1753 if (!codeRanges->emplaceBack(CodeRange::InterpEntry, fe.funcIndex(), offsets))
1754 return false;
1755
1756 if (isAsmJS) return true;
1757
1758 if (!GenerateJitEntry(masm, funcExportIndex, fe, callee, &offsets))
1759 return false;
1760 if (!codeRanges->emplaceBack(CodeRange::JitEntry, fe.funcIndex(), offsets))
1761 return false;
1762
1763 return true;
1764 }
1765
1766 bool wasm::GenerateStubs(const ModuleEnvironment& env,
1767 const FuncImportVector& imports,
1768 const FuncExportVector& exports, CompiledCode* code) {
1769 LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
1770 TempAllocator alloc(&lifo);
1771 MacroAssembler masm(MacroAssembler::WasmToken(), alloc);
1772
1773 // Swap in already-allocated empty vectors to avoid malloc/free.
1774 if (!code->swap(masm)) return false;
1775
1776 Label throwLabel;
1777
1778 JitSpew(JitSpew_Codegen, "# Emitting wasm import stubs");
1779
1780 for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
1781 const FuncImport& fi = imports[funcIndex];
1782
1783 CallableOffsets interpOffsets;
1784 if (!GenerateImportInterpExit(masm, fi, funcIndex, &throwLabel,
1785 &interpOffsets))
1786 return false;
1787 if (!code->codeRanges.emplaceBack(CodeRange::ImportInterpExit, funcIndex,
1788 interpOffsets))
1789 return false;
1790
1791 JitExitOffsets jitOffsets;
1792 if (!GenerateImportJitExit(masm, fi, &throwLabel, &jitOffsets))
1793 return false;
1794 if (!code->codeRanges.emplaceBack(funcIndex, jitOffsets)) return false;
1795 }
1796
1797 JitSpew(JitSpew_Codegen, "# Emitting wasm export stubs");
1798
1799 Maybe<ImmPtr> noAbsolute;
1800 for (size_t i = 0; i < exports.length(); i++) {
1801 const FuncExport& fe = exports[i];
1802 if (!fe.hasEagerStubs()) continue;
1803 if (!GenerateEntryStubs(masm, i, fe, noAbsolute, env.isAsmJS(),
1804 &code->codeRanges))
1805 return false;
1806 }
1807
1808 JitSpew(JitSpew_Codegen, "# Emitting wasm trap stubs");
1809
1810 for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
1811 switch (trap) {
1812 case Trap::Unreachable:
1813 case Trap::IntegerOverflow:
1814 case Trap::InvalidConversionToInteger:
1815 case Trap::IntegerDivideByZero:
1816 case Trap::IndirectCallToNull:
1817 case Trap::IndirectCallBadSig:
1818 case Trap::ImpreciseSimdConversion:
1819 case Trap::StackOverflow:
1820 case Trap::ThrowReported:
1821 break;
1822 // The TODO list of "old" traps to convert to new traps:
1823 case Trap::OutOfBounds:
1824 case Trap::UnalignedAccess: {
1825 CallableOffsets offsets;
1826 if (!GenerateOldTrapExit(masm, trap, &throwLabel, &offsets))
1827 return false;
1828 if (!code->codeRanges.emplaceBack(trap, offsets)) return false;
1829 break;
1830 }
1831 case Trap::Limit:
1832 MOZ_CRASH("impossible");
1833 }
1834 }
1835
1836 Offsets offsets;
1837
1838 JitSpew(JitSpew_Codegen, "# Emitting wasm exit stubs");
1839
1840 if (!GenerateOutOfBoundsExit(masm, &throwLabel, &offsets)) return false;
1841 if (!code->codeRanges.emplaceBack(CodeRange::OutOfBoundsExit, offsets))
1842 return false;
1843
1844 if (!GenerateUnalignedExit(masm, &throwLabel, &offsets)) return false;
1845 if (!code->codeRanges.emplaceBack(CodeRange::UnalignedExit, offsets))
1846 return false;
1847
1848 if (!GenerateTrapExit(masm, &throwLabel, &offsets)) return false;
1849 if (!code->codeRanges.emplaceBack(CodeRange::TrapExit, offsets)) return false;
1850
1851 if (!GenerateInterruptExit(masm, &throwLabel, &offsets)) return false;
1852 if (!code->codeRanges.emplaceBack(CodeRange::Interrupt, offsets))
1853 return false;
1854
1855 {
1856 CallableOffsets offsets;
1857 if (!GenerateDebugTrapStub(masm, &throwLabel, &offsets)) return false;
1858 if (!code->codeRanges.emplaceBack(CodeRange::DebugTrap, offsets))
1859 return false;
1860 }
1861
1862 if (!GenerateThrowStub(masm, &throwLabel, &offsets)) return false;
1863 if (!code->codeRanges.emplaceBack(CodeRange::Throw, offsets)) return false;
1864
1865 masm.finish();
1866 if (masm.oom()) return false;
1867
1868 return code->swap(masm);
1869 }
1870