/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 *
 * Copyright 2015 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmStubs.h"

#include "mozilla/ArrayUtils.h"

#include "wasm/WasmCode.h"
#include "wasm/WasmIonCompile.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;
using namespace js::wasm;

using mozilla::ArrayLength;
static void
AssertStackAlignment(MacroAssembler& masm, uint32_t alignment, uint32_t addBeforeAssert = 0)
{
    MOZ_ASSERT((sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
    masm.assertStackAlignment(alignment, addBeforeAssert);
}

static unsigned
StackDecrementForCall(MacroAssembler& masm, uint32_t alignment, unsigned bytesToPush)
{
    return StackDecrementForCall(alignment, sizeof(Frame) + masm.framePushed(), bytesToPush);
}
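
// In StackDecrementForCall above, sizeof(Frame) is added because
// masm.framePushed() does not include the fixed wasm::Frame (two words: the
// return address and saved frame pointer) that sits between the caller's
// frame and the values pushed so far.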

template <class VectorT>
static unsigned
StackArgBytes(const VectorT& args)
{
    ABIArgIter<VectorT> iter(args);
    while (!iter.done())
        iter++;
    return iter.stackBytesConsumedSoFar();
}
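
// For example (a sketch; the exact answer is ABI-dependent): on x64, where
// the first several integer and floating-point arguments are passed in
// registers, StackArgBytes for a signature like (i32, f64, i32) is 0; on
// x86, where all arguments are passed on the stack, the same signature
// consumes 4 + 8 + 4 = 16 bytes.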

template <class VectorT>
static unsigned
StackDecrementForCall(MacroAssembler& masm, uint32_t alignment, const VectorT& args,
                      unsigned extraBytes = 0)
{
    return StackDecrementForCall(masm, alignment, StackArgBytes(args) + extraBytes);
}

#if defined(JS_CODEGEN_ARM)
// The ARM system ABI also includes d15 & s31 in the non-volatile float registers.
// Also exclude lr (a.k.a. r14) since we preserve it manually.
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask &
                                       ~(uint32_t(1) << Registers::lr)),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask
                                     | (1ULL << FloatRegisters::d15)
                                     | (1ULL << FloatRegisters::s31)));
#else
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask));
#endif

#if defined(JS_CODEGEN_MIPS32)
// MIPS uses one extra double slot due to stack alignment for double values.
// See MacroAssembler::PushRegsInMask(RegisterSet set).
static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
                                             NonVolatileRegs.fpus().getPushSizeInBytes() +
                                             sizeof(double);
#elif defined(JS_CODEGEN_NONE)
static const unsigned FramePushedAfterSave = 0;
#else
static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * sizeof(intptr_t)
                                           + NonVolatileRegs.fpus().getPushSizeInBytes();
#endif
static const unsigned FramePushedForEntrySP = FramePushedAfterSave + sizeof(void*);
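
// After the entry stub's prologue (see GenerateEntry below), the stack looks
// like this (sp grows down):
//   [ return address (pushed explicitly on ARM/MIPS) ]
//   [ NonVolatileRegs save area ]   <- framePushed == FramePushedAfterSave
//   [ saved 'argv' pointer ]        <- framePushed == FramePushedForEntrySP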

// Generate a stub that enters wasm from a C++ caller via the native ABI. The
// signature of the entry point is Module::ExportFuncPtr. The exported wasm
// function has an ABI derived from its specific signature, so this function
// must map from the ABI of ExportFuncPtr to the export's signature's ABI.
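// Concretely, the entry point has roughly this shape (a sketch; see the
// ExportFuncPtr declaration for the authoritative signature):
//   int32_t (*ExportFuncPtr)(ExportArg* argv, TlsData* tls);
// On success, the stub stores the wasm return value back into argv[0].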
Offsets
wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
{
    masm.haltingAlign(CodeAlignment);

    Offsets offsets;
    offsets.begin = masm.currentOffset();

    // Save the return address if it wasn't already saved by the call insn.
#if defined(JS_CODEGEN_ARM)
    masm.push(lr);
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    masm.push(ra);
#endif

    // Save all caller non-volatile registers before we clobber them here and in
    // the wasm callee (which does not preserve non-volatile registers).
    masm.setFramePushed(0);
    masm.PushRegsInMask(NonVolatileRegs);
    MOZ_ASSERT(masm.framePushed() == FramePushedAfterSave);

    // Put the 'argv' argument into a non-argument/return/TLS register so that
    // we can use 'argv' while we fill in the arguments for the wasm callee.
    Register argv = ABINonArgReturnReg0;
    Register scratch = ABINonArgReturnReg1;

    // Read the arguments of wasm::ExportFuncPtr according to the native ABI.
    // The entry stub's frame is only 1 word, not the usual 2 for wasm::Frame.
    const unsigned argBase = sizeof(void*) + masm.framePushed();
    ABIArgGenerator abi;
    ABIArg arg;

    // arg 1: ExportArg*
    arg = abi.next(MIRType::Pointer);
    if (arg.kind() == ABIArg::GPR)
        masm.movePtr(arg.gpr(), argv);
    else
        masm.loadPtr(Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()), argv);

    // arg 2: TlsData*
    arg = abi.next(MIRType::Pointer);
    if (arg.kind() == ABIArg::GPR)
        masm.movePtr(arg.gpr(), WasmTlsReg);
    else
        masm.loadPtr(Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()), WasmTlsReg);

    // Set up the pinned registers that are assumed throughout wasm code.
    masm.loadWasmPinnedRegsFromTls();

    // Save 'argv' on the stack so that we can recover it after the call. Use
    // a second non-argument/return register as temporary scratch.
    masm.Push(argv);

    // Save the stack pointer in the WasmActivation right before dynamically
    // aligning the stack so that it may be recovered on return or throw.
    MOZ_ASSERT(masm.framePushed() == FramePushedForEntrySP);
    masm.loadWasmActivationFromTls(scratch);
    masm.storeStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));

    // Dynamically align the stack since ABIStackAlignment is not necessarily
    // WasmStackAlignment. We'll use entrySP to recover the original stack
    // pointer on return.
    masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));

    // Bump the stack for the call.
    masm.reserveStack(AlignBytes(StackArgBytes(fe.sig().args()), WasmStackAlignment));

    // Copy parameters out of argv and into the registers/stack-slots specified by
    // the system ABI.
    for (ABIArgValTypeIter iter(fe.sig().args()); !iter.done(); iter++) {
        unsigned argOffset = iter.index() * sizeof(ExportArg);
        Address src(argv, argOffset);
        MIRType type = iter.mirType();
        switch (iter->kind()) {
          case ABIArg::GPR:
            if (type == MIRType::Int32)
                masm.load32(src, iter->gpr());
            else if (type == MIRType::Int64)
                masm.load64(src, iter->gpr64());
            break;
#ifdef JS_CODEGEN_REGISTER_PAIR
          case ABIArg::GPR_PAIR:
            if (type == MIRType::Int64)
                masm.load64(src, iter->gpr64());
            else
                MOZ_CRASH("wasm uses hardfp for function calls.");
            break;
#endif
          case ABIArg::FPU: {
            static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
                          "ExportArg must be big enough to store SIMD values");
            switch (type) {
              case MIRType::Int8x16:
              case MIRType::Int16x8:
              case MIRType::Int32x4:
              case MIRType::Bool8x16:
              case MIRType::Bool16x8:
              case MIRType::Bool32x4:
                masm.loadUnalignedSimd128Int(src, iter->fpu());
                break;
              case MIRType::Float32x4:
                masm.loadUnalignedSimd128Float(src, iter->fpu());
                break;
              case MIRType::Double:
                masm.loadDouble(src, iter->fpu());
                break;
              case MIRType::Float32:
                masm.loadFloat32(src, iter->fpu());
                break;
              default:
                MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
                break;
            }
            break;
          }
          case ABIArg::Stack:
            switch (type) {
              case MIRType::Int32:
                masm.load32(src, scratch);
                masm.storePtr(scratch, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              case MIRType::Int64: {
                Register sp = masm.getStackPointer();
#if JS_BITS_PER_WORD == 32
                masm.load32(Address(src.base, src.offset + INT64LOW_OFFSET), scratch);
                masm.store32(scratch, Address(sp, iter->offsetFromArgBase() + INT64LOW_OFFSET));
                masm.load32(Address(src.base, src.offset + INT64HIGH_OFFSET), scratch);
                masm.store32(scratch, Address(sp, iter->offsetFromArgBase() + INT64HIGH_OFFSET));
#else
                Register64 scratch64(scratch);
                masm.load64(src, scratch64);
                masm.store64(scratch64, Address(sp, iter->offsetFromArgBase()));
#endif
                break;
              }
              case MIRType::Double:
                masm.loadDouble(src, ScratchDoubleReg);
                masm.storeDouble(ScratchDoubleReg,
                                 Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              case MIRType::Float32:
                masm.loadFloat32(src, ScratchFloat32Reg);
                masm.storeFloat32(ScratchFloat32Reg,
                                  Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              case MIRType::Int8x16:
              case MIRType::Int16x8:
              case MIRType::Int32x4:
              case MIRType::Bool8x16:
              case MIRType::Bool16x8:
              case MIRType::Bool32x4:
                masm.loadUnalignedSimd128Int(src, ScratchSimd128Reg);
                masm.storeAlignedSimd128Int(
                    ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              case MIRType::Float32x4:
                masm.loadUnalignedSimd128Float(src, ScratchSimd128Reg);
                masm.storeAlignedSimd128Float(
                    ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              default:
                MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
            }
            break;
        }
    }

    // Call into the real function.
    masm.assertStackAlignment(WasmStackAlignment);
    masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());

    // Recover the stack pointer value before dynamic alignment.
    masm.loadWasmActivationFromTls(scratch);
    masm.loadStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
    masm.setFramePushed(FramePushedForEntrySP);

    // Recover the 'argv' pointer which was saved before aligning the stack.
    masm.Pop(argv);

    // Store the return value in argv[0].
    switch (fe.sig().ret()) {
      case ExprType::Void:
        break;
      case ExprType::I32:
        masm.store32(ReturnReg, Address(argv, 0));
        break;
      case ExprType::I64:
        masm.store64(ReturnReg64, Address(argv, 0));
        break;
      case ExprType::F32:
        if (!JitOptions.wasmTestMode)
            masm.canonicalizeFloat(ReturnFloat32Reg);
        masm.storeFloat32(ReturnFloat32Reg, Address(argv, 0));
        break;
      case ExprType::F64:
        if (!JitOptions.wasmTestMode)
            masm.canonicalizeDouble(ReturnDoubleReg);
        masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
        break;
      case ExprType::I8x16:
      case ExprType::I16x8:
      case ExprType::I32x4:
      case ExprType::B8x16:
      case ExprType::B16x8:
      case ExprType::B32x4:
        // We don't have control over argv alignment, so do an unaligned access.
        masm.storeUnalignedSimd128Int(ReturnSimd128Reg, Address(argv, 0));
        break;
      case ExprType::F32x4:
        // We don't have control over argv alignment, so do an unaligned access.
        masm.storeUnalignedSimd128Float(ReturnSimd128Reg, Address(argv, 0));
        break;
      case ExprType::Limit:
        MOZ_CRASH("Limit");
    }

    // Restore clobbered non-volatile registers of the caller.
    masm.PopRegsInMask(NonVolatileRegs);
    MOZ_ASSERT(masm.framePushed() == 0);

    masm.move32(Imm32(true), ReturnReg);
    masm.ret();

    offsets.end = masm.currentOffset();
    return offsets;
}

static void
StackCopy(MacroAssembler& masm, MIRType type, Register scratch, Address src, Address dst)
{
    if (type == MIRType::Int32) {
        masm.load32(src, scratch);
        masm.store32(scratch, dst);
    } else if (type == MIRType::Int64) {
#if JS_BITS_PER_WORD == 32
        masm.load32(Address(src.base, src.offset + INT64LOW_OFFSET), scratch);
        masm.store32(scratch, Address(dst.base, dst.offset + INT64LOW_OFFSET));
        masm.load32(Address(src.base, src.offset + INT64HIGH_OFFSET), scratch);
        masm.store32(scratch, Address(dst.base, dst.offset + INT64HIGH_OFFSET));
#else
        Register64 scratch64(scratch);
        masm.load64(src, scratch64);
        masm.store64(scratch64, dst);
#endif
    } else if (type == MIRType::Float32) {
        masm.loadFloat32(src, ScratchFloat32Reg);
        masm.storeFloat32(ScratchFloat32Reg, dst);
    } else {
        MOZ_ASSERT(type == MIRType::Double);
        masm.loadDouble(src, ScratchDoubleReg);
        masm.storeDouble(ScratchDoubleReg, dst);
    }
}

typedef bool ToValue;
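
// FillArgumentArray (below) copies the arguments described by 'args' into
// consecutive Value-sized slots starting at 'argOffset' on the stack. The
// ToValue flag selects the representation: ToValue(true) boxes each argument
// as a JS::Value, for exits that call into C++ or JIT code expecting boxed
// Values, while ToValue(false) copies the raw bytes for native-ABI callees.
// The typedef just makes call sites read FillArgumentArray(..., ToValue(true))
// rather than passing a bare 'true'.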

static void
FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args, unsigned argOffset,
                  unsigned offsetToCallerStackArgs, Register scratch, ToValue toValue)
{
    for (ABIArgValTypeIter i(args); !i.done(); i++) {
        Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));

        MIRType type = i.mirType();
        switch (i->kind()) {
          case ABIArg::GPR:
            if (type == MIRType::Int32) {
                if (toValue)
                    masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
                else
                    masm.store32(i->gpr(), dst);
            } else if (type == MIRType::Int64) {
                // We can't box int64 into Values (yet).
                if (toValue)
                    masm.breakpoint();
                else
                    masm.store64(i->gpr64(), dst);
            } else {
                MOZ_CRASH("unexpected input type?");
            }
            break;
#ifdef JS_CODEGEN_REGISTER_PAIR
          case ABIArg::GPR_PAIR:
            if (type == MIRType::Int64)
                masm.store64(i->gpr64(), dst);
            else
                MOZ_CRASH("wasm uses hardfp for function calls.");
            break;
#endif
          case ABIArg::FPU: {
            MOZ_ASSERT(IsFloatingPointType(type));
            FloatRegister srcReg = i->fpu();
            if (type == MIRType::Double) {
                if (toValue) {
                    // Preserve the NaN pattern in the input.
                    masm.moveDouble(srcReg, ScratchDoubleReg);
                    srcReg = ScratchDoubleReg;
                    masm.canonicalizeDouble(srcReg);
                }
                masm.storeDouble(srcReg, dst);
            } else {
                MOZ_ASSERT(type == MIRType::Float32);
                if (toValue) {
                    // JS::Values can't store Float32, so convert to a Double.
                    masm.convertFloat32ToDouble(srcReg, ScratchDoubleReg);
                    masm.canonicalizeDouble(ScratchDoubleReg);
                    masm.storeDouble(ScratchDoubleReg, dst);
                } else {
                    // Preserve the NaN pattern in the input.
                    masm.moveFloat32(srcReg, ScratchFloat32Reg);
                    masm.canonicalizeFloat(ScratchFloat32Reg);
                    masm.storeFloat32(ScratchFloat32Reg, dst);
                }
            }
            break;
          }
          case ABIArg::Stack: {
            Address src(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
            if (toValue) {
                if (type == MIRType::Int32) {
                    masm.load32(src, scratch);
                    masm.storeValue(JSVAL_TYPE_INT32, scratch, dst);
                } else if (type == MIRType::Int64) {
                    // We can't box int64 into Values (yet).
                    masm.breakpoint();
                } else {
                    MOZ_ASSERT(IsFloatingPointType(type));
                    if (type == MIRType::Float32) {
                        masm.loadFloat32(src, ScratchFloat32Reg);
                        masm.convertFloat32ToDouble(ScratchFloat32Reg, ScratchDoubleReg);
                    } else {
                        masm.loadDouble(src, ScratchDoubleReg);
                    }
                    masm.canonicalizeDouble(ScratchDoubleReg);
                    masm.storeDouble(ScratchDoubleReg, dst);
                }
            } else {
                StackCopy(masm, type, scratch, src, dst);
            }
            break;
          }
        }
    }
}

// Generate a wrapper function with the standard intra-wasm call ABI which simply
// calls an import. This wrapper function allows any import to be treated like a
// normal wasm function for the purposes of exports and table calls. In
// particular, the wrapper function provides:
// - a table entry, so JS imports can be put into tables
// - normal (non-)profiling entries, so that, if the import is re-exported,
//   an entry stub can be generated and called without any special cases
FuncOffsets
wasm::GenerateImportFunction(jit::MacroAssembler& masm, const FuncImport& fi, SigIdDesc sigId)
{
    masm.setFramePushed(0);

    unsigned tlsBytes = sizeof(void*);
    unsigned framePushed = StackDecrementForCall(masm, WasmStackAlignment, fi.sig().args(), tlsBytes);

    FuncOffsets offsets;
    GenerateFunctionPrologue(masm, framePushed, sigId, &offsets);

    // The argument register state is already set up by our caller. We just need
    // to be sure not to clobber it before the call.
    Register scratch = ABINonArgReg0;

    // Copy our frame's stack arguments to the callee frame's stack arguments.
    unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
    ABIArgValTypeIter i(fi.sig().args());
    for (; !i.done(); i++) {
        if (i->kind() != ABIArg::Stack)
            continue;

        Address src(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
        Address dst(masm.getStackPointer(), i->offsetFromArgBase());
        StackCopy(masm, i.mirType(), scratch, src, dst);
    }

    // Save the TLS register so it can be restored later.
    uint32_t tlsStackOffset = i.stackBytesConsumedSoFar();
    masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), tlsStackOffset));

    // Call the import exit stub.
    CallSiteDesc desc(CallSiteDesc::Dynamic);
    masm.wasmCallImport(desc, CalleeDesc::import(fi.tlsDataOffset()));

    // Restore the TLS register and pinned regs, per wasm function ABI.
    masm.loadPtr(Address(masm.getStackPointer(), tlsStackOffset), WasmTlsReg);
    masm.loadWasmPinnedRegsFromTls();

    GenerateFunctionEpilogue(masm, framePushed, &offsets);

    masm.wasmEmitTrapOutOfLineCode();

    offsets.end = masm.currentOffset();
    return offsets;
}

// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into an appropriate callImport C++
// function, having boxed all the ABI arguments into a homogeneous Value array.
ProfilingOffsets
wasm::GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi, uint32_t funcImportIndex,
                               Label* throwLabel)
{
    masm.setFramePushed(0);

    // Argument types for Module::callImport_*:
    static const MIRType typeArray[] = { MIRType::Pointer,   // Instance*
                                         MIRType::Pointer,   // funcImportIndex
                                         MIRType::Int32,     // argc
                                         MIRType::Pointer }; // argv
    MIRTypeVector invokeArgTypes;
    MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, ArrayLength(typeArray)));

    // At the point of the call, the stack layout shall be (sp grows to the left):
    //   | stack args | padding | Value argv[] | padding | retaddr | caller stack args |
    // The padding between stack args and argv ensures that argv is aligned. The
    // padding between argv and retaddr ensures that sp is aligned.
    unsigned argOffset = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
    unsigned argBytes = Max<size_t>(1, fi.sig().args().length()) * sizeof(Value);
    unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);

    ProfilingOffsets offsets;
    GenerateExitPrologue(masm, framePushed, ExitReason::ImportInterp, &offsets);

    // Fill the argument array.
    unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
    Register scratch = ABINonArgReturnReg0;
    FillArgumentArray(masm, fi.sig().args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(false));

    // Prepare the arguments for the call to Module::callImport_*.
    ABIArgMIRTypeIter i(invokeArgTypes);

    // argument 0: Instance*
    Address instancePtr(WasmTlsReg, offsetof(TlsData, instance));
    if (i->kind() == ABIArg::GPR) {
        masm.loadPtr(instancePtr, i->gpr());
    } else {
        masm.loadPtr(instancePtr, scratch);
        masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
    }
    i++;

    // argument 1: funcImportIndex
    if (i->kind() == ABIArg::GPR)
        masm.mov(ImmWord(funcImportIndex), i->gpr());
    else
        masm.store32(Imm32(funcImportIndex), Address(masm.getStackPointer(), i->offsetFromArgBase()));
    i++;

    // argument 2: argc
    unsigned argc = fi.sig().args().length();
    if (i->kind() == ABIArg::GPR)
        masm.mov(ImmWord(argc), i->gpr());
    else
        masm.store32(Imm32(argc), Address(masm.getStackPointer(), i->offsetFromArgBase()));
    i++;

    // argument 3: argv
    Address argv(masm.getStackPointer(), argOffset);
    if (i->kind() == ABIArg::GPR) {
        masm.computeEffectiveAddress(argv, i->gpr());
    } else {
        masm.computeEffectiveAddress(argv, scratch);
        masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
    }
    i++;
    MOZ_ASSERT(i.done());

    // Make the call, test whether it succeeded, and extract the return value.
    AssertStackAlignment(masm, ABIStackAlignment);
    switch (fi.sig().ret()) {
      case ExprType::Void:
        masm.call(SymbolicAddress::CallImport_Void);
        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
        break;
      case ExprType::I32:
        masm.call(SymbolicAddress::CallImport_I32);
        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
        masm.load32(argv, ReturnReg);
        break;
      case ExprType::I64:
        masm.call(SymbolicAddress::CallImport_I64);
        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
        masm.load64(argv, ReturnReg64);
        break;
      case ExprType::F32:
        // The import call returns a double, so convert it to float32 here.
        masm.call(SymbolicAddress::CallImport_F64);
        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
        masm.loadDouble(argv, ReturnDoubleReg);
        masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
        break;
      case ExprType::F64:
        masm.call(SymbolicAddress::CallImport_F64);
        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
        masm.loadDouble(argv, ReturnDoubleReg);
        break;
      case ExprType::I8x16:
      case ExprType::I16x8:
      case ExprType::I32x4:
      case ExprType::F32x4:
      case ExprType::B8x16:
      case ExprType::B16x8:
      case ExprType::B32x4:
        MOZ_CRASH("SIMD types shouldn't be returned from an FFI");
      case ExprType::Limit:
        MOZ_CRASH("Limit");
    }

    // The native ABI preserves the TLS, heap and global registers since they
    // are non-volatile.
    MOZ_ASSERT(NonVolatileRegs.has(WasmTlsReg));
#if defined(JS_CODEGEN_X64) || \
    defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    MOZ_ASSERT(NonVolatileRegs.has(HeapReg));
#endif
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    MOZ_ASSERT(NonVolatileRegs.has(GlobalReg));
#endif

    GenerateExitEpilogue(masm, framePushed, ExitReason::ImportInterp, &offsets);

    offsets.end = masm.currentOffset();
    return offsets;
}

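// SavedTlsReg is the size of the stack slot, just above the JIT frame proper,
// in which GenerateImportJitExit spills WasmTlsReg across the JIT call: JIT
// code clobbers all registers, including non-volatiles, so the TLS pointer
// must be saved and restored manually around the call.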
static const unsigned SavedTlsReg = sizeof(void*);

// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into a compatible JIT function,
// having boxed all the ABI arguments into the JIT stack frame layout.
ProfilingOffsets
wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLabel)
{
    masm.setFramePushed(0);

    // JIT calls use the following stack layout (sp grows to the left):
    //   | retaddr | descriptor | callee | argc | this | arg1..N |
    // After the JIT frame, the global register (if present) is saved since the
    // JIT's ABI does not preserve non-volatile regs. Also, unlike most ABIs,
    // the JIT ABI requires that sp be JitStackAlignment-aligned *after* pushing
    // the return address.
    static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
    unsigned sizeOfRetAddr = sizeof(void*);
    unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + fi.sig().args().length()) * sizeof(Value);
    unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes + SavedTlsReg;
    unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) -
                              sizeOfRetAddr;

    ProfilingOffsets offsets;
    GenerateExitPrologue(masm, jitFramePushed, ExitReason::ImportJit, &offsets);

    // 1. Descriptor
    size_t argOffset = 0;
    uint32_t descriptor = MakeFrameDescriptor(jitFramePushed, JitFrame_Entry,
                                              JitFrameLayout::Size());
    masm.storePtr(ImmWord(uintptr_t(descriptor)), Address(masm.getStackPointer(), argOffset));
    argOffset += sizeof(size_t);

    // 2. Callee
    Register callee = ABINonArgReturnReg0;  // live until call
    Register scratch = ABINonArgReturnReg1; // repeatedly clobbered

    // 2.1. Get callee
    masm.loadWasmGlobalPtr(fi.tlsDataOffset() + offsetof(FuncImportTls, obj), callee);

    // 2.2. Save callee
    masm.storePtr(callee, Address(masm.getStackPointer(), argOffset));
    argOffset += sizeof(size_t);

    // 2.3. Load callee executable entry point
    masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
    masm.loadBaselineOrIonNoArgCheck(callee, callee, nullptr);

    // 3. Argc
    unsigned argc = fi.sig().args().length();
    masm.storePtr(ImmWord(uintptr_t(argc)), Address(masm.getStackPointer(), argOffset));
    argOffset += sizeof(size_t);

    // 4. |this| value
    masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
    argOffset += sizeof(Value);

    // 5. Fill the arguments
    unsigned offsetToCallerStackArgs = jitFramePushed + sizeof(Frame);
    FillArgumentArray(masm, fi.sig().args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(true));
    argOffset += fi.sig().args().length() * sizeof(Value);
    MOZ_ASSERT(argOffset == jitFrameBytes);

    // 6. Jit code will clobber all registers, even non-volatiles. WasmTlsReg
    // must be kept live for the benefit of the epilogue, so push it on the
    // stack so that it can be restored before the epilogue.
    static_assert(SavedTlsReg == sizeof(void*), "stack frame accounting");
    masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), jitFrameBytes));

    {
        // Enable Activation.
        //
        // This sequence requires two registers, and needs to preserve the
        // 'callee' register, so there are three live registers.
        MOZ_ASSERT(callee == WasmIonExitRegCallee);
        Register cx = WasmIonExitRegE0;
        Register act = WasmIonExitRegE1;

        // JitActivation* act = cx->activation();
        masm.movePtr(SymbolicAddress::Context, cx);
        masm.loadPtr(Address(cx, JSContext::offsetOfActivation()), act);

        // act.active_ = true;
        masm.store8(Imm32(1), Address(act, JitActivation::offsetOfActiveUint8()));

        // cx->jitActivation = act;
        masm.storePtr(act, Address(cx, offsetof(JSContext, jitActivation)));

        // cx->profilingActivation_ = act;
        masm.storePtr(act, Address(cx, JSContext::offsetOfProfilingActivation()));
    }

    AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);
    masm.callJitNoProfiler(callee);
    AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);

    {
        // Disable Activation.
        //
        // This sequence needs three registers and must preserve JSReturnReg_Data
        // and JSReturnReg_Type, so there are five live registers.
        MOZ_ASSERT(JSReturnReg_Data == WasmIonExitRegReturnData);
        MOZ_ASSERT(JSReturnReg_Type == WasmIonExitRegReturnType);
        Register cx = WasmIonExitRegD0;
        Register act = WasmIonExitRegD1;
        Register tmp = WasmIonExitRegD2;

        // JitActivation* act = cx->activation();
        masm.movePtr(SymbolicAddress::Context, cx);
        masm.loadPtr(Address(cx, JSContext::offsetOfActivation()), act);

        // cx->jitTop = act->prevJitTop_;
        masm.loadPtr(Address(act, JitActivation::offsetOfPrevJitTop()), tmp);
        masm.storePtr(tmp, Address(cx, offsetof(JSContext, jitTop)));

        // cx->jitActivation = act->prevJitActivation_;
        masm.loadPtr(Address(act, JitActivation::offsetOfPrevJitActivation()), tmp);
        masm.storePtr(tmp, Address(cx, offsetof(JSContext, jitActivation)));

        // cx->profilingActivation = act->prevProfilingActivation_;
        masm.loadPtr(Address(act, Activation::offsetOfPrevProfiling()), tmp);
        masm.storePtr(tmp, Address(cx, JSContext::offsetOfProfilingActivation()));

        // act->active_ = false;
        masm.store8(Imm32(0), Address(act, JitActivation::offsetOfActiveUint8()));
    }

    // As explained above, the frame was aligned for the JIT ABI such that
    //   (sp + sizeof(void*)) % JitStackAlignment == 0
    // But now we possibly want to call one of several different C++ functions,
    // so subtract sizeof(void*) so that sp is aligned for an ABI call.
    static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
    masm.reserveStack(sizeOfRetAddr);
    unsigned nativeFramePushed = masm.framePushed();
    AssertStackAlignment(masm, ABIStackAlignment);

    masm.branchTestMagic(Assembler::Equal, JSReturnOperand, throwLabel);

    Label oolConvert;
    switch (fi.sig().ret()) {
      case ExprType::Void:
        break;
      case ExprType::I32:
        masm.convertValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg, &oolConvert,
                                 /* -0 check */ false);
        break;
      case ExprType::I64:
        // We don't expect int64 to be returned from Ion yet, because of a
        // guard in callImport.
        masm.breakpoint();
        break;
      case ExprType::F32:
        masm.convertValueToFloat(JSReturnOperand, ReturnFloat32Reg, &oolConvert);
        break;
      case ExprType::F64:
        masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert);
        break;
      case ExprType::I8x16:
      case ExprType::I16x8:
      case ExprType::I32x4:
      case ExprType::F32x4:
      case ExprType::B8x16:
      case ExprType::B16x8:
      case ExprType::B32x4:
        MOZ_CRASH("SIMD types shouldn't be returned from an import");
      case ExprType::Limit:
        MOZ_CRASH("Limit");
    }

    Label done;
    masm.bind(&done);

    // Ion code does not respect the system ABI's callee-saved register
    // conventions, so reload any assumed-non-volatile registers. Note that the
    // reserveStack(sizeOfRetAddr) above means that the stack pointer is at a
    // different offset than when WasmTlsReg was stored.
    masm.loadPtr(Address(masm.getStackPointer(), jitFrameBytes + sizeOfRetAddr), WasmTlsReg);

    GenerateExitEpilogue(masm, masm.framePushed(), ExitReason::ImportJit, &offsets);

    if (oolConvert.used()) {
        masm.bind(&oolConvert);
        masm.setFramePushed(nativeFramePushed);

        // Coercion calls use the following stack layout (sp grows to the left):
        //   | args | padding | Value argv[1] | padding | exit Frame |
        MIRTypeVector coerceArgTypes;
        JS_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
        unsigned offsetToCoerceArgv = AlignBytes(StackArgBytes(coerceArgTypes), sizeof(Value));
        MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
        AssertStackAlignment(masm, ABIStackAlignment);

        // Store return value into argv[0].
        masm.storeValue(JSReturnOperand, Address(masm.getStackPointer(), offsetToCoerceArgv));

        // argument 0: argv
        ABIArgMIRTypeIter i(coerceArgTypes);
        Address argv(masm.getStackPointer(), offsetToCoerceArgv);
        if (i->kind() == ABIArg::GPR) {
            masm.computeEffectiveAddress(argv, i->gpr());
        } else {
            masm.computeEffectiveAddress(argv, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
        }
        i++;
        MOZ_ASSERT(i.done());

        // Call coercion function.
        AssertStackAlignment(masm, ABIStackAlignment);
        switch (fi.sig().ret()) {
          case ExprType::I32:
            masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
            masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
            masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnReg);
            break;
          case ExprType::F64:
            masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
            masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
            masm.loadDouble(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnDoubleReg);
            break;
          case ExprType::F32:
            masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
            masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
            masm.loadDouble(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnDoubleReg);
            masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
            break;
          default:
            MOZ_CRASH("Unsupported convert type");
        }

        masm.jump(&done);
        masm.setFramePushed(0);
    }

    MOZ_ASSERT(masm.framePushed() == 0);

    offsets.end = masm.currentOffset();
    return offsets;
}

// Generate a stub that calls into ReportTrap with the right trap reason.
// This stub is called with ABIStackAlignment by a trap out-of-line path. A
// profiling prologue/epilogue is used so that stack unwinding picks up the
// current WasmActivation. Unwinding will begin at the caller of this trap exit.
ProfilingOffsets
wasm::GenerateTrapExit(MacroAssembler& masm, Trap trap, Label* throwLabel)
{
    masm.haltingAlign(CodeAlignment);

    masm.setFramePushed(0);

    MIRTypeVector args;
    MOZ_ALWAYS_TRUE(args.append(MIRType::Int32));

    uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, args);

    ProfilingOffsets offsets;
    GenerateExitPrologue(masm, framePushed, ExitReason::Trap, &offsets);

    ABIArgMIRTypeIter i(args);
    if (i->kind() == ABIArg::GPR)
        masm.move32(Imm32(int32_t(trap)), i->gpr());
    else
        masm.store32(Imm32(int32_t(trap)), Address(masm.getStackPointer(), i->offsetFromArgBase()));
    i++;
    MOZ_ASSERT(i.done());

    masm.assertStackAlignment(ABIStackAlignment);
    masm.call(SymbolicAddress::ReportTrap);

    masm.jump(throwLabel);

    GenerateExitEpilogue(masm, framePushed, ExitReason::Trap, &offsets);

    offsets.end = masm.currentOffset();
    return offsets;
}

// Generate a stub which is only used by the signal handlers to handle
// out-of-bounds accesses by experimental SIMD.js and Atomics, and unaligned
// accesses on ARM. This stub is executed by direct PC transfer from the
// faulting memory access and thus the stack depth is unknown. Since
// WasmActivation::fp is not set before calling the error reporter, the
// current wasm activation will be lost. This stub should be removed when
// SIMD.js and Atomics are moved to wasm and given proper traps, and when we
// use a non-faulting strategy for unaligned ARM access.
static Offsets
GenerateGenericMemoryAccessTrap(MacroAssembler& masm, SymbolicAddress reporter, Label* throwLabel)
{
    masm.haltingAlign(CodeAlignment);

    Offsets offsets;
    offsets.begin = masm.currentOffset();

    // sp can be anything at this point, so ensure it is aligned when calling
    // into C++. We unconditionally jump to throw so don't worry about
    // restoring sp.
    masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
    if (ShadowStackSpace)
        masm.subFromStackPtr(Imm32(ShadowStackSpace));

    masm.call(reporter);
    masm.jump(throwLabel);

    offsets.end = masm.currentOffset();
    return offsets;
}

Offsets
wasm::GenerateOutOfBoundsExit(MacroAssembler& masm, Label* throwLabel)
{
    return GenerateGenericMemoryAccessTrap(masm, SymbolicAddress::ReportOutOfBounds, throwLabel);
}

Offsets
wasm::GenerateUnalignedExit(MacroAssembler& masm, Label* throwLabel)
{
    return GenerateGenericMemoryAccessTrap(masm, SymbolicAddress::ReportUnalignedAccess, throwLabel);
}

static const LiveRegisterSet AllRegsExceptSP(
    GeneralRegisterSet(Registers::AllMask & ~(uint32_t(1) << Registers::StackPointer)),
    FloatRegisterSet(FloatRegisters::AllMask));

// The async interrupt-callback exit is called from arbitrarily-interrupted wasm
// code. That means we must first save *all* registers and restore *all*
// registers (except the stack pointer) when we resume. The address to resume to
// (assuming that js::HandleExecutionInterrupt doesn't indicate that the
// execution should be aborted) is stored in WasmActivation::resumePC_.
// Unfortunately, loading this requires a scratch register which we don't have
// after restoring all registers. To hack around this, push the resumePC on the
// stack so that it can be popped directly into PC.
Offsets
wasm::GenerateInterruptExit(MacroAssembler& masm, Label* throwLabel)
{
    masm.haltingAlign(CodeAlignment);

    Offsets offsets;
    offsets.begin = masm.currentOffset();

#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    // Be very careful here not to perturb the machine state before saving it
    // to the stack. In particular, add/sub instructions may set conditions in
    // the flags register.
    masm.push(Imm32(0));                  // space for resumePC
    masm.pushFlags();                     // after this we are safe to use sub
    masm.setFramePushed(0);               // set to zero so we can use masm.framePushed() below
    masm.PushRegsInMask(AllRegsExceptSP); // save all GP/FP registers (except SP)

    Register scratch = ABINonArgReturnReg0;

    // Store resumePC into the reserved space.
    masm.loadWasmActivationFromSymbolicAddress(scratch);
    masm.loadPtr(Address(scratch, WasmActivation::offsetOfResumePC()), scratch);
    masm.storePtr(scratch, Address(masm.getStackPointer(), masm.framePushed() + sizeof(void*)));

    // We know that StackPointer is word-aligned, but not necessarily
    // stack-aligned, so we need to align it dynamically.
    masm.moveStackPtrTo(ABINonVolatileReg);
    masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
    if (ShadowStackSpace)
        masm.subFromStackPtr(Imm32(ShadowStackSpace));

    masm.assertStackAlignment(ABIStackAlignment);
    masm.call(SymbolicAddress::HandleExecutionInterrupt);

    masm.branchIfFalseBool(ReturnReg, throwLabel);

    // Restore the StackPointer to its position before the call.
    masm.moveToStackPtr(ABINonVolatileReg);

    // Restore the machine state to before the interrupt.
    masm.PopRegsInMask(AllRegsExceptSP); // restore all GP/FP registers (except SP)
    masm.popFlags();                     // after this, nothing that sets conditions
    masm.ret();                          // pop resumePC into PC
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    // Reserve space to store resumePC and HeapReg.
    masm.subFromStackPtr(Imm32(2 * sizeof(intptr_t)));
    // Set framePushed to zero so we can use masm.framePushed() below.
    masm.setFramePushed(0);
    static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
    // Save all registers, except sp. After this the stack is aligned.
    masm.PushRegsInMask(AllRegsExceptSP);

    // Save the stack pointer in a non-volatile register.
    masm.moveStackPtrTo(s0);
    // Align the stack.
    masm.ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));

    // Store resumePC into the reserved space.
    masm.loadWasmActivationFromSymbolicAddress(IntArgReg0);
    masm.loadPtr(Address(IntArgReg0, WasmActivation::offsetOfResumePC()), IntArgReg1);
    masm.storePtr(IntArgReg1, Address(s0, masm.framePushed()));
    // Store HeapReg into the reserved space.
    masm.storePtr(HeapReg, Address(s0, masm.framePushed() + sizeof(intptr_t)));

# ifdef USES_O32_ABI
    // The MIPS o32 ABI requires reserving stack space for registers $a0 to $a3.
    masm.subFromStackPtr(Imm32(4 * sizeof(intptr_t)));
# endif

    masm.assertStackAlignment(ABIStackAlignment);
    masm.call(SymbolicAddress::HandleExecutionInterrupt);

# ifdef USES_O32_ABI
    masm.addToStackPtr(Imm32(4 * sizeof(intptr_t)));
# endif

    masm.branchIfFalseBool(ReturnReg, throwLabel);

    // This will restore the stack to the address before the call.
    masm.moveToStackPtr(s0);
    masm.PopRegsInMask(AllRegsExceptSP);

    // Pop resumePC into PC. Clobber HeapReg to make the jump and restore it
    // in the jump delay slot.
    masm.loadPtr(Address(StackPointer, 0), HeapReg);
    // Reclaim the reserved space.
    masm.addToStackPtr(Imm32(2 * sizeof(intptr_t)));
    masm.as_jr(HeapReg);
    masm.loadPtr(Address(StackPointer, -sizeof(intptr_t)), HeapReg);
#elif defined(JS_CODEGEN_ARM)
    masm.setFramePushed(0); // set to zero so we can use masm.framePushed() below

    // Save all GPRs, except the stack pointer.
    masm.PushRegsInMask(LiveRegisterSet(
                            GeneralRegisterSet(Registers::AllMask & ~(1 << Registers::sp)),
                            FloatRegisterSet(uint32_t(0))));

    // Save both the APSR and FPSCR in non-volatile registers.
    masm.as_mrs(r4);
    masm.as_vmrs(r5);
    // Save the stack pointer in a non-volatile register.
    masm.mov(sp, r6);
    // Align the stack.
    masm.as_bic(sp, sp, Imm8(7));

    // Store resumePC into the return PC stack slot.
    masm.loadWasmActivationFromSymbolicAddress(IntArgReg0);
    masm.loadPtr(Address(IntArgReg0, WasmActivation::offsetOfResumePC()), IntArgReg1);
    masm.storePtr(IntArgReg1, Address(r6, 14 * sizeof(uint32_t*)));

    // Save all FP registers.
    static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
    masm.PushRegsInMask(LiveRegisterSet(GeneralRegisterSet(0),
                                        FloatRegisterSet(FloatRegisters::AllDoubleMask)));

    masm.assertStackAlignment(ABIStackAlignment);
    masm.call(SymbolicAddress::HandleExecutionInterrupt);

    masm.branchIfFalseBool(ReturnReg, throwLabel);

    // Restore the machine state to before the interrupt. This will set the pc!

    // Restore all FP registers.
    masm.PopRegsInMask(LiveRegisterSet(GeneralRegisterSet(0),
                                       FloatRegisterSet(FloatRegisters::AllDoubleMask)));
    masm.mov(r6, sp);
    masm.as_vmsr(r5);
    masm.as_msr(r4);
    // Restore all GP registers.
    masm.startDataTransferM(IsLoad, sp, IA, WriteBack);
    masm.transferReg(r0);
    masm.transferReg(r1);
    masm.transferReg(r2);
    masm.transferReg(r3);
    masm.transferReg(r4);
    masm.transferReg(r5);
    masm.transferReg(r6);
    masm.transferReg(r7);
    masm.transferReg(r8);
    masm.transferReg(r9);
    masm.transferReg(r10);
    masm.transferReg(r11);
    masm.transferReg(r12);
    masm.transferReg(lr);
    masm.finishDataTransfer();
    masm.ret();
#elif defined(JS_CODEGEN_ARM64)
    MOZ_CRASH();
#elif defined(JS_CODEGEN_NONE)
    MOZ_CRASH();
#else
# error "Unknown architecture!"
#endif

    offsets.end = masm.currentOffset();
    return offsets;
}

// Generate a stub that restores the stack pointer to what it was on entry to
// the wasm activation, sets the return register to 'false' and then executes a
// return which will return from this wasm activation to the caller. This stub
// should only be called after the caller has reported an error (or, in the case
// of the interrupt stub, intends to interrupt execution).
Offsets
wasm::GenerateThrowStub(MacroAssembler& masm, Label* throwLabel)
{
    masm.haltingAlign(CodeAlignment);

    masm.bind(throwLabel);

    Offsets offsets;
    offsets.begin = masm.currentOffset();

    // We are about to pop all frames in this WasmActivation. Set fp to null to
    // maintain the invariant that fp is either null or pointing to a valid
    // frame.
    Register scratch = ABINonArgReturnReg0;
    masm.loadWasmActivationFromSymbolicAddress(scratch);
    masm.storePtr(ImmWord(0), Address(scratch, WasmActivation::offsetOfFP()));

    masm.setFramePushed(FramePushedForEntrySP);
    masm.loadStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
    masm.Pop(scratch);
    masm.PopRegsInMask(NonVolatileRegs);
    MOZ_ASSERT(masm.framePushed() == 0);

    masm.mov(ImmWord(0), ReturnReg);
    masm.ret();

    offsets.end = masm.currentOffset();
    return offsets;
}