/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2015 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmStubs.h"

#include <algorithm>
#include <iterator>

#include "jit/ABIFunctions.h"
#include "jit/JitFrames.h"
#include "jit/JitScript.h"
#include "jit/RegisterAllocator.h"
#include "js/Printf.h"
#include "util/Memory.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmInstance.h"

#include "jit/ABIFunctionList-inl.h"
#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;
using namespace js::wasm;

using MIRTypeVector = Vector<jit::MIRType, 8, SystemAllocPolicy>;
using ABIArgMIRTypeIter = jit::ABIArgIter<MIRTypeVector>;

/*****************************************************************************/
// ABIResultIter implementation

static uint32_t ResultStackSize(ValType type) {
  switch (type.kind()) {
    case ValType::I32:
      return ABIResult::StackSizeOfInt32;
    case ValType::I64:
      return ABIResult::StackSizeOfInt64;
    case ValType::F32:
      return ABIResult::StackSizeOfFloat;
    case ValType::F64:
      return ABIResult::StackSizeOfDouble;
#ifdef ENABLE_WASM_SIMD
    case ValType::V128:
      return ABIResult::StackSizeOfV128;
#endif
    case ValType::Ref:
      return ABIResult::StackSizeOfPtr;
    default:
      MOZ_CRASH("Unexpected result type");
  }
}

uint32_t js::wasm::MIRTypeToABIResultSize(jit::MIRType type) {
  switch (type) {
    case MIRType::Int32:
      return ABIResult::StackSizeOfInt32;
    case MIRType::Int64:
      return ABIResult::StackSizeOfInt64;
    case MIRType::Float32:
      return ABIResult::StackSizeOfFloat;
    case MIRType::Double:
      return ABIResult::StackSizeOfDouble;
#ifdef ENABLE_WASM_SIMD
    case MIRType::Simd128:
      return ABIResult::StackSizeOfV128;
#endif
    case MIRType::Pointer:
    case MIRType::RefOrNull:
      return ABIResult::StackSizeOfPtr;
    default:
      MOZ_CRASH("MIRTypeToABIResultSize - unhandled case");
  }
}

uint32_t ABIResult::size() const { return ResultStackSize(type()); }

void ABIResultIter::settleRegister(ValType type) {
  MOZ_ASSERT(!done());
  MOZ_ASSERT_IF(direction_ == Next, index() < MaxRegisterResults);
  MOZ_ASSERT_IF(direction_ == Prev, index() >= count_ - MaxRegisterResults);
  static_assert(MaxRegisterResults == 1, "expected a single register result");

  switch (type.kind()) {
    case ValType::I32:
      cur_ = ABIResult(type, ReturnReg);
      break;
    case ValType::I64:
      cur_ = ABIResult(type, ReturnReg64);
      break;
    case ValType::F32:
      cur_ = ABIResult(type, ReturnFloat32Reg);
      break;
    case ValType::F64:
      cur_ = ABIResult(type, ReturnDoubleReg);
      break;
    case ValType::Rtt:
    case ValType::Ref:
      cur_ = ABIResult(type, ReturnReg);
      break;
#ifdef ENABLE_WASM_SIMD
    case ValType::V128:
      cur_ = ABIResult(type, ReturnSimd128Reg);
      break;
#endif
    default:
      MOZ_CRASH("Unexpected result type");
  }
}

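// Note on iteration order: in the Next direction, results are visited from
// the last type to the first (typeIndex counts down), so the final result
// lands in the register and any earlier results are assigned increasing
// stack offsets; the Prev direction mirrors this so nextStackOffset_ unwinds
// in step. A typical (illustrative) traversal:
//
//   for (ABIResultIter iter(resultType); !iter.done(); iter.next()) {
//     const ABIResult& r = iter.cur();
//     // r.inRegister() is true for the register result; otherwise the
//     // value lives at r.stackOffset().
//   }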
void ABIResultIter::settleNext() {
  MOZ_ASSERT(direction_ == Next);
  MOZ_ASSERT(!done());

  uint32_t typeIndex = count_ - index_ - 1;
  ValType type = type_[typeIndex];

  if (index_ < MaxRegisterResults) {
    settleRegister(type);
    return;
  }

  cur_ = ABIResult(type, nextStackOffset_);
  nextStackOffset_ += ResultStackSize(type);
}

void ABIResultIter::settlePrev() {
  MOZ_ASSERT(direction_ == Prev);
  MOZ_ASSERT(!done());
  uint32_t typeIndex = index_;
  ValType type = type_[typeIndex];

  if (count_ - index_ - 1 < MaxRegisterResults) {
    settleRegister(type);
    return;
  }

  uint32_t size = ResultStackSize(type);
  MOZ_ASSERT(nextStackOffset_ >= size);
  nextStackOffset_ -= size;
  cur_ = ABIResult(type, nextStackOffset_);
}

#ifdef WASM_CODEGEN_DEBUG
template <class Closure>
static void GenPrint(DebugChannel channel, MacroAssembler& masm,
                     const Maybe<Register>& taken, Closure passArgAndCall) {
  if (!IsCodegenDebugEnabled(channel)) {
    return;
  }

  AllocatableRegisterSet regs(RegisterSet::All());
  LiveRegisterSet save(regs.asLiveSet());
  masm.PushRegsInMask(save);

  if (taken) {
    regs.take(taken.value());
  }
  Register temp = regs.takeAnyGeneral();

  {
    MOZ_ASSERT(MaybeGetJitContext(),
               "codegen debug checks require a jit context");
    masm.setupUnalignedABICall(temp);
    passArgAndCall(IsCompilingWasm(), temp);
  }

  masm.PopRegsInMask(save);
}

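// Every GenPrint* helper below follows the pattern above: spill all
// registers, let the closure pass its arguments and pick the Print* callee
// (a wasm builtin via SymbolicAddress when compiling wasm, otherwise a
// direct C++ ABI call), then restore the registers.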
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
                      const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  UniqueChars str = JS_vsmprintf(fmt, ap);
  va_end(ap);

  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    // If we've gone this far, it means we're actually using the debugging
    // strings. In this case, we leak them! This is only for debugging, and
    // doing the right thing is cumbersome (in Ion, it'd mean adding a vector
    // of strings to the IonScript; in wasm, it'd mean adding it to the
    // current Module and serializing it properly).
    const char* text = str.release();

    masm.movePtr(ImmPtr((void*)text, ImmPtr::NoCheckToken()), temp);
    masm.passABIArg(temp);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintText);
    } else {
      using Fn = void (*)(const char* output);
      masm.callWithABI<Fn, PrintText>(MoveOp::GENERAL,
                                      CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
                          const Register& src) {
  GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
    masm.passABIArg(src);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintI32);
    } else {
      using Fn = void (*)(int32_t val);
      masm.callWithABI<Fn, PrintI32>(MoveOp::GENERAL,
                                     CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
                        const Register& src) {
  GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
    masm.passABIArg(src);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintPtr);
    } else {
      using Fn = void (*)(uint8_t* val);
      masm.callWithABI<Fn, PrintPtr>(MoveOp::GENERAL,
                                     CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
                        const Register64& src) {
#  if JS_BITS_PER_WORD == 64
  GenPrintf(channel, masm, "i64 ");
  GenPrintIsize(channel, masm, src.reg);
#  else
  GenPrintf(channel, masm, "i64(");
  GenPrintIsize(channel, masm, src.low);
  GenPrintIsize(channel, masm, src.high);
  GenPrintf(channel, masm, ") ");
#  endif
}

static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {
  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    masm.passABIArg(src, MoveOp::FLOAT32);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintF32);
    } else {
      using Fn = void (*)(float val);
      masm.callWithABI<Fn, PrintF32>(MoveOp::GENERAL,
                                     CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {
  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    masm.passABIArg(src, MoveOp::DOUBLE);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintF64);
    } else {
      using Fn = void (*)(double val);
      masm.callWithABI<Fn, PrintF64>(MoveOp::GENERAL,
                                     CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

#  ifdef ENABLE_WASM_SIMD
static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
                         const FloatRegister& src) {
  // TODO: We might try to do something meaningful here once SIMD data are
  // aligned and hence C++-ABI compliant.  For now, just make ourselves visible.
  GenPrintf(channel, masm, "v128");
}
#  endif
#else
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
                      const char* fmt, ...) {}
static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
                          const Register& src) {}
static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
                        const Register& src) {}
static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
                        const Register64& src) {}
static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {}
static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {}
#  ifdef ENABLE_WASM_SIMD
static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
                         const FloatRegister& src) {}
#  endif
#endif

static bool FinishOffsets(MacroAssembler& masm, Offsets* offsets) {
  // On old ARM hardware, constant pools could be inserted and they need to
  // be flushed before considering the size of the masm.
  masm.flushBuffer();
  offsets->end = masm.size();
  return !masm.oom();
}

static void AssertStackAlignment(MacroAssembler& masm, uint32_t alignment,
                                 uint32_t addBeforeAssert = 0) {
  MOZ_ASSERT(
      (sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
  masm.assertStackAlignment(alignment, addBeforeAssert);
}

template <class VectorT, template <class VecT> class ABIArgIterT>
static unsigned StackArgBytesHelper(const VectorT& args) {
  ABIArgIterT<VectorT> iter(args);
  while (!iter.done()) {
    iter++;
  }
  return iter.stackBytesConsumedSoFar();
}

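// Running the iterator to completion assigns every argument its ABI location;
// the total stack space consumed by stack-passed arguments is then available
// from stackBytesConsumedSoFar().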
template <class VectorT>
static unsigned StackArgBytesForNativeABI(const VectorT& args) {
  return StackArgBytesHelper<VectorT, ABIArgIter>(args);
}

template <class VectorT>
static unsigned StackArgBytesForWasmABI(const VectorT& args) {
  return StackArgBytesHelper<VectorT, WasmABIArgIter>(args);
}

static unsigned StackArgBytesForWasmABI(const FuncType& funcType) {
  ArgTypeVector args(funcType);
  return StackArgBytesForWasmABI(args);
}

static void Move64(MacroAssembler& masm, const Address& src,
                   const Address& dest, Register scratch) {
#if JS_BITS_PER_WORD == 32
  masm.load32(LowWord(src), scratch);
  masm.store32(scratch, LowWord(dest));
  masm.load32(HighWord(src), scratch);
  masm.store32(scratch, HighWord(dest));
#else
  Register64 scratch64(scratch);
  masm.load64(src, scratch64);
  masm.store64(scratch64, dest);
#endif
}

static void SetupABIArguments(MacroAssembler& masm, const FuncExport& fe,
                              Register argv, Register scratch) {
  // Copy parameters out of argv and into the registers/stack-slots specified
  // by the wasm ABI.
  //
  // SetupABIArguments is only used for C++ -> wasm calls through callExport(),
  // and V128 and Ref types (other than externref) are not currently allowed.
  ArgTypeVector args(fe.funcType());
  for (WasmABIArgIter iter(args); !iter.done(); iter++) {
    unsigned argOffset = iter.index() * sizeof(ExportArg);
    Address src(argv, argOffset);
    MIRType type = iter.mirType();
    switch (iter->kind()) {
      case ABIArg::GPR:
        if (type == MIRType::Int32) {
          masm.load32(src, iter->gpr());
        } else if (type == MIRType::Int64) {
          masm.load64(src, iter->gpr64());
        } else if (type == MIRType::RefOrNull) {
          masm.loadPtr(src, iter->gpr());
        } else if (type == MIRType::StackResults) {
          MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
          masm.loadPtr(src, iter->gpr());
        } else {
          MOZ_CRASH("unknown GPR type");
        }
        break;
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR:
        if (type == MIRType::Int64) {
          masm.load64(src, iter->gpr64());
        } else {
          MOZ_CRASH("wasm uses hardfp for function calls.");
        }
        break;
#endif
      case ABIArg::FPU: {
        static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
                      "ExportArg must be big enough to store SIMD values");
        switch (type) {
          case MIRType::Double:
            masm.loadDouble(src, iter->fpu());
            break;
          case MIRType::Float32:
            masm.loadFloat32(src, iter->fpu());
            break;
          case MIRType::Simd128:
#ifdef ENABLE_WASM_SIMD
            // This is only used by the testing invoke path,
            // wasmLosslessInvoke, and is guarded against in normal JS-API
            // call paths.
            masm.loadUnalignedSimd128(src, iter->fpu());
            break;
#else
            MOZ_CRASH("V128 not supported in SetupABIArguments");
#endif
          default:
            MOZ_CRASH("unexpected FPU type");
            break;
        }
        break;
      }
      case ABIArg::Stack:
        switch (type) {
          case MIRType::Int32:
            masm.load32(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          case MIRType::Int64: {
            RegisterOrSP sp = masm.getStackPointer();
            Move64(masm, src, Address(sp, iter->offsetFromArgBase()), scratch);
            break;
          }
          case MIRType::RefOrNull:
            masm.loadPtr(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          case MIRType::Double: {
            ScratchDoubleScope fpscratch(masm);
            masm.loadDouble(src, fpscratch);
            masm.storeDouble(fpscratch, Address(masm.getStackPointer(),
                                                iter->offsetFromArgBase()));
            break;
          }
          case MIRType::Float32: {
            ScratchFloat32Scope fpscratch(masm);
            masm.loadFloat32(src, fpscratch);
            masm.storeFloat32(fpscratch, Address(masm.getStackPointer(),
                                                 iter->offsetFromArgBase()));
            break;
          }
          case MIRType::Simd128: {
#ifdef ENABLE_WASM_SIMD
            // This is only used by the testing invoke path,
            // wasmLosslessInvoke, and is guarded against in normal JS-API
            // call paths.
            ScratchSimd128Scope fpscratch(masm);
            masm.loadUnalignedSimd128(src, fpscratch);
            masm.storeUnalignedSimd128(
                fpscratch,
                Address(masm.getStackPointer(), iter->offsetFromArgBase()));
            break;
#else
            MOZ_CRASH("V128 not supported in SetupABIArguments");
#endif
          }
          case MIRType::StackResults: {
            MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
            masm.loadPtr(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          }
          default:
            MOZ_CRASH("unexpected stack arg type");
        }
        break;
      case ABIArg::Uninitialized:
        MOZ_CRASH("Uninitialized ABIArg kind");
    }
  }
}

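// Stack results, if any, are written by the wasm callee through the synthetic
// stack-result pointer argument; only the lone register result (recall
// MaxRegisterResults == 1) needs to be stored to *loc here.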
static void StoreRegisterResult(MacroAssembler& masm, const FuncExport& fe,
                                Register loc) {
  ResultType results = ResultType::Vector(fe.funcType().results());
  DebugOnly<bool> sawRegisterResult = false;
  for (ABIResultIter iter(results); !iter.done(); iter.next()) {
    const ABIResult& result = iter.cur();
    if (result.inRegister()) {
      MOZ_ASSERT(!sawRegisterResult);
      sawRegisterResult = true;
      switch (result.type().kind()) {
        case ValType::I32:
          masm.store32(result.gpr(), Address(loc, 0));
          break;
        case ValType::I64:
          masm.store64(result.gpr64(), Address(loc, 0));
          break;
        case ValType::V128:
#ifdef ENABLE_WASM_SIMD
          masm.storeUnalignedSimd128(result.fpr(), Address(loc, 0));
          break;
#else
          MOZ_CRASH("V128 not supported in StoreABIReturn");
#endif
        case ValType::F32:
          masm.canonicalizeFloat(result.fpr());
          masm.storeFloat32(result.fpr(), Address(loc, 0));
          break;
        case ValType::F64:
          masm.canonicalizeDouble(result.fpr());
          masm.storeDouble(result.fpr(), Address(loc, 0));
          break;
        case ValType::Rtt:
        case ValType::Ref:
          masm.storePtr(result.gpr(), Address(loc, 0));
          break;
      }
    }
  }
  MOZ_ASSERT(sawRegisterResult == (results.length() > 0));
}

#if defined(JS_CODEGEN_ARM)
// The ARM system ABI also includes d15 & s31 in the non-volatile float
// registers. Also exclude lr (a.k.a. r14) as we preserve it manually.
static const LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
    GeneralRegisterSet(Registers::NonVolatileMask &
                       ~(Registers::SetType(1) << Registers::lr)),
    FloatRegisterSet(FloatRegisters::NonVolatileMask |
                     (FloatRegisters::SetType(1) << FloatRegisters::d15) |
                     (FloatRegisters::SetType(1) << FloatRegisters::s31)));
#elif defined(JS_CODEGEN_ARM64)
// Exclude the Link Register (x30) because it is preserved manually.
//
// Include x16 (scratch) to make a 16-byte aligned amount of integer registers.
// Include d31 (scratch) to make a 16-byte aligned amount of floating registers.
static const LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
    GeneralRegisterSet((Registers::NonVolatileMask &
                        ~(Registers::SetType(1) << Registers::lr)) |
                       (Registers::SetType(1) << Registers::x16)),
    FloatRegisterSet(FloatRegisters::NonVolatileMask |
                     FloatRegisters::NonAllocatableMask));
#else
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask));
#endif

static const unsigned NumExtraPushed = 2;  // tls and argv

#ifdef JS_CODEGEN_ARM64
static const unsigned WasmPushSize = 16;
#else
static const unsigned WasmPushSize = sizeof(void*);
#endif

static void AssertExpectedSP(MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
  MOZ_ASSERT(sp.Is(masm.GetStackPointer64()));
#  ifdef DEBUG
  // Since we're asserting that SP is the currently active stack pointer,
  // let's also in effect assert that PSP is dead -- by setting it to 1, so
  // as to cause any attempts to use it to segfault in an easily identifiable
  // way.
  masm.asVIXL().Mov(PseudoStackPointer64, 1);
#  endif
#endif
}

template <class Operand>
static void WasmPush(MacroAssembler& masm, const Operand& op) {
#ifdef JS_CODEGEN_ARM64
  // Allocate a pad word so that SP can remain properly aligned.  |op| will be
  // written at the lower-addressed of the two words pushed here.
  masm.reserveStack(WasmPushSize);
  masm.storePtr(op, Address(masm.getStackPointer(), 0));
#else
  masm.Push(op);
#endif
}

static void WasmPop(MacroAssembler& masm, Register r) {
#ifdef JS_CODEGEN_ARM64
  // Also pop the pad word allocated by WasmPush.
  masm.loadPtr(Address(masm.getStackPointer(), 0), r);
  masm.freeStack(WasmPushSize);
#else
  masm.Pop(r);
#endif
}

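// WasmPush and WasmPop must be used in matched pairs: on ARM64 each push
// reserves a 16-byte slot (the value plus a pad word), so each pop frees the
// same WasmPushSize it reserved.

// On ARM64, code following the JIT ABI addresses the stack through the
// PseudoStackPointer, so it is synced from the real SP before such transfers.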
static void MoveSPForJitABI(MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
  masm.moveStackPtrTo(PseudoStackPointer);
#endif
}

static void CallFuncExport(MacroAssembler& masm, const FuncExport& fe,
                           const Maybe<ImmPtr>& funcPtr) {
  MOZ_ASSERT(fe.hasEagerStubs() == !funcPtr);
  MoveSPForJitABI(masm);
  if (funcPtr) {
    masm.call(*funcPtr);
  } else {
    masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
  }
}

STATIC_ASSERT_ANYREF_IS_JSOBJECT;  // Strings are currently boxed

// Unboxing is branchy and contorted because of Spectre mitigations - we don't
// have enough scratch registers.  Were it not for the spectre mitigations in
// branchTestObjClass, the branch nest below would be restructured significantly
// by inverting branches and using fewer registers.

// Unbox an anyref in src (clobbering src in the process) and then re-box it as
// a Value in *dst.  See the definition of AnyRef for a discussion of pointer
// representation.
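// A null src maps to NullValue; a WasmValueBox (recognized via the
// valueBoxClass stored in TlsData) has its boxed Value copied out; any other
// non-null pointer is stored as an object Value.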
static void UnboxAnyrefIntoValue(MacroAssembler& masm, Register tls,
                                 Register src, const Address& dst,
                                 Register scratch) {
  MOZ_ASSERT(src != scratch);

  // Not actually the value we're passing, but we've no way of
  // decoding anything better.
  GenPrintPtr(DebugChannel::Import, masm, src);

  Label notNull, mustUnbox, done;
  masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
  masm.storeValue(NullValue(), dst);
  masm.jump(&done);

  masm.bind(&notNull);
  // The type test will clear src if the test fails, so store early.
  masm.storeValue(JSVAL_TYPE_OBJECT, src, dst);
  // Spectre mitigations: see comment above about efficiency.
  masm.branchTestObjClass(Assembler::Equal, src,
                          Address(tls, offsetof(TlsData, valueBoxClass)),
                          scratch, src, &mustUnbox);
  masm.jump(&done);

  masm.bind(&mustUnbox);
  Move64(masm, Address(src, WasmValueBox::offsetOfValue()), dst, scratch);

  masm.bind(&done);
}

// Unbox an anyref in src and then re-box it as a Value in dst.
// See the definition of AnyRef for a discussion of pointer representation.
static void UnboxAnyrefIntoValueReg(MacroAssembler& masm, Register tls,
                                    Register src, ValueOperand dst,
                                    Register scratch) {
  MOZ_ASSERT(src != scratch);
#if JS_BITS_PER_WORD == 32
  MOZ_ASSERT(dst.typeReg() != scratch);
  MOZ_ASSERT(dst.payloadReg() != scratch);
#else
  MOZ_ASSERT(dst.valueReg() != scratch);
#endif

  // Not actually the value we're passing, but we've no way of
  // decoding anything better.
  GenPrintPtr(DebugChannel::Import, masm, src);

  Label notNull, mustUnbox, done;
  masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
  masm.moveValue(NullValue(), dst);
  masm.jump(&done);

  masm.bind(&notNull);
  // The type test will clear src if the test fails, so store early.
  masm.moveValue(TypedOrValueRegister(MIRType::Object, AnyRegister(src)), dst);
  // Spectre mitigations: see comment above about efficiency.
  masm.branchTestObjClass(Assembler::Equal, src,
                          Address(tls, offsetof(TlsData, valueBoxClass)),
                          scratch, src, &mustUnbox);
  masm.jump(&done);

  masm.bind(&mustUnbox);
  masm.loadValue(Address(src, WasmValueBox::offsetOfValue()), dst);

  masm.bind(&done);
}

// Box the Value in src as an anyref in dest.  src and dest must not overlap.
// See the definition of AnyRef for a discussion of pointer representation.
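// Values that are neither object nor null jump to |oolConvert| for
// out-of-line handling.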
static void BoxValueIntoAnyref(MacroAssembler& masm, ValueOperand src,
                               Register dest, Label* oolConvert) {
  Label nullValue, objectValue, done;
  {
    ScratchTagScope tag(masm, src);
    masm.splitTagForTest(src, tag);
    masm.branchTestObject(Assembler::Equal, tag, &objectValue);
    masm.branchTestNull(Assembler::Equal, tag, &nullValue);
    masm.jump(oolConvert);
  }

  masm.bind(&nullValue);
  masm.xorPtr(dest, dest);
  masm.jump(&done);

  masm.bind(&objectValue);
  masm.unboxObject(src, dest);

  masm.bind(&done);
}

// Generate a stub that enters wasm from a C++ caller via the native ABI. The
// signature of the entry point is Module::ExportFuncPtr. The exported wasm
// function has an ABI derived from its specific signature, so this function
// must map from the ABI of ExportFuncPtr to the export's signature's ABI.
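//
// At its deepest, the frame built below looks roughly like this (sp grows
// downward; the saved-SP slot is absent on ARM64, where alignment is
// maintained statically):
//
//   [return address, on link-register platforms]
//   [non-volatile register save area]
//   [saved WasmTlsReg]
//   [saved argv]
//   [saved pre-alignment SP]        (not on ARM64)
//   [alignment padding]
//   [stack args for the wasm callee]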
static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
                                const Maybe<ImmPtr>& funcPtr,
                                Offsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);

  offsets->begin = masm.currentOffset();

  // Save the return address if it wasn't already saved by the call insn.
#ifdef JS_USE_LINK_REGISTER
#  if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || \
      defined(JS_CODEGEN_MIPS64)
  masm.pushReturnAddress();
#  elif defined(JS_CODEGEN_ARM64)
  // WasmPush updates framePushed() unlike pushReturnAddress(), but that's
  // cancelled by the setFramePushed() below.
  WasmPush(masm, lr);
#  else
  MOZ_CRASH("Implement this");
#  endif
#endif

  // Save all caller non-volatile registers before we clobber them here and in
  // the wasm callee (which does not preserve non-volatile registers).
  masm.setFramePushed(0);
  masm.PushRegsInMask(NonVolatileRegs);

  const unsigned nonVolatileRegsPushSize =
      masm.PushRegsInMaskSizeInBytes(NonVolatileRegs);

  MOZ_ASSERT(masm.framePushed() == nonVolatileRegsPushSize);

  // Put the 'argv' argument into a non-argument/return/TLS register so that
  // we can use 'argv' while we fill in the arguments for the wasm callee.
  // Use a second non-argument/return register as temporary scratch.
  Register argv = ABINonArgReturnReg0;
  Register scratch = ABINonArgReturnReg1;

  // Read the arguments of wasm::ExportFuncPtr according to the native ABI.
  // The entry stub's frame is 1 word.
  const unsigned argBase = sizeof(void*) + masm.framePushed();
  ABIArgGenerator abi;
  ABIArg arg;

  // arg 1: ExportArg*
  arg = abi.next(MIRType::Pointer);
  if (arg.kind() == ABIArg::GPR) {
    masm.movePtr(arg.gpr(), argv);
  } else {
    masm.loadPtr(
        Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
        argv);
  }

  // Arg 2: TlsData*
  arg = abi.next(MIRType::Pointer);
  if (arg.kind() == ABIArg::GPR) {
    masm.movePtr(arg.gpr(), WasmTlsReg);
  } else {
    masm.loadPtr(
        Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
        WasmTlsReg);
  }

  WasmPush(masm, WasmTlsReg);

  // Save 'argv' on the stack so that we can recover it after the call.
  WasmPush(masm, argv);

  // Since we're about to dynamically align the stack, reset the frame depth
  // so we can still assert static stack depth balancing.
  const unsigned framePushedBeforeAlign =
      nonVolatileRegsPushSize + NumExtraPushed * WasmPushSize;

  MOZ_ASSERT(masm.framePushed() == framePushedBeforeAlign);
  masm.setFramePushed(0);

  // Dynamically align the stack since ABIStackAlignment is not necessarily
  // WasmStackAlignment. Preserve SP so it can be restored after the call.
#ifdef JS_CODEGEN_ARM64
  static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
#else
  masm.moveStackPtrTo(scratch);
  masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
  masm.Push(scratch);
#endif

  // Reserve stack space for the wasm call.
  unsigned argDecrement =
      StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
                            StackArgBytesForWasmABI(fe.funcType()));
  masm.reserveStack(argDecrement);

  // Copy parameters out of argv and into the wasm ABI registers/stack-slots.
  SetupABIArguments(masm, fe, argv, scratch);

  // Setup wasm register state. The nullness of the frame pointer is used to
  // determine whether the call ended in success or failure.
  masm.movePtr(ImmWord(0), FramePointer);
  masm.loadWasmPinnedRegsFromTls();

  masm.storePtr(WasmTlsReg,
                Address(masm.getStackPointer(), WasmCalleeTLSOffsetBeforeCall));

  // Call into the real function. Note that, due to the throw stub, fp, tls
  // and pinned registers may be clobbered.
  masm.assertStackAlignment(WasmStackAlignment);
  CallFuncExport(masm, fe, funcPtr);
  masm.assertStackAlignment(WasmStackAlignment);

  // Pop the arguments pushed after the dynamic alignment.
  masm.freeStack(argDecrement);

  // Pop the stack pointer to its value right before dynamic alignment.
#ifdef JS_CODEGEN_ARM64
  static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
#else
  masm.PopStackPtr();
#endif
  MOZ_ASSERT(masm.framePushed() == 0);
  masm.setFramePushed(framePushedBeforeAlign);

  // Recover the 'argv' pointer which was saved before aligning the stack.
  WasmPop(masm, argv);

  WasmPop(masm, WasmTlsReg);

  // Store the register result, if any, in argv[0].
  // No spectre.index_masking is required, as the value leaves ReturnReg.
  StoreRegisterResult(masm, fe, argv);

  // After the ReturnReg is stored into argv[0] but before fp is clobbered by
  // the PopRegsInMask(NonVolatileRegs) below, set the return value based on
  // whether fp is null (which is the case for successful returns) or the
  // FailFP magic value (set by the throw stub).
  Label success, join;
  masm.branchTestPtr(Assembler::Zero, FramePointer, FramePointer, &success);
#ifdef DEBUG
  Label ok;
  masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &ok);
  masm.breakpoint();
  masm.bind(&ok);
#endif
  masm.move32(Imm32(false), ReturnReg);
  masm.jump(&join);
  masm.bind(&success);
  masm.move32(Imm32(true), ReturnReg);
  masm.bind(&join);

  // Restore clobbered non-volatile registers of the caller.
  masm.PopRegsInMask(NonVolatileRegs);
  MOZ_ASSERT(masm.framePushed() == 0);

#if defined(JS_CODEGEN_ARM64)
  masm.setFramePushed(WasmPushSize);
  WasmPop(masm, lr);
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}

#ifdef JS_PUNBOX64
static const ValueOperand ScratchValIonEntry = ValueOperand(ABINonArgReg0);
#else
static const ValueOperand ScratchValIonEntry =
    ValueOperand(ABINonArgReg0, ABINonArgReg1);
#endif
static const Register ScratchIonEntry = ABINonArgReg2;

static void CallSymbolicAddress(MacroAssembler& masm, bool isAbsolute,
                                SymbolicAddress sym) {
  if (isAbsolute) {
    masm.call(ImmPtr(SymbolicAddressTarget(sym), ImmPtr::NoCheckToken()));
  } else {
    masm.call(sym);
  }
}

// Load instance's TLS from the callee.
static void GenerateJitEntryLoadTls(MacroAssembler& masm, unsigned frameSize) {
  AssertExpectedSP(masm);

  // ScratchIonEntry := callee => JSFunction*
  unsigned offset = frameSize + JitFrameLayout::offsetOfCalleeToken();
  masm.loadFunctionFromCalleeToken(Address(masm.getStackPointer(), offset),
                                   ScratchIonEntry);

  // ScratchIonEntry := callee->getExtendedSlot(WASM_TLSDATA_SLOT)->toPrivate()
  //                 => TlsData*
  offset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_TLSDATA_SLOT);
  masm.loadPrivate(Address(ScratchIonEntry, offset), WasmTlsReg);
}

// Creates a JS fake exit frame for wasm, so the frame iterators just use
// JSJit frame iteration.
static void GenerateJitEntryThrow(MacroAssembler& masm, unsigned frameSize) {
  AssertExpectedSP(masm);

  MOZ_ASSERT(masm.framePushed() == frameSize);

  GenerateJitEntryLoadTls(masm, frameSize);

  masm.freeStack(frameSize);
  MoveSPForJitABI(masm);

  masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, cx)), ScratchIonEntry);
  masm.enterFakeExitFrameForWasm(ScratchIonEntry, ScratchIonEntry,
                                 ExitFrameType::WasmGenericJitEntry);

  masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)),
               ScratchIonEntry);
  masm.loadPtr(
      Address(ScratchIonEntry, Instance::offsetOfJSJitExceptionHandler()),
      ScratchIonEntry);
  masm.jump(ScratchIonEntry);
}

// Helper function for allocating a BigInt and initializing it from an I64
// in GenerateJitEntry and GenerateImportInterpExit. The return result is
// written to scratch.
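// A null BigInt pointer (allocation failure) branches to |fail|; only on
// success is the I64 payload stored into the result.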
static void GenerateBigIntInitialization(MacroAssembler& masm,
                                         unsigned bytesPushedByPrologue,
                                         Register64 input, Register scratch,
                                         const FuncExport* fe, Label* fail) {
#if JS_BITS_PER_WORD == 32
  MOZ_ASSERT(input.low != scratch);
  MOZ_ASSERT(input.high != scratch);
#else
  MOZ_ASSERT(input.reg != scratch);
#endif

  // We need to avoid clobbering other argument registers and the input.
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  masm.PushRegsInMask(save);

  unsigned frameSize = StackDecrementForCall(
      ABIStackAlignment, masm.framePushed() + bytesPushedByPrologue, 0);
  masm.reserveStack(frameSize);
  masm.assertStackAlignment(ABIStackAlignment);

  // Needs to use a different call type depending on the stub it's used from.
  if (fe) {
    CallSymbolicAddress(masm, !fe->hasEagerStubs(),
                        SymbolicAddress::AllocateBigInt);
  } else {
    masm.call(SymbolicAddress::AllocateBigInt);
  }
  masm.storeCallPointerResult(scratch);

  masm.assertStackAlignment(ABIStackAlignment);
  masm.freeStack(frameSize);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.branchTest32(Assembler::Zero, scratch, scratch, fail);
  masm.initializeBigInt64(Scalar::BigInt64, scratch, input);
}

// Generate a stub that enters wasm from a jit code caller via the jit ABI.
//
// ARM64 note: This does not save the PseudoStackPointer so we must be sure to
// recompute it on every return path, be it normal return or exception return.
// The JIT code we return to assumes it is correct.

static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
                             const FuncExport& fe, const Maybe<ImmPtr>& funcPtr,
                             Offsets* offsets) {
  AssertExpectedSP(masm);

  RegisterOrSP sp = masm.getStackPointer();

  GenerateJitEntryPrologue(masm, offsets);

  // The jit caller has set up the following stack layout (sp grows to the
  // left):
  // <-- retAddr | descriptor | callee | argc | this | arg1..N

  unsigned normalBytesNeeded = StackArgBytesForWasmABI(fe.funcType());

  MIRTypeVector coerceArgTypes;
  MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Int32));
  MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
  MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
  unsigned oolBytesNeeded = StackArgBytesForWasmABI(coerceArgTypes);

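  // The frame must be large enough for both the wasm call and the
  // out-of-line coercion call below, since both run within the same
  // reserved frame.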
  unsigned bytesNeeded = std::max(normalBytesNeeded, oolBytesNeeded);

  // Note the jit caller ensures the stack is aligned *after* the call
  // instruction.
  unsigned frameSize = StackDecrementForCall(WasmStackAlignment,
                                             masm.framePushed(), bytesNeeded);

  // Reserve stack space for wasm ABI arguments, set up like this:
  // <-- ABI args | padding
  masm.reserveStack(frameSize);

  GenerateJitEntryLoadTls(masm, frameSize);

  if (fe.funcType().hasUnexposableArgOrRet()) {
    CallSymbolicAddress(masm, !fe.hasEagerStubs(),
                        SymbolicAddress::ReportV128JSCall);
    GenerateJitEntryThrow(masm, frameSize);
    return FinishOffsets(masm, offsets);
  }

  FloatRegister scratchF = ABINonArgDoubleReg;
  Register scratchG = ScratchIonEntry;
  ValueOperand scratchV = ScratchValIonEntry;

  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
            fe.funcIndex());

  // We do two loops:
  // - one loop up-front will make sure that all the Value tags fit the
  // expected signature argument types. If at least one inline conversion
  // fails, we just jump to the OOL path, which will call into C++. Inline
  // conversions are ordered by how often we expect them to occur.
  // - the second loop will unbox the arguments into the right registers.
  Label oolCall;
  for (size_t i = 0; i < fe.funcType().args().length(); i++) {
    unsigned jitArgOffset = frameSize + JitFrameLayout::offsetOfActualArg(i);
    Address jitArgAddr(sp, jitArgOffset);
    masm.loadValue(jitArgAddr, scratchV);

    Label next;
    switch (fe.funcType().args()[i].kind()) {
      case ValType::I32: {
        ScratchTagScope tag(masm, scratchV);
        masm.splitTagForTest(scratchV, tag);

        // For int32 inputs, just skip.
        masm.branchTestInt32(Assembler::Equal, tag, &next);

        // For double inputs, unbox, truncate and store back.
        Label storeBack, notDouble;
        masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
        {
          ScratchTagScopeRelease _(&tag);
          masm.unboxDouble(scratchV, scratchF);
          masm.branchTruncateDoubleMaybeModUint32(scratchF, scratchG, &oolCall);
          masm.jump(&storeBack);
        }
        masm.bind(&notDouble);

        // For null or undefined, store 0.
        Label nullOrUndefined, notNullOrUndefined;
        masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);
        masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
        masm.bind(&nullOrUndefined);
        {
          ScratchTagScopeRelease _(&tag);
          masm.storeValue(Int32Value(0), jitArgAddr);
        }
        masm.jump(&next);
        masm.bind(&notNullOrUndefined);

        // For booleans, store the number value back. Other types (symbol,
        // object, strings) go to the C++ call.
        masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
        masm.unboxBoolean(scratchV, scratchG);
        // fallthrough:

        masm.bind(&storeBack);
        {
          ScratchTagScopeRelease _(&tag);
          masm.storeValue(JSVAL_TYPE_INT32, scratchG, jitArgAddr);
        }
        break;
      }
      case ValType::I64: {
        ScratchTagScope tag(masm, scratchV);
        masm.splitTagForTest(scratchV, tag);

        // For BigInt inputs, just skip. Otherwise go to C++ for other
        // types that require creating a new BigInt or erroring.
        masm.branchTestBigInt(Assembler::NotEqual, tag, &oolCall);
        masm.jump(&next);
        break;
      }
      case ValType::F32:
      case ValType::F64: {
        // Note we can reuse the same code for f32/f64 here, since for the
        // case of f32, the conversion of f64 to f32 will happen in the
        // second loop.
        ScratchTagScope tag(masm, scratchV);
        masm.splitTagForTest(scratchV, tag);

        // For double inputs, just skip.
        masm.branchTestDouble(Assembler::Equal, tag, &next);

        // For int32 inputs, convert and rebox.
        Label storeBack, notInt32;
        {
          ScratchTagScopeRelease _(&tag);
          masm.branchTestInt32(Assembler::NotEqual, scratchV, &notInt32);
          masm.int32ValueToDouble(scratchV, scratchF);
          masm.jump(&storeBack);
        }
        masm.bind(&notInt32);

        // For undefined (missing argument), store NaN.
        Label notUndefined;
        masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
        {
          ScratchTagScopeRelease _(&tag);
          masm.storeValue(DoubleValue(JS::GenericNaN()), jitArgAddr);
          masm.jump(&next);
        }
        masm.bind(&notUndefined);

        // +null is 0.
        Label notNull;
        masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
        {
          ScratchTagScopeRelease _(&tag);
          masm.storeValue(DoubleValue(0.), jitArgAddr);
        }
        masm.jump(&next);
        masm.bind(&notNull);

        // For booleans, store the number value back. Other types (symbol,
        // object, strings) go to the C++ call.
        masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
        masm.boolValueToDouble(scratchV, scratchF);
        // fallthrough:

        masm.bind(&storeBack);
        {
          ScratchTagScopeRelease _(&tag);
          masm.boxDouble(scratchF, jitArgAddr);
        }
        break;
      }
      case ValType::Ref: {
        switch (fe.funcType().args()[i].refTypeKind()) {
          case RefType::Extern: {
            ScratchTagScope tag(masm, scratchV);
            masm.splitTagForTest(scratchV, tag);

            // For object inputs, we handle object and null inline, everything
            // else requires an actual box and we go out of line to allocate
            // that.
            masm.branchTestObject(Assembler::Equal, tag, &next);
            masm.branchTestNull(Assembler::Equal, tag, &next);
            masm.jump(&oolCall);
            break;
          }
          case RefType::Func:
          case RefType::Eq:
          case RefType::TypeIndex: {
            // Guarded against by temporarilyUnsupportedReftypeForEntry()
            MOZ_CRASH("unexpected argument type when calling from the jit");
          }
        }
        break;
      }
      case ValType::V128: {
        // Guarded against by hasUnexposableArgOrRet()
        MOZ_CRASH("unexpected argument type when calling from the jit");
      }
      default: {
        MOZ_CRASH("unexpected argument type when calling from the jit");
      }
    }
    masm.nopAlign(CodeAlignment);
    masm.bind(&next);
  }

  Label rejoinBeforeCall;
  masm.bind(&rejoinBeforeCall);

  // Convert all the expected values to unboxed values on the stack.
  ArgTypeVector args(fe.funcType());
  for (WasmABIArgIter iter(args); !iter.done(); iter++) {
    unsigned jitArgOffset =
        frameSize + JitFrameLayout::offsetOfActualArg(iter.index());
    Address argv(sp, jitArgOffset);
    bool isStackArg = iter->kind() == ABIArg::Stack;
    switch (iter.mirType()) {
      case MIRType::Int32: {
        Register target = isStackArg ? ScratchIonEntry : iter->gpr();
        masm.unboxInt32(argv, target);
        GenPrintIsize(DebugChannel::Function, masm, target);
        if (isStackArg) {
          masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      case MIRType::Int64: {
        // The coercion has provided a BigInt value by this point, which
        // we need to convert to an I64 here.
        if (isStackArg) {
          Address dst(sp, iter->offsetFromArgBase());
          Register src = scratchV.payloadOrValueReg();
#if JS_BITS_PER_WORD == 64
          Register64 scratch64(scratchG);
#else
          Register64 scratch64(scratchG, ABINonArgReg3);
#endif
          masm.unboxBigInt(argv, src);
          masm.loadBigInt64(src, scratch64);
          GenPrintI64(DebugChannel::Function, masm, scratch64);
          masm.store64(scratch64, dst);
        } else {
          Register src = scratchG;
          Register64 target = iter->gpr64();
          masm.unboxBigInt(argv, src);
          masm.loadBigInt64(src, target);
          GenPrintI64(DebugChannel::Function, masm, target);
        }
        break;
      }
      case MIRType::Float32: {
        FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
        masm.unboxDouble(argv, ABINonArgDoubleReg);
        masm.convertDoubleToFloat32(ABINonArgDoubleReg, target);
        GenPrintF32(DebugChannel::Function, masm, target.asSingle());
        if (isStackArg) {
          masm.storeFloat32(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      case MIRType::Double: {
        FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
        masm.unboxDouble(argv, target);
        GenPrintF64(DebugChannel::Function, masm, target);
        if (isStackArg) {
          masm.storeDouble(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      case MIRType::RefOrNull: {
        Register target = isStackArg ? ScratchIonEntry : iter->gpr();
        masm.unboxObjectOrNull(argv, target);
        GenPrintPtr(DebugChannel::Function, masm, target);
        if (isStackArg) {
          masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      default: {
        MOZ_CRASH("unexpected input argument when calling from jit");
      }
    }
  }

  GenPrintf(DebugChannel::Function, masm, "\n");

  // Setup wasm register state.
  masm.loadWasmPinnedRegsFromTls();

  masm.storePtr(WasmTlsReg,
                Address(masm.getStackPointer(), WasmCalleeTLSOffsetBeforeCall));

  // Call into the real function. Note that, due to the throw stub, fp, tls
  // and pinned registers may be clobbered.
  masm.assertStackAlignment(WasmStackAlignment);
  CallFuncExport(masm, fe, funcPtr);
  masm.assertStackAlignment(WasmStackAlignment);

  // If fp is equal to the FailFP magic value (set by the throw stub), then
  // report the exception to the JIT caller by jumping into the exception
  // stub; otherwise the FP value is still set to the parent ion frame value.
  Label exception;
  masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &exception);

  // Pop arguments.
  masm.freeStack(frameSize);

  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
            fe.funcIndex());

  // Store the return value in the JSReturnOperand.
  const ValTypeVector& results = fe.funcType().results();
  if (results.length() == 0) {
    GenPrintf(DebugChannel::Function, masm, "void");
    masm.moveValue(UndefinedValue(), JSReturnOperand);
  } else {
    MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
    switch (results[0].kind()) {
      case ValType::I32:
        GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
        // No spectre.index_masking is required, as the value is boxed.
        masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
        break;
      case ValType::F32: {
        masm.canonicalizeFloat(ReturnFloat32Reg);
        masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
        GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
        break;
      }
      case ValType::F64: {
        masm.canonicalizeDouble(ReturnDoubleReg);
        GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
        break;
      }
      case ValType::I64: {
        Label fail, done;
        GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
        GenerateBigIntInitialization(masm, 0, ReturnReg64, scratchG, &fe,
                                     &fail);
        masm.boxNonDouble(JSVAL_TYPE_BIGINT, scratchG, JSReturnOperand);
        masm.jump(&done);
        masm.bind(&fail);
        // Fixup the stack for the exception tail so that we can share it.
        masm.reserveStack(frameSize);
        masm.jump(&exception);
        masm.bind(&done);
        // Un-fixup the stack for the benefit of the assertion below.
        masm.setFramePushed(0);
        break;
      }
      case ValType::Rtt:
      case ValType::V128: {
        MOZ_CRASH("unexpected return type when calling from ion to wasm");
      }
      case ValType::Ref: {
        switch (results[0].refTypeKind()) {
          case RefType::Func:
          case RefType::Eq:
            // For FuncRef and EqRef use the AnyRef path for now, since that
            // will work.
          case RefType::Extern:
            // Per comment above, the call may have clobbered the Tls register,
            // so reload since unboxing will need it.
            GenerateJitEntryLoadTls(masm, /* frameSize */ 0);
            UnboxAnyrefIntoValueReg(masm, WasmTlsReg, ReturnReg,
                                    JSReturnOperand, WasmJitEntryReturnScratch);
            break;
          case RefType::TypeIndex:
            MOZ_CRASH("unexpected return type when calling from ion to wasm");
        }
        break;
      }
    }
  }

  GenPrintf(DebugChannel::Function, masm, "\n");

  MOZ_ASSERT(masm.framePushed() == 0);
#ifdef JS_CODEGEN_ARM64
  AssertExpectedSP(masm);
  masm.loadPtr(Address(sp, 0), lr);
  masm.addToStackPtr(Imm32(8));
  // Copy SP into PSP to enforce return-point invariants (SP == PSP).
  // `addToStackPtr` won't sync them because SP is the active pointer here.
  // For the same reason, we can't use initPseudoStackPtr to do the sync, so
  // we have to do it "by hand".  Omitting this causes many tests to segfault.
  masm.moveStackPtrTo(PseudoStackPointer);
  masm.abiret();
#else
  masm.ret();
#endif

  // Generate an OOL call to the C++ conversion path.
  if (fe.funcType().args().length()) {
    masm.bind(&oolCall);
    masm.setFramePushed(frameSize);

1386     // Baseline and Ion call the C++ runtime via the BuiltinThunk using the
1387     // wasm ABI, so to keep its interface uniform we call it here the same way.
1388     jit::WasmABIArgIter<MIRTypeVector> argsIter(coerceArgTypes);
1389 
1390     // argument 0: function export index.
1391     if (argsIter->kind() == ABIArg::GPR) {
1392       masm.movePtr(ImmWord(funcExportIndex), argsIter->gpr());
1393     } else {
1394       masm.storePtr(ImmWord(funcExportIndex),
1395                     Address(sp, argsIter->offsetFromArgBase()));
1396     }
1397     argsIter++;
1398 
1399     // argument 1: tlsData
1400     if (argsIter->kind() == ABIArg::GPR) {
1401       masm.movePtr(WasmTlsReg, argsIter->gpr());
1402     } else {
1403       masm.storePtr(WasmTlsReg, Address(sp, argsIter->offsetFromArgBase()));
1404     }
1405     argsIter++;
1406 
1407     // argument 2: effective address of start of argv
1408     Address argv(sp, masm.framePushed() + JitFrameLayout::offsetOfActualArg(0));
1409     if (argsIter->kind() == ABIArg::GPR) {
1410       masm.computeEffectiveAddress(argv, argsIter->gpr());
1411     } else {
1412       masm.computeEffectiveAddress(argv, ScratchIonEntry);
1413       masm.storePtr(ScratchIonEntry,
1414                     Address(sp, argsIter->offsetFromArgBase()));
1415     }
1416     argsIter++;
1417     MOZ_ASSERT(argsIter.done());
1418 
1419     masm.assertStackAlignment(ABIStackAlignment);
1420     CallSymbolicAddress(masm, !fe.hasEagerStubs(),
1421                         SymbolicAddress::CoerceInPlace_JitEntry);
1422     masm.assertStackAlignment(ABIStackAlignment);
1423 
1424     // No spectre.index_masking is required, as the return value is used as a
1425     // bool.
1426     masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg,
1427                       &rejoinBeforeCall);
1428   }
1429 
1430   // Prepare to throw: reload WasmTlsReg from the frame.
1431   masm.bind(&exception);
1432   masm.setFramePushed(frameSize);
1433   GenerateJitEntryThrow(masm, frameSize);
1434 
1435   return FinishOffsets(masm, offsets);
1436 }
1437 
1438 void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe,
1439                                      const Instance& inst,
1440                                      const JitCallStackArgVector& stackArgs,
1441                                      bool profilingEnabled, Register scratch,
1442                                      uint32_t* callOffset) {
1443   MOZ_ASSERT(!IsCompilingWasm());
1444 
1445   size_t framePushedAtStart = masm.framePushed();
1446 
1447   if (profilingEnabled) {
1448     // FramePointer isn't volatile; preserve it manually because it will be
1449     // clobbered below.
1450     masm.Push(FramePointer);
1451   } else {
1452 #ifdef DEBUG
1453     // Ensure that the FramePointer is actually Ion-volatile. This might
1454     // assert when bug 1426134 lands.
1455     AllocatableRegisterSet set(RegisterSet::All());
1456     TakeJitRegisters(/* profiling */ false, &set);
1457     MOZ_ASSERT(set.has(FramePointer),
1458                "replace the whole if branch by the then body when this fails");
1459 #endif
1460   }
1461 
1462   // Note, if code here pushes a reference value into the frame for its own
1463   // purposes (and not just as an argument to the callee) then the frame must be
1464   // traced in TraceJitExitFrame, see the case there for DirectWasmJitCall.  The
1465   // callee will trace values that are pushed as arguments, however.
1466 
1467   // Push a special frame descriptor that indicates the frame size so we can
1468   // directly iterate from the current JIT frame without an extra call.
1469   *callOffset = masm.buildFakeExitFrame(scratch);
1470   masm.loadJSContext(scratch);
1471 
1472   masm.moveStackPtrTo(FramePointer);
1473   masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::DirectWasmJitCall);
1474   masm.orPtr(Imm32(ExitOrJitEntryFPTag), FramePointer);
1475 
1476   // Move stack arguments to their final locations.
1477   unsigned bytesNeeded = StackArgBytesForWasmABI(fe.funcType());
1478   bytesNeeded = StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
1479                                       bytesNeeded);
1480   if (bytesNeeded) {
1481     masm.reserveStack(bytesNeeded);
1482   }
1483 
1484   GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
1485             fe.funcIndex());
1486 
1487   ArgTypeVector args(fe.funcType());
1488   for (WasmABIArgIter iter(args); !iter.done(); iter++) {
1489     MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != scratch);
1490     MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != FramePointer);
1491     if (iter->kind() != ABIArg::Stack) {
1492       switch (iter.mirType()) {
1493         case MIRType::Int32:
1494           GenPrintIsize(DebugChannel::Function, masm, iter->gpr());
1495           break;
1496         case MIRType::Int64:
1497           GenPrintI64(DebugChannel::Function, masm, iter->gpr64());
1498           break;
1499         case MIRType::Float32:
1500           GenPrintF32(DebugChannel::Function, masm, iter->fpu());
1501           break;
1502         case MIRType::Double:
1503           GenPrintF64(DebugChannel::Function, masm, iter->fpu());
1504           break;
1505         case MIRType::RefOrNull:
1506           GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
1507           break;
1508         case MIRType::StackResults:
1509           MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
1510           GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
1511           break;
1512         default:
1513           MOZ_CRASH("ion to wasm fast path can only handle i32/f32/f64");
1514       }
1515       continue;
1516     }
1517 
1518     Address dst(masm.getStackPointer(), iter->offsetFromArgBase());
1519 
1520     const JitCallStackArg& stackArg = stackArgs[iter.index()];
1521     switch (stackArg.tag()) {
1522       case JitCallStackArg::Tag::Imm32:
1523         GenPrintf(DebugChannel::Function, masm, "%d ", stackArg.imm32());
1524         masm.storePtr(ImmWord(stackArg.imm32()), dst);
1525         break;
1526       case JitCallStackArg::Tag::GPR:
1527         MOZ_ASSERT(stackArg.gpr() != scratch);
1528         MOZ_ASSERT(stackArg.gpr() != FramePointer);
1529         GenPrintIsize(DebugChannel::Function, masm, stackArg.gpr());
1530         masm.storePtr(stackArg.gpr(), dst);
1531         break;
1532       case JitCallStackArg::Tag::FPU:
1533         switch (iter.mirType()) {
1534           case MIRType::Double:
1535             GenPrintF64(DebugChannel::Function, masm, stackArg.fpu());
1536             masm.storeDouble(stackArg.fpu(), dst);
1537             break;
1538           case MIRType::Float32:
1539             GenPrintF32(DebugChannel::Function, masm, stackArg.fpu());
1540             masm.storeFloat32(stackArg.fpu(), dst);
1541             break;
1542           default:
1543             MOZ_CRASH(
1544                 "unexpected MIR type for a float register in wasm fast call");
1545         }
1546         break;
1547       case JitCallStackArg::Tag::Address: {
1548         // The address offsets were valid *before* we pushed our frame.
1549         Address src = stackArg.addr();
1550         src.offset += masm.framePushed() - framePushedAtStart;
1551         switch (iter.mirType()) {
1552           case MIRType::Double: {
1553             ScratchDoubleScope fpscratch(masm);
1554             masm.loadDouble(src, fpscratch);
1555             GenPrintF64(DebugChannel::Function, masm, fpscratch);
1556             masm.storeDouble(fpscratch, dst);
1557             break;
1558           }
1559           case MIRType::Float32: {
1560             ScratchFloat32Scope fpscratch(masm);
1561             masm.loadFloat32(src, fpscratch);
1562             GenPrintF32(DebugChannel::Function, masm, fpscratch);
1563             masm.storeFloat32(fpscratch, dst);
1564             break;
1565           }
1566           case MIRType::Int32: {
1567             masm.loadPtr(src, scratch);
1568             GenPrintIsize(DebugChannel::Function, masm, scratch);
1569             masm.storePtr(scratch, dst);
1570             break;
1571           }
1572           case MIRType::RefOrNull: {
1573             masm.loadPtr(src, scratch);
1574             GenPrintPtr(DebugChannel::Function, masm, scratch);
1575             masm.storePtr(scratch, dst);
1576             break;
1577           }
1578           case MIRType::StackResults: {
1579             MOZ_CRASH("multi-value in ion to wasm fast path unimplemented");
1580           }
1581           default: {
1582             MOZ_CRASH("unexpected MIR type for a stack slot in wasm fast call");
1583           }
1584         }
1585         break;
1586       }
1587       case JitCallStackArg::Tag::Undefined: {
1588         MOZ_CRASH("can't happen because of arg.kind() check");
1589       }
1590     }
1591   }
1592 
1593   GenPrintf(DebugChannel::Function, masm, "\n");
1594 
1595   // Load tls; from now on, WasmTlsReg is live.
1596   masm.movePtr(ImmPtr(inst.tlsData()), WasmTlsReg);
1597   masm.storePtr(WasmTlsReg,
1598                 Address(masm.getStackPointer(), WasmCalleeTLSOffsetBeforeCall));
1599   masm.loadWasmPinnedRegsFromTls();
1600 
1601   // Actual call.
1602   const CodeTier& codeTier = inst.code().codeTier(inst.code().bestTier());
1603   const MetadataTier& metadata = codeTier.metadata();
1604   const CodeRange& codeRange = metadata.codeRange(fe);
1605   void* callee = codeTier.segment().base() + codeRange.funcUncheckedCallEntry();
1606 
1607   masm.assertStackAlignment(WasmStackAlignment);
1608   MoveSPForJitABI(masm);
1609   masm.callJit(ImmPtr(callee));
1610 #ifdef JS_CODEGEN_ARM64
1611   // WASM does not always keep PSP in sync with SP.  So reinitialize it as it
1612   // might be clobbered either by WASM or by any C++ calls within.
1613   masm.initPseudoStackPtr();
1614 #endif
1615   masm.assertStackAlignment(WasmStackAlignment);
1616 
1617   masm.branchPtr(Assembler::Equal, FramePointer, Imm32(wasm::FailFP),
1618                  masm.exceptionLabel());
1619 
1620   // Store the return value in the appropriate place.
1621   GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
1622             fe.funcIndex());
1623   const ValTypeVector& results = fe.funcType().results();
1624   if (results.length() == 0) {
1625     masm.moveValue(UndefinedValue(), JSReturnOperand);
1626     GenPrintf(DebugChannel::Function, masm, "void");
1627   } else {
1628     MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
1629     switch (results[0].kind()) {
1630       case wasm::ValType::I32:
1631         // The return value is in ReturnReg, which is what Ion expects.
1632         GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
1633 #if defined(JS_CODEGEN_X64)
1634         if (JitOptions.spectreIndexMasking) {
1635           masm.movl(ReturnReg, ReturnReg);
1636         }
1637 #endif
1638         break;
1639       case wasm::ValType::I64:
1640         // The return value is in ReturnReg64, which is what Ion expects.
1641         GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
1642         break;
1643       case wasm::ValType::F32:
1644         masm.canonicalizeFloat(ReturnFloat32Reg);
1645         GenPrintF32(DebugChannel::Function, masm, ReturnFloat32Reg);
1646         break;
1647       case wasm::ValType::F64:
1648         masm.canonicalizeDouble(ReturnDoubleReg);
1649         GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
1650         break;
1651       case wasm::ValType::Ref:
1652         switch (results[0].refTypeKind()) {
1653           case wasm::RefType::Func:
1654           case wasm::RefType::Eq:
1655             // For FuncRef and EqRef, use the AnyRef path for now, since that
1656             // will work.
1657           case wasm::RefType::Extern:
1658             // The call to wasm above preserves WasmTlsReg, so we don't
1659             // need to reload it here.
1660             UnboxAnyrefIntoValueReg(masm, WasmTlsReg, ReturnReg,
1661                                     JSReturnOperand, WasmJitEntryReturnScratch);
1662             break;
1663           case wasm::RefType::TypeIndex:
1664             MOZ_CRASH("unexpected return type when calling from ion to wasm");
1665         }
1666         break;
1667       case wasm::ValType::Rtt:
1668       case wasm::ValType::V128:
1669         MOZ_CRASH("unexpected return type when calling from ion to wasm");
1670     }
1671   }
1672 
1673   GenPrintf(DebugChannel::Function, masm, "\n");
1674 
1675   // Free args + frame descriptor.
1676   masm.leaveExitFrame(bytesNeeded + ExitFrameLayout::Size());
1677 
1678   // If we pushed it, free FramePointer.
1679   if (profilingEnabled) {
1680     masm.Pop(FramePointer);
1681   }
1682 
1683   MOZ_ASSERT(framePushedAtStart == masm.framePushed());
1684 }
1685 
1686 static void StackCopy(MacroAssembler& masm, MIRType type, Register scratch,
1687                       Address src, Address dst) {
1688   if (type == MIRType::Int32) {
1689     masm.load32(src, scratch);
1690     GenPrintIsize(DebugChannel::Import, masm, scratch);
1691     masm.store32(scratch, dst);
1692   } else if (type == MIRType::Int64) {
1693 #if JS_BITS_PER_WORD == 32
1694     GenPrintf(DebugChannel::Import, masm, "i64(");
1695     masm.load32(LowWord(src), scratch);
1696     GenPrintIsize(DebugChannel::Import, masm, scratch);
1697     masm.store32(scratch, LowWord(dst));
1698     masm.load32(HighWord(src), scratch);
1699     GenPrintIsize(DebugChannel::Import, masm, scratch);
1700     masm.store32(scratch, HighWord(dst));
1701     GenPrintf(DebugChannel::Import, masm, ") ");
1702 #else
1703     Register64 scratch64(scratch);
1704     masm.load64(src, scratch64);
1705     GenPrintIsize(DebugChannel::Import, masm, scratch);
1706     masm.store64(scratch64, dst);
1707 #endif
1708   } else if (type == MIRType::RefOrNull || type == MIRType::Pointer ||
1709              type == MIRType::StackResults) {
1710     masm.loadPtr(src, scratch);
1711     GenPrintPtr(DebugChannel::Import, masm, scratch);
1712     masm.storePtr(scratch, dst);
1713   } else if (type == MIRType::Float32) {
1714     ScratchFloat32Scope fpscratch(masm);
1715     masm.loadFloat32(src, fpscratch);
1716     GenPrintF32(DebugChannel::Import, masm, fpscratch);
1717     masm.storeFloat32(fpscratch, dst);
1718   } else if (type == MIRType::Double) {
1719     ScratchDoubleScope fpscratch(masm);
1720     masm.loadDouble(src, fpscratch);
1721     GenPrintF64(DebugChannel::Import, masm, fpscratch);
1722     masm.storeDouble(fpscratch, dst);
1723 #ifdef ENABLE_WASM_SIMD
1724   } else if (type == MIRType::Simd128) {
1725     ScratchSimd128Scope fpscratch(masm);
1726     masm.loadUnalignedSimd128(src, fpscratch);
1727     GenPrintV128(DebugChannel::Import, masm, fpscratch);
1728     masm.storeUnalignedSimd128(fpscratch, dst);
1729 #endif
1730   } else {
1731     MOZ_CRASH("StackCopy: unexpected type");
1732   }
1733 }
1734 
1735 using ToValue = bool;
1736 
1737 // Note that when toValue is true, this may destroy the values in incoming
1738 // argument registers as a result of Spectre mitigation.
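// A sketch of how the two exits below use these modes (the names are the
// ones already defined in this file): GenerateImportInterpExit passes
// ToValue(false), so raw machine values are stored into Value-sized argv
// slots, while GenerateImportJitExit passes ToValue(true), so each argument
// is boxed as a JS::Value in the outgoing JIT frame.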
1739 static void FillArgumentArrayForExit(
1740     MacroAssembler& masm, Register tls, unsigned funcImportIndex,
1741     const FuncType& funcType, unsigned argOffset,
1742     unsigned offsetFromFPToCallerStackArgs, Register scratch, Register scratch2,
1743     Register scratch3, ToValue toValue, Label* throwLabel) {
1744   MOZ_ASSERT(scratch != scratch2);
1745   MOZ_ASSERT(scratch != scratch3);
1746   MOZ_ASSERT(scratch2 != scratch3);
1747 
1748   // This loop does not root the values that are being constructed for
1749   // the arguments. Allocations that are generated by code either in the
1750   // loop or called from it should be NoGC allocations.
1751   GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; arguments ",
1752             funcImportIndex);
1753 
1754   ArgTypeVector args(funcType);
1755   for (ABIArgIter i(args); !i.done(); i++) {
1756     Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
1757 
1758     MIRType type = i.mirType();
1759     MOZ_ASSERT(args.isSyntheticStackResultPointerArg(i.index()) ==
1760                (type == MIRType::StackResults));
1761     switch (i->kind()) {
1762       case ABIArg::GPR:
1763         if (type == MIRType::Int32) {
1764           GenPrintIsize(DebugChannel::Import, masm, i->gpr());
1765           if (toValue) {
1766             masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
1767           } else {
1768             masm.store32(i->gpr(), dst);
1769           }
1770         } else if (type == MIRType::Int64) {
1771           GenPrintI64(DebugChannel::Import, masm, i->gpr64());
1772 
1773           if (toValue) {
1774             GenerateBigIntInitialization(masm, offsetFromFPToCallerStackArgs,
1775                                          i->gpr64(), scratch, nullptr,
1776                                          throwLabel);
1777             masm.storeValue(JSVAL_TYPE_BIGINT, scratch, dst);
1778           } else {
1779             masm.store64(i->gpr64(), dst);
1780           }
1781         } else if (type == MIRType::RefOrNull) {
1782           if (toValue) {
1783             // This works also for FuncRef because it is distinguishable from
1784             // a boxed AnyRef.
1785             masm.movePtr(i->gpr(), scratch2);
1786             UnboxAnyrefIntoValue(masm, tls, scratch2, dst, scratch);
1787           } else {
1788             GenPrintPtr(DebugChannel::Import, masm, i->gpr());
1789             masm.storePtr(i->gpr(), dst);
1790           }
1791         } else if (type == MIRType::StackResults) {
1792           MOZ_ASSERT(!toValue, "Multi-result exit to JIT unimplemented");
1793           GenPrintPtr(DebugChannel::Import, masm, i->gpr());
1794           masm.storePtr(i->gpr(), dst);
1795         } else {
1796           MOZ_CRASH("FillArgumentArrayForExit, ABIArg::GPR: unexpected type");
1797         }
1798         break;
1799 #ifdef JS_CODEGEN_REGISTER_PAIR
1800       case ABIArg::GPR_PAIR:
1801         if (type == MIRType::Int64) {
1802           GenPrintI64(DebugChannel::Import, masm, i->gpr64());
1803 
1804           if (toValue) {
1805             GenerateBigIntInitialization(masm, offsetFromFPToCallerStackArgs,
1806                                          i->gpr64(), scratch, nullptr,
1807                                          throwLabel);
1808             masm.storeValue(JSVAL_TYPE_BIGINT, scratch, dst);
1809           } else {
1810             masm.store64(i->gpr64(), dst);
1811           }
1812         } else {
1813           MOZ_CRASH("wasm uses hardfp for function calls.");
1814         }
1815         break;
1816 #endif
1817       case ABIArg::FPU: {
1818         FloatRegister srcReg = i->fpu();
1819         if (type == MIRType::Double) {
1820           if (toValue) {
1821             // Preserve the NaN pattern in the input.
1822             ScratchDoubleScope fpscratch(masm);
1823             masm.moveDouble(srcReg, fpscratch);
1824             masm.canonicalizeDouble(fpscratch);
1825             GenPrintF64(DebugChannel::Import, masm, fpscratch);
1826             masm.boxDouble(fpscratch, dst);
1827           } else {
1828             GenPrintF64(DebugChannel::Import, masm, srcReg);
1829             masm.storeDouble(srcReg, dst);
1830           }
1831         } else if (type == MIRType::Float32) {
1832           if (toValue) {
1833             // JS::Values can't store Float32, so convert to a Double.
1834             ScratchDoubleScope fpscratch(masm);
1835             masm.convertFloat32ToDouble(srcReg, fpscratch);
1836             masm.canonicalizeDouble(fpscratch);
1837             GenPrintF64(DebugChannel::Import, masm, fpscratch);
1838             masm.boxDouble(fpscratch, dst);
1839           } else {
1840             // Preserve the NaN pattern in the input.
1841             GenPrintF32(DebugChannel::Import, masm, srcReg);
1842             masm.storeFloat32(srcReg, dst);
1843           }
1844         } else if (type == MIRType::Simd128) {
1845           // The value should never escape; the call will be stopped later as
1846           // the import is being called.  But we should generate something sane
1847           // here for the boxed case since a debugger or the stack walker may
1848           // observe something.
1849           ScratchDoubleScope dscratch(masm);
1850           masm.loadConstantDouble(0, dscratch);
1851           GenPrintF64(DebugChannel::Import, masm, dscratch);
1852           if (toValue) {
1853             masm.boxDouble(dscratch, dst);
1854           } else {
1855             masm.storeDouble(dscratch, dst);
1856           }
1857         } else {
1858           MOZ_CRASH("Unknown MIRType in wasm exit stub");
1859         }
1860         break;
1861       }
1862       case ABIArg::Stack: {
1863         Address src(FramePointer,
1864                     offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
1865         if (toValue) {
1866           if (type == MIRType::Int32) {
1867             masm.load32(src, scratch);
1868             GenPrintIsize(DebugChannel::Import, masm, scratch);
1869             masm.storeValue(JSVAL_TYPE_INT32, scratch, dst);
1870           } else if (type == MIRType::Int64) {
1871 #if JS_BITS_PER_WORD == 64
1872             Register64 scratch64(scratch2);
1873 #else
1874             Register64 scratch64(scratch2, scratch3);
1875 #endif
1876             masm.load64(src, scratch64);
1877             GenPrintI64(DebugChannel::Import, masm, scratch64);
1878             GenerateBigIntInitialization(masm, sizeof(Frame), scratch64,
1879                                          scratch, nullptr, throwLabel);
1880             masm.storeValue(JSVAL_TYPE_BIGINT, scratch, dst);
1881           } else if (type == MIRType::RefOrNull) {
1882             // This works also for FuncRef because it is distinguishable from a
1883             // boxed AnyRef.
1884             masm.loadPtr(src, scratch);
1885             UnboxAnyrefIntoValue(masm, tls, scratch, dst, scratch2);
1886           } else if (IsFloatingPointType(type)) {
1887             ScratchDoubleScope dscratch(masm);
1888             FloatRegister fscratch = dscratch.asSingle();
1889             if (type == MIRType::Float32) {
1890               masm.loadFloat32(src, fscratch);
1891               masm.convertFloat32ToDouble(fscratch, dscratch);
1892             } else {
1893               masm.loadDouble(src, dscratch);
1894             }
1895             masm.canonicalizeDouble(dscratch);
1896             GenPrintF64(DebugChannel::Import, masm, dscratch);
1897             masm.boxDouble(dscratch, dst);
1898           } else if (type == MIRType::Simd128) {
1899             // The value should never escape; the call will be stopped later as
1900             // the import is being called.  But we should generate something
1901             // sane here for the boxed case since a debugger or the stack walker
1902             // may observe something.
1903             ScratchDoubleScope dscratch(masm);
1904             masm.loadConstantDouble(0, dscratch);
1905             GenPrintF64(DebugChannel::Import, masm, dscratch);
1906             masm.boxDouble(dscratch, dst);
1907           } else {
1908             MOZ_CRASH(
1909                 "FillArgumentArrayForExit, ABIArg::Stack: unexpected type");
1910           }
1911         } else {
1912           if (type == MIRType::Simd128) {
1913             // As above.  StackCopy does not know this trick.
1914             ScratchDoubleScope dscratch(masm);
1915             masm.loadConstantDouble(0, dscratch);
1916             GenPrintF64(DebugChannel::Import, masm, dscratch);
1917             masm.storeDouble(dscratch, dst);
1918           } else {
1919             StackCopy(masm, type, scratch, src, dst);
1920           }
1921         }
1922         break;
1923       }
1924       case ABIArg::Uninitialized:
1925         MOZ_CRASH("Uninitialized ABIArg kind");
1926     }
1927   }
1928   GenPrintf(DebugChannel::Import, masm, "\n");
1929 }
1930 
1931 // Generate a wrapper function with the standard intra-wasm call ABI which
1932 // simply calls an import. This wrapper function allows any import to be treated
1933 // like a normal wasm function for the purposes of exports and table calls. In
1934 // particular, the wrapper function provides:
1935 //  - a table entry, so JS imports can be put into tables
1936 //  - normal entries, so that, if the import is re-exported, an entry stub can
1937 //    be generated and called without any special cases
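// Schematically, the wrapper adds one ordinary wasm-ABI hop in front of the
// exit stubs (a sketch of the resulting call chain, not extra machinery):
//
//   wasm caller (direct or table call)
//     -> this import wrapper (standard intra-wasm call ABI)
//       -> import exit stub (interp or jit exit, via wasmCallImport)
//         -> the imported JS or C++ function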
1938 static bool GenerateImportFunction(jit::MacroAssembler& masm,
1939                                    const FuncImport& fi, TypeIdDesc funcTypeId,
1940                                    FuncOffsets* offsets) {
1941   AssertExpectedSP(masm);
1942 
1943   GenerateFunctionPrologue(masm, funcTypeId, Nothing(), offsets);
1944 
1945   MOZ_ASSERT(masm.framePushed() == 0);
1946   const unsigned sizeOfTlsSlot = sizeof(void*);
1947   unsigned framePushed = StackDecrementForCall(
1948       WasmStackAlignment,
1949       sizeof(Frame),  // pushed by prologue
1950       StackArgBytesForWasmABI(fi.funcType()) + sizeOfTlsSlot);
1951   masm.wasmReserveStackChecked(framePushed, BytecodeOffset(0));
1952   MOZ_ASSERT(masm.framePushed() == framePushed);
1953 
1954   masm.storePtr(WasmTlsReg,
1955                 Address(masm.getStackPointer(), framePushed - sizeOfTlsSlot));
1956 
1957   // The argument register state is already setup by our caller. We just need
1958   // to be sure not to clobber it before the call.
1959   Register scratch = ABINonArgReg0;
1960 
1961   // Copy our frame's stack arguments to the callee's stack argument area.
1962   unsigned offsetFromFPToCallerStackArgs = sizeof(Frame);
1963   ArgTypeVector args(fi.funcType());
1964   for (WasmABIArgIter i(args); !i.done(); i++) {
1965     if (i->kind() != ABIArg::Stack) {
1966       continue;
1967     }
1968 
1969     Address src(FramePointer,
1970                 offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
1971     Address dst(masm.getStackPointer(), i->offsetFromArgBase());
1972     GenPrintf(DebugChannel::Import, masm,
1973               "calling exotic import function with arguments: ");
1974     StackCopy(masm, i.mirType(), scratch, src, dst);
1975     GenPrintf(DebugChannel::Import, masm, "\n");
1976   }
1977 
1978   // Call the import exit stub.
1979   CallSiteDesc desc(CallSiteDesc::Dynamic);
1980   MoveSPForJitABI(masm);
1981   masm.wasmCallImport(desc, CalleeDesc::import(fi.tlsDataOffset()));
1982 
1983   // Restore the TLS register and pinned regs, per wasm function ABI.
1984   masm.loadPtr(Address(masm.getStackPointer(), framePushed - sizeOfTlsSlot),
1985                WasmTlsReg);
1986   masm.loadWasmPinnedRegsFromTls();
1987 
1988   // Restore cx->realm.
1989   masm.switchToWasmTlsRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
1990 
1991   GenerateFunctionEpilogue(masm, framePushed, offsets);
1992   return FinishOffsets(masm, offsets);
1993 }
1994 
1995 static const unsigned STUBS_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
1996 
1997 bool wasm::GenerateImportFunctions(const ModuleEnvironment& env,
1998                                    const FuncImportVector& imports,
1999                                    CompiledCode* code) {
2000   LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
2001   TempAllocator alloc(&lifo);
2002   WasmMacroAssembler masm(alloc, env);
2003 
2004   for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
2005     const FuncImport& fi = imports[funcIndex];
2006 
2007     FuncOffsets offsets;
2008     if (!GenerateImportFunction(masm, fi, *env.funcs[funcIndex].typeId,
2009                                 &offsets)) {
2010       return false;
2011     }
2012     if (!code->codeRanges.emplaceBack(funcIndex, /* bytecodeOffset = */ 0,
2013                                       offsets)) {
2014       return false;
2015     }
2016   }
2017 
2018   masm.finish();
2019   if (masm.oom()) {
2020     return false;
2021   }
2022 
2023   return code->swap(masm);
2024 }
2025 
2026 // Generate a stub that is called via the internal ABI derived from the
2027 // signature of the import and calls into an appropriate callImport C++
2028 // function, having boxed all the ABI arguments into a homogeneous Value array.
2029 static bool GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi,
2030                                      uint32_t funcImportIndex,
2031                                      Label* throwLabel,
2032                                      CallableOffsets* offsets) {
2033   AssertExpectedSP(masm);
2034   masm.setFramePushed(0);
2035 
2036   // Argument types for Instance::callImport_*:
2037   static const MIRType typeArray[] = {MIRType::Pointer,   // Instance*
2038                                       MIRType::Pointer,   // funcImportIndex
2039                                       MIRType::Int32,     // argc
2040                                       MIRType::Pointer};  // argv
2041   MIRTypeVector invokeArgTypes;
2042   MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, std::size(typeArray)));
2043 
2044   // At the point of the call, the stack layout shall be (sp grows to the left):
2045   //  | stack args | padding | argv[] | padding | retaddr | caller stack args |
2046   // The padding between stack args and argv ensures that argv is aligned. The
2047   // padding between argv and retaddr ensures that sp is aligned.
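  // As a rough sketch of the arithmetic below, assuming an x64-like native
  // ABI where all four invokeArgTypes arguments are passed in registers:
  // StackArgBytesForNativeABI(invokeArgTypes) is then 0, so argv begins at
  // sp+0; an import taking a single f64 needs one Value-sized argv slot, and
  // any remaining padding comes from aligning the total frame decrement to
  // ABIStackAlignment.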
2048   unsigned argOffset =
2049       AlignBytes(StackArgBytesForNativeABI(invokeArgTypes), sizeof(double));
2050   // The abiArgCount includes a stack result pointer argument if needed.
2051   unsigned abiArgCount = ArgTypeVector(fi.funcType()).lengthWithStackResults();
2052   unsigned argBytes = std::max<size_t>(1, abiArgCount) * sizeof(Value);
2053   unsigned framePushed =
2054       StackDecrementForCall(ABIStackAlignment,
2055                             sizeof(Frame),  // pushed by prologue
2056                             argOffset + argBytes);
2057 
2058   GenerateExitPrologue(masm, framePushed, ExitReason::Fixed::ImportInterp,
2059                        offsets);
2060 
2061   // Fill the argument array.
2062   unsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
2063   Register scratch = ABINonArgReturnReg0;
2064   Register scratch2 = ABINonArgReturnReg1;
2065   // The scratch3 reg does not need to be non-volatile, but has to be
2066   // distinct from scratch & scratch2.
2067   Register scratch3 = ABINonVolatileReg;
2068   FillArgumentArrayForExit(masm, WasmTlsReg, funcImportIndex, fi.funcType(),
2069                            argOffset, offsetFromFPToCallerStackArgs, scratch,
2070                            scratch2, scratch3, ToValue(false), throwLabel);
2071 
2072   // Prepare the arguments for the call to Instance::callImport_*.
2073   ABIArgMIRTypeIter i(invokeArgTypes);
2074 
2075   // argument 0: Instance*
2076   Address instancePtr(WasmTlsReg, offsetof(TlsData, instance));
2077   if (i->kind() == ABIArg::GPR) {
2078     masm.loadPtr(instancePtr, i->gpr());
2079   } else {
2080     masm.loadPtr(instancePtr, scratch);
2081     masm.storePtr(scratch,
2082                   Address(masm.getStackPointer(), i->offsetFromArgBase()));
2083   }
2084   i++;
2085 
2086   // argument 1: funcImportIndex
2087   if (i->kind() == ABIArg::GPR) {
2088     masm.mov(ImmWord(funcImportIndex), i->gpr());
2089   } else {
2090     masm.store32(Imm32(funcImportIndex),
2091                  Address(masm.getStackPointer(), i->offsetFromArgBase()));
2092   }
2093   i++;
2094 
2095   // argument 2: argc
2096   unsigned argc = abiArgCount;
2097   if (i->kind() == ABIArg::GPR) {
2098     masm.mov(ImmWord(argc), i->gpr());
2099   } else {
2100     masm.store32(Imm32(argc),
2101                  Address(masm.getStackPointer(), i->offsetFromArgBase()));
2102   }
2103   i++;
2104 
2105   // argument 3: argv
2106   Address argv(masm.getStackPointer(), argOffset);
2107   if (i->kind() == ABIArg::GPR) {
2108     masm.computeEffectiveAddress(argv, i->gpr());
2109   } else {
2110     masm.computeEffectiveAddress(argv, scratch);
2111     masm.storePtr(scratch,
2112                   Address(masm.getStackPointer(), i->offsetFromArgBase()));
2113   }
2114   i++;
2115   MOZ_ASSERT(i.done());
2116 
2117   // Make the call, test whether it succeeded, and extract the return value.
2118   AssertStackAlignment(masm, ABIStackAlignment);
2119   masm.call(SymbolicAddress::CallImport_General);
2120   masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
2121 
2122   ResultType resultType = ResultType::Vector(fi.funcType().results());
2123   ValType registerResultType;
2124   for (ABIResultIter iter(resultType); !iter.done(); iter.next()) {
2125     if (iter.cur().inRegister()) {
2126       MOZ_ASSERT(!registerResultType.isValid());
2127       registerResultType = iter.cur().type();
2128     }
2129   }
2130   if (!registerResultType.isValid()) {
2131     GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2132               funcImportIndex);
2133     GenPrintf(DebugChannel::Import, masm, "void");
2134   } else {
2135     switch (registerResultType.kind()) {
2136       case ValType::I32:
2137         masm.load32(argv, ReturnReg);
2138         // No spectre.index_masking is required, as we know the value comes from
2139         // an i32 load.
2140         GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2141                   funcImportIndex);
2142         GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
2143         break;
2144       case ValType::I64:
2145         masm.load64(argv, ReturnReg64);
2146         GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2147                   funcImportIndex);
2148         GenPrintI64(DebugChannel::Import, masm, ReturnReg64);
2149         break;
2150       case ValType::Rtt:
2151       case ValType::V128:
2152         // Note, CallImport_Rtt/V128 currently always throws, so we should never
2153         // reach this point.
2154         masm.breakpoint();
2155         break;
2156       case ValType::F32:
2157         masm.loadFloat32(argv, ReturnFloat32Reg);
2158         GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2159                   funcImportIndex);
2160         GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
2161         break;
2162       case ValType::F64:
2163         masm.loadDouble(argv, ReturnDoubleReg);
2164         GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2165                   funcImportIndex);
2166         GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
2167         break;
2168       case ValType::Ref:
2169         switch (registerResultType.refTypeKind()) {
2170           case RefType::Func:
2171             masm.loadPtr(argv, ReturnReg);
2172             GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2173                       funcImportIndex);
2174             GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
2175             break;
2176           case RefType::Extern:
2177           case RefType::Eq:
2178             masm.loadPtr(argv, ReturnReg);
2179             GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2180                       funcImportIndex);
2181             GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
2182             break;
2183           case RefType::TypeIndex:
2184             MOZ_CRASH("No Ref support here yet");
2185         }
2186         break;
2187     }
2188   }
2189 
2190   GenPrintf(DebugChannel::Import, masm, "\n");
2191 
2192   // The native ABI preserves the TLS, heap and global registers since they
2193   // are non-volatile.
2194   MOZ_ASSERT(NonVolatileRegs.has(WasmTlsReg));
2195 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) ||      \
2196     defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
2197     defined(JS_CODEGEN_MIPS64)
2198   MOZ_ASSERT(NonVolatileRegs.has(HeapReg));
2199 #endif
2200 
2201   GenerateExitEpilogue(masm, framePushed, ExitReason::Fixed::ImportInterp,
2202                        offsets);
2203 
2204   return FinishOffsets(masm, offsets);
2205 }
2206 
2207 // Generate a stub that is called via the internal ABI derived from the
2208 // signature of the import and calls into a compatible JIT function,
2209 // having boxed all the ABI arguments into the JIT stack frame layout.
2210 static bool GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi,
2211                                   unsigned funcImportIndex, Label* throwLabel,
2212                                   JitExitOffsets* offsets) {
2213   AssertExpectedSP(masm);
2214   masm.setFramePushed(0);
2215 
2216   // JIT calls use the following stack layout (sp grows to the left):
2217   //   | WasmToJSJitFrameLayout | this | arg1..N | saved Tls |
2218   // Unlike most ABIs, the JIT ABI requires that sp be JitStackAlignment-
2219   // aligned *after* pushing the return address.
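  // Concretely (a sketch assuming JitStackAlignment == 16 and a word-sized
  // return address, as on x64): sp % 16 must be 8 at the call instruction,
  // which is why sizeOfRetAddr is subtracted back out of the aligned
  // decrement when computing jitFramePushed below.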
2220   static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
2221   const unsigned sizeOfTlsSlot = sizeof(void*);
2222   const unsigned sizeOfRetAddr = sizeof(void*);
2223   const unsigned sizeOfPreFrame =
2224       WasmToJSJitFrameLayout::Size() - sizeOfRetAddr;
2225   const unsigned sizeOfThisAndArgs =
2226       (1 + fi.funcType().args().length()) * sizeof(Value);
2227   const unsigned totalJitFrameBytes =
2228       sizeOfRetAddr + sizeOfPreFrame + sizeOfThisAndArgs + sizeOfTlsSlot;
2229   const unsigned jitFramePushed =
2230       StackDecrementForCall(JitStackAlignment,
2231                             sizeof(Frame),  // pushed by prologue
2232                             totalJitFrameBytes) -
2233       sizeOfRetAddr;
2234   const unsigned sizeOfThisAndArgsAndPadding = jitFramePushed - sizeOfPreFrame;
2235 
2236   // On ARM64 we must align the SP to a 16-byte boundary.
2237 #ifdef JS_CODEGEN_ARM64
2238   const unsigned frameAlignExtra = sizeof(void*);
2239 #else
2240   const unsigned frameAlignExtra = 0;
2241 #endif
2242 
2243   GenerateJitExitPrologue(masm, jitFramePushed + frameAlignExtra, offsets);
2244 
2245   // 1. Descriptor.
2246   size_t argOffset = frameAlignExtra;
2247   uint32_t descriptor =
2248       MakeFrameDescriptor(sizeOfThisAndArgsAndPadding, FrameType::WasmToJSJit,
2249                           WasmToJSJitFrameLayout::Size());
2250   masm.storePtr(ImmWord(uintptr_t(descriptor)),
2251                 Address(masm.getStackPointer(), argOffset));
2252   argOffset += sizeof(size_t);
2253 
2254   // 2. Callee, part 1 -- need the callee register for argument filling, so
2255   // record offset here and set up callee later.
2256   size_t calleeArgOffset = argOffset;
2257   argOffset += sizeof(size_t);
2258 
2259   // 3. Argc.
2260   unsigned argc = fi.funcType().args().length();
2261   masm.storePtr(ImmWord(uintptr_t(argc)),
2262                 Address(masm.getStackPointer(), argOffset));
2263   argOffset += sizeof(size_t);
2264   MOZ_ASSERT(argOffset == sizeOfPreFrame + frameAlignExtra);
2265 
2266   // 4. |this| value.
2267   masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
2268   argOffset += sizeof(Value);
2269 
2270   // 5. Fill the arguments.
2271   const uint32_t offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
2272   Register scratch = ABINonArgReturnReg1;   // Repeatedly clobbered
2273   Register scratch2 = ABINonArgReturnReg0;  // Reused as callee below
2274   // The scratch3 reg does not need to be non-volatile, but has to be
2275   // distinct from scratch & scratch2.
2276   Register scratch3 = ABINonVolatileReg;
2277   FillArgumentArrayForExit(masm, WasmTlsReg, funcImportIndex, fi.funcType(),
2278                            argOffset, offsetFromFPToCallerStackArgs, scratch,
2279                            scratch2, scratch3, ToValue(true), throwLabel);
2280   argOffset += fi.funcType().args().length() * sizeof(Value);
2281   MOZ_ASSERT(argOffset == sizeOfThisAndArgs + sizeOfPreFrame + frameAlignExtra);
2282 
2283   // Preserve Tls because the JIT callee clobbers it.
2284   const size_t savedTlsOffset = argOffset;
2285   masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), savedTlsOffset));
2286 
2287   // 2. Callee, part 2 -- now that the register is free, set up the callee.
2288   Register callee = ABINonArgReturnReg0;  // Live until call
2289 
2290   // 2.1. Get JSFunction callee.
2291   masm.loadWasmGlobalPtr(fi.tlsDataOffset() + offsetof(FuncImportTls, fun),
2292                          callee);
2293 
2294   // 2.2. Save callee.
2295   masm.storePtr(callee, Address(masm.getStackPointer(), calleeArgOffset));
2296 
2297   // 6. Check if we need to rectify arguments.
2298   masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);
2299 
2300   Label rectify;
2301   masm.branch32(Assembler::Above, scratch, Imm32(fi.funcType().args().length()),
2302                 &rectify);
2303 
2304   // 7. If we haven't rectified arguments, load callee executable entry point.
2305 
2306   masm.loadJitCodeRaw(callee, callee);
2307 
2308   Label rejoinBeforeCall;
2309   masm.bind(&rejoinBeforeCall);
2310 
2311   AssertStackAlignment(masm, JitStackAlignment,
2312                        sizeOfRetAddr + frameAlignExtra);
2313 #ifdef JS_CODEGEN_ARM64
2314   AssertExpectedSP(masm);
2315   // Conform to JIT ABI.  Note this doesn't update PSP since SP is the active
2316   // pointer.
2317   masm.addToStackPtr(Imm32(8));
2318   // Manually resync PSP.  Omitting this causes, e.g.,
2319   // tests/wasm/import-export.js to segfault.
2320   masm.moveStackPtrTo(PseudoStackPointer);
2321 #endif
2322   masm.callJitNoProfiler(callee);
2323 #ifdef JS_CODEGEN_ARM64
2324   // Conform to platform conventions - align the SP.
2325   masm.subFromStackPtr(Imm32(8));
2326 #endif
2327 
2328   // Note that there might be a GC thing in the JSReturnOperand now.
2329   // In all the code paths from here:
2330   // - either the value is unboxed because it was a primitive and we don't
2331   //   need to worry about rooting anymore.
2332   // - or the value needs to be rooted, but nothing can cause a GC between
2333   //   here and CoerceInPlace, which roots before coercing to a primitive.
2334 
2335   // The JIT callee clobbers all registers, including WasmTlsReg and
2336   // FramePointer, so restore those here. During this sequence of
2337   // instructions, FP can't be trusted by the profiling frame iterator.
2338   offsets->untrustedFPStart = masm.currentOffset();
2339   AssertStackAlignment(masm, JitStackAlignment,
2340                        sizeOfRetAddr + frameAlignExtra);
2341 
2342   masm.loadPtr(Address(masm.getStackPointer(), savedTlsOffset), WasmTlsReg);
2343   masm.moveStackPtrTo(FramePointer);
2344   masm.addPtr(Imm32(masm.framePushed()), FramePointer);
2345   offsets->untrustedFPEnd = masm.currentOffset();
2346 
2347   // As explained above, the frame was aligned for the JIT ABI such that
2348   //   (sp + sizeof(void*)) % JitStackAlignment == 0
2349   // But now we possibly want to call one of several different C++ functions,
2350   // so subtract the sizeof(void*) so that sp is aligned for an ABI call.
2351   static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
2352 #ifdef JS_CODEGEN_ARM64
2353   // We've already allocated the extra space for frame alignment.
2354   static_assert(sizeOfRetAddr == frameAlignExtra, "ARM64 SP alignment");
2355 #else
2356   masm.reserveStack(sizeOfRetAddr);
2357 #endif
2358   unsigned nativeFramePushed = masm.framePushed();
2359   AssertStackAlignment(masm, ABIStackAlignment);
2360 
2361 #ifdef DEBUG
2362   {
2363     Label ok;
2364     masm.branchTestMagic(Assembler::NotEqual, JSReturnOperand, &ok);
2365     masm.breakpoint();
2366     masm.bind(&ok);
2367   }
2368 #endif
2369 
2370   GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2371             funcImportIndex);
2372 
2373   Label oolConvert;
2374   const ValTypeVector& results = fi.funcType().results();
2375   if (results.length() == 0) {
2376     GenPrintf(DebugChannel::Import, masm, "void");
2377   } else {
2378     MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
2379     switch (results[0].kind()) {
2380       case ValType::I32:
2381         // No spectre.index_masking required, as the return value does not come
2382         // to us in ReturnReg.
2383         masm.truncateValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg,
2384                                   &oolConvert);
2385         GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
2386         break;
2387       case ValType::I64:
2388         // No fast path for now; go immediately to the OOL case.
2389         masm.jump(&oolConvert);
2390         break;
2391       case ValType::Rtt:
2392       case ValType::V128:
2393         // Unreachable as callImport should not call the stub.
2394         masm.breakpoint();
2395         break;
2396       case ValType::F32:
2397         masm.convertValueToFloat(JSReturnOperand, ReturnFloat32Reg,
2398                                  &oolConvert);
2399         GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
2400         break;
2401       case ValType::F64:
2402         masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg,
2403                                   &oolConvert);
2404         GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
2405         break;
2406       case ValType::Ref:
2407         switch (results[0].refTypeKind()) {
2408           case RefType::Extern:
2409             BoxValueIntoAnyref(masm, JSReturnOperand, ReturnReg, &oolConvert);
2410             GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
2411             break;
2412           case RefType::Func:
2413           case RefType::Eq:
2414           case RefType::TypeIndex:
2415             MOZ_CRASH("typed reference returned by import (jit exit) NYI");
2416         }
2417         break;
2418     }
2419   }
2420 
2421   GenPrintf(DebugChannel::Import, masm, "\n");
2422 
2423   Label done;
2424   masm.bind(&done);
2425 
2426   GenerateJitExitEpilogue(masm, masm.framePushed(), offsets);
2427 
2428   {
2429     // Call the arguments rectifier.
2430     masm.bind(&rectify);
2431     masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)), callee);
2432     masm.loadPtr(Address(callee, Instance::offsetOfJSJitArgsRectifier()),
2433                  callee);
2434     masm.jump(&rejoinBeforeCall);
2435   }
2436 
2437   if (oolConvert.used()) {
2438     masm.bind(&oolConvert);
2439     masm.setFramePushed(nativeFramePushed);
2440 
2441     // Coercion calls use the following stack layout (sp grows to the left):
2442     //   | args | padding | Value argv[1] | padding | exit Frame |
2443     MIRTypeVector coerceArgTypes;
2444     MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
2445     unsigned offsetToCoerceArgv =
2446         AlignBytes(StackArgBytesForNativeABI(coerceArgTypes), sizeof(Value));
2447     MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
2448     AssertStackAlignment(masm, ABIStackAlignment);
2449 
2450     // Store return value into argv[0].
2451     masm.storeValue(JSReturnOperand,
2452                     Address(masm.getStackPointer(), offsetToCoerceArgv));
2453 
2454     // From this point, it's safe to reuse the scratch register (which
2455     // might be part of the JSReturnOperand).
2456 
2457     // The JIT might have clobbered exitFP at this point. Since there's
2458     // going to be a CoerceInPlace call, pretend we're still doing the JIT
2459     // call by restoring our tagged exitFP.
2460     SetExitFP(masm, ExitReason::Fixed::ImportJit, scratch);
2461 
2462     // argument 0: argv
2463     ABIArgMIRTypeIter i(coerceArgTypes);
2464     Address argv(masm.getStackPointer(), offsetToCoerceArgv);
2465     if (i->kind() == ABIArg::GPR) {
2466       masm.computeEffectiveAddress(argv, i->gpr());
2467     } else {
2468       masm.computeEffectiveAddress(argv, scratch);
2469       masm.storePtr(scratch,
2470                     Address(masm.getStackPointer(), i->offsetFromArgBase()));
2471     }
2472     i++;
2473     MOZ_ASSERT(i.done());
2474 
2475     // Call coercion function. Note that right after the call, the value of
2476     // FP is correct because FP is non-volatile in the native ABI.
2477     AssertStackAlignment(masm, ABIStackAlignment);
2478     const ValTypeVector& results = fi.funcType().results();
2479     if (results.length() > 0) {
2480       // NOTE that once there can be more than one result and we can box some of
2481       // the results (as we must for AnyRef), pointer and already-boxed results
2482       // must be rooted while subsequent results are boxed.
2483       MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
2484       switch (results[0].kind()) {
2485         case ValType::I32:
2486           masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
2487           masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
2488           masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv),
2489                           ReturnReg);
2490           // No spectre.index_masking required, as we generate a known-good
2491           // value in a safe way here.
2492           break;
2493         case ValType::I64: {
2494           masm.call(SymbolicAddress::CoerceInPlace_ToBigInt);
2495           masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
2496           Address argv(masm.getStackPointer(), offsetToCoerceArgv);
2497           masm.unboxBigInt(argv, scratch);
2498           masm.loadBigInt64(scratch, ReturnReg64);
2499           break;
2500         }
2501         case ValType::F64:
2502         case ValType::F32:
2503           masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
2504           masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
2505           masm.unboxDouble(Address(masm.getStackPointer(), offsetToCoerceArgv),
2506                            ReturnDoubleReg);
2507           if (results[0].kind() == ValType::F32) {
2508             masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
2509           }
2510           break;
2511         case ValType::Ref:
2512           switch (results[0].refTypeKind()) {
2513             case RefType::Extern:
2514               masm.call(SymbolicAddress::BoxValue_Anyref);
2515               masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg,
2516                                 throwLabel);
2517               break;
2518             case RefType::Func:
2519             case RefType::Eq:
2520             case RefType::TypeIndex:
2521               MOZ_CRASH("Unsupported convert type");
2522           }
2523           break;
2524         default:
2525           MOZ_CRASH("Unsupported convert type");
2526       }
2527     }
2528 
2529     // Maintain the invariant that exitFP is either unset or not set to a
2530     // wasm tagged exitFP, per the jit exit contract.
2531     ClearExitFP(masm, scratch);
2532 
2533     masm.jump(&done);
2534     masm.setFramePushed(0);
2535   }
2536 
2537   MOZ_ASSERT(masm.framePushed() == 0);
2538 
2539   return FinishOffsets(masm, offsets);
2540 }
2541 
2542 struct ABIFunctionArgs {
2543   ABIFunctionType abiType;
2544   size_t len;
2545 
2546   explicit ABIFunctionArgs(ABIFunctionType sig)
2547       : abiType(ABIFunctionType(sig >> ArgType_Shift)) {
2548     len = 0;
2549     uint32_t i = uint32_t(abiType);
2550     while (i) {
2551       i = i >> ArgType_Shift;
2552       len++;
2553     }
2554   }
2555 
2556   size_t length() const { return len; }
2557 
2558   MIRType operator[](size_t i) const {
2559     MOZ_ASSERT(i < len);
2560     uint32_t abi = uint32_t(abiType);
2561     while (i--) {
2562       abi = abi >> ArgType_Shift;
2563     }
2564     return ToMIRType(ABIArgType(abi & ArgType_Mask));
2565   }
2566 };
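// Illustrative decoding (a sketch; the field layout follows operator[]
// above): for a hypothetical ABIFunctionType describing `double f(double)`,
// the low ArgType field holds the Float64 return type, which the constructor
// shifts away, leaving a single argument field:
//
//   ABIFunctionArgs args(sig);
//   args.length();  // == 1
//   args[0];        // == MIRType::Double
//
// GenerateBuiltinThunk below recovers the return type separately via
// `abiType & ArgType_Mask`.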
2567 
2568 bool wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType,
2569                                 ExitReason exitReason, void* funcPtr,
2570                                 CallableOffsets* offsets) {
2571   AssertExpectedSP(masm);
2572   masm.setFramePushed(0);
2573 
2574   ABIFunctionArgs args(abiType);
2575   uint32_t framePushed =
2576       StackDecrementForCall(ABIStackAlignment,
2577                             sizeof(Frame),  // pushed by prologue
2578                             StackArgBytesForNativeABI(args));
2579 
2580   GenerateExitPrologue(masm, framePushed, exitReason, offsets);
2581 
2582   // Copy out and convert caller arguments, if needed.
2583   unsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
2584   Register scratch = ABINonArgReturnReg0;
2585   for (ABIArgIter i(args); !i.done(); i++) {
2586     if (i->argInRegister()) {
2587 #ifdef JS_CODEGEN_ARM
2588       // The non-hard-fp ABI passes the argument values in GPRs.
      if (!UseHardFpABI() && IsFloatingPointType(i.mirType())) {
        FloatRegister input = i->fpu();
        if (i.mirType() == MIRType::Float32) {
          masm.ma_vxfer(input, Register::FromCode(input.id()));
        } else if (i.mirType() == MIRType::Double) {
          uint32_t regId = input.singleOverlay().id();
          masm.ma_vxfer(input, Register::FromCode(regId),
                        Register::FromCode(regId + 1));
        }
      }
#endif
      continue;
    }

    Address src(FramePointer,
                offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
    Address dst(masm.getStackPointer(), i->offsetFromArgBase());
    StackCopy(masm, i.mirType(), scratch, src, dst);
  }

  AssertStackAlignment(masm, ABIStackAlignment);
  MoveSPForJitABI(masm);
  masm.call(ImmPtr(funcPtr, ImmPtr::NoCheckToken()));

#if defined(JS_CODEGEN_X64)
  // No spectre.index_masking is required, as the caller will mask.
#elif defined(JS_CODEGEN_X86)
  // x86 passes the return value on the x87 FP stack.
  Operand op(esp, 0);
  MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
  if (retType == MIRType::Float32) {
    masm.fstp32(op);
    masm.loadFloat32(op, ReturnFloat32Reg);
  } else if (retType == MIRType::Double) {
    masm.fstp(op);
    masm.loadDouble(op, ReturnDoubleReg);
  }
#elif defined(JS_CODEGEN_ARM)
  // Non hard-fp passes the return value in GPRs.
  MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
  if (!UseHardFpABI() && IsFloatingPointType(retType)) {
    masm.ma_vxfer(r0, r1, d0);
  }
#endif

  GenerateExitEpilogue(masm, framePushed, exitReason, offsets);
  return FinishOffsets(masm, offsets);
}

#if defined(JS_CODEGEN_ARM)
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((Registers::SetType(1) << Registers::sp) |
                         (Registers::SetType(1) << Registers::pc))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  ifdef ENABLE_WASM_SIMD
#    error "high lanes of SIMD registers need to be saved too."
#  endif
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((Registers::SetType(1) << Registers::k0) |
                         (Registers::SetType(1) << Registers::k1) |
                         (Registers::SetType(1) << Registers::sp) |
                         (Registers::SetType(1) << Registers::zero))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  ifdef ENABLE_WASM_SIMD
#    error "high lanes of SIMD registers need to be saved too."
#  endif
#elif defined(JS_CODEGEN_ARM64)
// We assume that traps do not happen while lr is live. This both ensures that
// the size of RegsToPreserve is a multiple of 2 (preserving WasmStackAlignment)
// and gives us a register to clobber in the return path.
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((Registers::SetType(1) << RealStackPointer.code()) |
                         (Registers::SetType(1) << Registers::lr))),
#  ifdef ENABLE_WASM_SIMD
    FloatRegisterSet(FloatRegisters::AllSimd128Mask));
#  else
    // If SIMD is not enabled, it's pointless to save/restore the upper 64
    // bits of each vector register.
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  endif
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// It's correct to use FloatRegisters::AllMask even when SIMD is not enabled;
// PushRegsInMask strips out the high lanes of the XMM registers in this case,
// while the singles will be stripped as they are aliased by the larger doubles.
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~(Registers::SetType(1) << Registers::StackPointer)),
    FloatRegisterSet(FloatRegisters::AllMask));
#else
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  ifdef ENABLE_WASM_SIMD
#    error "no SIMD support"
#  endif
#endif

// Generate a MachineState which describes the locations of the GPRs as saved
// by GenerateTrapExit.  FP registers are ignored.  Note that the values
// stored in the MachineState are offsets in words downwards from the top of
// the save area.  That is, a higher value implies a lower address.
void wasm::GenerateTrapExitMachineState(MachineState* machine,
                                        size_t* numWords) {
  // This is the number of words pushed by the initial WasmPush().
  *numWords = WasmPushSize / sizeof(void*);
  MOZ_ASSERT(*numWords == TrapExitDummyValueOffsetFromTop + 1);

  // And these correspond to the PushRegsInMask() that immediately follows.
  for (GeneralRegisterBackwardIterator iter(RegsToPreserve.gprs()); iter.more();
       ++iter) {
    machine->setRegisterLocation(*iter,
                                 reinterpret_cast<uintptr_t*>(*numWords));
    (*numWords)++;
  }
}

// Generate a stub which calls WasmHandleTrap() and can be executed by having
// the signal handler redirect PC from any trapping instruction.
static bool GenerateTrapExit(MacroAssembler& masm, Label* throwLabel,
                             Offsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);

  masm.setFramePushed(0);

  offsets->begin = masm.currentOffset();

  // Traps can only happen at well-defined program points. However, since
  // traps may resume and the optimal assumption for the surrounding code is
  // that registers are not clobbered, we need to preserve all registers in
  // the trap exit. One simplifying assumption is that flags may be clobbered.
  // Push a dummy word to use as return address below.
  WasmPush(masm, ImmWord(TrapExitDummyValue));
  unsigned framePushedBeforePreserve = masm.framePushed();
  masm.PushRegsInMask(RegsToPreserve);
  unsigned offsetOfReturnWord = masm.framePushed() - framePushedBeforePreserve;
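  // The dummy word now sits offsetOfReturnWord bytes above the (pre-alignment)
  // stack pointer; HandleTrap's resume PC is stored into it below so that the
  // final ret pops it as the return target.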

  // We know that StackPointer is word-aligned, but not necessarily
  // stack-aligned, so we need to align it dynamically.
  Register preAlignStackPointer = ABINonVolatileReg;
  masm.moveStackPtrTo(preAlignStackPointer);
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }

  masm.assertStackAlignment(ABIStackAlignment);
  masm.call(SymbolicAddress::HandleTrap);

  // WasmHandleTrap returns null if control should transfer to the throw stub.
  masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);

  // Otherwise, the return value is the TrapData::resumePC we must jump to.
  // We must restore register state before jumping, which will clobber
  // ReturnReg, so store ReturnReg in the above-reserved stack slot which we
  // use to jump to via ret.
  masm.moveToStackPtr(preAlignStackPointer);
  masm.storePtr(ReturnReg, Address(masm.getStackPointer(), offsetOfReturnWord));
  masm.PopRegsInMask(RegsToPreserve);
#ifdef JS_CODEGEN_ARM64
  WasmPop(masm, lr);
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}

// Generate a stub that restores the stack pointer to what it was on entry to
// the wasm activation, sets the return register to 'false' and then executes a
// return which will return from this wasm activation to the caller. This stub
// should only be called after the caller has reported an error.
static bool GenerateThrowStub(MacroAssembler& masm, Label* throwLabel,
                              Offsets* offsets) {
  Register scratch = ABINonArgReturnReg0;
  Register scratch2 = ABINonArgReturnReg1;

  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);
  masm.setFramePushed(0);

  masm.bind(throwLabel);

  offsets->begin = masm.currentOffset();

  // Conservatively, the stack pointer can be unaligned and we must align it
  // dynamically.
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }

  // Allocate space for exception or regular resume information.
  masm.reserveStack(sizeof(jit::ResumeFromException));
  masm.moveStackPtrTo(scratch);
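  // scratch now points at the reserved ResumeFromException; it is passed to
  // HandleThrow below, which fills it in.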

  MIRTypeVector handleThrowTypes;
  MOZ_ALWAYS_TRUE(handleThrowTypes.append(MIRType::Pointer));

  unsigned frameSize =
      StackDecrementForCall(ABIStackAlignment, masm.framePushed(),
                            StackArgBytesForNativeABI(handleThrowTypes));
  masm.reserveStack(frameSize);
  masm.assertStackAlignment(ABIStackAlignment);

  ABIArgMIRTypeIter i(handleThrowTypes);
  if (i->kind() == ABIArg::GPR) {
    masm.movePtr(scratch, i->gpr());
  } else {
    masm.storePtr(scratch,
                  Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;
  MOZ_ASSERT(i.done());

  // WasmHandleThrow unwinds JitActivation::wasmExitFP() and returns the
  // address of the return address on the stack this stub should return to.
  // Set the FramePointer to a magic value to indicate a return by throw.
  //
  // If there is a Wasm catch handler present, it will instead return the
  // address of the handler to jump to and the FP/SP values to restore.
  masm.call(SymbolicAddress::HandleThrow);

  Label resumeCatch, leaveWasm;

  masm.load32(Address(ReturnReg, offsetof(jit::ResumeFromException, kind)),
              scratch);

  masm.branch32(Assembler::Equal, scratch,
                Imm32(jit::ResumeFromException::RESUME_WASM_CATCH),
                &resumeCatch);
  masm.branch32(Assembler::Equal, scratch,
                Imm32(jit::ResumeFromException::RESUME_WASM), &leaveWasm);

  masm.breakpoint();

  // The case where a Wasm catch handler was found while unwinding the stack.
  masm.bind(&resumeCatch);
  masm.loadPtr(Address(ReturnReg, offsetof(ResumeFromException, framePointer)),
               FramePointer);
  // Defer reloading stackPointer until just before the jump, so as to
  // protect other live data on the stack.

  // When there is a catch handler, HandleThrow passes it the Value needed for
  // the handler's argument as well.
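  // On 64-bit targets a boxed Value fits in a single register; 32-bit targets
  // need a type/payload register pair.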
#ifdef JS_64BIT
  ValueOperand val(scratch);
#else
  ValueOperand val(scratch, scratch2);
#endif
  masm.loadValue(Address(ReturnReg, offsetof(ResumeFromException, exception)),
                 val);
  Register obj = masm.extractObject(val, scratch2);
  masm.loadPtr(Address(ReturnReg, offsetof(ResumeFromException, target)),
               scratch);
  // Now it's safe to reload stackPointer.
  masm.loadStackPtr(
      Address(ReturnReg, offsetof(ResumeFromException, stackPointer)));
  // This move must come after the SP is reloaded because WasmExceptionReg may
  // alias ReturnReg.
  masm.movePtr(obj, WasmExceptionReg);
  masm.jump(scratch);

  // No catch handler was found, so we will just return out.
  masm.bind(&leaveWasm);
  masm.loadPtr(Address(ReturnReg, offsetof(ResumeFromException, framePointer)),
               FramePointer);
  masm.loadPtr(Address(ReturnReg, offsetof(ResumeFromException, stackPointer)),
               scratch);
  masm.moveToStackPtr(scratch);
#ifdef JS_CODEGEN_ARM64
  masm.loadPtr(Address(scratch, 0), lr);
  masm.addToStackPtr(Imm32(8));
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}

static const LiveRegisterSet AllAllocatableRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::AllocatableMask),
                    FloatRegisterSet(FloatRegisters::AllMask));

// Generate a stub that handles togglable enter/leave frame traps or
// breakpoints. The trap records the frame pointer (via GenerateExitPrologue)
// and saves most registers so as not to perturb the code generated by
// WasmBaselineCompile.
static bool GenerateDebugTrapStub(MacroAssembler& masm, Label* throwLabel,
                                  CallableOffsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);
  masm.setFramePushed(0);

  GenerateExitPrologue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);

  // Save all registers used between baseline compiler operations.
  masm.PushRegsInMask(AllAllocatableRegs);

  uint32_t framePushed = masm.framePushed();

  // This stub may be called with an unaligned stack, so align it dynamically
  // and save the old stack pointer at the top.
#ifdef JS_CODEGEN_ARM64
  // On ARM64, however, the stack is always aligned.
  static_assert(ABIStackAlignment == 16, "ARM64 SP alignment");
#else
  Register scratch = ABINonArgReturnReg0;
  masm.moveStackPtrTo(scratch);
  masm.subFromStackPtr(Imm32(sizeof(intptr_t)));
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  masm.storePtr(scratch, Address(masm.getStackPointer(), 0));
#endif

  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }
  masm.assertStackAlignment(ABIStackAlignment);
  masm.call(SymbolicAddress::HandleDebugTrap);

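  // HandleDebugTrap returns false if an error was reported while handling the
  // trap; in that case unwind through the shared throw stub.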
  masm.branchIfFalseBool(ReturnReg, throwLabel);

  if (ShadowStackSpace) {
    masm.addToStackPtr(Imm32(ShadowStackSpace));
  }
#ifndef JS_CODEGEN_ARM64
  masm.Pop(scratch);
  masm.moveToStackPtr(scratch);
#endif

  masm.setFramePushed(framePushed);
  masm.PopRegsInMask(AllAllocatableRegs);

  GenerateExitEpilogue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);

  return FinishOffsets(masm, offsets);
}

bool wasm::GenerateEntryStubs(MacroAssembler& masm, size_t funcExportIndex,
                              const FuncExport& fe, const Maybe<ImmPtr>& callee,
                              bool isAsmJS, CodeRangeVector* codeRanges) {
  MOZ_ASSERT(!callee == fe.hasEagerStubs());
  MOZ_ASSERT_IF(isAsmJS, fe.hasEagerStubs());

  Offsets offsets;
  if (!GenerateInterpEntry(masm, fe, callee, &offsets)) {
    return false;
  }
  if (!codeRanges->emplaceBack(CodeRange::InterpEntry, fe.funcIndex(),
                               offsets)) {
    return false;
  }

  if (isAsmJS || !fe.canHaveJitEntry()) {
    return true;
  }

  if (!GenerateJitEntry(masm, funcExportIndex, fe, callee, &offsets)) {
    return false;
  }
  if (!codeRanges->emplaceBack(CodeRange::JitEntry, fe.funcIndex(), offsets)) {
    return false;
  }

  return true;
}

bool wasm::GenerateProvisionalLazyJitEntryStub(MacroAssembler& masm,
                                               Offsets* offsets) {
  AssertExpectedSP(masm);
  masm.setFramePushed(0);
  offsets->begin = masm.currentOffset();

#ifdef JS_CODEGEN_ARM64
  // Unaligned ABI calls require SP+PSP, but our mode here is SP-only
  masm.SetStackPointer64(PseudoStackPointer64);
  masm.Mov(PseudoStackPointer64, sp);
#endif

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  Register temp = regs.takeAny();

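  // Ask the VM for the context's interpreter entry stub, then tail-jump to
  // it below; execution continues in that stub rather than returning here.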
  using Fn = void* (*)();
  masm.setupUnalignedABICall(temp);
  masm.callWithABI<Fn, GetContextSensitiveInterpreterStub>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

#ifdef JS_USE_LINK_REGISTER
  masm.popReturnAddress();
#endif

  masm.jump(ReturnReg);

#ifdef JS_CODEGEN_ARM64
  // Undo the SP+PSP mode
  masm.SetStackPointer64(sp);
#endif

  return FinishOffsets(masm, offsets);
}

bool wasm::GenerateStubs(const ModuleEnvironment& env,
                         const FuncImportVector& imports,
                         const FuncExportVector& exports, CompiledCode* code) {
  LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
  TempAllocator alloc(&lifo);
  WasmMacroAssembler masm(alloc, env);

  // Swap in already-allocated empty vectors to avoid malloc/free.
  if (!code->swap(masm)) {
    return false;
  }

  Label throwLabel;
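  // All exit and trap stubs in the module share a single throw path;
  // throwLabel is bound by GenerateThrowStub near the end of this function.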

  JitSpew(JitSpew_Codegen, "# Emitting wasm import stubs");

  for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
    const FuncImport& fi = imports[funcIndex];

    CallableOffsets interpOffsets;
    if (!GenerateImportInterpExit(masm, fi, funcIndex, &throwLabel,
                                  &interpOffsets)) {
      return false;
    }
    if (!code->codeRanges.emplaceBack(CodeRange::ImportInterpExit, funcIndex,
                                      interpOffsets)) {
      return false;
    }

    // Skip if the function does not have a signature that allows for a JIT
    // exit.
    if (!fi.canHaveJitExit()) {
      continue;
    }

    JitExitOffsets jitOffsets;
    if (!GenerateImportJitExit(masm, fi, funcIndex, &throwLabel, &jitOffsets)) {
      return false;
    }
    if (!code->codeRanges.emplaceBack(funcIndex, jitOffsets)) {
      return false;
    }
  }

  JitSpew(JitSpew_Codegen, "# Emitting wasm export stubs");

  Maybe<ImmPtr> noAbsolute;
  for (size_t i = 0; i < exports.length(); i++) {
    const FuncExport& fe = exports[i];
    if (!fe.hasEagerStubs()) {
      continue;
    }
    if (!GenerateEntryStubs(masm, i, fe, noAbsolute, env.isAsmJS(),
                            &code->codeRanges)) {
      return false;
    }
  }

  JitSpew(JitSpew_Codegen, "# Emitting wasm exit stubs");

  Offsets offsets;

  if (!GenerateTrapExit(masm, &throwLabel, &offsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::TrapExit, offsets)) {
    return false;
  }

  CallableOffsets callableOffsets;
  if (!GenerateDebugTrapStub(masm, &throwLabel, &callableOffsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::DebugTrap, callableOffsets)) {
    return false;
  }

  if (!GenerateThrowStub(masm, &throwLabel, &offsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::Throw, offsets)) {
    return false;
  }

  masm.finish();
  if (masm.oom()) {
    return false;
  }

  return code->swap(masm);
}