/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2015 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmStubs.h"

#include <algorithm>
#include <iterator>

#include "jit/ABIArgGenerator.h"
#include "jit/JitFrames.h"
#include "jit/RegisterAllocator.h"
#include "js/Printf.h"
#include "util/Memory.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmInstance.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;
using namespace js::wasm;

using MIRTypeVector = Vector<jit::MIRType, 8, SystemAllocPolicy>;
using ABIArgMIRTypeIter = jit::ABIArgIter<MIRTypeVector>;

/*****************************************************************************/
// ABIResultIter implementation

static uint32_t ResultStackSize(ValType type) {
  switch (type.kind()) {
    case ValType::I32:
      return ABIResult::StackSizeOfInt32;
    case ValType::I64:
      return ABIResult::StackSizeOfInt64;
    case ValType::F32:
      return ABIResult::StackSizeOfFloat;
    case ValType::F64:
      return ABIResult::StackSizeOfDouble;
#ifdef ENABLE_WASM_SIMD
    case ValType::V128:
      return ABIResult::StackSizeOfV128;
#endif
    case ValType::Ref:
      return ABIResult::StackSizeOfPtr;
    default:
      MOZ_CRASH("Unexpected result type");
  }
}

// Compute the size of the stack slot that the wasm ABI requires be allocated
// for a particular MIRType.  Note that this sometimes differs from the
// MIRType's natural size.  See also ResultStackSize above and ABIResult::size()
// and ABIResultIter below.

uint32_t js::wasm::MIRTypeToABIResultSize(jit::MIRType type) {
  switch (type) {
    case MIRType::Int32:
      return ABIResult::StackSizeOfInt32;
    case MIRType::Int64:
      return ABIResult::StackSizeOfInt64;
    case MIRType::Float32:
      return ABIResult::StackSizeOfFloat;
    case MIRType::Double:
      return ABIResult::StackSizeOfDouble;
#ifdef ENABLE_WASM_SIMD
    case MIRType::Simd128:
      return ABIResult::StackSizeOfV128;
#endif
    case MIRType::Pointer:
    case MIRType::RefOrNull:
      return ABIResult::StackSizeOfPtr;
    default:
      MOZ_CRASH("MIRTypeToABIResultSize - unhandled case");
  }
}
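
// (Informal note: the "sometimes differs from the MIRType's natural size"
// wording above is visible for narrow results such as i32, which is given
// ABIResult::StackSizeOfInt32 bytes; assuming the constants in WasmStubs.h
// make that pointer-sized, stack result offsets stay pointer-aligned rather
// than packing results at 4-byte granularity.)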

uint32_t ABIResult::size() const { return ResultStackSize(type()); }

void ABIResultIter::settleRegister(ValType type) {
  MOZ_ASSERT(!done());
  MOZ_ASSERT_IF(direction_ == Next, index() < MaxRegisterResults);
  MOZ_ASSERT_IF(direction_ == Prev, index() >= count_ - MaxRegisterResults);
  static_assert(MaxRegisterResults == 1, "expected a single register result");

  switch (type.kind()) {
    case ValType::I32:
      cur_ = ABIResult(type, ReturnReg);
      break;
    case ValType::I64:
      cur_ = ABIResult(type, ReturnReg64);
      break;
    case ValType::F32:
      cur_ = ABIResult(type, ReturnFloat32Reg);
      break;
    case ValType::F64:
      cur_ = ABIResult(type, ReturnDoubleReg);
      break;
    case ValType::Rtt:
    case ValType::Ref:
      cur_ = ABIResult(type, ReturnReg);
      break;
#ifdef ENABLE_WASM_SIMD
    case ValType::V128:
      cur_ = ABIResult(type, ReturnSimd128Reg);
      break;
#endif
    default:
      MOZ_CRASH("Unexpected result type");
  }
}

void ABIResultIter::settleNext() {
  MOZ_ASSERT(direction_ == Next);
  MOZ_ASSERT(!done());

  uint32_t typeIndex = count_ - index_ - 1;
  ValType type = type_[typeIndex];

  if (index_ < MaxRegisterResults) {
    settleRegister(type);
    return;
  }

  cur_ = ABIResult(type, nextStackOffset_);
  nextStackOffset_ += ResultStackSize(type);
}

void ABIResultIter::settlePrev() {
  MOZ_ASSERT(direction_ == Prev);
  MOZ_ASSERT(!done());
  uint32_t typeIndex = index_;
  ValType type = type_[typeIndex];

  if (count_ - index_ - 1 < MaxRegisterResults) {
    settleRegister(type);
    return;
  }

  uint32_t size = ResultStackSize(type);
  MOZ_ASSERT(nextStackOffset_ >= size);
  nextStackOffset_ -= size;
  cur_ = ABIResult(type, nextStackOffset_);
}
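
// Informal usage sketch (not compiled; assumes the ABIResultIter interface
// declared in WasmStubs.h):
//
//   ABIResultIter iter(ResultType::Vector(resultTypes));
//   for (; !iter.done(); iter.next()) {
//     const ABIResult& r = iter.cur();
//     if (r.inRegister()) {
//       // Use r.gpr() / r.gpr64() / r.fpr(), per settleRegister above.
//     } else {
//       // The value lives at stack offset r.stackOffset().
//     }
//   }
//
// In the Next direction the iterator visits result types last-to-first (note
// settleNext reads type_[count_ - index_ - 1]): the final result is settled
// into the return register and the remaining results are assigned ascending
// stack offsets.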

#ifdef WASM_CODEGEN_DEBUG
template <class Closure>
static void GenPrint(DebugChannel channel, MacroAssembler& masm,
                     const Maybe<Register>& taken, Closure passArgAndCall) {
  if (!IsCodegenDebugEnabled(channel)) {
    return;
  }

  AllocatableRegisterSet regs(RegisterSet::All());
  LiveRegisterSet save(regs.asLiveSet());
  masm.PushRegsInMask(save);

  if (taken) {
    regs.take(taken.value());
  }
  Register temp = regs.takeAnyGeneral();

  {
    MOZ_ASSERT(MaybeGetJitContext(),
               "codegen debug checks require a jit context");
    masm.setupUnalignedABICall(temp);
    passArgAndCall(IsCompilingWasm(), temp);
  }

  masm.PopRegsInMask(save);
}

static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
                      const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  UniqueChars str = JS_vsmprintf(fmt, ap);
  va_end(ap);

  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    // If we've gone this far, it means we're actually using the debugging
    // strings. In this case, we leak them! This is only for debugging, and
    // doing the right thing is cumbersome (in Ion, it'd mean adding a vec of
    // strings to the IonScript; in wasm, it'd mean adding it to the current
    // Module and serializing it properly).
    const char* text = str.release();

    masm.movePtr(ImmPtr((void*)text, ImmPtr::NoCheckToken()), temp);
    masm.passABIArg(temp);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintText);
    } else {
      using Fn = void (*)(const char* output);
      masm.callWithABI<Fn, PrintText>(MoveOp::GENERAL,
                                      CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
                          const Register& src) {
  GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
    masm.passABIArg(src);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintI32);
    } else {
      using Fn = void (*)(int32_t val);
      masm.callWithABI<Fn, PrintI32>(MoveOp::GENERAL,
                                     CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
                        const Register& src) {
  GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
    masm.passABIArg(src);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintPtr);
    } else {
      using Fn = void (*)(uint8_t* val);
      masm.callWithABI<Fn, PrintPtr>(MoveOp::GENERAL,
                                     CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
                        const Register64& src) {
#  if JS_BITS_PER_WORD == 64
  GenPrintf(channel, masm, "i64 ");
  GenPrintIsize(channel, masm, src.reg);
#  else
  GenPrintf(channel, masm, "i64(");
  GenPrintIsize(channel, masm, src.low);
  GenPrintIsize(channel, masm, src.high);
  GenPrintf(channel, masm, ") ");
#  endif
}

static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {
  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    masm.passABIArg(src, MoveOp::FLOAT32);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintF32);
    } else {
      using Fn = void (*)(float val);
      masm.callWithABI<Fn, PrintF32>(MoveOp::GENERAL,
                                     CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {
  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    masm.passABIArg(src, MoveOp::DOUBLE);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintF64);
    } else {
      using Fn = void (*)(double val);
      masm.callWithABI<Fn, PrintF64>(MoveOp::GENERAL,
                                     CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

#  ifdef ENABLE_WASM_SIMD
static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
                         const FloatRegister& src) {
  // TODO: We might try to do something meaningful here once SIMD data are
  // aligned and hence C++-ABI compliant.  For now, just make ourselves visible.
  GenPrintf(channel, masm, "v128");
}
#  endif
#else
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
                      const char* fmt, ...) {}
static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
                          const Register& src) {}
static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
                        const Register& src) {}
static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
                        const Register64& src) {}
static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {}
static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {}
#  ifdef ENABLE_WASM_SIMD
static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
                         const FloatRegister& src) {}
#  endif
#endif

static bool FinishOffsets(MacroAssembler& masm, Offsets* offsets) {
  // On old ARM hardware, constant pools could be inserted and they need to
  // be flushed before considering the size of the masm.
  masm.flushBuffer();
  offsets->end = masm.size();
  return !masm.oom();
}

static void AssertStackAlignment(MacroAssembler& masm, uint32_t alignment,
                                 uint32_t addBeforeAssert = 0) {
  MOZ_ASSERT(
      (sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
  masm.assertStackAlignment(alignment, addBeforeAssert);
}
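
// Worked example with hypothetical numbers: if sizeof(Frame) == 16,
// masm.framePushed() == 32 and addBeforeAssert == 0, then an alignment of 16
// passes, since (16 + 32 + 0) % 16 == 0.  addBeforeAssert lets a caller
// assert what the alignment will be after a pending push or reserve.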

template <class VectorT, template <class VecT> class ABIArgIterT>
static unsigned StackArgBytesHelper(const VectorT& args) {
  ABIArgIterT<VectorT> iter(args);
  while (!iter.done()) {
    iter++;
  }
  return iter.stackBytesConsumedSoFar();
}

template <class VectorT>
static unsigned StackArgBytesForNativeABI(const VectorT& args) {
  return StackArgBytesHelper<VectorT, ABIArgIter>(args);
}

template <class VectorT>
static unsigned StackArgBytesForWasmABI(const VectorT& args) {
  return StackArgBytesHelper<VectorT, WasmABIArgIter>(args);
}

static unsigned StackArgBytesForWasmABI(const FuncType& funcType) {
  ArgTypeVector args(funcType);
  return StackArgBytesForWasmABI(args);
}

static void Move64(MacroAssembler& masm, const Address& src,
                   const Address& dest, Register scratch) {
#if JS_BITS_PER_WORD == 32
  MOZ_RELEASE_ASSERT(src.base != scratch && dest.base != scratch);
  masm.load32(LowWord(src), scratch);
  masm.store32(scratch, LowWord(dest));
  masm.load32(HighWord(src), scratch);
  masm.store32(scratch, HighWord(dest));
#else
  Register64 scratch64(scratch);
  masm.load64(src, scratch64);
  masm.store64(scratch64, dest);
#endif
}

static void SetupABIArguments(MacroAssembler& masm, const FuncExport& fe,
                              Register argv, Register scratch) {
  // Copy parameters out of argv and into the registers/stack-slots specified by
  // the wasm ABI.
  //
  // SetupABIArguments is only used for C++ -> wasm calls through callExport(),
  // and V128 and Ref types (other than externref) are not currently allowed.
  ArgTypeVector args(fe.funcType());
  for (WasmABIArgIter iter(args); !iter.done(); iter++) {
    unsigned argOffset = iter.index() * sizeof(ExportArg);
    Address src(argv, argOffset);
    MIRType type = iter.mirType();
    switch (iter->kind()) {
      case ABIArg::GPR:
        if (type == MIRType::Int32) {
          masm.load32(src, iter->gpr());
        } else if (type == MIRType::Int64) {
          masm.load64(src, iter->gpr64());
        } else if (type == MIRType::RefOrNull) {
          masm.loadPtr(src, iter->gpr());
        } else if (type == MIRType::StackResults) {
          MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
          masm.loadPtr(src, iter->gpr());
        } else {
          MOZ_CRASH("unknown GPR type");
        }
        break;
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR:
        if (type == MIRType::Int64) {
          masm.load64(src, iter->gpr64());
        } else {
          MOZ_CRASH("wasm uses hardfp for function calls.");
        }
        break;
#endif
      case ABIArg::FPU: {
        static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
                      "ExportArg must be big enough to store SIMD values");
        switch (type) {
          case MIRType::Double:
            masm.loadDouble(src, iter->fpu());
            break;
          case MIRType::Float32:
            masm.loadFloat32(src, iter->fpu());
            break;
          case MIRType::Simd128:
#ifdef ENABLE_WASM_SIMD
            // This is only used by the testing invoke path,
            // wasmLosslessInvoke, and is guarded against in normal JS-API
            // call paths.
            masm.loadUnalignedSimd128(src, iter->fpu());
            break;
#else
            MOZ_CRASH("V128 not supported in SetupABIArguments");
#endif
          default:
            MOZ_CRASH("unexpected FPU type");
            break;
        }
        break;
      }
      case ABIArg::Stack:
        switch (type) {
          case MIRType::Int32:
            masm.load32(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          case MIRType::Int64: {
            RegisterOrSP sp = masm.getStackPointer();
            Move64(masm, src, Address(sp, iter->offsetFromArgBase()), scratch);
            break;
          }
          case MIRType::RefOrNull:
            masm.loadPtr(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          case MIRType::Double: {
            ScratchDoubleScope fpscratch(masm);
            masm.loadDouble(src, fpscratch);
            masm.storeDouble(fpscratch, Address(masm.getStackPointer(),
                                                iter->offsetFromArgBase()));
            break;
          }
          case MIRType::Float32: {
            ScratchFloat32Scope fpscratch(masm);
            masm.loadFloat32(src, fpscratch);
            masm.storeFloat32(fpscratch, Address(masm.getStackPointer(),
                                                 iter->offsetFromArgBase()));
            break;
          }
          case MIRType::Simd128: {
#ifdef ENABLE_WASM_SIMD
            // This is only used by the testing invoke path,
            // wasmLosslessInvoke, and is guarded against in normal JS-API
            // call paths.
            ScratchSimd128Scope fpscratch(masm);
            masm.loadUnalignedSimd128(src, fpscratch);
            masm.storeUnalignedSimd128(
                fpscratch,
                Address(masm.getStackPointer(), iter->offsetFromArgBase()));
            break;
#else
            MOZ_CRASH("V128 not supported in SetupABIArguments");
#endif
          }
          case MIRType::StackResults: {
            MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
            masm.loadPtr(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          }
          default:
            MOZ_CRASH("unexpected stack arg type");
        }
        break;
      case ABIArg::Uninitialized:
        MOZ_CRASH("Uninitialized ABIArg kind");
    }
  }
}

static void StoreRegisterResult(MacroAssembler& masm, const FuncExport& fe,
                                Register loc) {
  ResultType results = ResultType::Vector(fe.funcType().results());
  DebugOnly<bool> sawRegisterResult = false;
  for (ABIResultIter iter(results); !iter.done(); iter.next()) {
    const ABIResult& result = iter.cur();
    if (result.inRegister()) {
      MOZ_ASSERT(!sawRegisterResult);
      sawRegisterResult = true;
      switch (result.type().kind()) {
        case ValType::I32:
          masm.store32(result.gpr(), Address(loc, 0));
          break;
        case ValType::I64:
          masm.store64(result.gpr64(), Address(loc, 0));
          break;
        case ValType::V128:
#ifdef ENABLE_WASM_SIMD
          masm.storeUnalignedSimd128(result.fpr(), Address(loc, 0));
          break;
#else
          MOZ_CRASH("V128 not supported in StoreABIReturn");
#endif
        case ValType::F32:
          masm.canonicalizeFloat(result.fpr());
          masm.storeFloat32(result.fpr(), Address(loc, 0));
          break;
        case ValType::F64:
          masm.canonicalizeDouble(result.fpr());
          masm.storeDouble(result.fpr(), Address(loc, 0));
          break;
        case ValType::Rtt:
        case ValType::Ref:
          masm.storePtr(result.gpr(), Address(loc, 0));
          break;
      }
    }
  }
  MOZ_ASSERT(sawRegisterResult == (results.length() > 0));
}

#if defined(JS_CODEGEN_ARM)
// The ARM system ABI also includes d15 & s31 in the non-volatile float
// registers. Also exclude lr (a.k.a. r14) as we preserve it manually.
static const LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
    GeneralRegisterSet(Registers::NonVolatileMask &
                       ~(Registers::SetType(1) << Registers::lr)),
    FloatRegisterSet(FloatRegisters::NonVolatileMask |
                     (FloatRegisters::SetType(1) << FloatRegisters::d15) |
                     (FloatRegisters::SetType(1) << FloatRegisters::s31)));
#elif defined(JS_CODEGEN_ARM64)
// Exclude the Link Register (x30) because it is preserved manually.
//
// Include x16 (scratch) to make a 16-byte aligned amount of integer registers.
// Include d31 (scratch) to make a 16-byte aligned amount of floating registers.
static const LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
    GeneralRegisterSet((Registers::NonVolatileMask &
                        ~(Registers::SetType(1) << Registers::lr)) |
                       (Registers::SetType(1) << Registers::x16)),
    FloatRegisterSet(FloatRegisters::NonVolatileMask |
                     FloatRegisters::NonAllocatableMask));
#else
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask));
#endif

static const unsigned NumExtraPushed = 2;  // tls and argv

#ifdef JS_CODEGEN_ARM64
static const unsigned WasmPushSize = 16;
#else
static const unsigned WasmPushSize = sizeof(void*);
#endif

static void AssertExpectedSP(MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
  MOZ_ASSERT(sp.Is(masm.GetStackPointer64()));
#  ifdef DEBUG
  // Since we're asserting that SP is the currently active stack pointer,
  // let's also in effect assert that PSP is dead -- by setting it to 1, so
  // as to cause any attempts to use it to segfault in an easily
  // identifiable way.
  masm.asVIXL().Mov(PseudoStackPointer64, 1);
#  endif
#endif
}

template <class Operand>
static void WasmPush(MacroAssembler& masm, const Operand& op) {
#ifdef JS_CODEGEN_ARM64
  // Allocate a pad word so that SP can remain properly aligned.  |op| will be
  // written at the lower-addressed of the two words pushed here.
  masm.reserveStack(WasmPushSize);
  masm.storePtr(op, Address(masm.getStackPointer(), 0));
#else
  masm.Push(op);
#endif
}

static void WasmPop(MacroAssembler& masm, Register r) {
#ifdef JS_CODEGEN_ARM64
  // Also pop the pad word allocated by WasmPush.
  masm.loadPtr(Address(masm.getStackPointer(), 0), r);
  masm.freeStack(WasmPushSize);
#else
  masm.Pop(r);
#endif
}

static void MoveSPForJitABI(MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
  masm.moveStackPtrTo(PseudoStackPointer);
#endif
}
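
// (On ARM64 the JIT ABI addresses the stack via the pseudo stack pointer
// rather than the real SP, so the copy above keeps PSP in sync before
// entering JIT-ABI code; on all other targets this is a no-op.)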

static void CallFuncExport(MacroAssembler& masm, const FuncExport& fe,
                           const Maybe<ImmPtr>& funcPtr) {
  MOZ_ASSERT(fe.hasEagerStubs() == !funcPtr);
  MoveSPForJitABI(masm);
  if (funcPtr) {
    masm.call(*funcPtr);
  } else {
    masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
  }
}
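
// Note on the two call forms above: per the assertion, eagerly-generated
// stubs call by function index through a CallSiteDesc, to be linked within
// the module's code, while lazily-generated stubs are handed an absolute
// code pointer in |funcPtr|.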

STATIC_ASSERT_ANYREF_IS_JSOBJECT;  // Strings are currently boxed

// Unboxing is branchy and contorted because of Spectre mitigations - we don't
// have enough scratch registers.  Were it not for the spectre mitigations in
// branchTestObjClass, the branch nest below would be restructured significantly
// by inverting branches and using fewer registers.

// Unbox an anyref in src (clobbering src in the process) and then re-box it as
// a Value in *dst.  See the definition of AnyRef for a discussion of pointer
// representation.
static void UnboxAnyrefIntoValue(MacroAssembler& masm, Register tls,
                                 Register src, const Address& dst,
                                 Register scratch) {
  MOZ_ASSERT(src != scratch);

  // Not actually the value we're passing, but we've no way of
  // decoding anything better.
  GenPrintPtr(DebugChannel::Import, masm, src);

  Label notNull, mustUnbox, done;
  masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
  masm.storeValue(NullValue(), dst);
  masm.jump(&done);

  masm.bind(&notNull);
  // The type test will clear src if the test fails, so store early.
  masm.storeValue(JSVAL_TYPE_OBJECT, src, dst);
  // Spectre mitigations: see comment above about efficiency.
  masm.branchTestObjClass(Assembler::Equal, src,
                          Address(tls, offsetof(TlsData, valueBoxClass)),
                          scratch, src, &mustUnbox);
  masm.jump(&done);

  masm.bind(&mustUnbox);
  Move64(masm, Address(src, WasmValueBox::offsetOfValue()), dst, scratch);

  masm.bind(&done);
}
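
// For reference, the anyref encoding handled above: a null anyref is the
// zero pointer; a pointer to an object whose class matches
// TlsData::valueBoxClass is a WasmValueBox holding an arbitrary Value; any
// other pointer is a plain JSObject*.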

// Unbox an anyref in src and then re-box it as a Value in dst.
// See the definition of AnyRef for a discussion of pointer representation.
static void UnboxAnyrefIntoValueReg(MacroAssembler& masm, Register tls,
                                    Register src, ValueOperand dst,
                                    Register scratch) {
  MOZ_ASSERT(src != scratch);
#if JS_BITS_PER_WORD == 32
  MOZ_ASSERT(dst.typeReg() != scratch);
  MOZ_ASSERT(dst.payloadReg() != scratch);
#else
  MOZ_ASSERT(dst.valueReg() != scratch);
#endif

  // Not actually the value we're passing, but we've no way of
  // decoding anything better.
  GenPrintPtr(DebugChannel::Import, masm, src);

  Label notNull, mustUnbox, done;
  masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
  masm.moveValue(NullValue(), dst);
  masm.jump(&done);

  masm.bind(&notNull);
  // The type test will clear src if the test fails, so store early.
  masm.moveValue(TypedOrValueRegister(MIRType::Object, AnyRegister(src)), dst);
  // Spectre mitigations: see comment above about efficiency.
  masm.branchTestObjClass(Assembler::Equal, src,
                          Address(tls, offsetof(TlsData, valueBoxClass)),
                          scratch, src, &mustUnbox);
  masm.jump(&done);

  masm.bind(&mustUnbox);
  masm.loadValue(Address(src, WasmValueBox::offsetOfValue()), dst);

  masm.bind(&done);
}

// Box the Value in src as an anyref in dest.  src and dest must not overlap.
// See the definition of AnyRef for a discussion of pointer representation.
static void BoxValueIntoAnyref(MacroAssembler& masm, ValueOperand src,
                               Register dest, Label* oolConvert) {
  Label nullValue, objectValue, done;
  {
    ScratchTagScope tag(masm, src);
    masm.splitTagForTest(src, tag);
    masm.branchTestObject(Assembler::Equal, tag, &objectValue);
    masm.branchTestNull(Assembler::Equal, tag, &nullValue);
    masm.jump(oolConvert);
  }

  masm.bind(&nullValue);
  masm.xorPtr(dest, dest);
  masm.jump(&done);

  masm.bind(&objectValue);
  masm.unboxObject(src, dest);

  masm.bind(&done);
}
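
// This is the inverse of the unboxing above: null maps to the zero pointer,
// an object Value maps to its bare JSObject*, and all other Value kinds are
// deferred to |oolConvert|, whose out-of-line path boxes the value (e.g. in a
// WasmValueBox).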

// Generate a stub that enters wasm from a C++ caller via the native ABI. The
// signature of the entry point is Module::ExportFuncPtr. The exported wasm
// function has an ABI derived from its specific signature, so this function
// must map from the ABI of ExportFuncPtr to the export's signature's ABI.
static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
                                const Maybe<ImmPtr>& funcPtr,
                                Offsets* offsets) {
  AutoCreatedBy acb(masm, "GenerateInterpEntry");

  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);

  offsets->begin = masm.currentOffset();

  // Save the return address if it wasn't already saved by the call insn.
#ifdef JS_USE_LINK_REGISTER
#  if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS64) || \
      defined(JS_CODEGEN_LOONG64)
  masm.pushReturnAddress();
#  elif defined(JS_CODEGEN_ARM64)
  // WasmPush updates framePushed() unlike pushReturnAddress(), but that's
  // cancelled by the setFramePushed() below.
  WasmPush(masm, lr);
#  else
  MOZ_CRASH("Implement this");
#  endif
#endif

  // Save all caller non-volatile registers before we clobber them here and in
  // the wasm callee (which does not preserve non-volatile registers).
  masm.setFramePushed(0);
  masm.PushRegsInMask(NonVolatileRegs);

  const unsigned nonVolatileRegsPushSize =
      masm.PushRegsInMaskSizeInBytes(NonVolatileRegs);

  MOZ_ASSERT(masm.framePushed() == nonVolatileRegsPushSize);

  // Put the 'argv' argument into a non-argument/return/TLS register so that
  // we can use 'argv' while we fill in the arguments for the wasm callee.
  // Use a second non-argument/return register as temporary scratch.
  Register argv = ABINonArgReturnReg0;
  Register scratch = ABINonArgReturnReg1;

  // Read the arguments of wasm::ExportFuncPtr according to the native ABI.
  // The entry stub's frame is 1 word.
  const unsigned argBase = sizeof(void*) + masm.framePushed();
  ABIArgGenerator abi;
  ABIArg arg;

  // arg 1: ExportArg*
  arg = abi.next(MIRType::Pointer);
  if (arg.kind() == ABIArg::GPR) {
    masm.movePtr(arg.gpr(), argv);
  } else {
    masm.loadPtr(
        Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
        argv);
  }

  // Arg 2: TlsData*
  arg = abi.next(MIRType::Pointer);
  if (arg.kind() == ABIArg::GPR) {
    masm.movePtr(arg.gpr(), WasmTlsReg);
  } else {
    masm.loadPtr(
        Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
        WasmTlsReg);
  }

  WasmPush(masm, WasmTlsReg);

  // Save 'argv' on the stack so that we can recover it after the call.
  WasmPush(masm, argv);

  // Since we're about to dynamically align the stack, reset the frame depth
  // so we can still assert static stack depth balancing.
  const unsigned framePushedBeforeAlign =
      nonVolatileRegsPushSize + NumExtraPushed * WasmPushSize;

  MOZ_ASSERT(masm.framePushed() == framePushedBeforeAlign);
  masm.setFramePushed(0);

  // Dynamically align the stack since ABIStackAlignment is not necessarily
  // WasmStackAlignment. Preserve SP so it can be restored after the call.
#ifdef JS_CODEGEN_ARM64
  static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
#else
  masm.moveStackPtrTo(scratch);
  masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
  masm.Push(scratch);
#endif

  // Reserve stack space for the wasm call.
  unsigned argDecrement =
      StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
                            StackArgBytesForWasmABI(fe.funcType()));
  masm.reserveStack(argDecrement);

  // Copy parameters out of argv and into the wasm ABI registers/stack-slots.
  SetupABIArguments(masm, fe, argv, scratch);

  // Setup wasm register state. The nullness of the frame pointer is used to
  // determine whether the call ended in success or failure.
  masm.movePtr(ImmWord(0), FramePointer);
  masm.loadWasmPinnedRegsFromTls();

  masm.storePtr(WasmTlsReg,
                Address(masm.getStackPointer(), WasmCalleeTlsOffsetBeforeCall));

  // Call into the real function. Note that, due to the throw stub, fp, tls
  // and pinned registers may be clobbered.
  masm.assertStackAlignment(WasmStackAlignment);
  CallFuncExport(masm, fe, funcPtr);
  masm.assertStackAlignment(WasmStackAlignment);

  // Pop the arguments pushed after the dynamic alignment.
  masm.freeStack(argDecrement);

  // Pop the stack pointer to its value right before dynamic alignment.
#ifdef JS_CODEGEN_ARM64
  static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
#else
  masm.PopStackPtr();
#endif
  MOZ_ASSERT(masm.framePushed() == 0);
  masm.setFramePushed(framePushedBeforeAlign);

  // Recover the 'argv' pointer which was saved before aligning the stack.
  WasmPop(masm, argv);

  WasmPop(masm, WasmTlsReg);

  // Store the register result, if any, in argv[0].
  // No widening is required, as the value leaves ReturnReg.
  StoreRegisterResult(masm, fe, argv);

  // After the ReturnReg is stored into argv[0] but before fp is clobbered by
  // the PopRegsInMask(NonVolatileRegs) below, set the return value based on
  // whether fp is null (which is the case for successful returns) or the
  // FailFP magic value (set by the throw stub).
  Label success, join;
  masm.branchTestPtr(Assembler::Zero, FramePointer, FramePointer, &success);
#ifdef DEBUG
  Label ok;
  masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &ok);
  masm.breakpoint();
  masm.bind(&ok);
#endif
  masm.move32(Imm32(false), ReturnReg);
  masm.jump(&join);
  masm.bind(&success);
  masm.move32(Imm32(true), ReturnReg);
  masm.bind(&join);

  // Restore clobbered non-volatile registers of the caller.
  masm.PopRegsInMask(NonVolatileRegs);
  MOZ_ASSERT(masm.framePushed() == 0);

#if defined(JS_CODEGEN_ARM64)
  masm.setFramePushed(WasmPushSize);
  WasmPop(masm, lr);
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}
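
// For orientation, a sketch of the interp entry's stack just before the wasm
// call above (grows downward; exact layout is platform-dependent):
//
//   [ return address ]         pushed manually on link-register targets
//   [ NonVolatileRegs save ]   nonVolatileRegsPushSize bytes
//   [ WasmTlsReg ] [ argv ]    the NumExtraPushed WasmPush'es
//   [ saved unaligned SP ]     non-ARM64 only, for dynamic alignment
//   [ wasm stack arguments ]   argDecrement bytes, WasmStackAlignment-aligned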

#ifdef JS_PUNBOX64
static const ValueOperand ScratchValIonEntry = ValueOperand(ABINonArgReg0);
#else
static const ValueOperand ScratchValIonEntry =
    ValueOperand(ABINonArgReg0, ABINonArgReg1);
#endif
static const Register ScratchIonEntry = ABINonArgReg2;

static void CallSymbolicAddress(MacroAssembler& masm, bool isAbsolute,
                                SymbolicAddress sym) {
  if (isAbsolute) {
    masm.call(ImmPtr(SymbolicAddressTarget(sym), ImmPtr::NoCheckToken()));
  } else {
    masm.call(sym);
  }
}

// Load instance's TLS from the callee.
static void GenerateJitEntryLoadTls(MacroAssembler& masm, unsigned frameSize) {
  AssertExpectedSP(masm);

  // ScratchIonEntry := callee => JSFunction*
  unsigned offset = frameSize + JitFrameLayout::offsetOfCalleeToken();
  masm.loadFunctionFromCalleeToken(Address(masm.getStackPointer(), offset),
                                   ScratchIonEntry);

  // ScratchIonEntry := callee->getExtendedSlot(WASM_TLSDATA_SLOT)->toPrivate()
  //                 => TlsData*
  offset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_TLSDATA_SLOT);
  masm.loadPrivate(Address(ScratchIonEntry, offset), WasmTlsReg);
}

// Creates a JS fake exit frame for wasm, so the frame iterators just use
// JSJit frame iteration.
static void GenerateJitEntryThrow(MacroAssembler& masm, unsigned frameSize) {
  AssertExpectedSP(masm);

  MOZ_ASSERT(masm.framePushed() == frameSize);

  GenerateJitEntryLoadTls(masm, frameSize);

  masm.freeStack(frameSize);
  MoveSPForJitABI(masm);

  masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, cx)), ScratchIonEntry);
  masm.enterFakeExitFrameForWasm(ScratchIonEntry, ScratchIonEntry,
                                 ExitFrameType::WasmGenericJitEntry);

  masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)),
               ScratchIonEntry);
  masm.loadPtr(
      Address(ScratchIonEntry, Instance::offsetOfJSJitExceptionHandler()),
      ScratchIonEntry);
  masm.jump(ScratchIonEntry);
}

// Helper function for allocating a BigInt and initializing it from an I64 in
// GenerateJitEntry.  The return result is written to scratch.
//
// Note that this will create a new frame and must not - in its current form -
// be called from a context where there is already another stub frame on the
// stack, as that confuses unwinding during profiling.  This was a problem for
// its use from GenerateImportJitExit, see bug 1754258.  Therefore,
// FuncType::canHaveJitExit prevents the present function from being called for
// exits.
static void GenerateBigIntInitialization(MacroAssembler& masm,
                                         unsigned bytesPushedByPrologue,
                                         Register64 input, Register scratch,
                                         const FuncExport* fe, Label* fail) {
#if JS_BITS_PER_WORD == 32
  MOZ_ASSERT(input.low != scratch);
  MOZ_ASSERT(input.high != scratch);
#else
  MOZ_ASSERT(input.reg != scratch);
#endif

  // We need to avoid clobbering other argument registers and the input.
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  masm.PushRegsInMask(save);

  unsigned frameSize = StackDecrementForCall(
      ABIStackAlignment, masm.framePushed() + bytesPushedByPrologue, 0);
  masm.reserveStack(frameSize);
  masm.assertStackAlignment(ABIStackAlignment);

  // Needs to use a different call type depending on the stub it's used from.
  if (fe) {
    CallSymbolicAddress(masm, !fe->hasEagerStubs(),
                        SymbolicAddress::AllocateBigInt);
  } else {
    masm.call(SymbolicAddress::AllocateBigInt);
  }
  masm.storeCallPointerResult(scratch);

  masm.assertStackAlignment(ABIStackAlignment);
  masm.freeStack(frameSize);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.branchTest32(Assembler::Zero, scratch, scratch, fail);
  masm.initializeBigInt64(Scalar::BigInt64, scratch, input);
}
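
// Usage note: GenerateJitEntry below relies on this helper to box an i64
// result: the freshly allocated BigInt lands in |scratch|, is initialized
// from ReturnReg64, and is then boxed as JSVAL_TYPE_BIGINT into
// JSReturnOperand.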
992 
993 // Generate a stub that enters wasm from a jit code caller via the jit ABI.
994 //
995 // ARM64 note: This does not save the PseudoStackPointer so we must be sure to
996 // recompute it on every return path, be it normal return or exception return.
997 // The JIT code we return to assumes it is correct.
998 
GenerateJitEntry(MacroAssembler & masm,size_t funcExportIndex,const FuncExport & fe,const Maybe<ImmPtr> & funcPtr,Offsets * offsets)999 static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
1000                              const FuncExport& fe, const Maybe<ImmPtr>& funcPtr,
1001                              Offsets* offsets) {
1002   AutoCreatedBy acb(masm, "GenerateJitEntry");
1003 
1004   AssertExpectedSP(masm);
1005 
1006   RegisterOrSP sp = masm.getStackPointer();
1007 
1008   GenerateJitEntryPrologue(masm, offsets);
1009 
1010   // The jit caller has set up the following stack layout (sp grows to the
1011   // left):
1012   // <-- retAddr | descriptor | callee | argc | this | arg1..N
1013 
1014   unsigned normalBytesNeeded = StackArgBytesForWasmABI(fe.funcType());
1015 
1016   MIRTypeVector coerceArgTypes;
1017   MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Int32));
1018   MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
1019   MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
1020   unsigned oolBytesNeeded = StackArgBytesForWasmABI(coerceArgTypes);
1021 
1022   unsigned bytesNeeded = std::max(normalBytesNeeded, oolBytesNeeded);
1023 
1024   // Note the jit caller ensures the stack is aligned *after* the call
1025   // instruction.
1026   unsigned frameSize = StackDecrementForCall(WasmStackAlignment,
1027                                              masm.framePushed(), bytesNeeded);
1028 
1029   // Reserve stack space for wasm ABI arguments, set up like this:
1030   // <-- ABI args | padding
1031   masm.reserveStack(frameSize);
1032 
1033   GenerateJitEntryLoadTls(masm, frameSize);
1034 
1035   if (fe.funcType().hasUnexposableArgOrRet()) {
1036     CallSymbolicAddress(masm, !fe.hasEagerStubs(),
1037                         SymbolicAddress::ReportV128JSCall);
1038     GenerateJitEntryThrow(masm, frameSize);
1039     return FinishOffsets(masm, offsets);
1040   }
1041 
1042   FloatRegister scratchF = ABINonArgDoubleReg;
1043   Register scratchG = ScratchIonEntry;
1044   ValueOperand scratchV = ScratchValIonEntry;
1045 
1046   GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
1047             fe.funcIndex());
1048 
1049   // We do two loops:
1050   // - one loop up-front will make sure that all the Value tags fit the
1051   // expected signature argument types. If at least one inline conversion
1052   // fails, we just jump to the OOL path which will call into C++. Inline
1053   // conversions are ordered in the way we expect them to happen the most.
1054   // - the second loop will unbox the arguments into the right registers.
1055   Label oolCall;
1056   for (size_t i = 0; i < fe.funcType().args().length(); i++) {
1057     unsigned jitArgOffset = frameSize + JitFrameLayout::offsetOfActualArg(i);
1058     Address jitArgAddr(sp, jitArgOffset);
1059     masm.loadValue(jitArgAddr, scratchV);
1060 
1061     Label next;
1062     switch (fe.funcType().args()[i].kind()) {
1063       case ValType::I32: {
1064         ScratchTagScope tag(masm, scratchV);
1065         masm.splitTagForTest(scratchV, tag);
1066 
1067         // For int32 inputs, just skip.
1068         masm.branchTestInt32(Assembler::Equal, tag, &next);
1069 
1070         // For double inputs, unbox, truncate and store back.
1071         Label storeBack, notDouble;
1072         masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
1073         {
1074           ScratchTagScopeRelease _(&tag);
1075           masm.unboxDouble(scratchV, scratchF);
1076           masm.branchTruncateDoubleMaybeModUint32(scratchF, scratchG, &oolCall);
1077           masm.jump(&storeBack);
1078         }
1079         masm.bind(&notDouble);
1080 
1081         // For null or undefined, store 0.
1082         Label nullOrUndefined, notNullOrUndefined;
1083         masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);
1084         masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
1085         masm.bind(&nullOrUndefined);
1086         {
1087           ScratchTagScopeRelease _(&tag);
1088           masm.storeValue(Int32Value(0), jitArgAddr);
1089         }
1090         masm.jump(&next);
1091         masm.bind(&notNullOrUndefined);
1092 
1093         // For booleans, store the number value back. Other types (symbol,
1094         // object, strings) go to the C++ call.
1095         masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
1096         masm.unboxBoolean(scratchV, scratchG);
1097         // fallthrough:
1098 
1099         masm.bind(&storeBack);
1100         {
1101           ScratchTagScopeRelease _(&tag);
1102           masm.storeValue(JSVAL_TYPE_INT32, scratchG, jitArgAddr);
1103         }
1104         break;
1105       }
1106       case ValType::I64: {
1107         ScratchTagScope tag(masm, scratchV);
1108         masm.splitTagForTest(scratchV, tag);
1109 
1110         // For BigInt inputs, just skip. Otherwise go to C++ for other
1111         // types that require creating a new BigInt or erroring.
1112         masm.branchTestBigInt(Assembler::NotEqual, tag, &oolCall);
1113         masm.jump(&next);
1114         break;
1115       }
1116       case ValType::F32:
1117       case ValType::F64: {
1118         // Note we can reuse the same code for f32/f64 here, since for the
1119         // case of f32, the conversion of f64 to f32 will happen in the
1120         // second loop.
1121         ScratchTagScope tag(masm, scratchV);
1122         masm.splitTagForTest(scratchV, tag);
1123 
1124         // For double inputs, just skip.
1125         masm.branchTestDouble(Assembler::Equal, tag, &next);
1126 
1127         // For int32 inputs, convert and rebox.
1128         Label storeBack, notInt32;
1129         {
1130           ScratchTagScopeRelease _(&tag);
1131           masm.branchTestInt32(Assembler::NotEqual, scratchV, &notInt32);
1132           masm.int32ValueToDouble(scratchV, scratchF);
1133           masm.jump(&storeBack);
1134         }
1135         masm.bind(&notInt32);
1136 
1137         // For undefined (missing argument), store NaN.
1138         Label notUndefined;
1139         masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
1140         {
1141           ScratchTagScopeRelease _(&tag);
1142           masm.storeValue(DoubleValue(JS::GenericNaN()), jitArgAddr);
1143           masm.jump(&next);
1144         }
1145         masm.bind(&notUndefined);
1146 
1147         // +null is 0.
1148         Label notNull;
1149         masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
1150         {
1151           ScratchTagScopeRelease _(&tag);
1152           masm.storeValue(DoubleValue(0.), jitArgAddr);
1153         }
1154         masm.jump(&next);
1155         masm.bind(&notNull);
1156 
1157         // For booleans, store the number value back. Other types (symbol,
1158         // object, strings) go to the C++ call.
1159         masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
1160         masm.boolValueToDouble(scratchV, scratchF);
1161         // fallthrough:
1162 
1163         masm.bind(&storeBack);
1164         {
1165           ScratchTagScopeRelease _(&tag);
1166           masm.boxDouble(scratchF, jitArgAddr);
1167         }
1168         break;
1169       }
1170       case ValType::Ref: {
1171         switch (fe.funcType().args()[i].refTypeKind()) {
1172           case RefType::Extern: {
1173             ScratchTagScope tag(masm, scratchV);
1174             masm.splitTagForTest(scratchV, tag);
1175 
1176             // For object inputs, we handle object and null inline, everything
1177             // else requires an actual box and we go out of line to allocate
1178             // that.
1179             masm.branchTestObject(Assembler::Equal, tag, &next);
1180             masm.branchTestNull(Assembler::Equal, tag, &next);
1181             masm.jump(&oolCall);
1182             break;
1183           }
1184           case RefType::Func:
1185           case RefType::Eq:
1186           case RefType::TypeIndex: {
1187             // Guarded against by temporarilyUnsupportedReftypeForEntry()
1188             MOZ_CRASH("unexpected argument type when calling from the jit");
1189           }
1190         }
1191         break;
1192       }
1193       case ValType::V128: {
1194         // Guarded against by hasUnexposableArgOrRet()
1195         MOZ_CRASH("unexpected argument type when calling from the jit");
1196       }
1197       default: {
1198         MOZ_CRASH("unexpected argument type when calling from the jit");
1199       }
1200     }
1201     masm.nopAlign(CodeAlignment);
1202     masm.bind(&next);
1203   }
1204 
1205   Label rejoinBeforeCall;
1206   masm.bind(&rejoinBeforeCall);
1207 
1208   // Convert all the expected values to unboxed values on the stack.
1209   ArgTypeVector args(fe.funcType());
1210   for (WasmABIArgIter iter(args); !iter.done(); iter++) {
1211     unsigned jitArgOffset =
1212         frameSize + JitFrameLayout::offsetOfActualArg(iter.index());
1213     Address argv(sp, jitArgOffset);
1214     bool isStackArg = iter->kind() == ABIArg::Stack;
1215     switch (iter.mirType()) {
1216       case MIRType::Int32: {
1217         Register target = isStackArg ? ScratchIonEntry : iter->gpr();
1218         masm.unboxInt32(argv, target);
1219         GenPrintIsize(DebugChannel::Function, masm, target);
1220         if (isStackArg) {
1221           masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
1222         }
1223         break;
1224       }
1225       case MIRType::Int64: {
1226         // The coercion has provided a BigInt value by this point, which
1227         // we need to convert to an I64 here.
1228         if (isStackArg) {
1229           Address dst(sp, iter->offsetFromArgBase());
1230           Register src = scratchV.payloadOrValueReg();
1231 #if JS_BITS_PER_WORD == 64
1232           Register64 scratch64(scratchG);
1233 #else
1234           Register64 scratch64(scratchG, ABINonArgReg3);
1235 #endif
1236           masm.unboxBigInt(argv, src);
1237           masm.loadBigInt64(src, scratch64);
1238           GenPrintI64(DebugChannel::Function, masm, scratch64);
1239           masm.store64(scratch64, dst);
1240         } else {
1241           Register src = scratchG;
1242           Register64 target = iter->gpr64();
1243           masm.unboxBigInt(argv, src);
1244           masm.loadBigInt64(src, target);
1245           GenPrintI64(DebugChannel::Function, masm, target);
1246         }
1247         break;
1248       }
1249       case MIRType::Float32: {
1250         FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
1251         masm.unboxDouble(argv, ABINonArgDoubleReg);
1252         masm.convertDoubleToFloat32(ABINonArgDoubleReg, target);
1253         GenPrintF32(DebugChannel::Function, masm, target.asSingle());
1254         if (isStackArg) {
1255           masm.storeFloat32(target, Address(sp, iter->offsetFromArgBase()));
1256         }
1257         break;
1258       }
1259       case MIRType::Double: {
1260         FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
1261         masm.unboxDouble(argv, target);
1262         GenPrintF64(DebugChannel::Function, masm, target);
1263         if (isStackArg) {
1264           masm.storeDouble(target, Address(sp, iter->offsetFromArgBase()));
1265         }
1266         break;
1267       }
1268       case MIRType::RefOrNull: {
1269         Register target = isStackArg ? ScratchIonEntry : iter->gpr();
1270         masm.unboxObjectOrNull(argv, target);
1271         GenPrintPtr(DebugChannel::Function, masm, target);
1272         if (isStackArg) {
1273           masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
1274         }
1275         break;
1276       }
1277       default: {
1278         MOZ_CRASH("unexpected input argument when calling from jit");
1279       }
1280     }
1281   }
1282 
1283   GenPrintf(DebugChannel::Function, masm, "\n");
1284 
1285   // Setup wasm register state.
1286   masm.loadWasmPinnedRegsFromTls();
1287 
1288   masm.storePtr(WasmTlsReg,
1289                 Address(masm.getStackPointer(), WasmCalleeTlsOffsetBeforeCall));
1290 
1291   // Call into the real function. Note that, due to the throw stub, fp, tls
1292   // and pinned registers may be clobbered.
1293   masm.assertStackAlignment(WasmStackAlignment);
1294   CallFuncExport(masm, fe, funcPtr);
1295   masm.assertStackAlignment(WasmStackAlignment);
1296 
1297   // If fp is equal to the FailFP magic value (set by the throw stub), then
1298   // report the exception to the JIT caller by jumping into the exception
1299   // stub; otherwise the FP value is still set to the parent ion frame value.
1300   Label exception;
1301   masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &exception);
1302 
1303   // Pop arguments.
1304   masm.freeStack(frameSize);
1305 
1306   GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
1307             fe.funcIndex());
1308 
1309   // Store the return value in the JSReturnOperand.
1310   const ValTypeVector& results = fe.funcType().results();
1311   if (results.length() == 0) {
1312     GenPrintf(DebugChannel::Function, masm, "void");
1313     masm.moveValue(UndefinedValue(), JSReturnOperand);
1314   } else {
1315     MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
1316     switch (results[0].kind()) {
1317       case ValType::I32:
1318         GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
1319         // No widening is required, as the value is boxed.
1320         masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
1321         break;
1322       case ValType::F32: {
1323         masm.canonicalizeFloat(ReturnFloat32Reg);
1324         masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
1325         GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
1326         ScratchDoubleScope fpscratch(masm);
1327         masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
1328         break;
1329       }
1330       case ValType::F64: {
1331         masm.canonicalizeDouble(ReturnDoubleReg);
1332         GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
1333         ScratchDoubleScope fpscratch(masm);
1334         masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
1335         break;
1336       }
1337       case ValType::I64: {
1338         Label fail, done;
1339         GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
1340         GenerateBigIntInitialization(masm, 0, ReturnReg64, scratchG, &fe,
1341                                      &fail);
1342         masm.boxNonDouble(JSVAL_TYPE_BIGINT, scratchG, JSReturnOperand);
1343         masm.jump(&done);
1344         masm.bind(&fail);
1345         // Fixup the stack for the exception tail so that we can share it.
1346         masm.reserveStack(frameSize);
1347         masm.jump(&exception);
1348         masm.bind(&done);
1349         // Un-fixup the stack for the benefit of the assertion below.
1350         masm.setFramePushed(0);
1351         break;
1352       }
1353       case ValType::Rtt:
1354       case ValType::V128: {
1355         MOZ_CRASH("unexpected return type when calling from ion to wasm");
1356       }
1357       case ValType::Ref: {
1358         switch (results[0].refTypeKind()) {
1359           case RefType::Func:
1360           case RefType::Eq:
1361             // For FuncRef and EqRef use the AnyRef path for now, since that
1362             // will work.
1363           case RefType::Extern:
1364             // Per the comment above, the call may have clobbered the Tls
1365             // register, so reload it, since unboxing will need it.
1366             GenerateJitEntryLoadTls(masm, /* frameSize */ 0);
1367             UnboxAnyrefIntoValueReg(masm, WasmTlsReg, ReturnReg,
1368                                     JSReturnOperand, WasmJitEntryReturnScratch);
1369             break;
1370           case RefType::TypeIndex:
1371             MOZ_CRASH("unexpected return type when calling from ion to wasm");
1372         }
1373         break;
1374       }
1375     }
1376   }
1377 
1378   GenPrintf(DebugChannel::Function, masm, "\n");
1379 
1380   MOZ_ASSERT(masm.framePushed() == 0);
1381 #ifdef JS_CODEGEN_ARM64
1382   AssertExpectedSP(masm);
1383   masm.loadPtr(Address(sp, 0), lr);
1384   masm.addToStackPtr(Imm32(8));
1385   // Copy SP into PSP to enforce return-point invariants (SP == PSP).
1386   // `addToStackPtr` won't sync them because SP is the active pointer here.
1387   // For the same reason, we can't use initPseudoStackPtr to do the sync, so
1388   // we have to do it "by hand".  Omitting this causes many tests to segfault.
1389   masm.moveStackPtrTo(PseudoStackPointer);
1390   masm.abiret();
1391 #else
1392   masm.ret();
1393 #endif
1394 
1395   // Generate an OOL call to the C++ conversion path.
1396   if (fe.funcType().args().length()) {
1397     masm.bind(&oolCall);
1398     masm.setFramePushed(frameSize);
1399 
1400     // Baseline and Ion call the C++ runtime via the BuiltinThunk with the wasm
1401     // ABI, so to unify the BuiltinThunk's interface we also call it here with
     // the wasm ABI.
1402     jit::WasmABIArgIter<MIRTypeVector> argsIter(coerceArgTypes);
1403 
1404     // argument 0: function export index.
1405     if (argsIter->kind() == ABIArg::GPR) {
1406       masm.movePtr(ImmWord(funcExportIndex), argsIter->gpr());
1407     } else {
1408       masm.storePtr(ImmWord(funcExportIndex),
1409                     Address(sp, argsIter->offsetFromArgBase()));
1410     }
1411     argsIter++;
1412 
1413     // argument 1: tlsData
1414     if (argsIter->kind() == ABIArg::GPR) {
1415       masm.movePtr(WasmTlsReg, argsIter->gpr());
1416     } else {
1417       masm.storePtr(WasmTlsReg, Address(sp, argsIter->offsetFromArgBase()));
1418     }
1419     argsIter++;
1420 
1421     // argument 2: effective address of start of argv
1422     Address argv(sp, masm.framePushed() + JitFrameLayout::offsetOfActualArg(0));
1423     if (argsIter->kind() == ABIArg::GPR) {
1424       masm.computeEffectiveAddress(argv, argsIter->gpr());
1425     } else {
1426       masm.computeEffectiveAddress(argv, ScratchIonEntry);
1427       masm.storePtr(ScratchIonEntry,
1428                     Address(sp, argsIter->offsetFromArgBase()));
1429     }
1430     argsIter++;
1431     MOZ_ASSERT(argsIter.done());
1432 
1433     masm.assertStackAlignment(ABIStackAlignment);
1434     CallSymbolicAddress(masm, !fe.hasEagerStubs(),
1435                         SymbolicAddress::CoerceInPlace_JitEntry);
1436     masm.assertStackAlignment(ABIStackAlignment);
1437 
1438     // No widening is required, as the return value is used as a bool.
1439     masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg,
1440                       &rejoinBeforeCall);
1441   }
1442 
1443   // Prepare to throw: reload WasmTlsReg from the frame.
1444   masm.bind(&exception);
1445   masm.setFramePushed(frameSize);
1446   GenerateJitEntryThrow(masm, frameSize);
1447 
1448   return FinishOffsets(masm, offsets);
1449 }
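
// A sketch (not the emitted code) of what the JIT entry stub above does for a
// JS caller, assuming a wasm function of signature (i32) -> i32; all names
// below are illustrative only:
//
//   jitEntry(Value* argv):
//     convert argv[0] to int32 (inline fast path, else the OOL coercion call);
//     call the wasm function through funcPtr;
//     if (FramePointer == FailFP) goto the exception stub;
//     JSReturnOperand = Int32Value(ReturnReg);
//     return;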
1450 
1451 void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe,
1452                                      const Instance& inst,
1453                                      const JitCallStackArgVector& stackArgs,
1454                                      bool profilingEnabled, Register scratch,
1455                                      uint32_t* callOffset) {
1456   MOZ_ASSERT(!IsCompilingWasm());
1457 
1458   size_t framePushedAtStart = masm.framePushed();
1459 
1460   if (profilingEnabled) {
1461     // FramePointer isn't volatile; preserve it manually because it will be
1462     // clobbered below.
1463     masm.Push(FramePointer);
1464   } else {
1465 #ifdef DEBUG
1466     // Ensure that the FramePointer is actually Ion-volatile. This might
1467     // assert when bug 1426134 lands.
1468     AllocatableRegisterSet set(RegisterSet::All());
1469     TakeJitRegisters(/* profiling */ false, &set);
1470     MOZ_ASSERT(set.has(FramePointer),
1471                "replace the whole if branch by the then body when this fails");
1472 #endif
1473   }
1474 
1475   // Note, if code here pushes a reference value into the frame for its own
1476   // purposes (and not just as an argument to the callee) then the frame must be
1477   // traced in TraceJitExitFrame, see the case there for DirectWasmJitCall.  The
1478   // callee will trace values that are pushed as arguments, however.
1479 
1480   // Push a special frame descriptor that indicates the frame size so we can
1481   // directly iterate from the current JIT frame without an extra call.
1482   *callOffset = masm.buildFakeExitFrame(scratch);
1483   masm.loadJSContext(scratch);
1484 
1485   masm.moveStackPtrTo(FramePointer);
1486   masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::DirectWasmJitCall);
1487   masm.orPtr(Imm32(ExitOrJitEntryFPTag), FramePointer);
1488 
1489   // Move stack arguments to their final locations.
1490   unsigned bytesNeeded = StackArgBytesForWasmABI(fe.funcType());
1491   bytesNeeded = StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
1492                                       bytesNeeded);
1493   if (bytesNeeded) {
1494     masm.reserveStack(bytesNeeded);
1495   }
1496 
1497   GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
1498             fe.funcIndex());
1499 
1500   ArgTypeVector args(fe.funcType());
1501   for (WasmABIArgIter iter(args); !iter.done(); iter++) {
1502     MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != scratch);
1503     MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != FramePointer);
1504     if (iter->kind() != ABIArg::Stack) {
1505       switch (iter.mirType()) {
1506         case MIRType::Int32:
1507           GenPrintIsize(DebugChannel::Function, masm, iter->gpr());
1508           break;
1509         case MIRType::Int64:
1510           GenPrintI64(DebugChannel::Function, masm, iter->gpr64());
1511           break;
1512         case MIRType::Float32:
1513           GenPrintF32(DebugChannel::Function, masm, iter->fpu());
1514           break;
1515         case MIRType::Double:
1516           GenPrintF64(DebugChannel::Function, masm, iter->fpu());
1517           break;
1518         case MIRType::RefOrNull:
1519           GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
1520           break;
1521         case MIRType::StackResults:
1522           MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
1523           GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
1524           break;
1525         default:
1526           MOZ_CRASH("unexpected argument type in ion to wasm fast path");
1527       }
1528       continue;
1529     }
1530 
1531     Address dst(masm.getStackPointer(), iter->offsetFromArgBase());
1532 
1533     const JitCallStackArg& stackArg = stackArgs[iter.index()];
1534     switch (stackArg.tag()) {
1535       case JitCallStackArg::Tag::Imm32:
1536         GenPrintf(DebugChannel::Function, masm, "%d ", stackArg.imm32());
1537         masm.storePtr(ImmWord(stackArg.imm32()), dst);
1538         break;
1539       case JitCallStackArg::Tag::GPR:
1540         MOZ_ASSERT(stackArg.gpr() != scratch);
1541         MOZ_ASSERT(stackArg.gpr() != FramePointer);
1542         GenPrintIsize(DebugChannel::Function, masm, stackArg.gpr());
1543         masm.storePtr(stackArg.gpr(), dst);
1544         break;
1545       case JitCallStackArg::Tag::FPU:
1546         switch (iter.mirType()) {
1547           case MIRType::Double:
1548             GenPrintF64(DebugChannel::Function, masm, stackArg.fpu());
1549             masm.storeDouble(stackArg.fpu(), dst);
1550             break;
1551           case MIRType::Float32:
1552             GenPrintF32(DebugChannel::Function, masm, stackArg.fpu());
1553             masm.storeFloat32(stackArg.fpu(), dst);
1554             break;
1555           default:
1556             MOZ_CRASH(
1557                 "unexpected MIR type for a float register in wasm fast call");
1558         }
1559         break;
1560       case JitCallStackArg::Tag::Address: {
1561         // The address offsets were valid *before* we pushed our frame.
1562         Address src = stackArg.addr();
1563         src.offset += masm.framePushed() - framePushedAtStart;
1564         switch (iter.mirType()) {
1565           case MIRType::Double: {
1566             ScratchDoubleScope fpscratch(masm);
1567             masm.loadDouble(src, fpscratch);
1568             GenPrintF64(DebugChannel::Function, masm, fpscratch);
1569             masm.storeDouble(fpscratch, dst);
1570             break;
1571           }
1572           case MIRType::Float32: {
1573             ScratchFloat32Scope fpscratch(masm);
1574             masm.loadFloat32(src, fpscratch);
1575             GenPrintF32(DebugChannel::Function, masm, fpscratch);
1576             masm.storeFloat32(fpscratch, dst);
1577             break;
1578           }
1579           case MIRType::Int32: {
1580             masm.loadPtr(src, scratch);
1581             GenPrintIsize(DebugChannel::Function, masm, scratch);
1582             masm.storePtr(scratch, dst);
1583             break;
1584           }
1585           case MIRType::RefOrNull: {
1586             masm.loadPtr(src, scratch);
1587             GenPrintPtr(DebugChannel::Function, masm, scratch);
1588             masm.storePtr(scratch, dst);
1589             break;
1590           }
1591           case MIRType::StackResults: {
1592             MOZ_CRASH("multi-value in ion to wasm fast path unimplemented");
1593           }
1594           default: {
1595             MOZ_CRASH("unexpected MIR type for a stack slot in wasm fast call");
1596           }
1597         }
1598         break;
1599       }
1600       case JitCallStackArg::Tag::Undefined: {
1601         MOZ_CRASH("can't happen because of arg.kind() check");
1602       }
1603     }
1604   }
1605 
1606   GenPrintf(DebugChannel::Function, masm, "\n");
1607 
1608   // Load tls; from now on, WasmTlsReg is live.
1609   masm.movePtr(ImmPtr(inst.tlsData()), WasmTlsReg);
1610   masm.storePtr(WasmTlsReg,
1611                 Address(masm.getStackPointer(), WasmCalleeTlsOffsetBeforeCall));
1612   masm.loadWasmPinnedRegsFromTls();
1613 
1614   // Actual call.
1615   const CodeTier& codeTier = inst.code().codeTier(inst.code().bestTier());
1616   const MetadataTier& metadata = codeTier.metadata();
1617   const CodeRange& codeRange = metadata.codeRange(fe);
1618   void* callee = codeTier.segment().base() + codeRange.funcUncheckedCallEntry();
1619 
1620   masm.assertStackAlignment(WasmStackAlignment);
1621   MoveSPForJitABI(masm);
1622   masm.callJit(ImmPtr(callee));
1623 #ifdef JS_CODEGEN_ARM64
1624   // Wasm does not always keep PSP in sync with SP, so reinitialize it here:
1625   // it might have been clobbered either by the wasm code or by C++ calls within.
1626   masm.initPseudoStackPtr();
1627 #endif
1628   masm.assertStackAlignment(WasmStackAlignment);
1629 
1630   masm.branchPtr(Assembler::Equal, FramePointer, Imm32(wasm::FailFP),
1631                  masm.exceptionLabel());
1632 
1633   // Store the return value in the appropriate place.
1634   GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
1635             fe.funcIndex());
1636   const ValTypeVector& results = fe.funcType().results();
1637   if (results.length() == 0) {
1638     masm.moveValue(UndefinedValue(), JSReturnOperand);
1639     GenPrintf(DebugChannel::Function, masm, "void");
1640   } else {
1641     MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
1642     switch (results[0].kind()) {
1643       case wasm::ValType::I32:
1644         // The return value is in ReturnReg, which is what Ion expects.
1645         GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
1646 #ifdef JS_64BIT
1647         masm.widenInt32(ReturnReg);
1648 #endif
1649         break;
1650       case wasm::ValType::I64:
1651         // The return value is in ReturnReg64, which is what Ion expects.
1652         GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
1653         break;
1654       case wasm::ValType::F32:
1655         masm.canonicalizeFloat(ReturnFloat32Reg);
1656         GenPrintF32(DebugChannel::Function, masm, ReturnFloat32Reg);
1657         break;
1658       case wasm::ValType::F64:
1659         masm.canonicalizeDouble(ReturnDoubleReg);
1660         GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
1661         break;
1662       case wasm::ValType::Ref:
1663         switch (results[0].refTypeKind()) {
1664           case wasm::RefType::Func:
1665           case wasm::RefType::Eq:
1666             // For FuncRef and EqRef, use the AnyRef path for now, since that
1667             // will work.
1668           case wasm::RefType::Extern:
1669             // The call to wasm above preserves WasmTlsReg, so we don't need
1670             // to reload it here.
1671             UnboxAnyrefIntoValueReg(masm, WasmTlsReg, ReturnReg,
1672                                     JSReturnOperand, WasmJitEntryReturnScratch);
1673             break;
1674           case wasm::RefType::TypeIndex:
1675             MOZ_CRASH("unexpected return type when calling from ion to wasm");
1676         }
1677         break;
1678       case wasm::ValType::Rtt:
1679       case wasm::ValType::V128:
1680         MOZ_CRASH("unexpected return type when calling from ion to wasm");
1681     }
1682   }
1683 
1684   GenPrintf(DebugChannel::Function, masm, "\n");
1685 
1686   // Free args + frame descriptor.
1687   masm.leaveExitFrame(bytesNeeded + ExitFrameLayout::Size());
1688 
1689   // If we pushed FramePointer above, restore it now.
1690   if (profilingEnabled) {
1691     masm.Pop(FramePointer);
1692   }
1693 
1694   MOZ_ASSERT(framePushedAtStart == masm.framePushed());
1695 }
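
// In outline, the fast call emitted above performs (a sketch, not the exact
// instruction sequence):
//
//   push FramePointer if profiling is enabled;
//   build a fake exit frame tagged ExitFrameType::DirectWasmJitCall;
//   move register and stack arguments into their wasm ABI positions;
//   load WasmTlsReg and the pinned registers from the instance;
//   callJit(the callee's unchecked call entry);
//   if (FramePointer == FailFP) goto masm.exceptionLabel();
//   box the result, leave the exit frame, and restore FramePointer if pushed.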
1696 
1697 static void StackCopy(MacroAssembler& masm, MIRType type, Register scratch,
1698                       Address src, Address dst) {
1699   if (type == MIRType::Int32) {
1700     masm.load32(src, scratch);
1701     GenPrintIsize(DebugChannel::Import, masm, scratch);
1702     masm.store32(scratch, dst);
1703   } else if (type == MIRType::Int64) {
1704 #if JS_BITS_PER_WORD == 32
1705     MOZ_RELEASE_ASSERT(src.base != scratch && dst.base != scratch);
1706     GenPrintf(DebugChannel::Import, masm, "i64(");
1707     masm.load32(LowWord(src), scratch);
1708     GenPrintIsize(DebugChannel::Import, masm, scratch);
1709     masm.store32(scratch, LowWord(dst));
1710     masm.load32(HighWord(src), scratch);
1711     GenPrintIsize(DebugChannel::Import, masm, scratch);
1712     masm.store32(scratch, HighWord(dst));
1713     GenPrintf(DebugChannel::Import, masm, ") ");
1714 #else
1715     Register64 scratch64(scratch);
1716     masm.load64(src, scratch64);
1717     GenPrintIsize(DebugChannel::Import, masm, scratch);
1718     masm.store64(scratch64, dst);
1719 #endif
1720   } else if (type == MIRType::RefOrNull || type == MIRType::Pointer ||
1721              type == MIRType::StackResults) {
1722     masm.loadPtr(src, scratch);
1723     GenPrintPtr(DebugChannel::Import, masm, scratch);
1724     masm.storePtr(scratch, dst);
1725   } else if (type == MIRType::Float32) {
1726     ScratchFloat32Scope fpscratch(masm);
1727     masm.loadFloat32(src, fpscratch);
1728     GenPrintF32(DebugChannel::Import, masm, fpscratch);
1729     masm.storeFloat32(fpscratch, dst);
1730   } else if (type == MIRType::Double) {
1731     ScratchDoubleScope fpscratch(masm);
1732     masm.loadDouble(src, fpscratch);
1733     GenPrintF64(DebugChannel::Import, masm, fpscratch);
1734     masm.storeDouble(fpscratch, dst);
1735 #ifdef ENABLE_WASM_SIMD
1736   } else if (type == MIRType::Simd128) {
1737     ScratchSimd128Scope fpscratch(masm);
1738     masm.loadUnalignedSimd128(src, fpscratch);
1739     GenPrintV128(DebugChannel::Import, masm, fpscratch);
1740     masm.storeUnalignedSimd128(fpscratch, dst);
1741 #endif
1742   } else {
1743     MOZ_CRASH("StackCopy: unexpected type");
1744   }
1745 }
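
// Illustrative use of StackCopy (a sketch; the offsets are hypothetical):
// copying one stack argument from the caller's frame into the callee's
// outgoing argument area.  On 32-bit targets an Int64 copy expands to the
// two 32-bit load/store pairs seen above.
//
//   Address src(FramePointer, offsetFromFPToCallerStackArgs + argOffset);
//   Address dst(masm.getStackPointer(), argOffset);
//   StackCopy(masm, MIRType::Int64, scratch, src, dst);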
1746 
1747 using ToValue = bool;
1748 
1749 // Note that when toValue is true, this may destroy the values in incoming
1750 // argument registers as a result of Spectre mitigation.
1751 static void FillArgumentArrayForExit(
1752     MacroAssembler& masm, Register tls, unsigned funcImportIndex,
1753     const FuncType& funcType, unsigned argOffset,
1754     unsigned offsetFromFPToCallerStackArgs, Register scratch, Register scratch2,
1755     Register scratch3, ToValue toValue, Label* throwLabel) {
1756   MOZ_ASSERT(scratch != scratch2);
1757   MOZ_ASSERT(scratch != scratch3);
1758   MOZ_ASSERT(scratch2 != scratch3);
1759 
1760   // This loop does not root the values that are being constructed for
1761   // the arguments. Allocations that are generated by code either
1762   // in the loop or called from it should be NoGC allocations.
1763   GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; arguments ",
1764             funcImportIndex);
1765 
1766   ArgTypeVector args(funcType);
1767   for (ABIArgIter i(args); !i.done(); i++) {
1768     Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
1769 
1770     MIRType type = i.mirType();
1771     MOZ_ASSERT(args.isSyntheticStackResultPointerArg(i.index()) ==
1772                (type == MIRType::StackResults));
1773     switch (i->kind()) {
1774       case ABIArg::GPR:
1775         if (type == MIRType::Int32) {
1776           GenPrintIsize(DebugChannel::Import, masm, i->gpr());
1777           if (toValue) {
1778             masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
1779           } else {
1780             masm.store32(i->gpr(), dst);
1781           }
1782         } else if (type == MIRType::Int64) {
1783           GenPrintI64(DebugChannel::Import, masm, i->gpr64());
1784 
1785           if (toValue) {
1786             // FuncType::canHaveJitExit should prevent this.  Also see comments
1787             // at GenerateBigIntInitialization.
1788             MOZ_CRASH("Should not happen");
1789           } else {
1790             masm.store64(i->gpr64(), dst);
1791           }
1792         } else if (type == MIRType::RefOrNull) {
1793           if (toValue) {
1794             // This works also for FuncRef because it is distinguishable from
1795             // a boxed AnyRef.
1796             masm.movePtr(i->gpr(), scratch2);
1797             UnboxAnyrefIntoValue(masm, tls, scratch2, dst, scratch);
1798           } else {
1799             GenPrintPtr(DebugChannel::Import, masm, i->gpr());
1800             masm.storePtr(i->gpr(), dst);
1801           }
1802         } else if (type == MIRType::StackResults) {
1803           MOZ_ASSERT(!toValue, "Multi-result exit to JIT unimplemented");
1804           GenPrintPtr(DebugChannel::Import, masm, i->gpr());
1805           masm.storePtr(i->gpr(), dst);
1806         } else {
1807           MOZ_CRASH("FillArgumentArrayForExit, ABIArg::GPR: unexpected type");
1808         }
1809         break;
1810 #ifdef JS_CODEGEN_REGISTER_PAIR
1811       case ABIArg::GPR_PAIR:
1812         if (type == MIRType::Int64) {
1813           GenPrintI64(DebugChannel::Import, masm, i->gpr64());
1814 
1815           if (toValue) {
1816             // FuncType::canHaveJitExit should prevent this.  Also see comments
1817             // at GenerateBigIntInitialization.
1818             MOZ_CRASH("Should not happen");
1819           } else {
1820             masm.store64(i->gpr64(), dst);
1821           }
1822         } else {
1823           MOZ_CRASH("wasm uses hardfp for function calls.");
1824         }
1825         break;
1826 #endif
1827       case ABIArg::FPU: {
1828         FloatRegister srcReg = i->fpu();
1829         if (type == MIRType::Double) {
1830           if (toValue) {
1831             // Preserve the NaN pattern in the input.
1832             ScratchDoubleScope fpscratch(masm);
1833             masm.moveDouble(srcReg, fpscratch);
1834             masm.canonicalizeDouble(fpscratch);
1835             GenPrintF64(DebugChannel::Import, masm, fpscratch);
1836             masm.boxDouble(fpscratch, dst);
1837           } else {
1838             GenPrintF64(DebugChannel::Import, masm, srcReg);
1839             masm.storeDouble(srcReg, dst);
1840           }
1841         } else if (type == MIRType::Float32) {
1842           if (toValue) {
1843             // JS::Values can't store Float32, so convert to a Double.
1844             ScratchDoubleScope fpscratch(masm);
1845             masm.convertFloat32ToDouble(srcReg, fpscratch);
1846             masm.canonicalizeDouble(fpscratch);
1847             GenPrintF64(DebugChannel::Import, masm, fpscratch);
1848             masm.boxDouble(fpscratch, dst);
1849           } else {
1850             // Preserve the NaN pattern in the input.
1851             GenPrintF32(DebugChannel::Import, masm, srcReg);
1852             masm.storeFloat32(srcReg, dst);
1853           }
1854         } else if (type == MIRType::Simd128) {
1855           // The value should never escape; the call will be rejected later,
1856           // when the import is actually invoked.  But we should generate
1857           // something sane here for the boxed case, since a debugger or the
1858           // stack walker may observe something.
1859           ScratchDoubleScope dscratch(masm);
1860           masm.loadConstantDouble(0, dscratch);
1861           GenPrintF64(DebugChannel::Import, masm, dscratch);
1862           if (toValue) {
1863             masm.boxDouble(dscratch, dst);
1864           } else {
1865             masm.storeDouble(dscratch, dst);
1866           }
1867         } else {
1868           MOZ_CRASH("Unknown MIRType in wasm exit stub");
1869         }
1870         break;
1871       }
1872       case ABIArg::Stack: {
1873         Address src(FramePointer,
1874                     offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
1875         if (toValue) {
1876           if (type == MIRType::Int32) {
1877             masm.load32(src, scratch);
1878             GenPrintIsize(DebugChannel::Import, masm, scratch);
1879             masm.storeValue(JSVAL_TYPE_INT32, scratch, dst);
1880           } else if (type == MIRType::Int64) {
1881             // FuncType::canHaveJitExit should prevent this.  Also see comments
1882             // at GenerateBigIntInitialization.
1883             MOZ_CRASH("Should not happen");
1884           } else if (type == MIRType::RefOrNull) {
1885             // This works also for FuncRef because it is distinguishable from a
1886             // boxed AnyRef.
1887             masm.loadPtr(src, scratch);
1888             UnboxAnyrefIntoValue(masm, tls, scratch, dst, scratch2);
1889           } else if (IsFloatingPointType(type)) {
1890             ScratchDoubleScope dscratch(masm);
1891             FloatRegister fscratch = dscratch.asSingle();
1892             if (type == MIRType::Float32) {
1893               masm.loadFloat32(src, fscratch);
1894               masm.convertFloat32ToDouble(fscratch, dscratch);
1895             } else {
1896               masm.loadDouble(src, dscratch);
1897             }
1898             masm.canonicalizeDouble(dscratch);
1899             GenPrintF64(DebugChannel::Import, masm, dscratch);
1900             masm.boxDouble(dscratch, dst);
1901           } else if (type == MIRType::Simd128) {
1902             // The value should never escape; the call will be rejected
1903             // later, when the import is actually invoked.  But we should
1904             // generate something sane here for the boxed case, since a
1905             // debugger or the stack walker may observe something.
1906             ScratchDoubleScope dscratch(masm);
1907             masm.loadConstantDouble(0, dscratch);
1908             GenPrintF64(DebugChannel::Import, masm, dscratch);
1909             masm.boxDouble(dscratch, dst);
1910           } else {
1911             MOZ_CRASH(
1912                 "FillArgumentArrayForExit, ABIArg::Stack: unexpected type");
1913           }
1914         } else {
1915           if (type == MIRType::Simd128) {
1916             // As above.  StackCopy does not know this trick.
1917             ScratchDoubleScope dscratch(masm);
1918             masm.loadConstantDouble(0, dscratch);
1919             GenPrintF64(DebugChannel::Import, masm, dscratch);
1920             masm.storeDouble(dscratch, dst);
1921           } else {
1922             StackCopy(masm, type, scratch, src, dst);
1923           }
1924         }
1925         break;
1926       }
1927       case ABIArg::Uninitialized:
1928         MOZ_CRASH("Uninitialized ABIArg kind");
1929     }
1930   }
1931   GenPrintf(DebugChannel::Import, masm, "\n");
1932 }
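
// For example (a sketch, for a hypothetical import of signature (i32, f64)
// with toValue == true), the loop above fills, starting at argOffset:
//
//   argv[0] : Value  -- Int32Value(arg0)
//   argv[1] : Value  -- DoubleValue(arg1), NaN-canonicalized
//
// With toValue == false the same slots hold the raw machine values instead,
// each slot still being sizeof(Value) bytes wide.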
1933 
1934 // Generate a wrapper function with the standard intra-wasm call ABI which
1935 // simply calls an import. This wrapper function allows any import to be treated
1936 // like a normal wasm function for the purposes of exports and table calls. In
1937 // particular, the wrapper function provides:
1938 //  - a table entry, so JS imports can be put into tables
1939 //  - normal entries, so that, if the import is re-exported, an entry stub can
1940 //    be generated and called without any special cases
1941 static bool GenerateImportFunction(jit::MacroAssembler& masm,
1942                                    const FuncImport& fi, TypeIdDesc funcTypeId,
1943                                    FuncOffsets* offsets) {
1944   AutoCreatedBy acb(masm, "wasm::GenerateImportFunction");
1945 
1946   AssertExpectedSP(masm);
1947 
1948   GenerateFunctionPrologue(masm, funcTypeId, Nothing(), offsets);
1949 
1950   MOZ_ASSERT(masm.framePushed() == 0);
1951   const unsigned sizeOfTlsSlot = sizeof(void*);
1952   unsigned framePushed = StackDecrementForCall(
1953       WasmStackAlignment,
1954       sizeof(Frame),  // pushed by prologue
1955       StackArgBytesForWasmABI(fi.funcType()) + sizeOfTlsSlot);
1956   masm.wasmReserveStackChecked(framePushed, BytecodeOffset(0));
1957   MOZ_ASSERT(masm.framePushed() == framePushed);
1958 
1959   masm.storePtr(WasmTlsReg,
1960                 Address(masm.getStackPointer(), framePushed - sizeOfTlsSlot));
1961 
1962   // The argument register state is already set up by our caller. We just need
1963   // to be sure not to clobber it before the call.
1964   Register scratch = ABINonArgReg0;
1965 
1966   // Copy our frame's stack arguments to the callee frame's stack arguments.
1967   unsigned offsetFromFPToCallerStackArgs = sizeof(Frame);
1968   ArgTypeVector args(fi.funcType());
1969   for (WasmABIArgIter i(args); !i.done(); i++) {
1970     if (i->kind() != ABIArg::Stack) {
1971       continue;
1972     }
1973 
1974     Address src(FramePointer,
1975                 offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
1976     Address dst(masm.getStackPointer(), i->offsetFromArgBase());
1977     GenPrintf(DebugChannel::Import, masm,
1978               "calling exotic import function with arguments: ");
1979     StackCopy(masm, i.mirType(), scratch, src, dst);
1980     GenPrintf(DebugChannel::Import, masm, "\n");
1981   }
1982 
1983   // Call the import exit stub.
1984   CallSiteDesc desc(CallSiteDesc::Import);
1985   MoveSPForJitABI(masm);
1986   masm.wasmCallImport(desc, CalleeDesc::import(fi.tlsDataOffset()));
1987 
1988   // Restore the TLS register and pinned regs, per wasm function ABI.
1989   masm.loadPtr(Address(masm.getStackPointer(), framePushed - sizeOfTlsSlot),
1990                WasmTlsReg);
1991   masm.loadWasmPinnedRegsFromTls();
1992 
1993   // Restore cx->realm.
1994   masm.switchToWasmTlsRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
1995 
1996   GenerateFunctionEpilogue(masm, framePushed, offsets);
1997   return FinishOffsets(masm, offsets);
1998 }
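
// The wrapper emitted above is roughly equivalent to this pseudocode (a
// sketch, not the actual instruction sequence):
//
//   importWrapper(args...):               // standard intra-wasm call ABI
//     reserve the frame and spill WasmTlsReg;
//     copy the caller's stack arguments into the outgoing area;
//     wasmCallImport(the import's exit stub);
//     reload WasmTlsReg and the pinned registers; restore cx->realm;
//     return;                             // results are in return registers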
1999 
2000 static const unsigned STUBS_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
2001 
2002 bool wasm::GenerateImportFunctions(const ModuleEnvironment& env,
2003                                    const FuncImportVector& imports,
2004                                    CompiledCode* code) {
2005   LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
2006   TempAllocator alloc(&lifo);
2007   WasmMacroAssembler masm(alloc, env);
2008 
2009   for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
2010     const FuncImport& fi = imports[funcIndex];
2011 
2012     FuncOffsets offsets;
2013     if (!GenerateImportFunction(masm, fi, *env.funcs[funcIndex].typeId,
2014                                 &offsets)) {
2015       return false;
2016     }
2017     if (!code->codeRanges.emplaceBack(funcIndex, /* bytecodeOffset = */ 0,
2018                                       offsets)) {
2019       return false;
2020     }
2021   }
2022 
2023   masm.finish();
2024   if (masm.oom()) {
2025     return false;
2026   }
2027 
2028   return code->swap(masm);
2029 }
2030 
2031 // Generate a stub that is called via the internal ABI derived from the
2032 // signature of the import and calls into an appropriate callImport C++
2033 // function, having boxed all the ABI arguments into a homogeneous Value array.
2034 static bool GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi,
2035                                      uint32_t funcImportIndex,
2036                                      Label* throwLabel,
2037                                      CallableOffsets* offsets) {
2038   AutoCreatedBy acb(masm, "GenerateImportInterpExit");
2039 
2040   AssertExpectedSP(masm);
2041   masm.setFramePushed(0);
2042 
2043   // Argument types for Instance::callImport_*:
2044   static const MIRType typeArray[] = {MIRType::Pointer,   // Instance*
2045                                       MIRType::Pointer,   // funcImportIndex
2046                                       MIRType::Int32,     // argc
2047                                       MIRType::Pointer};  // argv
2048   MIRTypeVector invokeArgTypes;
2049   MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, std::size(typeArray)));
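
  // The typeArray above implies a callee of roughly this shape (a sketch; the
  // exact declaration lives with Instance and may differ):
  //
  //   int32_t callImport(Instance* instance, uint32_t funcImportIndex,
  //                      int32_t argc, uint64_t* argv);
  //
  // argv carries the incoming arguments and, on return, the result (see the
  // register-result handling below).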
2050 
2051   // At the point of the call, the stack layout shall be (sp grows to the left):
2052   //  | stack args | padding | argv[] | padding | retaddr | caller stack args |
2053   // The padding between stack args and argv ensures that argv is aligned. The
2054   // padding between argv and retaddr ensures that sp is aligned.
2055   unsigned argOffset =
2056       AlignBytes(StackArgBytesForNativeABI(invokeArgTypes), sizeof(double));
2057   // The abiArgCount includes a stack result pointer argument if needed.
2058   unsigned abiArgCount = ArgTypeVector(fi.funcType()).lengthWithStackResults();
2059   unsigned argBytes = std::max<size_t>(1, abiArgCount) * sizeof(Value);
2060   unsigned framePushed =
2061       StackDecrementForCall(ABIStackAlignment,
2062                             sizeof(Frame),  // pushed by prologue
2063                             argOffset + argBytes);
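
  // Worked example (a sketch; the sizes assume a 64-bit target where
  // ABIStackAlignment == 16, sizeof(Frame) == 16, and all four invoke
  // arguments are passed in registers): for an import of signature
  // (f64, f64), abiArgCount == 2, argBytes == 2 * sizeof(Value) == 16,
  // argOffset == 0, and framePushed == AlignBytes(16 + 16, 16) - 16 == 16.
  // Note the std::max(1, ...) above: even a no-argument import reserves one
  // Value so that argv[0] can receive the call's result.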
2064 
2065   GenerateExitPrologue(masm, framePushed, ExitReason::Fixed::ImportInterp,
2066                        offsets);
2067 
2068   // Fill the argument array.
2069   //
2070   // sizeof(FrameWithTls), not FrameWithTls::sizeOf(), is confusing but correct.
2071   // The only user of this value is FillArgumentArrayForExit, and it
2072   // incorporates the ShadowStackArea by way of its use of the ABIArgIter.
2073   unsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
2074   Register scratch = ABINonArgReturnReg0;
2075   Register scratch2 = ABINonArgReturnReg1;
2076   // The scratch3 reg does not need to be non-volatile, but has to be
2077   // distinct from scratch & scratch2.
2078   Register scratch3 = ABINonVolatileReg;
2079   FillArgumentArrayForExit(masm, WasmTlsReg, funcImportIndex, fi.funcType(),
2080                            argOffset, offsetFromFPToCallerStackArgs, scratch,
2081                            scratch2, scratch3, ToValue(false), throwLabel);
2082 
2083   // Prepare the arguments for the call to Instance::callImport_*.
2084   ABIArgMIRTypeIter i(invokeArgTypes);
2085 
2086   // argument 0: Instance*
2087   Address instancePtr(WasmTlsReg, offsetof(TlsData, instance));
2088   if (i->kind() == ABIArg::GPR) {
2089     masm.loadPtr(instancePtr, i->gpr());
2090   } else {
2091     masm.loadPtr(instancePtr, scratch);
2092     masm.storePtr(scratch,
2093                   Address(masm.getStackPointer(), i->offsetFromArgBase()));
2094   }
2095   i++;
2096 
2097   // argument 1: funcImportIndex
2098   if (i->kind() == ABIArg::GPR) {
2099     masm.mov(ImmWord(funcImportIndex), i->gpr());
2100   } else {
2101     masm.store32(Imm32(funcImportIndex),
2102                  Address(masm.getStackPointer(), i->offsetFromArgBase()));
2103   }
2104   i++;
2105 
2106   // argument 2: argc
2107   unsigned argc = abiArgCount;
2108   if (i->kind() == ABIArg::GPR) {
2109     masm.mov(ImmWord(argc), i->gpr());
2110   } else {
2111     masm.store32(Imm32(argc),
2112                  Address(masm.getStackPointer(), i->offsetFromArgBase()));
2113   }
2114   i++;
2115 
2116   // argument 3: argv
2117   Address argv(masm.getStackPointer(), argOffset);
2118   if (i->kind() == ABIArg::GPR) {
2119     masm.computeEffectiveAddress(argv, i->gpr());
2120   } else {
2121     masm.computeEffectiveAddress(argv, scratch);
2122     masm.storePtr(scratch,
2123                   Address(masm.getStackPointer(), i->offsetFromArgBase()));
2124   }
2125   i++;
2126   MOZ_ASSERT(i.done());
2127 
2128   // Make the call, test whether it succeeded, and extract the return value.
2129   AssertStackAlignment(masm, ABIStackAlignment);
2130   masm.call(SymbolicAddress::CallImport_General);
2131   masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
2132 
2133   ResultType resultType = ResultType::Vector(fi.funcType().results());
2134   ValType registerResultType;
2135   for (ABIResultIter iter(resultType); !iter.done(); iter.next()) {
2136     if (iter.cur().inRegister()) {
2137       MOZ_ASSERT(!registerResultType.isValid());
2138       registerResultType = iter.cur().type();
2139     }
2140   }
2141   if (!registerResultType.isValid()) {
2142     GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2143               funcImportIndex);
2144     GenPrintf(DebugChannel::Import, masm, "void");
2145   } else {
2146     switch (registerResultType.kind()) {
2147       case ValType::I32:
2148         masm.load32(argv, ReturnReg);
2149         // No widening is required, as we know the value comes from an i32 load.
2150         GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2151                   funcImportIndex);
2152         GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
2153         break;
2154       case ValType::I64:
2155         masm.load64(argv, ReturnReg64);
2156         GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2157                   funcImportIndex);
2158         GenPrintI64(DebugChannel::Import, masm, ReturnReg64);
2159         break;
2160       case ValType::Rtt:
2161       case ValType::V128:
2162         // Note that CallImport_Rtt/V128 currently always throw, so we should
2163         // never reach this point.
2164         masm.breakpoint();
2165         break;
2166       case ValType::F32:
2167         masm.loadFloat32(argv, ReturnFloat32Reg);
2168         GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2169                   funcImportIndex);
2170         GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
2171         break;
2172       case ValType::F64:
2173         masm.loadDouble(argv, ReturnDoubleReg);
2174         GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2175                   funcImportIndex);
2176         GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
2177         break;
2178       case ValType::Ref:
2179         switch (registerResultType.refTypeKind()) {
2180           case RefType::Func:
2181             masm.loadPtr(argv, ReturnReg);
2182             GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2183                       funcImportIndex);
2184             GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
2185             break;
2186           case RefType::Extern:
2187           case RefType::Eq:
2188             masm.loadPtr(argv, ReturnReg);
2189             GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2190                       funcImportIndex);
2191             GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
2192             break;
2193           case RefType::TypeIndex:
2194             MOZ_CRASH("No Ref support here yet");
2195         }
2196         break;
2197     }
2198   }
2199 
2200   GenPrintf(DebugChannel::Import, masm, "\n");
2201 
2202   // The native ABI preserves the TLS, heap and global registers since they
2203   // are non-volatile.
2204   MOZ_ASSERT(NonVolatileRegs.has(WasmTlsReg));
2205 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) ||      \
2206     defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS64) || \
2207     defined(JS_CODEGEN_LOONG64)
2208   MOZ_ASSERT(NonVolatileRegs.has(HeapReg));
2209 #endif
2210 
2211   GenerateExitEpilogue(masm, framePushed, ExitReason::Fixed::ImportInterp,
2212                        offsets);
2213 
2214   return FinishOffsets(masm, offsets);
2215 }
2216 
2217 // Generate a stub that is called via the internal ABI derived from the
2218 // signature of the import and calls into a compatible JIT function,
2219 // having boxed all the ABI arguments into the JIT stack frame layout.
2220 static bool GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi,
2221                                   unsigned funcImportIndex, Label* throwLabel,
2222                                   JitExitOffsets* offsets) {
2223   AutoCreatedBy acb(masm, "GenerateImportJitExit");
2224 
2225   AssertExpectedSP(masm);
2226   masm.setFramePushed(0);
2227 
2228   // JIT calls use the following stack layout (sp grows to the left):
2229   //   | WasmToJSJitFrameLayout | this | arg1..N | saved Tls |
2230   // Unlike most ABIs, the JIT ABI requires that sp be JitStackAlignment-
2231   // aligned *after* pushing the return address.
2232   static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
2233   const unsigned sizeOfTlsSlot = sizeof(void*);
2234   const unsigned sizeOfRetAddr = sizeof(void*);
2235   const unsigned sizeOfPreFrame =
2236       WasmToJSJitFrameLayout::Size() - sizeOfRetAddr;
2237   const unsigned sizeOfThisAndArgs =
2238       (1 + fi.funcType().args().length()) * sizeof(Value);
2239   const unsigned totalJitFrameBytes =
2240       sizeOfRetAddr + sizeOfPreFrame + sizeOfThisAndArgs + sizeOfTlsSlot;
2241   const unsigned jitFramePushed =
2242       StackDecrementForCall(JitStackAlignment,
2243                             sizeof(Frame),  // pushed by prologue
2244                             totalJitFrameBytes) -
2245       sizeOfRetAddr;
2246   const unsigned sizeOfThisAndArgsAndPadding = jitFramePushed - sizeOfPreFrame;
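
  // Worked example (a sketch; the sizes are assumptions for a 64-bit target:
  // sizeof(void*) == 8, sizeof(Frame) == 16, JitStackAlignment == 16, and a
  // hypothetical WasmToJSJitFrameLayout::Size() == 32): for a one-argument
  // callee, sizeOfPreFrame == 24 and sizeOfThisAndArgs == 2 * 8 == 16, so
  // totalJitFrameBytes == 8 + 24 + 16 + 8 == 56 and jitFramePushed ==
  // (AlignBytes(16 + 56, 16) - 16) - 8 == 56; the callJitNoProfiler below
  // then pushes the return address, restoring JitStackAlignment.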
2247 
2248   // On ARM64 we must align the SP to a 16-byte boundary.
2249 #ifdef JS_CODEGEN_ARM64
2250   const unsigned frameAlignExtra = sizeof(void*);
2251 #else
2252   const unsigned frameAlignExtra = 0;
2253 #endif
2254 
2255   GenerateJitExitPrologue(masm, jitFramePushed + frameAlignExtra, offsets);
2256 
2257   // 1. Descriptor.
2258   size_t argOffset = frameAlignExtra;
2259   uint32_t descriptor =
2260       MakeFrameDescriptor(sizeOfThisAndArgsAndPadding, FrameType::WasmToJSJit,
2261                           WasmToJSJitFrameLayout::Size());
2262   masm.storePtr(ImmWord(uintptr_t(descriptor)),
2263                 Address(masm.getStackPointer(), argOffset));
2264   argOffset += sizeof(size_t);
2265 
2266   // 2. Callee, part 1 -- need the callee register for argument filling, so
2267   // record offset here and set up callee later.
2268   size_t calleeArgOffset = argOffset;
2269   argOffset += sizeof(size_t);
2270 
2271   // 3. Argc.
2272   unsigned argc = fi.funcType().args().length();
2273   masm.storePtr(ImmWord(uintptr_t(argc)),
2274                 Address(masm.getStackPointer(), argOffset));
2275   argOffset += sizeof(size_t);
2276   MOZ_ASSERT(argOffset == sizeOfPreFrame + frameAlignExtra);
2277 
2278   // 4. |this| value.
2279   masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
2280   argOffset += sizeof(Value);
2281 
2282   // 5. Fill the arguments.
2283   //
2284   // sizeof(FrameWithTls), not FrameWithTls::sizeOf(), is confusing but correct.
2285   // The only user of this value is FillArgumentArrayForExit, and it
2286   // incorporates the ShadowStackArea by way of its use of the ABIArgIter.
2287   const uint32_t offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
2288   Register scratch = ABINonArgReturnReg1;   // Repeatedly clobbered
2289   Register scratch2 = ABINonArgReturnReg0;  // Reused as callee below
2290   // The scratch3 reg does not need to be non-volatile, but has to be
2291   // distinct from scratch & scratch2.
2292   Register scratch3 = ABINonVolatileReg;
2293   FillArgumentArrayForExit(masm, WasmTlsReg, funcImportIndex, fi.funcType(),
2294                            argOffset, offsetFromFPToCallerStackArgs, scratch,
2295                            scratch2, scratch3, ToValue(true), throwLabel);
2296   argOffset += fi.funcType().args().length() * sizeof(Value);
2297   MOZ_ASSERT(argOffset == sizeOfThisAndArgs + sizeOfPreFrame + frameAlignExtra);
2298 
2299   // Preserve Tls because the JIT callee clobbers it.
2300   const size_t savedTlsOffset = argOffset;
2301   masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), savedTlsOffset));
2302 
2303   // 2. Callee, part 2 -- now that the register is free, set up the callee.
2304   Register callee = ABINonArgReturnReg0;  // Live until call
2305 
2306   // 2.1. Get JSFunction callee.
2307   masm.loadWasmGlobalPtr(fi.tlsDataOffset() + offsetof(FuncImportTls, fun),
2308                          callee);
2309 
2310   // 2.2. Save callee.
2311   masm.storePtr(callee, Address(masm.getStackPointer(), calleeArgOffset));
2312 
2313   // 6. Check if we need to rectify arguments.
2314   masm.load32(Address(callee, JSFunction::offsetOfFlagsAndArgCount()), scratch);
2315   masm.rshift32(Imm32(JSFunction::ArgCountShift), scratch);
2316 
2317   Label rectify;
2318   masm.branch32(Assembler::Above, scratch, Imm32(fi.funcType().args().length()),
2319                 &rectify);
2320 
2321   // 7. If we haven't rectified arguments, load callee executable entry point.
2322 
2323   masm.loadJitCodeRaw(callee, callee);
2324 
2325   Label rejoinBeforeCall;
2326   masm.bind(&rejoinBeforeCall);
2327 
2328   AssertStackAlignment(masm, JitStackAlignment,
2329                        sizeOfRetAddr + frameAlignExtra);
2330 #ifdef JS_CODEGEN_ARM64
2331   AssertExpectedSP(masm);
2332   // Conform to JIT ABI.  Note this doesn't update PSP since SP is the active
2333   // pointer.
2334   masm.addToStackPtr(Imm32(8));
2335   // Manually resync PSP.  Omitting this causes, e.g.,
2336   // tests/wasm/import-export.js to segfault.
2337   masm.moveStackPtrTo(PseudoStackPointer);
2338 #endif
2339   masm.callJitNoProfiler(callee);
2340 #ifdef JS_CODEGEN_ARM64
2341   // Conform to platform conventions - align the SP.
2342   masm.subFromStackPtr(Imm32(8));
2343 #endif
2344 
2345   // Note that there might be a GC thing in the JSReturnOperand now.
2346   // In all the code paths from here:
2347   // - either the value is unboxed because it was a primitive and we don't
2348   //   need to worry about rooting anymore.
2349   // - or the value needs to be rooted, but nothing can cause a GC between
2350   //   here and CoerceInPlace, which roots before coercing to a primitive.
2351 
2352   // The JIT callee clobbers all registers, including WasmTlsReg and
2353   // FramePointer, so restore those here. During this sequence of
2354   // instructions, FP can't be trusted by the profiling frame iterator.
2355   offsets->untrustedFPStart = masm.currentOffset();
2356   AssertStackAlignment(masm, JitStackAlignment,
2357                        sizeOfRetAddr + frameAlignExtra);
2358 
2359   masm.loadPtr(Address(masm.getStackPointer(), savedTlsOffset), WasmTlsReg);
2360   masm.moveStackPtrTo(FramePointer);
2361   masm.addPtr(Imm32(masm.framePushed()), FramePointer);
2362   offsets->untrustedFPEnd = masm.currentOffset();
2363 
2364   // As explained above, the frame was aligned for the JIT ABI such that
2365   //   (sp + sizeof(void*)) % JitStackAlignment == 0
2366   // But now we possibly want to call one of several different C++ functions,
2367   // so subtract the sizeof(void*) so that sp is aligned for an ABI call.
2368   static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
2369 #ifdef JS_CODEGEN_ARM64
2370   // We've already allocated the extra space for frame alignment.
2371   static_assert(sizeOfRetAddr == frameAlignExtra, "ARM64 SP alignment");
2372 #else
2373   masm.reserveStack(sizeOfRetAddr);
2374 #endif
2375   unsigned nativeFramePushed = masm.framePushed();
2376   AssertStackAlignment(masm, ABIStackAlignment);
2377 
2378 #ifdef DEBUG
2379   {
2380     Label ok;
2381     masm.branchTestMagic(Assembler::NotEqual, JSReturnOperand, &ok);
2382     masm.breakpoint();
2383     masm.bind(&ok);
2384   }
2385 #endif
2386 
2387   GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
2388             funcImportIndex);
2389 
2390   Label oolConvert;
2391   const ValTypeVector& results = fi.funcType().results();
2392   if (results.length() == 0) {
2393     GenPrintf(DebugChannel::Import, masm, "void");
2394   } else {
2395     MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
2396     switch (results[0].kind()) {
2397       case ValType::I32:
2398         // No widening is required, as the return value does not come to us in
2399         // ReturnReg.
2400         masm.truncateValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg,
2401                                   &oolConvert);
2402         GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
2403         break;
2404       case ValType::I64:
2405         // No fast path for now; go immediately to the OOL case.
2406         masm.jump(&oolConvert);
2407         break;
2408       case ValType::Rtt:
2409       case ValType::V128:
2410         // Unreachable as callImport should not call the stub.
2411         masm.breakpoint();
2412         break;
2413       case ValType::F32:
2414         masm.convertValueToFloat(JSReturnOperand, ReturnFloat32Reg,
2415                                  &oolConvert);
2416         GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
2417         break;
2418       case ValType::F64:
2419         masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg,
2420                                   &oolConvert);
2421         GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
2422         break;
2423       case ValType::Ref:
2424         switch (results[0].refTypeKind()) {
2425           case RefType::Extern:
2426             BoxValueIntoAnyref(masm, JSReturnOperand, ReturnReg, &oolConvert);
2427             GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
2428             break;
2429           case RefType::Func:
2430           case RefType::Eq:
2431           case RefType::TypeIndex:
2432             MOZ_CRASH("typed reference returned by import (jit exit) NYI");
2433         }
2434         break;
2435     }
2436   }
2437 
2438   GenPrintf(DebugChannel::Import, masm, "\n");
2439 
2440   Label done;
2441   masm.bind(&done);
2442 
2443   GenerateJitExitEpilogue(masm, masm.framePushed(), offsets);
2444 
2445   {
2446     // Call the arguments rectifier.
2447     masm.bind(&rectify);
2448     masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)), callee);
2449     masm.loadPtr(Address(callee, Instance::offsetOfJSJitArgsRectifier()),
2450                  callee);
2451     masm.jump(&rejoinBeforeCall);
2452   }
2453 
2454   if (oolConvert.used()) {
2455     masm.bind(&oolConvert);
2456     masm.setFramePushed(nativeFramePushed);
2457 
2458     // Coercion calls use the following stack layout (sp grows to the left):
2459     //   | args | padding | Value argv[1] | padding | exit Frame |
2460     MIRTypeVector coerceArgTypes;
2461     MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
2462     unsigned offsetToCoerceArgv =
2463         AlignBytes(StackArgBytesForNativeABI(coerceArgTypes), sizeof(Value));
2464     MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
2465     AssertStackAlignment(masm, ABIStackAlignment);
2466 
2467     // Store return value into argv[0].
2468     masm.storeValue(JSReturnOperand,
2469                     Address(masm.getStackPointer(), offsetToCoerceArgv));
2470 
2471     // From this point, it's safe to reuse the scratch register (which
2472     // might be part of the JSReturnOperand).
2473 
2474     // The JIT might have clobbered exitFP at this point. Since there's
2475     // going to be a CoerceInPlace call, pretend we're still doing the JIT
2476     // call by restoring our tagged exitFP.
2477     SetExitFP(masm, ExitReason::Fixed::ImportJit, scratch);
2478 
2479     // argument 0: argv
2480     ABIArgMIRTypeIter i(coerceArgTypes);
2481     Address argv(masm.getStackPointer(), offsetToCoerceArgv);
2482     if (i->kind() == ABIArg::GPR) {
2483       masm.computeEffectiveAddress(argv, i->gpr());
2484     } else {
2485       masm.computeEffectiveAddress(argv, scratch);
2486       masm.storePtr(scratch,
2487                     Address(masm.getStackPointer(), i->offsetFromArgBase()));
2488     }
2489     i++;
2490     MOZ_ASSERT(i.done());
2491 
2492     // Call coercion function. Note that right after the call, the value of
2493     // FP is correct because FP is non-volatile in the native ABI.
2494     AssertStackAlignment(masm, ABIStackAlignment);
2495     const ValTypeVector& results = fi.funcType().results();
2496     if (results.length() > 0) {
2497       // NOTE that once there can be more than one result and we can box some of
2498       // the results (as we must for AnyRef), pointer and already-boxed results
2499       // must be rooted while subsequent results are boxed.
2500       MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
2501       switch (results[0].kind()) {
2502         case ValType::I32:
2503           masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
2504           masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
2505           masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv),
2506                           ReturnReg);
2507           // No widening is required, as we generate a known-good value in a
2508           // safe way here.
2509           break;
2510         case ValType::I64: {
2511           masm.call(SymbolicAddress::CoerceInPlace_ToBigInt);
2512           masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
2513           Address argv(masm.getStackPointer(), offsetToCoerceArgv);
2514           masm.unboxBigInt(argv, scratch);
2515           masm.loadBigInt64(scratch, ReturnReg64);
2516           break;
2517         }
2518         case ValType::F64:
2519         case ValType::F32:
2520           masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
2521           masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
2522           masm.unboxDouble(Address(masm.getStackPointer(), offsetToCoerceArgv),
2523                            ReturnDoubleReg);
2524           if (results[0].kind() == ValType::F32) {
2525             masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
2526           }
2527           break;
2528         case ValType::Ref:
2529           switch (results[0].refTypeKind()) {
2530             case RefType::Extern:
2531               masm.call(SymbolicAddress::BoxValue_Anyref);
2532               masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg,
2533                                  throwLabel);
2534               break;
2535             case RefType::Func:
2536             case RefType::Eq:
2537             case RefType::TypeIndex:
2538               MOZ_CRASH("Unsupported convert type");
2539           }
2540           break;
2541         default:
2542           MOZ_CRASH("Unsupported convert type");
2543       }
2544     }
2545 
2546     // Maintain the invariant that exitFP is either unset or not set to a
2547     // wasm tagged exitFP, per the jit exit contract.
2548     ClearExitFP(masm, scratch);
2549 
2550     masm.jump(&done);
2551     masm.setFramePushed(0);
2552   }
2553 
2554   MOZ_ASSERT(masm.framePushed() == 0);
2555 
2556   return FinishOffsets(masm, offsets);
2557 }
2558 
2559 struct ABIFunctionArgs {
2560   ABIFunctionType abiType;
2561   size_t len;
2562 
2563   explicit ABIFunctionArgs(ABIFunctionType sig)
2564       : abiType(ABIFunctionType(sig >> ArgType_Shift)) {
2565     len = 0;
2566     uint64_t i = uint64_t(abiType);
2567     while (i) {
2568       i = i >> ArgType_Shift;
2569       len++;
2570     }
2571   }
2572 
2573   size_t length() const { return len; }
2574 
2575   MIRType operator[](size_t i) const {
2576     MOZ_ASSERT(i < len);
2577     uint64_t abi = uint64_t(abiType);
2578     while (i--) {
2579       abi = abi >> ArgType_Shift;
2580     }
2581     return ToMIRType(ABIArgType(abi & ArgType_Mask));
2582   }
2583 };
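
// Decoding sketch (the encoding details here are inferred from the accessors
// above, not a normative description): an ABIFunctionType packs the return
// type into the low ArgType_Shift bits and argument i into the bits
// [(i + 1) * ArgType_Shift, (i + 2) * ArgType_Shift).  So:
//
//   ABIFunctionArgs args(sig);
//   size_t n = args.length();   // number of arguments encoded in `sig`
//   MIRType t0 = args[0];       // MIRType of the first argument
//
// GenerateBuiltinThunk below recovers the return type separately via
// `abiType & ArgType_Mask`.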
2584 
2585 bool wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType,
2586                                 ExitReason exitReason, void* funcPtr,
2587                                 CallableOffsets* offsets) {
2588   AssertExpectedSP(masm);
2589   masm.setFramePushed(0);
2590 
2591   ABIFunctionArgs args(abiType);
2592   uint32_t framePushed =
2593       StackDecrementForCall(ABIStackAlignment,
2594                             sizeof(Frame),  // pushed by prologue
2595                             StackArgBytesForNativeABI(args));
2596 
2597   GenerateExitPrologue(masm, framePushed, exitReason, offsets);
2598 
2599   // Copy out and convert caller arguments, if needed.
2600   unsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
2601   Register scratch = ABINonArgReturnReg0;
2602   for (ABIArgIter i(args); !i.done(); i++) {
2603     if (i->argInRegister()) {
2604 #ifdef JS_CODEGEN_ARM
2605       // The non-hard-fp ABI passes argument values in GPRs.
      if (!UseHardFpABI() && IsFloatingPointType(i.mirType())) {
        FloatRegister input = i->fpu();
        if (i.mirType() == MIRType::Float32) {
          masm.ma_vxfer(input, Register::FromCode(input.id()));
        } else if (i.mirType() == MIRType::Double) {
          uint32_t regId = input.singleOverlay().id();
          masm.ma_vxfer(input, Register::FromCode(regId),
                        Register::FromCode(regId + 1));
        }
      }
#endif
      continue;
    }

    Address src(FramePointer,
                offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
    Address dst(masm.getStackPointer(), i->offsetFromArgBase());
    StackCopy(masm, i.mirType(), scratch, src, dst);
  }

  AssertStackAlignment(masm, ABIStackAlignment);
  MoveSPForJitABI(masm);
  masm.call(ImmPtr(funcPtr, ImmPtr::NoCheckToken()));

#if defined(JS_CODEGEN_X64)
  // No widening is required, as the caller will widen.
#elif defined(JS_CODEGEN_X86)
  // x86 passes the return value on the x87 FP stack.
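  // (fstp pops the x87 top-of-stack into the slot at esp; the value is then
  // reloaded into the SSE register that wasm code expects the result in.)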
  Operand op(esp, 0);
  MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
  if (retType == MIRType::Float32) {
    masm.fstp32(op);
    masm.loadFloat32(op, ReturnFloat32Reg);
  } else if (retType == MIRType::Double) {
    masm.fstp(op);
    masm.loadDouble(op, ReturnDoubleReg);
  }
#elif defined(JS_CODEGEN_ARM)
  // The non-hard-fp ABI passes the return values in GPRs.
  MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
  if (!UseHardFpABI() && IsFloatingPointType(retType)) {
    masm.ma_vxfer(r0, r1, d0);
  }
#endif

  GenerateExitEpilogue(masm, framePushed, exitReason, offsets);
  return FinishOffsets(masm, offsets);
}
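
// A sketch of how a caller might use GenerateBuiltinThunk (illustrative only;
// Args_Double_Double names the packed double -> double signature, and
// `builtinFuncPtr` is a hypothetical pointer to the native implementation):
//
//   CallableOffsets offsets;
//   if (!GenerateBuiltinThunk(masm, Args_Double_Double,
//                             ExitReason::Fixed::BuiltinNative,
//                             builtinFuncPtr, &offsets)) {
//     return false;
//   }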

#if defined(JS_CODEGEN_ARM)
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((Registers::SetType(1) << Registers::sp) |
                         (Registers::SetType(1) << Registers::pc))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  ifdef ENABLE_WASM_SIMD
#    error "high lanes of SIMD registers need to be saved too."
#  endif
#elif defined(JS_CODEGEN_MIPS64)
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((Registers::SetType(1) << Registers::k0) |
                         (Registers::SetType(1) << Registers::k1) |
                         (Registers::SetType(1) << Registers::sp) |
                         (Registers::SetType(1) << Registers::zero))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  ifdef ENABLE_WASM_SIMD
#    error "high lanes of SIMD registers need to be saved too."
#  endif
#elif defined(JS_CODEGEN_LOONG64)
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((uint32_t(1) << Registers::tp) |
                         (uint32_t(1) << Registers::fp) |
                         (uint32_t(1) << Registers::sp) |
                         (uint32_t(1) << Registers::zero))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  ifdef ENABLE_WASM_SIMD
#    error "high lanes of SIMD registers need to be saved too."
#  endif
#elif defined(JS_CODEGEN_ARM64)
// We assume that traps do not happen while lr is live. This both ensures that
// the size of RegsToPreserve is a multiple of 2 (preserving WasmStackAlignment)
// and gives us a register to clobber in the return path.
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((Registers::SetType(1) << RealStackPointer.code()) |
                         (Registers::SetType(1) << Registers::lr))),
#  ifdef ENABLE_WASM_SIMD
    FloatRegisterSet(FloatRegisters::AllSimd128Mask));
#  else
    // If SIMD is not enabled, it's pointless to save/restore the upper 64
    // bits of each vector register.
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  endif
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// It's correct to use FloatRegisters::AllMask even when SIMD is not enabled;
// PushRegsInMask strips out the high lanes of the XMM registers in this case,
// while the singles will be stripped as they are aliased by the larger doubles.
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~(Registers::SetType(1) << Registers::StackPointer)),
    FloatRegisterSet(FloatRegisters::AllMask));
#else
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllDoubleMask));
#  ifdef ENABLE_WASM_SIMD
#    error "no SIMD support"
#  endif
#endif

// Generate a MachineState which describes the locations of the GPRs as saved
// by GenerateTrapExit.  FP registers are ignored.  Note that the values
// stored in the MachineState are offsets in words downwards from the top of
// the save area.  That is, a higher value implies a lower address.
void wasm::GenerateTrapExitMachineState(MachineState* machine,
                                        size_t* numWords) {
  // This is the number of words pushed by the initial WasmPush().
  *numWords = WasmPushSize / sizeof(void*);
  MOZ_ASSERT(*numWords == TrapExitDummyValueOffsetFromTop + 1);
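
  // Illustrative layout, counting words downward from the top of the save
  // area (assuming a one-word WasmPush, as on most 64-bit targets):
  //   word 0            the dummy return-address slot pushed by
  //                     GenerateTrapExit
  //   words 1, 2, ...   the GPRs in RegsToPreserve, in backward-iteration
  //                     order, assigned below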

  // And these correspond to the PushRegsInMask() that immediately follows.
  for (GeneralRegisterBackwardIterator iter(RegsToPreserve.gprs()); iter.more();
       ++iter) {
    machine->setRegisterLocation(*iter,
                                 reinterpret_cast<uintptr_t*>(*numWords));
    (*numWords)++;
  }
}

// Generate a stub which calls WasmHandleTrap() and can be executed by having
// the signal handler redirect PC from any trapping instruction.
static bool GenerateTrapExit(MacroAssembler& masm, Label* throwLabel,
                             Offsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);

  masm.setFramePushed(0);

  offsets->begin = masm.currentOffset();

  // Traps can only happen at well-defined program points. However, since
  // traps may resume and the optimal assumption for the surrounding code is
  // that registers are not clobbered, we need to preserve all registers in
  // the trap exit. One simplifying assumption is that flags may be clobbered.
  // Push a dummy word to use as return address below.
  WasmPush(masm, ImmWord(TrapExitDummyValue));
  unsigned framePushedBeforePreserve = masm.framePushed();
  masm.PushRegsInMask(RegsToPreserve);
  unsigned offsetOfReturnWord = masm.framePushed() - framePushedBeforePreserve;
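
  // The save area now looks like this, with addresses growing upward:
  //
  //   [ dummy return-address word ]          <- SP + offsetOfReturnWord
  //   [ registers saved by PushRegsInMask ]  <- SP
  //
  // The dummy word is overwritten with the resume PC below and consumed by
  // the final ret.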

  // We know that StackPointer is word-aligned, but not necessarily
  // stack-aligned, so we need to align it dynamically.
  Register preAlignStackPointer = ABINonVolatileReg;
  masm.moveStackPtrTo(preAlignStackPointer);
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }

  masm.assertStackAlignment(ABIStackAlignment);
  masm.call(SymbolicAddress::HandleTrap);

  // WasmHandleTrap returns null if control should transfer to the throw stub.
  masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);

  // Otherwise, the return value is the TrapData::resumePC we must jump to.
  // We must restore register state before jumping, which will clobber
  // ReturnReg, so store ReturnReg in the above-reserved stack slot which we
  // use to jump to via ret.
  masm.moveToStackPtr(preAlignStackPointer);
  masm.storePtr(ReturnReg, Address(masm.getStackPointer(), offsetOfReturnWord));
  masm.PopRegsInMask(RegsToPreserve);
#ifdef JS_CODEGEN_ARM64
  WasmPop(masm, lr);
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}

static void ClobberWasmRegsForLongJmp(MacroAssembler& masm, Register jumpReg) {
  // Get the set of all registers that are allocatable in wasm functions.
  AllocatableGeneralRegisterSet gprs(GeneralRegisterSet::All());
  RegisterAllocator::takeWasmRegisters(gprs);
  // Remove the TLS register from this set as landing pads require it to be
  // valid.
  gprs.take(WasmTlsReg);
  // Remove the specified register that will be used for the longjmp.
  gprs.take(jumpReg);
  // Set all of these registers to zero.
  for (GeneralRegisterIterator iter(gprs.asLiveSet()); iter.more(); ++iter) {
    Register reg = *iter;
    masm.xorPtr(reg, reg);
  }

  // Get the set of all floating point registers that are allocatable in wasm
  // functions.
  AllocatableFloatRegisterSet fprs(FloatRegisterSet::All());
  // Set all of these registers to NaN. We attempt to use a signalling NaN,
  // but the bit pattern for signalling NaNs is implementation-defined, so
  // this is just best effort.
  Maybe<FloatRegister> regNaN;
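  // Materialize the NaN constant only once, then propagate it to the
  // remaining registers with cheaper register-to-register moves.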
  for (FloatRegisterIterator iter(fprs.asLiveSet()); iter.more(); ++iter) {
    FloatRegister reg = *iter;
    if (!reg.isDouble()) {
      continue;
    }
    if (regNaN) {
      masm.moveDouble(*regNaN, reg);
      continue;
    }
    masm.loadConstantDouble(std::numeric_limits<double>::signaling_NaN(), reg);
    regNaN = Some(reg);
  }
}

// Generate a stub that restores the stack pointer to what it was on entry to
// the wasm activation, sets the return register to 'false' and then executes a
// return which will return from this wasm activation to the caller. This stub
// should only be called after the caller has reported an error.
static bool GenerateThrowStub(MacroAssembler& masm, Label* throwLabel,
                              Offsets* offsets) {
  Register scratch1 = ABINonArgReturnReg0;
  Register scratch2 = ABINonArgReturnReg1;

  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);
  masm.setFramePushed(0);

  masm.bind(throwLabel);

  offsets->begin = masm.currentOffset();

  // Conservatively, the stack pointer can be unaligned and we must align it
  // dynamically.
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }

  // Allocate space for exception or regular resume information.
  masm.reserveStack(sizeof(jit::ResumeFromException));
  masm.moveStackPtrTo(scratch1);
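  // scratch1 now points at the ResumeFromException record, which is passed to
  // HandleThrow below to be filled in.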

  MIRTypeVector handleThrowTypes;
  MOZ_ALWAYS_TRUE(handleThrowTypes.append(MIRType::Pointer));

  unsigned frameSize =
      StackDecrementForCall(ABIStackAlignment, masm.framePushed(),
                            StackArgBytesForNativeABI(handleThrowTypes));
  masm.reserveStack(frameSize);
  masm.assertStackAlignment(ABIStackAlignment);

  ABIArgMIRTypeIter i(handleThrowTypes);
  if (i->kind() == ABIArg::GPR) {
    masm.movePtr(scratch1, i->gpr());
  } else {
    masm.storePtr(scratch1,
                  Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;
  MOZ_ASSERT(i.done());

  // WasmHandleThrow unwinds JitActivation::wasmExitFP() and returns the
  // address of the return address on the stack this stub should return to.
  // Set the FramePointer to a magic value to indicate a return by throw.
  //
  // If there is a Wasm catch handler present, it will instead return the
  // address of the handler to jump to and the FP/SP values to restore.
  masm.call(SymbolicAddress::HandleThrow);

  Label resumeCatch, leaveWasm;

  masm.load32(Address(ReturnReg, offsetof(jit::ResumeFromException, kind)),
              scratch1);

  masm.branch32(Assembler::Equal, scratch1,
                Imm32(jit::ResumeFromException::RESUME_WASM_CATCH),
                &resumeCatch);
  masm.branch32(Assembler::Equal, scratch1,
                Imm32(jit::ResumeFromException::RESUME_WASM), &leaveWasm);

  masm.breakpoint();

  // The case where a Wasm catch handler was found while unwinding the stack.
  masm.bind(&resumeCatch);
  masm.loadPtr(Address(ReturnReg, offsetof(ResumeFromException, tlsData)),
               WasmTlsReg);
  masm.loadWasmPinnedRegsFromTls();
  masm.switchToWasmTlsRealm(scratch1, scratch2);
  masm.loadPtr(Address(ReturnReg, offsetof(ResumeFromException, target)),
               scratch1);
  masm.loadPtr(Address(ReturnReg, offsetof(ResumeFromException, framePointer)),
               FramePointer);
  masm.loadStackPtr(
      Address(ReturnReg, offsetof(ResumeFromException, stackPointer)));
  MoveSPForJitABI(masm);
  ClobberWasmRegsForLongJmp(masm, scratch1);
  masm.jump(scratch1);

  // No catch handler was found, so we just return to the activation's caller.
  masm.bind(&leaveWasm);
  masm.loadPtr(Address(ReturnReg, offsetof(ResumeFromException, framePointer)),
               FramePointer);
  masm.loadPtr(Address(ReturnReg, offsetof(ResumeFromException, stackPointer)),
               scratch1);
  masm.moveToStackPtr(scratch1);
#ifdef JS_CODEGEN_ARM64
  masm.loadPtr(Address(scratch1, 0), lr);
  masm.addToStackPtr(Imm32(8));
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}

static const LiveRegisterSet AllAllocatableRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::AllocatableMask),
                    FloatRegisterSet(FloatRegisters::AllMask));

// Generate a stub that handles toggleable enter/leave frame traps or
// breakpoints. The stub records the frame pointer (via GenerateExitPrologue)
// and saves most registers so as not to affect the code generated by
// WasmBaselineCompile.
static bool GenerateDebugTrapStub(MacroAssembler& masm, Label* throwLabel,
                                  CallableOffsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);
  masm.setFramePushed(0);

  GenerateExitPrologue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);

  // Save all registers used between baseline compiler operations.
  masm.PushRegsInMask(AllAllocatableRegs);

  uint32_t framePushed = masm.framePushed();

  // This stub may be called with an unaligned stack; align it and save the
  // old stack pointer at the top of the newly aligned area.
#ifdef JS_CODEGEN_ARM64
  // On ARM64, however, the stack is always aligned.
  static_assert(ABIStackAlignment == 16, "ARM64 SP alignment");
#else
  Register scratch = ABINonArgReturnReg0;
  masm.moveStackPtrTo(scratch);
  masm.subFromStackPtr(Imm32(sizeof(intptr_t)));
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  masm.storePtr(scratch, Address(masm.getStackPointer(), 0));
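  // The word at the top of the aligned area now holds the pre-alignment
  // stack pointer; it is popped and restored after the call below.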
#endif

  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }
  masm.assertStackAlignment(ABIStackAlignment);
  masm.call(SymbolicAddress::HandleDebugTrap);

  masm.branchIfFalseBool(ReturnReg, throwLabel);

  if (ShadowStackSpace) {
    masm.addToStackPtr(Imm32(ShadowStackSpace));
  }
#ifndef JS_CODEGEN_ARM64
  masm.Pop(scratch);
  masm.moveToStackPtr(scratch);
#endif

  masm.setFramePushed(framePushed);
  masm.PopRegsInMask(AllAllocatableRegs);

  GenerateExitEpilogue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);

  return FinishOffsets(masm, offsets);
}

bool wasm::GenerateEntryStubs(MacroAssembler& masm, size_t funcExportIndex,
                              const FuncExport& fe, const Maybe<ImmPtr>& callee,
                              bool isAsmJS, CodeRangeVector* codeRanges) {
  MOZ_ASSERT(!callee == fe.hasEagerStubs());
  MOZ_ASSERT_IF(isAsmJS, fe.hasEagerStubs());

  Offsets offsets;
  if (!GenerateInterpEntry(masm, fe, callee, &offsets)) {
    return false;
  }
  if (!codeRanges->emplaceBack(CodeRange::InterpEntry, fe.funcIndex(),
                               offsets)) {
    return false;
  }

  if (isAsmJS || !fe.canHaveJitEntry()) {
    return true;
  }

  if (!GenerateJitEntry(masm, funcExportIndex, fe, callee, &offsets)) {
    return false;
  }
  if (!codeRanges->emplaceBack(CodeRange::JitEntry, fe.funcIndex(), offsets)) {
    return false;
  }

  return true;
}

bool wasm::GenerateProvisionalLazyJitEntryStub(MacroAssembler& masm,
                                               Offsets* offsets) {
  AssertExpectedSP(masm);
  masm.setFramePushed(0);
  offsets->begin = masm.currentOffset();

#ifdef JS_CODEGEN_ARM64
  // Unaligned ABI calls require SP+PSP, but our mode here is SP-only
  masm.SetStackPointer64(PseudoStackPointer64);
  masm.Mov(PseudoStackPointer64, sp);
#endif

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  Register temp = regs.takeAny();

  using Fn = void* (*)();
  masm.setupUnalignedABICall(temp);
  masm.callWithABI<Fn, GetContextSensitiveInterpreterStub>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

#ifdef JS_USE_LINK_REGISTER
  masm.popReturnAddress();
#endif

  masm.jump(ReturnReg);

#ifdef JS_CODEGEN_ARM64
  // Undo the SP+PSP mode
  masm.SetStackPointer64(sp);
#endif

  return FinishOffsets(masm, offsets);
}

bool wasm::GenerateStubs(const ModuleEnvironment& env,
                         const FuncImportVector& imports,
                         const FuncExportVector& exports, CompiledCode* code) {
  LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
  TempAllocator alloc(&lifo);
  WasmMacroAssembler masm(alloc, env);
  AutoCreatedBy acb(masm, "wasm::GenerateStubs");

  // Swap in already-allocated empty vectors to avoid malloc/free.
  if (!code->swap(masm)) {
    return false;
  }

  Label throwLabel;

  JitSpew(JitSpew_Codegen, "# Emitting wasm import stubs");

  for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
    const FuncImport& fi = imports[funcIndex];

    CallableOffsets interpOffsets;
    if (!GenerateImportInterpExit(masm, fi, funcIndex, &throwLabel,
                                  &interpOffsets)) {
      return false;
    }
    if (!code->codeRanges.emplaceBack(CodeRange::ImportInterpExit, funcIndex,
                                      interpOffsets)) {
      return false;
    }

    // Skip if the function does not have a signature that allows for a JIT
    // exit.
    if (!fi.canHaveJitExit()) {
      continue;
    }

    JitExitOffsets jitOffsets;
    if (!GenerateImportJitExit(masm, fi, funcIndex, &throwLabel, &jitOffsets)) {
      return false;
    }
    if (!code->codeRanges.emplaceBack(funcIndex, jitOffsets)) {
      return false;
    }
  }

  JitSpew(JitSpew_Codegen, "# Emitting wasm export stubs");

  Maybe<ImmPtr> noAbsolute;
  for (size_t i = 0; i < exports.length(); i++) {
    const FuncExport& fe = exports[i];
    if (!fe.hasEagerStubs()) {
      continue;
    }
    if (!GenerateEntryStubs(masm, i, fe, noAbsolute, env.isAsmJS(),
                            &code->codeRanges)) {
      return false;
    }
  }

  JitSpew(JitSpew_Codegen, "# Emitting wasm exit stubs");

  Offsets offsets;

  if (!GenerateTrapExit(masm, &throwLabel, &offsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::TrapExit, offsets)) {
    return false;
  }

  CallableOffsets callableOffsets;
  if (!GenerateDebugTrapStub(masm, &throwLabel, &callableOffsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::DebugTrap, callableOffsets)) {
    return false;
  }

  if (!GenerateThrowStub(masm, &throwLabel, &offsets)) {
    return false;
  }
  if (!code->codeRanges.emplaceBack(CodeRange::Throw, offsets)) {
    return false;
  }

  masm.finish();
  if (masm.oom()) {
    return false;
  }

  return code->swap(masm);
}