/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 *
 * Copyright 2017 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmBuiltins.h"

#include "mozilla/Atomics.h"

#include "fdlibm.h"
#include "jslibmath.h"

#include "jit/AtomicOperations.h"
#include "jit/InlinableNatives.h"
#include "jit/MacroAssembler.h"
#include "threading/Mutex.h"
#include "wasm/WasmInstance.h"
#include "wasm/WasmStubs.h"

#include "vm/Debugger-inl.h"
#include "vm/Stack-inl.h"

using namespace js;
using namespace jit;
using namespace wasm;

using mozilla::HashGeneric;
using mozilla::IsNaN;
using mozilla::MakeEnumeratedRange;

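// Scratch allocation budget for the temporary assembler used to generate all
// builtin thunks (see EnsureBuiltinThunksInitialized below).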
static const unsigned BUILTIN_THUNK_LIFO_SIZE = 64 * 1024;

// ============================================================================
// WebAssembly builtin C++ functions called from wasm code to implement internal
// wasm operations.

#if defined(JS_CODEGEN_ARM)
extern "C" {

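// Note: per the ARM EABI, these helpers return the quotient in r0 and the
// remainder in r1, i.e. packed into a single 64-bit value, which is why they
// are declared as returning int64_t.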
extern MOZ_EXPORT int64_t __aeabi_idivmod(int, int);

extern MOZ_EXPORT int64_t __aeabi_uidivmod(int, int);
}
#endif

// This utility function can only be called for builtins that are called
// directly from wasm code.
static JitActivation* CallingActivation() {
  Activation* act = TlsContext.get()->activation();
  MOZ_ASSERT(act->asJit()->hasWasmExitFP());
  return act->asJit();
}

static void* WasmHandleExecutionInterrupt() {
  JitActivation* activation = CallingActivation();
  MOZ_ASSERT(activation->isWasmInterrupted());

  if (!CheckForInterrupt(activation->cx())) {
    // If CheckForInterrupt failed, it is time to interrupt execution.
    // Returning nullptr to the caller will jump to the throw stub which
    // will call HandleThrow. The JitActivation must stay in the
    // interrupted state until then so that stack unwinding works in
    // HandleThrow.
    return nullptr;
  }

  // If CheckForInterrupt succeeded, then execution can proceed and the
  // interrupt is over.
  void* resumePC = activation->wasmInterruptResumePC();
  activation->finishWasmInterrupt();
  return resumePC;
}

static bool WasmHandleDebugTrap() {
  JitActivation* activation = CallingActivation();
  JSContext* cx = activation->cx();
  Frame* fp = activation->wasmExitFP();
  Instance* instance = fp->tls->instance;
  const Code& code = instance->code();
  MOZ_ASSERT(code.metadata().debugEnabled);

  // The debug trap stub is the innermost frame. Its return address is the
  // actual trap site.
  const CallSite* site = code.lookupCallSite(fp->returnAddress);
  MOZ_ASSERT(site);

  // Advance to the actual trapping frame.
  fp = fp->callerFP;
  DebugFrame* debugFrame = DebugFrame::from(fp);

  if (site->kind() == CallSite::EnterFrame) {
    if (!instance->enterFrameTrapsEnabled()) return true;
    debugFrame->setIsDebuggee();
    debugFrame->observe(cx);
    // TODO call onEnterFrame
    JSTrapStatus status = Debugger::onEnterFrame(cx, debugFrame);
    if (status == JSTRAP_RETURN) {
      // Ignoring forced return (JSTRAP_RETURN) -- changing code execution
      // order is not yet implemented in the wasm baseline.
      // TODO properly handle JSTRAP_RETURN and resume wasm execution.
      JS_ReportErrorASCII(cx, "Unexpected resumption value from onEnterFrame");
      return false;
    }
    return status == JSTRAP_CONTINUE;
  }
  if (site->kind() == CallSite::LeaveFrame) {
    debugFrame->updateReturnJSValue();
    bool ok = Debugger::onLeaveFrame(cx, debugFrame, nullptr, true);
    debugFrame->leave(cx);
    return ok;
  }

  DebugState& debug = instance->debug();
  MOZ_ASSERT(debug.hasBreakpointTrapAtOffset(site->lineOrBytecode()));
  if (debug.stepModeEnabled(debugFrame->funcIndex())) {
    RootedValue result(cx, UndefinedValue());
    JSTrapStatus status = Debugger::onSingleStep(cx, &result);
    if (status == JSTRAP_RETURN) {
      // TODO properly handle JSTRAP_RETURN.
      JS_ReportErrorASCII(cx, "Unexpected resumption value from onSingleStep");
      return false;
    }
    if (status != JSTRAP_CONTINUE) return false;
  }
  if (debug.hasBreakpointSite(site->lineOrBytecode())) {
    RootedValue result(cx, UndefinedValue());
    JSTrapStatus status = Debugger::onTrap(cx, &result);
    if (status == JSTRAP_RETURN) {
      // TODO properly handle JSTRAP_RETURN.
      JS_ReportErrorASCII(
          cx, "Unexpected resumption value from breakpoint handler");
      return false;
    }
    if (status != JSTRAP_CONTINUE) return false;
  }
  return true;
}

// Unwind the entire activation in response to a thrown exception. This function
// is responsible for notifying the debugger of each unwound frame. The return
// value is the new stack address which the calling stub will set to the sp
// register before executing a return instruction.

void* wasm::HandleThrow(JSContext* cx, WasmFrameIter& iter) {
  // WasmFrameIter iterates down wasm frames in the activation starting at
  // JitActivation::wasmExitFP(). Pass Unwind::True to pop
  // JitActivation::wasmExitFP() once each time WasmFrameIter is incremented,
  // ultimately leaving exit FP null when the WasmFrameIter is done().  This
  // is necessary to prevent a DebugFrame from being observed again after we
  // just called onLeaveFrame (which would lead to the frame being re-added
  // to the map of live frames, right as it becomes trash).

  MOZ_ASSERT(CallingActivation() == iter.activation());
  MOZ_ASSERT(!iter.done());
  iter.setUnwind(WasmFrameIter::Unwind::True);

  // Live wasm code on the stack is kept alive (in TraceJitActivation) by
  // marking the instance of every wasm::Frame found by WasmFrameIter.
  // However, as explained above, we're popping frames while iterating which
  // means that a GC during this loop could collect the code of frames whose
  // code is still on the stack. This is actually mostly fine: as soon as we
  // return to the throw stub, the entire stack will be popped as a whole,
  // returning to the C++ caller. However, we must keep the throw stub alive
  // itself which is owned by the innermost instance.
  RootedWasmInstanceObject keepAlive(cx, iter.instance()->object());

  for (; !iter.done(); ++iter) {
    if (!iter.debugEnabled()) continue;

    DebugFrame* frame = iter.debugFrame();
    frame->clearReturnJSValue();

    // Assume a JSTRAP_ERROR status if no exception is pending -- in that case
    // no onExceptionUnwind handlers should be fired.
    if (cx->isExceptionPending()) {
      JSTrapStatus status = Debugger::onExceptionUnwind(cx, frame);
      if (status == JSTRAP_RETURN) {
        // Unexpected trap return -- raising error since throw recovery
        // is not yet implemented in the wasm baseline.
        // TODO properly handle JSTRAP_RETURN and resume wasm execution.
        JS_ReportErrorASCII(
            cx, "Unexpected resumption value from onExceptionUnwind");
      }
    }

    bool ok = Debugger::onLeaveFrame(cx, frame, nullptr, false);
    if (ok) {
      // Unexpected success from the handler onLeaveFrame -- raising error
      // since throw recovery is not yet implemented in the wasm baseline.
      // TODO properly handle success and resume wasm execution.
      JS_ReportErrorASCII(cx, "Unexpected success from onLeaveFrame");
    }
    frame->leave(cx);
  }

  MOZ_ASSERT(!cx->activation()->asJit()->isWasmInterrupted(),
             "unwinding clears the interrupt");
  MOZ_ASSERT(!cx->activation()->asJit()->isWasmTrapping(),
             "unwinding clears the trapping state");

  return iter.unwoundAddressOfReturnAddress();
}

static void* WasmHandleThrow() {
  JitActivation* activation = CallingActivation();
  JSContext* cx = activation->cx();
  WasmFrameIter iter(activation);
  return HandleThrow(cx, iter);
}

static void WasmOldReportTrap(int32_t trapIndex) {
  JSContext* cx = TlsContext.get();

  MOZ_ASSERT(trapIndex < int32_t(Trap::Limit) && trapIndex >= 0);
  Trap trap = Trap(trapIndex);

  unsigned errorNumber;
  switch (trap) {
    case Trap::Unreachable:
      errorNumber = JSMSG_WASM_UNREACHABLE;
      break;
    case Trap::IntegerOverflow:
      errorNumber = JSMSG_WASM_INTEGER_OVERFLOW;
      break;
    case Trap::InvalidConversionToInteger:
      errorNumber = JSMSG_WASM_INVALID_CONVERSION;
      break;
    case Trap::IntegerDivideByZero:
      errorNumber = JSMSG_WASM_INT_DIVIDE_BY_ZERO;
      break;
    case Trap::IndirectCallToNull:
      errorNumber = JSMSG_WASM_IND_CALL_TO_NULL;
      break;
    case Trap::IndirectCallBadSig:
      errorNumber = JSMSG_WASM_IND_CALL_BAD_SIG;
      break;
    case Trap::ImpreciseSimdConversion:
      errorNumber = JSMSG_SIMD_FAILED_CONVERSION;
      break;
    case Trap::OutOfBounds:
      errorNumber = JSMSG_WASM_OUT_OF_BOUNDS;
      break;
    case Trap::UnalignedAccess:
      errorNumber = JSMSG_WASM_UNALIGNED_ACCESS;
      break;
    case Trap::StackOverflow:
      errorNumber = JSMSG_OVER_RECURSED;
      break;
    case Trap::ThrowReported:
      // Error was already reported under another name.
      return;
    default:
      MOZ_CRASH("unexpected trap");
  }

  JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber);
}

static void WasmReportTrap() {
  Trap trap = TlsContext.get()->runtime()->wasmTrapData().trap;
  WasmOldReportTrap(int32_t(trap));
}

static void WasmReportOutOfBounds() {
  JSContext* cx = TlsContext.get();
  JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
                           JSMSG_WASM_OUT_OF_BOUNDS);
}

static void WasmReportUnalignedAccess() {
  JSContext* cx = TlsContext.get();
  JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
                           JSMSG_WASM_UNALIGNED_ACCESS);
}

static void WasmReportInt64JSCall() {
  JSContext* cx = TlsContext.get();
  JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
                           JSMSG_WASM_BAD_I64_TYPE);
}

static int32_t CoerceInPlace_ToInt32(Value* rawVal) {
  JSContext* cx = TlsContext.get();

  int32_t i32;
  RootedValue val(cx, *rawVal);
  if (!ToInt32(cx, val, &i32)) {
    *rawVal = PoisonedObjectValue(0x42);
    return false;
  }

  *rawVal = Int32Value(i32);
  return true;
}

static int32_t CoerceInPlace_ToNumber(Value* rawVal) {
  JSContext* cx = TlsContext.get();

  double dbl;
  RootedValue val(cx, *rawVal);
  if (!ToNumber(cx, val, &dbl)) {
    *rawVal = PoisonedObjectValue(0x42);
    return false;
  }

  *rawVal = DoubleValue(dbl);
  return true;
}

static int32_t CoerceInPlace_JitEntry(int funcExportIndex, TlsData* tlsData,
                                      Value* argv) {
  JSContext* cx = CallingActivation()->cx();

  const Code& code = tlsData->instance->code();
  const FuncExport& fe =
      code.metadata(code.stableTier()).funcExports[funcExportIndex];

  for (size_t i = 0; i < fe.sig().args().length(); i++) {
    HandleValue arg = HandleValue::fromMarkedLocation(&argv[i]);
    switch (fe.sig().args()[i]) {
      case ValType::I32: {
        int32_t i32;
        if (!ToInt32(cx, arg, &i32)) return false;
        argv[i] = Int32Value(i32);
        break;
      }
      case ValType::F32:
      case ValType::F64: {
        double dbl;
        if (!ToNumber(cx, arg, &dbl)) return false;
        // No need to convert double-to-float for f32; the conversion is done
        // inline in the wasm stub later.
        argv[i] = DoubleValue(dbl);
        break;
      }
      default: {
        MOZ_CRASH("unexpected input argument in CoerceInPlace_JitEntry");
      }
    }
  }

  return true;
}

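// The following i64 helpers are used on targets without native 64-bit
// division. Operands arrive split into hi/lo 32-bit halves and are
// reassembled here. Division by zero and INT64_MIN / -1 overflow are checked
// by the generated wasm code before calling out, hence the bare asserts.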
static int64_t DivI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
                      uint32_t y_lo) {
  int64_t x = ((uint64_t)x_hi << 32) + x_lo;
  int64_t y = ((uint64_t)y_hi << 32) + y_lo;
  MOZ_ASSERT(x != INT64_MIN || y != -1);
  MOZ_ASSERT(y != 0);
  return x / y;
}

static int64_t UDivI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
                       uint32_t y_lo) {
  uint64_t x = ((uint64_t)x_hi << 32) + x_lo;
  uint64_t y = ((uint64_t)y_hi << 32) + y_lo;
  MOZ_ASSERT(y != 0);
  return x / y;
}

static int64_t ModI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
                      uint32_t y_lo) {
  int64_t x = ((uint64_t)x_hi << 32) + x_lo;
  int64_t y = ((uint64_t)y_hi << 32) + y_lo;
  MOZ_ASSERT(x != INT64_MIN || y != -1);
  MOZ_ASSERT(y != 0);
  return x % y;
}

static int64_t UModI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
                       uint32_t y_lo) {
  uint64_t x = ((uint64_t)x_hi << 32) + x_lo;
  uint64_t y = ((uint64_t)y_hi << 32) + y_lo;
  MOZ_ASSERT(y != 0);
  return x % y;
}

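// The non-saturating truncations below report failure by returning the
// sentinel bit pattern 0x8000000000000000; the calling stub checks for it and
// raises the appropriate trap.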
static int64_t TruncateDoubleToInt64(double input) {
  // Note: INT64_MAX is not exactly representable in double; double(INT64_MAX)
  // rounds up to INT64_MAX + 1, which is out of range, so the >= comparison
  // also sends that value to the failure path.
  if (input >= double(INT64_MAX) || input < double(INT64_MIN) || IsNaN(input))
    return 0x8000000000000000;
  return int64_t(input);
}

static uint64_t TruncateDoubleToUint64(double input) {
  // Note: UINT64_MAX is not exactly representable in double;
  // double(UINT64_MAX) rounds up to UINT64_MAX + 1, which is out of range, so
  // the >= comparison also sends that value to the failure path.
  if (input >= double(UINT64_MAX) || input <= -1.0 || IsNaN(input))
    return 0x8000000000000000;
  return uint64_t(input);
}

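// In the saturating variants below, -double(INT64_MIN) is exactly 2^63 and
// -double(INT64_MIN) * 2.0 is exactly 2^64, so both serve as precise range
// bounds.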
static int64_t SaturatingTruncateDoubleToInt64(double input) {
  // Handle in-range values (except INT64_MIN).
  if (fabs(input) < -double(INT64_MIN)) return int64_t(input);
  // Handle NaN.
  if (IsNaN(input)) return 0;
  // Handle positive overflow.
  if (input > 0) return INT64_MAX;
  // Handle negative overflow.
  return INT64_MIN;
}

static uint64_t SaturatingTruncateDoubleToUint64(double input) {
  // Handle positive overflow.
  if (input >= -double(INT64_MIN) * 2.0) return UINT64_MAX;
  // Handle in-range values. The comparison must be strict: trunc(-1.0) is -1,
  // which saturates to 0 below, and converting -1.0 to uint64_t directly
  // would be undefined behavior.
  if (input > -1.0) return uint64_t(input);
  // Handle NaN and negative saturation.
  return 0;
}

static double Int64ToDouble(int32_t x_hi, uint32_t x_lo) {
  int64_t x = int64_t((uint64_t(x_hi) << 32)) + int64_t(x_lo);
  return double(x);
}

static float Int64ToFloat32(int32_t x_hi, uint32_t x_lo) {
  int64_t x = int64_t((uint64_t(x_hi) << 32)) + int64_t(x_lo);
  return float(x);
}

static double Uint64ToDouble(int32_t x_hi, uint32_t x_lo) {
  uint64_t x = (uint64_t(x_hi) << 32) + uint64_t(x_lo);
  return double(x);
}

static float Uint64ToFloat32(int32_t x_hi, uint32_t x_lo) {
  uint64_t x = (uint64_t(x_hi) << 32) + uint64_t(x_lo);
  return float(x);
}

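// FuncCast erases the C++ function type so the address can be stored as a
// plain code pointer. Under the simulator, native functions cannot be called
// directly from simulated code, so the pointer is replaced with a redirection
// that marshals arguments according to abiType.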
template <class F>
static inline void* FuncCast(F* funcPtr, ABIFunctionType abiType) {
  void* pf = JS_FUNC_TO_DATA_PTR(void*, funcPtr);
#ifdef JS_SIMULATOR
  pf = Simulator::RedirectNativeFunction(pf, abiType);
#endif
  return pf;
}

static void* AddressOf(SymbolicAddress imm, ABIFunctionType* abiType) {
  switch (imm) {
    case SymbolicAddress::HandleExecutionInterrupt:
      *abiType = Args_General0;
      return FuncCast(WasmHandleExecutionInterrupt, *abiType);
    case SymbolicAddress::HandleDebugTrap:
      *abiType = Args_General0;
      return FuncCast(WasmHandleDebugTrap, *abiType);
    case SymbolicAddress::HandleThrow:
      *abiType = Args_General0;
      return FuncCast(WasmHandleThrow, *abiType);
    case SymbolicAddress::ReportTrap:
      *abiType = Args_General0;
      return FuncCast(WasmReportTrap, *abiType);
    case SymbolicAddress::OldReportTrap:
      *abiType = Args_General1;
      return FuncCast(WasmOldReportTrap, *abiType);
    case SymbolicAddress::ReportOutOfBounds:
      *abiType = Args_General0;
      return FuncCast(WasmReportOutOfBounds, *abiType);
    case SymbolicAddress::ReportUnalignedAccess:
      *abiType = Args_General0;
      return FuncCast(WasmReportUnalignedAccess, *abiType);
    case SymbolicAddress::ReportInt64JSCall:
      *abiType = Args_General0;
      return FuncCast(WasmReportInt64JSCall, *abiType);
    case SymbolicAddress::CallImport_Void:
      *abiType = Args_General4;
      return FuncCast(Instance::callImport_void, *abiType);
    case SymbolicAddress::CallImport_I32:
      *abiType = Args_General4;
      return FuncCast(Instance::callImport_i32, *abiType);
    case SymbolicAddress::CallImport_I64:
      *abiType = Args_General4;
      return FuncCast(Instance::callImport_i64, *abiType);
    case SymbolicAddress::CallImport_F64:
      *abiType = Args_General4;
      return FuncCast(Instance::callImport_f64, *abiType);
    case SymbolicAddress::CoerceInPlace_ToInt32:
      *abiType = Args_General1;
      return FuncCast(CoerceInPlace_ToInt32, *abiType);
    case SymbolicAddress::CoerceInPlace_ToNumber:
      *abiType = Args_General1;
      return FuncCast(CoerceInPlace_ToNumber, *abiType);
    case SymbolicAddress::CoerceInPlace_JitEntry:
      *abiType = Args_General3;
      return FuncCast(CoerceInPlace_JitEntry, *abiType);
    case SymbolicAddress::ToInt32:
      *abiType = Args_Int_Double;
      return FuncCast<int32_t(double)>(JS::ToInt32, *abiType);
    case SymbolicAddress::DivI64:
      *abiType = Args_General4;
      return FuncCast(DivI64, *abiType);
    case SymbolicAddress::UDivI64:
      *abiType = Args_General4;
      return FuncCast(UDivI64, *abiType);
    case SymbolicAddress::ModI64:
      *abiType = Args_General4;
      return FuncCast(ModI64, *abiType);
    case SymbolicAddress::UModI64:
      *abiType = Args_General4;
      return FuncCast(UModI64, *abiType);
    case SymbolicAddress::TruncateDoubleToUint64:
      *abiType = Args_Int64_Double;
      return FuncCast(TruncateDoubleToUint64, *abiType);
    case SymbolicAddress::TruncateDoubleToInt64:
      *abiType = Args_Int64_Double;
      return FuncCast(TruncateDoubleToInt64, *abiType);
    case SymbolicAddress::SaturatingTruncateDoubleToUint64:
      *abiType = Args_Int64_Double;
      return FuncCast(SaturatingTruncateDoubleToUint64, *abiType);
    case SymbolicAddress::SaturatingTruncateDoubleToInt64:
      *abiType = Args_Int64_Double;
      return FuncCast(SaturatingTruncateDoubleToInt64, *abiType);
    case SymbolicAddress::Uint64ToDouble:
      *abiType = Args_Double_IntInt;
      return FuncCast(Uint64ToDouble, *abiType);
    case SymbolicAddress::Uint64ToFloat32:
      *abiType = Args_Float32_IntInt;
      return FuncCast(Uint64ToFloat32, *abiType);
    case SymbolicAddress::Int64ToDouble:
      *abiType = Args_Double_IntInt;
      return FuncCast(Int64ToDouble, *abiType);
    case SymbolicAddress::Int64ToFloat32:
      *abiType = Args_Float32_IntInt;
      return FuncCast(Int64ToFloat32, *abiType);
#if defined(JS_CODEGEN_ARM)
    case SymbolicAddress::aeabi_idivmod:
      *abiType = Args_General2;
      return FuncCast(__aeabi_idivmod, *abiType);
    case SymbolicAddress::aeabi_uidivmod:
      *abiType = Args_General2;
      return FuncCast(__aeabi_uidivmod, *abiType);
#endif
    case SymbolicAddress::ModD:
      *abiType = Args_Double_DoubleDouble;
      return FuncCast(NumberMod, *abiType);
    case SymbolicAddress::SinD:
      *abiType = Args_Double_Double;
      return FuncCast<double(double)>(sin, *abiType);
    case SymbolicAddress::CosD:
      *abiType = Args_Double_Double;
      return FuncCast<double(double)>(cos, *abiType);
    case SymbolicAddress::TanD:
      *abiType = Args_Double_Double;
      return FuncCast<double(double)>(tan, *abiType);
    case SymbolicAddress::ASinD:
      *abiType = Args_Double_Double;
      return FuncCast<double(double)>(fdlibm::asin, *abiType);
    case SymbolicAddress::ACosD:
      *abiType = Args_Double_Double;
      return FuncCast<double(double)>(fdlibm::acos, *abiType);
    case SymbolicAddress::ATanD:
      *abiType = Args_Double_Double;
      return FuncCast<double(double)>(fdlibm::atan, *abiType);
    case SymbolicAddress::CeilD:
      *abiType = Args_Double_Double;
      return FuncCast<double(double)>(fdlibm::ceil, *abiType);
    case SymbolicAddress::CeilF:
      *abiType = Args_Float32_Float32;
      return FuncCast<float(float)>(fdlibm::ceilf, *abiType);
    case SymbolicAddress::FloorD:
      *abiType = Args_Double_Double;
      return FuncCast<double(double)>(fdlibm::floor, *abiType);
    case SymbolicAddress::FloorF:
      *abiType = Args_Float32_Float32;
      return FuncCast<float(float)>(fdlibm::floorf, *abiType);
    case SymbolicAddress::TruncD:
      *abiType = Args_Double_Double;
      return FuncCast<double(double)>(fdlibm::trunc, *abiType);
    case SymbolicAddress::TruncF:
      *abiType = Args_Float32_Float32;
      return FuncCast<float(float)>(fdlibm::truncf, *abiType);
    case SymbolicAddress::NearbyIntD:
      *abiType = Args_Double_Double;
      return FuncCast<double(double)>(fdlibm::nearbyint, *abiType);
    case SymbolicAddress::NearbyIntF:
      *abiType = Args_Float32_Float32;
      return FuncCast<float(float)>(fdlibm::nearbyintf, *abiType);
    case SymbolicAddress::ExpD:
      *abiType = Args_Double_Double;
      return FuncCast<double(double)>(fdlibm::exp, *abiType);
    case SymbolicAddress::LogD:
      *abiType = Args_Double_Double;
      return FuncCast<double(double)>(fdlibm::log, *abiType);
    case SymbolicAddress::PowD:
      *abiType = Args_Double_DoubleDouble;
      return FuncCast(ecmaPow, *abiType);
    case SymbolicAddress::ATan2D:
      *abiType = Args_Double_DoubleDouble;
      return FuncCast(ecmaAtan2, *abiType);
    case SymbolicAddress::GrowMemory:
      *abiType = Args_General2;
      return FuncCast(Instance::growMemory_i32, *abiType);
    case SymbolicAddress::CurrentMemory:
      *abiType = Args_General1;
      return FuncCast(Instance::currentMemory_i32, *abiType);
    case SymbolicAddress::WaitI32:
      *abiType = Args_Int_GeneralGeneralGeneralInt64;
      return FuncCast(Instance::wait_i32, *abiType);
    case SymbolicAddress::WaitI64:
      *abiType = Args_Int_GeneralGeneralInt64Int64;
      return FuncCast(Instance::wait_i64, *abiType);
    case SymbolicAddress::Wake:
      *abiType = Args_General3;
      return FuncCast(Instance::wake, *abiType);
#if defined(JS_CODEGEN_MIPS32)
    case SymbolicAddress::js_jit_gAtomic64Lock:
      return &js::jit::gAtomic64Lock;
#endif
    case SymbolicAddress::Limit:
      break;
  }

  MOZ_CRASH("Bad SymbolicAddress");
}

bool wasm::NeedsBuiltinThunk(SymbolicAddress sym) {
  // Some functions don't want a thunk, because they already have one or
  // they don't have frame info.
  switch (sym) {
    case SymbolicAddress::HandleExecutionInterrupt:  // GenerateInterruptExit
    case SymbolicAddress::HandleDebugTrap:           // GenerateDebugTrapStub
    case SymbolicAddress::HandleThrow:               // GenerateThrowStub
    case SymbolicAddress::ReportTrap:                // GenerateTrapExit
    case SymbolicAddress::OldReportTrap:             // GenerateOldTrapExit
    case SymbolicAddress::ReportOutOfBounds:         // GenerateOutOfBoundsExit
    case SymbolicAddress::ReportUnalignedAccess:     // GenerateUnalignedExit
    case SymbolicAddress::CallImport_Void:           // GenerateImportInterpExit
    case SymbolicAddress::CallImport_I32:
    case SymbolicAddress::CallImport_I64:
    case SymbolicAddress::CallImport_F64:
    case SymbolicAddress::CoerceInPlace_ToInt32:  // GenerateImportJitExit
    case SymbolicAddress::CoerceInPlace_ToNumber:
#if defined(JS_CODEGEN_MIPS32)
    case SymbolicAddress::js_jit_gAtomic64Lock:
#endif
      return false;
    case SymbolicAddress::ToInt32:
    case SymbolicAddress::DivI64:
    case SymbolicAddress::UDivI64:
    case SymbolicAddress::ModI64:
    case SymbolicAddress::UModI64:
    case SymbolicAddress::TruncateDoubleToUint64:
    case SymbolicAddress::TruncateDoubleToInt64:
    case SymbolicAddress::SaturatingTruncateDoubleToUint64:
    case SymbolicAddress::SaturatingTruncateDoubleToInt64:
    case SymbolicAddress::Uint64ToDouble:
    case SymbolicAddress::Uint64ToFloat32:
    case SymbolicAddress::Int64ToDouble:
    case SymbolicAddress::Int64ToFloat32:
#if defined(JS_CODEGEN_ARM)
    case SymbolicAddress::aeabi_idivmod:
    case SymbolicAddress::aeabi_uidivmod:
#endif
    case SymbolicAddress::ModD:
    case SymbolicAddress::SinD:
    case SymbolicAddress::CosD:
    case SymbolicAddress::TanD:
    case SymbolicAddress::ASinD:
    case SymbolicAddress::ACosD:
    case SymbolicAddress::ATanD:
    case SymbolicAddress::CeilD:
    case SymbolicAddress::CeilF:
    case SymbolicAddress::FloorD:
    case SymbolicAddress::FloorF:
    case SymbolicAddress::TruncD:
    case SymbolicAddress::TruncF:
    case SymbolicAddress::NearbyIntD:
    case SymbolicAddress::NearbyIntF:
    case SymbolicAddress::ExpD:
    case SymbolicAddress::LogD:
    case SymbolicAddress::PowD:
    case SymbolicAddress::ATan2D:
    case SymbolicAddress::GrowMemory:
    case SymbolicAddress::CurrentMemory:
    case SymbolicAddress::WaitI32:
    case SymbolicAddress::WaitI64:
    case SymbolicAddress::Wake:
    case SymbolicAddress::CoerceInPlace_JitEntry:
    case SymbolicAddress::ReportInt64JSCall:
      return true;
    case SymbolicAddress::Limit:
      break;
  }

  MOZ_CRASH("unexpected symbolic address");
}

// ============================================================================
// JS builtins that can be imported by wasm modules and called efficiently
// through thunks. These thunks conform to the internal wasm ABI and thus can
// be patched in for import calls. Calling a JS builtin through a thunk is
// much faster than calling out through the generic import call trampoline
// which will end up in the slowest C++ Instance::callImport path.
//
// Each JS builtin can have several overloads. These must all be enumerated in
// PopulateTypedNatives() so they can be included in the process-wide thunk
// set.
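//
// For example (illustrative): a module importing Math.sin with signature
// (f64) -> f64 can have that import call patched to jump straight to the
// math_sin thunk generated below, bypassing Instance::callImport entirely.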

#define FOR_EACH_UNARY_NATIVE(_) \
  _(math_sin, MathSin)           \
  _(math_tan, MathTan)           \
  _(math_cos, MathCos)           \
  _(math_exp, MathExp)           \
  _(math_log, MathLog)           \
  _(math_asin, MathASin)         \
  _(math_atan, MathATan)         \
  _(math_acos, MathACos)         \
  _(math_log10, MathLog10)       \
  _(math_log2, MathLog2)         \
  _(math_log1p, MathLog1P)       \
  _(math_expm1, MathExpM1)       \
  _(math_sinh, MathSinH)         \
  _(math_tanh, MathTanH)         \
  _(math_cosh, MathCosH)         \
  _(math_asinh, MathASinH)       \
  _(math_atanh, MathATanH)       \
  _(math_acosh, MathACosH)       \
  _(math_sign, MathSign)         \
  _(math_trunc, MathTrunc)       \
  _(math_cbrt, MathCbrt)

#define FOR_EACH_BINARY_NATIVE(_) \
  _(ecmaAtan2, MathATan2)         \
  _(ecmaHypot, MathHypot)         \
  _(ecmaPow, MathPow)

#define DEFINE_UNARY_FLOAT_WRAPPER(func, _)   \
  static float func##_uncached_f32(float x) { \
    return float(func##_uncached(double(x))); \
  }

#define DEFINE_BINARY_FLOAT_WRAPPER(func, _)  \
  static float func##_f32(float x, float y) { \
    return float(func(double(x), double(y))); \
  }

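// Each f32 wrapper widens its arguments to double, calls the double
// implementation, and narrows the result back to float.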
FOR_EACH_UNARY_NATIVE(DEFINE_UNARY_FLOAT_WRAPPER)
FOR_EACH_BINARY_NATIVE(DEFINE_BINARY_FLOAT_WRAPPER)

#undef DEFINE_UNARY_FLOAT_WRAPPER
#undef DEFINE_BINARY_FLOAT_WRAPPER

struct TypedNative {
  InlinableNative native;
  ABIFunctionType abiType;

  TypedNative(InlinableNative native, ABIFunctionType abiType)
      : native(native), abiType(abiType) {}

  typedef TypedNative Lookup;
  static HashNumber hash(const Lookup& l) {
    return HashGeneric(uint32_t(l.native), uint32_t(l.abiType));
  }
  static bool match(const TypedNative& lhs, const Lookup& rhs) {
    return lhs.native == rhs.native && lhs.abiType == rhs.abiType;
  }
};

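// TypedNative serves as its own hash policy: thunks are keyed on the
// (native, abiType) pair, so each ABI overload of a builtin gets its own
// entry.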
using TypedNativeToFuncPtrMap =
    HashMap<TypedNative, void*, TypedNative, SystemAllocPolicy>;

static bool PopulateTypedNatives(TypedNativeToFuncPtrMap* typedNatives) {
  if (!typedNatives->init()) return false;

#define ADD_OVERLOAD(funcName, native, abiType)                            \
  if (!typedNatives->putNew(TypedNative(InlinableNative::native, abiType), \
                            FuncCast(funcName, abiType)))                  \
    return false;

#define ADD_UNARY_OVERLOADS(funcName, native)                   \
  ADD_OVERLOAD(funcName##_uncached, native, Args_Double_Double) \
  ADD_OVERLOAD(funcName##_uncached_f32, native, Args_Float32_Float32)

#define ADD_BINARY_OVERLOADS(funcName, native)             \
  ADD_OVERLOAD(funcName, native, Args_Double_DoubleDouble) \
  ADD_OVERLOAD(funcName##_f32, native, Args_Float32_Float32Float32)

  FOR_EACH_UNARY_NATIVE(ADD_UNARY_OVERLOADS)
  FOR_EACH_BINARY_NATIVE(ADD_BINARY_OVERLOADS)

#undef ADD_UNARY_OVERLOADS
#undef ADD_BINARY_OVERLOADS

  return true;
}

#undef FOR_EACH_UNARY_NATIVE
#undef FOR_EACH_BINARY_NATIVE

// ============================================================================
// Process-wide builtin thunk set
//
// Thunks are inserted between wasm calls and the C++ callee and achieve two
// things:
//  - bridging the few differences between the internal wasm ABI and the
//    external native ABI (viz. float returns on x86 and soft-fp ARM)
//  - executing an exit prologue/epilogue which in turn allows any profiling
//    iterator to see the full stack up to the wasm operation that called out
//
// Thunks are created for two kinds of C++ callees, enumerated above:
//  - SymbolicAddress: for statically compiled calls in the wasm module
//  - Imported JS builtins: optimized calls to imports
//
// All thunks are created in a single batch, lazily, when the first wasm module
// is compiled in the process. Thunks are kept alive until the JS engine shuts
// down in the process. No thunks are created at runtime after initialization.
// This simple scheme allows several simplifications:
//  - no reference counting to keep thunks alive
//  - no problems toggling W^X permissions which, because of multiple executing
//    threads, would require each thunk allocation to be on its own page
// The cost for creating all thunks at once is relatively low since all thunks
// fit within the smallest executable quantum (64k).

using TypedNativeToCodeRangeMap =
    HashMap<TypedNative, uint32_t, TypedNative, SystemAllocPolicy>;

using SymbolicAddressToCodeRangeArray =
    EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, uint32_t>;

struct BuiltinThunks {
  uint8_t* codeBase;
  size_t codeSize;
  CodeRangeVector codeRanges;
  TypedNativeToCodeRangeMap typedNativeToCodeRange;
  SymbolicAddressToCodeRangeArray symbolicAddressToCodeRange;

  BuiltinThunks() : codeBase(nullptr), codeSize(0) {}

  ~BuiltinThunks() {
    if (codeBase) DeallocateExecutableMemory(codeBase, codeSize);
  }
};

Mutex initBuiltinThunks(mutexid::WasmInitBuiltinThunks);
Atomic<const BuiltinThunks*> builtinThunks;
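// builtinThunks is written once, under initBuiltinThunks, and is effectively
// read-only until shutdown; readers deliberately access it without locking.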

bool wasm::EnsureBuiltinThunksInitialized() {
  LockGuard<Mutex> guard(initBuiltinThunks);
  if (builtinThunks) return true;

  auto thunks = MakeUnique<BuiltinThunks>();
  if (!thunks) return false;

  LifoAlloc lifo(BUILTIN_THUNK_LIFO_SIZE);
  TempAllocator tempAlloc(&lifo);
  MacroAssembler masm(MacroAssembler::WasmToken(), tempAlloc);

  for (auto sym : MakeEnumeratedRange(SymbolicAddress::Limit)) {
    if (!NeedsBuiltinThunk(sym)) {
      thunks->symbolicAddressToCodeRange[sym] = UINT32_MAX;
      continue;
    }

    uint32_t codeRangeIndex = thunks->codeRanges.length();
    thunks->symbolicAddressToCodeRange[sym] = codeRangeIndex;

    ABIFunctionType abiType;
    void* funcPtr = AddressOf(sym, &abiType);

    ExitReason exitReason(sym);

    CallableOffsets offsets;
    if (!GenerateBuiltinThunk(masm, abiType, exitReason, funcPtr, &offsets))
      return false;
    if (!thunks->codeRanges.emplaceBack(CodeRange::BuiltinThunk, offsets))
      return false;
  }

  TypedNativeToFuncPtrMap typedNatives;
  if (!PopulateTypedNatives(&typedNatives)) return false;

  if (!thunks->typedNativeToCodeRange.init()) return false;

  for (TypedNativeToFuncPtrMap::Range r = typedNatives.all(); !r.empty();
       r.popFront()) {
    TypedNative typedNative = r.front().key();

    uint32_t codeRangeIndex = thunks->codeRanges.length();
    if (!thunks->typedNativeToCodeRange.putNew(typedNative, codeRangeIndex))
      return false;

    ABIFunctionType abiType = typedNative.abiType;
    void* funcPtr = r.front().value();

    ExitReason exitReason = ExitReason::Fixed::BuiltinNative;

    CallableOffsets offsets;
    if (!GenerateBuiltinThunk(masm, abiType, exitReason, funcPtr, &offsets))
      return false;
    if (!thunks->codeRanges.emplaceBack(CodeRange::BuiltinThunk, offsets))
      return false;
  }

  masm.finish();
  if (masm.oom()) return false;

  size_t allocSize = AlignBytes(masm.bytesNeeded(), ExecutableCodePageSize);

  thunks->codeSize = allocSize;
  thunks->codeBase = (uint8_t*)AllocateExecutableMemory(
      allocSize, ProtectionSetting::Writable);
  if (!thunks->codeBase) return false;

  masm.executableCopy(thunks->codeBase, /* flushICache = */ false);
  memset(thunks->codeBase + masm.bytesNeeded(), 0,
         allocSize - masm.bytesNeeded());

  masm.processCodeLabels(thunks->codeBase);

  MOZ_ASSERT(masm.callSites().empty());
  MOZ_ASSERT(masm.callSiteTargets().empty());
  MOZ_ASSERT(masm.callFarJumps().empty());
  MOZ_ASSERT(masm.trapSites().empty());
  MOZ_ASSERT(masm.oldTrapSites().empty());
  MOZ_ASSERT(masm.oldTrapFarJumps().empty());
  MOZ_ASSERT(masm.memoryAccesses().empty());
  MOZ_ASSERT(masm.symbolicAccesses().empty());

  ExecutableAllocator::cacheFlush(thunks->codeBase, thunks->codeSize);
  if (!ExecutableAllocator::makeExecutable(thunks->codeBase, thunks->codeSize))
    return false;

  builtinThunks = thunks.release();
  return true;
}

void wasm::ReleaseBuiltinThunks() {
  if (builtinThunks) {
    const BuiltinThunks* ptr = builtinThunks;
    js_delete(const_cast<BuiltinThunks*>(ptr));
    builtinThunks = nullptr;
  }
}

void* wasm::SymbolicAddressTarget(SymbolicAddress sym) {
  MOZ_ASSERT(builtinThunks);

  ABIFunctionType abiType;
  void* funcPtr = AddressOf(sym, &abiType);

  if (!NeedsBuiltinThunk(sym)) return funcPtr;

  const BuiltinThunks& thunks = *builtinThunks;
  uint32_t codeRangeIndex = thunks.symbolicAddressToCodeRange[sym];
  return thunks.codeBase + thunks.codeRanges[codeRangeIndex].begin();
}

static Maybe<ABIFunctionType> ToBuiltinABIFunctionType(const Sig& sig) {
  const ValTypeVector& args = sig.args();
  ExprType ret = sig.ret();

  uint32_t abiType;
  switch (ret) {
    case ExprType::F32:
      abiType = ArgType_Float32 << RetType_Shift;
      break;
    case ExprType::F64:
      abiType = ArgType_Double << RetType_Shift;
      break;
    default:
      return Nothing();
  }

  if ((args.length() + 1) > (sizeof(uint32_t) * 8 / ArgType_Shift))
    return Nothing();

  for (size_t i = 0; i < args.length(); i++) {
    switch (args[i]) {
      case ValType::F32:
        abiType |= (ArgType_Float32 << (ArgType_Shift * (i + 1)));
        break;
      case ValType::F64:
        abiType |= (ArgType_Double << (ArgType_Shift * (i + 1)));
        break;
      default:
        return Nothing();
    }
  }

  return Some(ABIFunctionType(abiType));
}

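// For example (illustrative): Math.pow imported with wasm signature
// (f64, f64) -> f64 maps to Args_Double_DoubleDouble above and so finds the
// MathPow overload registered in PopulateTypedNatives().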
void* wasm::MaybeGetBuiltinThunk(HandleFunction f, const Sig& sig) {
  MOZ_ASSERT(builtinThunks);

  if (!f->isNative() || !f->hasJitInfo() ||
      f->jitInfo()->type() != JSJitInfo::InlinableNative)
    return nullptr;

  Maybe<ABIFunctionType> abiType = ToBuiltinABIFunctionType(sig);
  if (!abiType) return nullptr;

  TypedNative typedNative(f->jitInfo()->inlinableNative, *abiType);

  const BuiltinThunks& thunks = *builtinThunks;
  auto p = thunks.typedNativeToCodeRange.readonlyThreadsafeLookup(typedNative);
  if (!p) return nullptr;

  return thunks.codeBase + thunks.codeRanges[p->value()].begin();
}

bool wasm::LookupBuiltinThunk(void* pc, const CodeRange** codeRange,
                              uint8_t** codeBase) {
  if (!builtinThunks) return false;

  const BuiltinThunks& thunks = *builtinThunks;
  if (pc < thunks.codeBase || pc >= thunks.codeBase + thunks.codeSize)
    return false;

  *codeBase = thunks.codeBase;

  CodeRange::OffsetInCode target((uint8_t*)pc - thunks.codeBase);
  *codeRange = LookupInSorted(thunks.codeRanges, target);

  return !!*codeRange;
}