1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2  * vim: set ts=8 sts=2 et sw=2 tw=80:
3  *
4  * Copyright 2016 Mozilla Foundation
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  *     http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18 
19 #include "wasm/WasmInstance.h"
20 
21 #include "mozilla/CheckedInt.h"
22 #include "mozilla/DebugOnly.h"
23 
24 #include <algorithm>
25 #include <utility>
26 
27 #include "jsmath.h"
28 
29 #include "jit/AtomicOperations.h"
30 #include "jit/Disassemble.h"
31 #include "jit/InlinableNatives.h"
32 #include "jit/JitCommon.h"
33 #include "jit/JitRuntime.h"
34 #include "jit/JitScript.h"
35 #include "js/ForOfIterator.h"
36 #include "js/friend/ErrorMessages.h"  // js::GetErrorMessage, JSMSG_*
37 #include "util/StringBuffer.h"
38 #include "util/Text.h"
39 #include "vm/BigIntType.h"
40 #include "vm/PlainObject.h"  // js::PlainObject
41 #include "wasm/TypedObject.h"
42 #include "wasm/WasmBuiltins.h"
43 #include "wasm/WasmJS.h"
44 #include "wasm/WasmModule.h"
45 #include "wasm/WasmStubs.h"
46 #include "wasm/WasmTypes.h"
47 
48 #include "gc/StoreBuffer-inl.h"
49 #include "vm/ArrayBufferObject-inl.h"
50 #include "vm/JSObject-inl.h"
51 
52 using namespace js;
53 using namespace js::jit;
54 using namespace js::wasm;
55 
56 using mozilla::BitwiseCast;
57 using mozilla::CheckedInt;
58 using mozilla::DebugOnly;
59 
60 using CheckedU32 = CheckedInt<uint32_t>;
61 
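// Process-wide interning table for function types, guarded by the
// ExclusiveData instance below. The first allocation for a given FuncType
// clones it and hands out the clone's address as the type id, so
// structurally equal function types share a single id pointer; a refcount
// tracks when the clone can be deleted again.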
62 class FuncTypeIdSet {
63   using Map =
64       HashMap<const FuncType*, uint32_t, FuncTypeHashPolicy, SystemAllocPolicy>;
65   Map map_;
66 
67  public:
68   ~FuncTypeIdSet() {
69     MOZ_ASSERT_IF(!JSRuntime::hasLiveRuntimes(), map_.empty());
70   }
71 
72   bool allocateFuncTypeId(JSContext* cx, const FuncType& funcType,
73                           const void** funcTypeId) {
74     Map::AddPtr p = map_.lookupForAdd(funcType);
75     if (p) {
76       MOZ_ASSERT(p->value() > 0);
77       p->value()++;
78       *funcTypeId = p->key();
79       return true;
80     }
81 
82     UniquePtr<FuncType> clone = MakeUnique<FuncType>();
83     if (!clone || !clone->clone(funcType) || !map_.add(p, clone.get(), 1)) {
84       ReportOutOfMemory(cx);
85       return false;
86     }
87 
88     *funcTypeId = clone.release();
89     MOZ_ASSERT(!(uintptr_t(*funcTypeId) & TypeIdDesc::ImmediateBit));
90     return true;
91   }
92 
93   void deallocateFuncTypeId(const FuncType& funcType, const void* funcTypeId) {
94     Map::Ptr p = map_.lookup(funcType);
95     MOZ_RELEASE_ASSERT(p && p->key() == funcTypeId && p->value() > 0);
96 
97     p->value()--;
98     if (!p->value()) {
99       js_delete(p->key());
100       map_.remove(p);
101     }
102   }
103 };
104 
105 ExclusiveData<FuncTypeIdSet> funcTypeIdSet(mutexid::WasmFuncTypeIdSet);
106 
107 const void** Instance::addressOfTypeId(const TypeIdDesc& typeId) const {
108   return (const void**)(globalData() + typeId.globalDataOffset());
109 }
110 
111 FuncImportTls& Instance::funcImportTls(const FuncImport& fi) {
112   return *(FuncImportTls*)(globalData() + fi.tlsDataOffset());
113 }
114 
115 TableTls& Instance::tableTls(const TableDesc& td) const {
116   return *(TableTls*)(globalData() + td.globalDataOffset);
117 }
118 
119 // TODO(1626251): Consolidate definitions into Iterable.h
120 static bool IterableToArray(JSContext* cx, HandleValue iterable,
121                             MutableHandle<ArrayObject*> array) {
122   JS::ForOfIterator iterator(cx);
123   if (!iterator.init(iterable, JS::ForOfIterator::ThrowOnNonIterable)) {
124     return false;
125   }
126 
127   array.set(NewDenseEmptyArray(cx));
128   if (!array) {
129     return false;
130   }
131 
132   RootedValue nextValue(cx);
133   while (true) {
134     bool done;
135     if (!iterator.next(&nextValue, &done)) {
136       return false;
137     }
138     if (done) {
139       break;
140     }
141 
142     if (!NewbornArrayPush(cx, array, nextValue)) {
143       return false;
144     }
145   }
146   return true;
147 }
148 
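// Convert the JS return value of an imported function call back into wasm
// values. Without a stack-results area there is at most one result, written
// to argv[0]. Otherwise the JS value is consumed as an iterable: the single
// register result ends up in argv[0] and all remaining results are written
// into the caller-provided stack-results area.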
149 static bool UnpackResults(JSContext* cx, const ValTypeVector& resultTypes,
150                           const Maybe<char*> stackResultsArea, uint64_t* argv,
151                           MutableHandleValue rval) {
152   if (!stackResultsArea) {
153     MOZ_ASSERT(resultTypes.length() <= 1);
154     // Result is either one scalar value to unpack to a wasm value, or
155     // an ignored value for a zero-valued function.
156     if (resultTypes.length() == 1) {
157       return ToWebAssemblyValue(cx, rval, resultTypes[0], argv, true);
158     }
159     return true;
160   }
161 
162   MOZ_ASSERT(stackResultsArea.isSome());
163   RootedArrayObject array(cx);
164   if (!IterableToArray(cx, rval, &array)) {
165     return false;
166   }
167 
168   if (resultTypes.length() != array->length()) {
169     UniqueChars expected(JS_smprintf("%zu", resultTypes.length()));
170     UniqueChars got(JS_smprintf("%u", array->length()));
171 
172     JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
173                              JSMSG_WASM_WRONG_NUMBER_OF_VALUES, expected.get(),
174                              got.get());
175     return false;
176   }
177 
178   DebugOnly<uint64_t> previousOffset = ~(uint64_t)0;
179 
180   ABIResultIter iter(ResultType::Vector(resultTypes));
181   // The values are converted in the order they are pushed on the
182   // abstract WebAssembly stack; switch to iterate in push order.
183   while (!iter.done()) {
184     iter.next();
185   }
186   DebugOnly<bool> seenRegisterResult = false;
187   for (iter.switchToPrev(); !iter.done(); iter.prev()) {
188     const ABIResult& result = iter.cur();
189     MOZ_ASSERT(!seenRegisterResult);
190     // Use rval as a scratch area to hold the extracted result.
191     rval.set(array->getDenseElement(iter.index()));
192     if (result.inRegister()) {
193       // Currently, if a function type has results, there can be only
194       // one register result.  If there is only one result, it is
195       // returned as a scalar and not an iterable, so we don't get here.
196       // If there are multiple results, we extract the register result
197       // and set `argv[0]` to the extracted result, to be returned by
198       // register in the stub.  The register result follows any stack
199       // results, so this preserves conversion order.
200       if (!ToWebAssemblyValue(cx, rval, result.type(), argv, true)) {
201         return false;
202       }
203       seenRegisterResult = true;
204       continue;
205     }
206     uint32_t result_size = result.size();
207     MOZ_ASSERT(result_size == 4 || result_size == 8);
208 #ifdef DEBUG
209     if (previousOffset == ~(uint64_t)0) {
210       previousOffset = (uint64_t)result.stackOffset();
211     } else {
212       MOZ_ASSERT(previousOffset - (uint64_t)result_size ==
213                  (uint64_t)result.stackOffset());
214       previousOffset -= (uint64_t)result_size;
215     }
216 #endif
217     char* loc = stackResultsArea.value() + result.stackOffset();
218     if (!ToWebAssemblyValue(cx, rval, result.type(), loc, result_size == 8)) {
219       return false;
220     }
221   }
222 
223   return true;
224 }
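// Handle a call from wasm to an imported JS function: convert the wasm
// arguments to JS values, invoke the import, unpack the results back into
// argv/the stack-results area, and opportunistically patch this import to
// use the faster JIT exit when the callee is suitable.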
225 
226 bool Instance::callImport(JSContext* cx, uint32_t funcImportIndex,
227                           unsigned argc, uint64_t* argv) {
228   AssertRealmUnchanged aru(cx);
229 
230   Tier tier = code().bestTier();
231 
232   const FuncImport& fi = metadata(tier).funcImports[funcImportIndex];
233 
234   ArgTypeVector argTypes(fi.funcType());
235   InvokeArgs args(cx);
236   if (!args.init(cx, argTypes.lengthWithoutStackResults())) {
237     return false;
238   }
239 
240   if (fi.funcType().hasUnexposableArgOrRet()) {
241     JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
242                              JSMSG_WASM_BAD_VAL_TYPE);
243     return false;
244   }
245 
246   MOZ_ASSERT(argTypes.lengthWithStackResults() == argc);
247   Maybe<char*> stackResultPointer;
248   for (size_t i = 0; i < argc; i++) {
249     const void* rawArgLoc = &argv[i];
250     if (argTypes.isSyntheticStackResultPointerArg(i)) {
251       stackResultPointer = Some(*(char**)rawArgLoc);
252       continue;
253     }
254     size_t naturalIndex = argTypes.naturalIndex(i);
255     ValType type = fi.funcType().args()[naturalIndex];
256     MutableHandleValue argValue = args[naturalIndex];
257     if (!ToJSValue(cx, rawArgLoc, type, argValue)) {
258       return false;
259     }
260   }
261 
262   FuncImportTls& import = funcImportTls(fi);
263   RootedFunction importFun(cx, import.fun);
264   MOZ_ASSERT(cx->realm() == importFun->realm());
265 
266   RootedValue fval(cx, ObjectValue(*importFun));
267   RootedValue thisv(cx, UndefinedValue());
268   RootedValue rval(cx);
269   if (!Call(cx, fval, thisv, args, &rval)) {
270     return false;
271   }
272 
273   if (!UnpackResults(cx, fi.funcType().results(), stackResultPointer, argv,
274                      &rval)) {
275     return false;
276   }
277 
278   if (!JitOptions.enableWasmJitExit) {
279     return true;
280   }
281 
282   // The import may already have become optimized.
283   for (auto t : code().tiers()) {
284     void* jitExitCode = codeBase(t) + fi.jitExitCodeOffset();
285     if (import.code == jitExitCode) {
286       return true;
287     }
288   }
289 
290   void* jitExitCode = codeBase(tier) + fi.jitExitCodeOffset();
291 
292   // Test if the function is JIT compiled.
293   if (!importFun->hasBytecode()) {
294     return true;
295   }
296 
297   JSScript* script = importFun->nonLazyScript();
298   if (!script->hasJitScript()) {
299     return true;
300   }
301 
302   // Skip if the function does not have a signature that allows for a JIT exit.
303   if (!fi.canHaveJitExit()) {
304     return true;
305   }
306 
307   // Let's optimize it!
308 
309   import.code = jitExitCode;
310   return true;
311 }
312 
313 /* static */ int32_t /* 0 to signal trap; 1 to signal OK */
314 Instance::callImport_general(Instance* instance, int32_t funcImportIndex,
315                              int32_t argc, uint64_t* argv) {
316   JSContext* cx = TlsContext.get();
317   return instance->callImport(cx, funcImportIndex, argc, argv);
318 }
319 
320 /* static */ uint32_t Instance::memoryGrow_i32(Instance* instance,
321                                                uint32_t delta) {
322   MOZ_ASSERT(SASigMemoryGrow.failureMode == FailureMode::Infallible);
323   MOZ_ASSERT(!instance->isAsmJS());
324 
325   JSContext* cx = TlsContext.get();
326   RootedWasmMemoryObject memory(cx, instance->memory_);
327 
328   uint32_t ret = WasmMemoryObject::grow(memory, delta, cx);
329 
330   // If there has been a moving grow, this Instance should have been notified.
331   MOZ_RELEASE_ASSERT(instance->tlsData()->memoryBase ==
332                      instance->memory_->buffer().dataPointerEither());
333 
334   return ret;
335 }
336 
337 /* static */ uint32_t Instance::memorySize_i32(Instance* instance) {
338   MOZ_ASSERT(SASigMemorySize.failureMode == FailureMode::Infallible);
339 
340   // This invariant must hold when running Wasm code. Assert it here so we can
341   // write tests for cross-realm calls.
342   MOZ_ASSERT(TlsContext.get()->realm() == instance->realm());
343 
344   Pages pages = instance->memory()->volatilePages();
345 #ifdef JS_64BIT
346   // Ensure that the memory size is no more than 4GiB.
347   MOZ_ASSERT(pages <= Pages(MaxMemory32LimitField));
348 #endif
349   return uint32_t(pages.value());
350 }
351 
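// Shared implementation of the atomic wait instructions. The memory must be
// shared and the address aligned and in bounds; the return value is 0
// (woken), 1 (value mismatch), 2 (timed out), or -1 after reporting an
// error.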
352 template <typename T>
353 static int32_t PerformWait(Instance* instance, uint32_t byteOffset, T value,
354                            int64_t timeout_ns) {
355   JSContext* cx = TlsContext.get();
356 
357   if (!instance->memory()->isShared()) {
358     JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
359                               JSMSG_WASM_NONSHARED_WAIT);
360     return -1;
361   }
362 
363   if (byteOffset & (sizeof(T) - 1)) {
364     JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
365                               JSMSG_WASM_UNALIGNED_ACCESS);
366     return -1;
367   }
368 
369   if (byteOffset + sizeof(T) > instance->memory()->volatileMemoryLength()) {
370     JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
371                               JSMSG_WASM_OUT_OF_BOUNDS);
372     return -1;
373   }
374 
375   mozilla::Maybe<mozilla::TimeDuration> timeout;
376   if (timeout_ns >= 0) {
377     timeout = mozilla::Some(
378         mozilla::TimeDuration::FromMicroseconds(timeout_ns / 1000));
379   }
380 
381   switch (atomics_wait_impl(cx, instance->sharedMemoryBuffer(), byteOffset,
382                             value, timeout)) {
383     case FutexThread::WaitResult::OK:
384       return 0;
385     case FutexThread::WaitResult::NotEqual:
386       return 1;
387     case FutexThread::WaitResult::TimedOut:
388       return 2;
389     case FutexThread::WaitResult::Error:
390       return -1;
391     default:
392       MOZ_CRASH();
393   }
394 }
395 
396 /* static */ int32_t Instance::wait_i32(Instance* instance, uint32_t byteOffset,
397                                         int32_t value, int64_t timeout_ns) {
398   MOZ_ASSERT(SASigWaitI32.failureMode == FailureMode::FailOnNegI32);
399   return PerformWait<int32_t>(instance, byteOffset, value, timeout_ns);
400 }
401 
402 /* static */ int32_t Instance::wait_i64(Instance* instance, uint32_t byteOffset,
403                                         int64_t value, int64_t timeout_ns) {
404   MOZ_ASSERT(SASigWaitI64.failureMode == FailureMode::FailOnNegI32);
405   return PerformWait<int64_t>(instance, byteOffset, value, timeout_ns);
406 }
407 
408 /* static */ int32_t Instance::wake(Instance* instance, uint32_t byteOffset,
409                                     int32_t count) {
410   MOZ_ASSERT(SASigWake.failureMode == FailureMode::FailOnNegI32);
411 
412   JSContext* cx = TlsContext.get();
413 
414   // The alignment guard is not in the wasm spec as of 2017-11-02, but is
415   // considered likely to appear, as 4-byte alignment is required for WAKE by
416   // the spec's validation algorithm.
417 
418   if (byteOffset & 3) {
419     JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
420                               JSMSG_WASM_UNALIGNED_ACCESS);
421     return -1;
422   }
423 
424   if (byteOffset >= instance->memory()->volatileMemoryLength()) {
425     JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
426                               JSMSG_WASM_OUT_OF_BOUNDS);
427     return -1;
428   }
429 
430   if (!instance->memory()->isShared()) {
431     return 0;
432   }
433 
434   int64_t woken = atomics_notify_impl(instance->sharedMemoryBuffer(),
435                                       byteOffset, int64_t(count));
436 
437   if (woken > INT32_MAX) {
438     JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
439                               JSMSG_WASM_WAKE_OVERFLOW);
440     return -1;
441   }
442 
443   return int32_t(woken);
444 }
445 
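// Common implementation of memory.copy for 32-bit memories: do the combined
// bounds/overflow check in 64-bit arithmetic, then delegate to the supplied
// move routine (plain memmove, or the racy-safe variant for shared memory).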
446 template <typename T, typename F>
447 inline int32_t WasmMemoryCopy32(T memBase, size_t memLen,
448                                 uint32_t dstByteOffset, uint32_t srcByteOffset,
449                                 uint32_t len, F memMove) {
450   // Bounds check and deal with arithmetic overflow.
451   uint64_t dstOffsetLimit = uint64_t(dstByteOffset) + uint64_t(len);
452   uint64_t srcOffsetLimit = uint64_t(srcByteOffset) + uint64_t(len);
453 
454   if (dstOffsetLimit > memLen || srcOffsetLimit > memLen) {
455     JSContext* cx = TlsContext.get();
456     JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
457                               JSMSG_WASM_OUT_OF_BOUNDS);
458     return -1;
459   }
460 
461   memMove(memBase + dstByteOffset, memBase + srcByteOffset, size_t(len));
462   return 0;
463 }
464 
465 /* static */ int32_t Instance::memCopy32(Instance* instance,
466                                          uint32_t dstByteOffset,
467                                          uint32_t srcByteOffset, uint32_t len,
468                                          uint8_t* memBase) {
469   MOZ_ASSERT(SASigMemCopy32.failureMode == FailureMode::FailOnNegI32);
470 
471   const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
472   size_t memLen = rawBuf->byteLength();
473 
474   return WasmMemoryCopy32(memBase, memLen, dstByteOffset, srcByteOffset, len,
475                           memmove);
476 }
477 
478 /* static */ int32_t Instance::memCopyShared32(Instance* instance,
479                                                uint32_t dstByteOffset,
480                                                uint32_t srcByteOffset,
481                                                uint32_t len, uint8_t* memBase) {
482   MOZ_ASSERT(SASigMemCopy32.failureMode == FailureMode::FailOnNegI32);
483 
484   using RacyMemMove =
485       void (*)(SharedMem<uint8_t*>, SharedMem<uint8_t*>, size_t);
486 
487   const SharedArrayRawBuffer* rawBuf =
488       SharedArrayRawBuffer::fromDataPtr(memBase);
489   size_t memLen = rawBuf->volatileByteLength();
490 
491   return WasmMemoryCopy32<SharedMem<uint8_t*>, RacyMemMove>(
492       SharedMem<uint8_t*>::shared(memBase), memLen, dstByteOffset,
493       srcByteOffset, len, AtomicOperations::memmoveSafeWhenRacy);
494 }
495 
496 /* static */ int32_t Instance::dataDrop(Instance* instance, uint32_t segIndex) {
497   MOZ_ASSERT(SASigDataDrop.failureMode == FailureMode::FailOnNegI32);
498 
499   MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
500                      "ensured by validation");
501 
502   if (!instance->passiveDataSegments_[segIndex]) {
503     return 0;
504   }
505 
506   SharedDataSegment& segRefPtr = instance->passiveDataSegments_[segIndex];
507   MOZ_RELEASE_ASSERT(!segRefPtr->active());
508 
509   // Drop this instance's reference to the DataSegment so it can be released.
510   segRefPtr = nullptr;
511   return 0;
512 }
513 
514 template <typename T, typename F>
515 inline int32_t WasmMemoryFill32(T memBase, size_t memLen, uint32_t byteOffset,
516                                 uint32_t value, uint32_t len, F memSet) {
517   // Bounds check and deal with arithmetic overflow.
518   uint64_t offsetLimit = uint64_t(byteOffset) + uint64_t(len);
519 
520   if (offsetLimit > memLen) {
521     JSContext* cx = TlsContext.get();
522     JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
523                               JSMSG_WASM_OUT_OF_BOUNDS);
524     return -1;
525   }
526 
527   // The required write direction is upward, but that is not currently
528   // observable as there are no fences nor any read/write protect operation.
529   memSet(memBase + byteOffset, int(value), size_t(len));
530   return 0;
531 }
532 
533 /* static */ int32_t Instance::memFill32(Instance* instance,
534                                          uint32_t byteOffset, uint32_t value,
535                                          uint32_t len, uint8_t* memBase) {
536   MOZ_ASSERT(SASigMemFill32.failureMode == FailureMode::FailOnNegI32);
537 
538   const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
539   size_t memLen = rawBuf->byteLength();
540 
541   return WasmMemoryFill32(memBase, memLen, byteOffset, value, len, memset);
542 }
543 
544 /* static */ int32_t Instance::memFillShared32(Instance* instance,
545                                                uint32_t byteOffset,
546                                                uint32_t value, uint32_t len,
547                                                uint8_t* memBase) {
548   MOZ_ASSERT(SASigMemFill32.failureMode == FailureMode::FailOnNegI32);
549 
550   const SharedArrayRawBuffer* rawBuf =
551       SharedArrayRawBuffer::fromDataPtr(memBase);
552   size_t memLen = rawBuf->volatileByteLength();
553 
554   return WasmMemoryFill32(SharedMem<uint8_t*>::shared(memBase), memLen,
555                           byteOffset, value, len,
556                           AtomicOperations::memsetSafeWhenRacy);
557 }
558 
559 /* static */ int32_t Instance::memInit32(Instance* instance, uint32_t dstOffset,
560                                          uint32_t srcOffset, uint32_t len,
561                                          uint32_t segIndex) {
562   MOZ_ASSERT(SASigMemInit32.failureMode == FailureMode::FailOnNegI32);
563 
564   MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
565                      "ensured by validation");
566 
567   if (!instance->passiveDataSegments_[segIndex]) {
568     if (len == 0 && srcOffset == 0) {
569       return 0;
570     }
571 
572     JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
573                               JSMSG_WASM_OUT_OF_BOUNDS);
574     return -1;
575   }
576 
577   const DataSegment& seg = *instance->passiveDataSegments_[segIndex];
578   MOZ_RELEASE_ASSERT(!seg.active());
579 
580   const uint32_t segLen = seg.bytes.length();
581 
582   WasmMemoryObject* mem = instance->memory();
583   const size_t memLen = mem->volatileMemoryLength();
584 
585   // We are proposing to copy
586   //
587   //   seg.bytes.begin()[ srcOffset .. srcOffset + len - 1 ]
588   // to
589   //   memoryBase[ dstOffset .. dstOffset + len - 1 ]
590 
591   // Bounds check and deal with arithmetic overflow.
592   uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
593   uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
594 
595   if (dstOffsetLimit > memLen || srcOffsetLimit > segLen) {
596     JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
597                               JSMSG_WASM_OUT_OF_BOUNDS);
598     return -1;
599   }
600 
601   // The required read/write direction is upward, but that is not currently
602   // observable as there are no fences nor any read/write protect operation.
603   SharedMem<uint8_t*> dataPtr = mem->buffer().dataPointerEither();
604   if (mem->isShared()) {
605     AtomicOperations::memcpySafeWhenRacy(
606         dataPtr + dstOffset, (uint8_t*)seg.bytes.begin() + srcOffset, len);
607   } else {
608     uint8_t* rawBuf = dataPtr.unwrap(/*Unshared*/);
609     memcpy(rawBuf + dstOffset, (const char*)seg.bytes.begin() + srcOffset, len);
610   }
611   return 0;
612 }
613 
614 /* static */ int32_t Instance::tableCopy(Instance* instance, uint32_t dstOffset,
615                                          uint32_t srcOffset, uint32_t len,
616                                          uint32_t dstTableIndex,
617                                          uint32_t srcTableIndex) {
618   MOZ_ASSERT(SASigTableCopy.failureMode == FailureMode::FailOnNegI32);
619 
620   const SharedTable& srcTable = instance->tables()[srcTableIndex];
621   uint32_t srcTableLen = srcTable->length();
622 
623   const SharedTable& dstTable = instance->tables()[dstTableIndex];
624   uint32_t dstTableLen = dstTable->length();
625 
626   // Bounds check and deal with arithmetic overflow.
627   uint64_t dstOffsetLimit = uint64_t(dstOffset) + len;
628   uint64_t srcOffsetLimit = uint64_t(srcOffset) + len;
629 
630   if (dstOffsetLimit > dstTableLen || srcOffsetLimit > srcTableLen) {
631     JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
632                               JSMSG_WASM_OUT_OF_BOUNDS);
633     return -1;
634   }
635 
636   bool isOOM = false;
637 
638   if (&srcTable == &dstTable && dstOffset > srcOffset) {
639     for (uint32_t i = len; i > 0; i--) {
640       if (!dstTable->copy(*srcTable, dstOffset + (i - 1),
641                           srcOffset + (i - 1))) {
642         isOOM = true;
643         break;
644       }
645     }
646   } else if (&srcTable == &dstTable && dstOffset == srcOffset) {
647     // No-op
648   } else {
649     for (uint32_t i = 0; i < len; i++) {
650       if (!dstTable->copy(*srcTable, dstOffset + i, srcOffset + i)) {
651         isOOM = true;
652         break;
653       }
654     }
655   }
656 
657   if (isOOM) {
658     return -1;
659   }
660   return 0;
661 }
662 
663 /* static */ int32_t Instance::elemDrop(Instance* instance, uint32_t segIndex) {
664   MOZ_ASSERT(SASigElemDrop.failureMode == FailureMode::FailOnNegI32);
665 
666   MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
667                      "ensured by validation");
668 
669   if (!instance->passiveElemSegments_[segIndex]) {
670     return 0;
671   }
672 
673   SharedElemSegment& segRefPtr = instance->passiveElemSegments_[segIndex];
674   MOZ_RELEASE_ASSERT(!segRefPtr->active());
675 
676   // Drop this instance's reference to the ElemSegment so it can be released.
677   segRefPtr = nullptr;
678   return 0;
679 }
680 
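// Write `len` entries of an element segment into a table starting at
// `dstOffset`. Null entries clear the slot; for non-function tables the
// funcref is boxed and stored as an AnyRef; for function tables, functions
// re-exported from another instance are resolved to that instance's code and
// Instance so that later Table.get() calls preserve function identity.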
681 bool Instance::initElems(uint32_t tableIndex, const ElemSegment& seg,
682                          uint32_t dstOffset, uint32_t srcOffset, uint32_t len) {
683   Table& table = *tables_[tableIndex];
684   MOZ_ASSERT(dstOffset <= table.length());
685   MOZ_ASSERT(len <= table.length() - dstOffset);
686 
687   Tier tier = code().bestTier();
688   const MetadataTier& metadataTier = metadata(tier);
689   const FuncImportVector& funcImports = metadataTier.funcImports;
690   const CodeRangeVector& codeRanges = metadataTier.codeRanges;
691   const Uint32Vector& funcToCodeRange = metadataTier.funcToCodeRange;
692   const Uint32Vector& elemFuncIndices = seg.elemFuncIndices;
693   MOZ_ASSERT(srcOffset <= elemFuncIndices.length());
694   MOZ_ASSERT(len <= elemFuncIndices.length() - srcOffset);
695 
696   uint8_t* codeBaseTier = codeBase(tier);
697   for (uint32_t i = 0; i < len; i++) {
698     uint32_t funcIndex = elemFuncIndices[srcOffset + i];
699     if (funcIndex == NullFuncIndex) {
700       table.setNull(dstOffset + i);
701     } else if (!table.isFunction()) {
702       // Note, fnref must be rooted if we do anything more than just store it.
703       void* fnref = Instance::refFunc(this, funcIndex);
704       if (fnref == AnyRef::invalid().forCompiledCode()) {
705         return false;  // OOM, which has already been reported.
706       }
707       table.fillAnyRef(dstOffset + i, 1, AnyRef::fromCompiledCode(fnref));
708     } else {
709       if (funcIndex < funcImports.length()) {
710         FuncImportTls& import = funcImportTls(funcImports[funcIndex]);
711         JSFunction* fun = import.fun;
712         if (IsWasmExportedFunction(fun)) {
713           // This element is a wasm function imported from another
714           // instance. To preserve the === function identity required by
715           // the JS embedding spec, we must set the element to the
716           // imported function's underlying CodeRange.funcCheckedCallEntry and
717           // Instance so that future Table.get()s produce the same
718           // function object as was imported.
719           WasmInstanceObject* calleeInstanceObj =
720               ExportedFunctionToInstanceObject(fun);
721           Instance& calleeInstance = calleeInstanceObj->instance();
722           Tier calleeTier = calleeInstance.code().bestTier();
723           const CodeRange& calleeCodeRange =
724               calleeInstanceObj->getExportedFunctionCodeRange(fun, calleeTier);
725           void* code = calleeInstance.codeBase(calleeTier) +
726                        calleeCodeRange.funcCheckedCallEntry();
727           table.setFuncRef(dstOffset + i, code, &calleeInstance);
728           continue;
729         }
730       }
731       void* code =
732           codeBaseTier +
733           codeRanges[funcToCodeRange[funcIndex]].funcCheckedCallEntry();
734       table.setFuncRef(dstOffset + i, code, this);
735     }
736   }
737   return true;
738 }
739 
740 /* static */ int32_t Instance::tableInit(Instance* instance, uint32_t dstOffset,
741                                          uint32_t srcOffset, uint32_t len,
742                                          uint32_t segIndex,
743                                          uint32_t tableIndex) {
744   MOZ_ASSERT(SASigTableInit.failureMode == FailureMode::FailOnNegI32);
745 
746   MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
747                      "ensured by validation");
748 
749   if (!instance->passiveElemSegments_[segIndex]) {
750     if (len == 0 && srcOffset == 0) {
751       return 0;
752     }
753 
754     JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
755                               JSMSG_WASM_OUT_OF_BOUNDS);
756     return -1;
757   }
758 
759   const ElemSegment& seg = *instance->passiveElemSegments_[segIndex];
760   MOZ_RELEASE_ASSERT(!seg.active());
761   const uint32_t segLen = seg.length();
762 
763   const Table& table = *instance->tables()[tableIndex];
764   const uint32_t tableLen = table.length();
765 
766   // We are proposing to copy
767   //
768   //   seg[ srcOffset .. srcOffset + len - 1 ]
769   // to
770   //   tableBase[ dstOffset .. dstOffset + len - 1 ]
771 
772   // Bounds check and deal with arithmetic overflow.
773   uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
774   uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
775 
776   if (dstOffsetLimit > tableLen || srcOffsetLimit > segLen) {
777     JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
778                               JSMSG_WASM_OUT_OF_BOUNDS);
779     return -1;
780   }
781 
782   if (!instance->initElems(tableIndex, seg, dstOffset, srcOffset, len)) {
783     return -1;  // OOM, which has already been reported.
784   }
785 
786   return 0;
787 }
788 
789 /* static */ int32_t Instance::tableFill(Instance* instance, uint32_t start,
790                                          void* value, uint32_t len,
791                                          uint32_t tableIndex) {
792   MOZ_ASSERT(SASigTableFill.failureMode == FailureMode::FailOnNegI32);
793 
794   JSContext* cx = TlsContext.get();
795   Table& table = *instance->tables()[tableIndex];
796 
797   // Bounds check and deal with arithmetic overflow.
798   uint64_t offsetLimit = uint64_t(start) + uint64_t(len);
799 
800   if (offsetLimit > table.length()) {
801     JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
802                               JSMSG_WASM_OUT_OF_BOUNDS);
803     return -1;
804   }
805 
806   switch (table.repr()) {
807     case TableRepr::Ref:
808       table.fillAnyRef(start, len, AnyRef::fromCompiledCode(value));
809       break;
810     case TableRepr::Func:
811       MOZ_RELEASE_ASSERT(!table.isAsmJS());
812       table.fillFuncRef(start, len, FuncRef::fromCompiledCode(value), cx);
813       break;
814   }
815 
816   return 0;
817 }
818 
819 /* static */ void* Instance::tableGet(Instance* instance, uint32_t index,
820                                       uint32_t tableIndex) {
821   MOZ_ASSERT(SASigTableGet.failureMode == FailureMode::FailOnInvalidRef);
822 
823   const Table& table = *instance->tables()[tableIndex];
824   if (index >= table.length()) {
825     JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
826                               JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
827     return AnyRef::invalid().forCompiledCode();
828   }
829 
830   if (table.repr() == TableRepr::Ref) {
831     return table.getAnyRef(index).forCompiledCode();
832   }
833 
834   MOZ_RELEASE_ASSERT(!table.isAsmJS());
835 
836   JSContext* cx = TlsContext.get();
837   RootedFunction fun(cx);
838   if (!table.getFuncRef(cx, index, &fun)) {
839     return AnyRef::invalid().forCompiledCode();
840   }
841 
842   return FuncRef::fromJSFunction(fun).forCompiledCode();
843 }
844 
845 /* static */ uint32_t Instance::tableGrow(Instance* instance, void* initValue,
846                                           uint32_t delta, uint32_t tableIndex) {
847   MOZ_ASSERT(SASigTableGrow.failureMode == FailureMode::Infallible);
848 
849   RootedAnyRef ref(TlsContext.get(), AnyRef::fromCompiledCode(initValue));
850   Table& table = *instance->tables()[tableIndex];
851 
852   uint32_t oldSize = table.grow(delta);
853 
854   if (oldSize != uint32_t(-1) && initValue != nullptr) {
855     switch (table.repr()) {
856       case TableRepr::Ref:
857         table.fillAnyRef(oldSize, delta, ref);
858         break;
859       case TableRepr::Func:
860         MOZ_RELEASE_ASSERT(!table.isAsmJS());
861         table.fillFuncRef(oldSize, delta, FuncRef::fromAnyRefUnchecked(ref),
862                           TlsContext.get());
863         break;
864     }
865   }
866 
867   return oldSize;
868 }
869 
870 /* static */ int32_t Instance::tableSet(Instance* instance, uint32_t index,
871                                         void* value, uint32_t tableIndex) {
872   MOZ_ASSERT(SASigTableSet.failureMode == FailureMode::FailOnNegI32);
873 
874   Table& table = *instance->tables()[tableIndex];
875   if (index >= table.length()) {
876     JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
877                               JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
878     return -1;
879   }
880 
881   switch (table.repr()) {
882     case TableRepr::Ref:
883       table.fillAnyRef(index, 1, AnyRef::fromCompiledCode(value));
884       break;
885     case TableRepr::Func:
886       MOZ_RELEASE_ASSERT(!table.isAsmJS());
887       table.fillFuncRef(index, 1, FuncRef::fromCompiledCode(value),
888                         TlsContext.get());
889       break;
890   }
891 
892   return 0;
893 }
894 
895 /* static */ uint32_t Instance::tableSize(Instance* instance,
896                                           uint32_t tableIndex) {
897   MOZ_ASSERT(SASigTableSize.failureMode == FailureMode::Infallible);
898   Table& table = *instance->tables()[tableIndex];
899   return table.length();
900 }
901 
902 /* static */ void* Instance::refFunc(Instance* instance, uint32_t funcIndex) {
903   MOZ_ASSERT(SASigRefFunc.failureMode == FailureMode::FailOnInvalidRef);
904   JSContext* cx = TlsContext.get();
905 
906   Tier tier = instance->code().bestTier();
907   const MetadataTier& metadataTier = instance->metadata(tier);
908   const FuncImportVector& funcImports = metadataTier.funcImports;
909 
910   // If this is an import, we need to recover the original function to maintain
911   // reference equality between a re-exported function and 'ref.func'. The
912   // identity of the imported function object is stable across tiers, which is
913   // what we want.
914   //
915   // Use the imported function only if it is an exported function, otherwise
916   // fall through to get a (possibly new) exported function.
917   if (funcIndex < funcImports.length()) {
918     FuncImportTls& import = instance->funcImportTls(funcImports[funcIndex]);
919     if (IsWasmExportedFunction(import.fun)) {
920       return FuncRef::fromJSFunction(import.fun).forCompiledCode();
921     }
922   }
923 
924   RootedFunction fun(cx);
925   RootedWasmInstanceObject instanceObj(cx, instance->object());
926   if (!WasmInstanceObject::getExportedFunction(cx, instanceObj, funcIndex,
927                                                &fun)) {
928     // Validation ensures that we always have a valid funcIndex, so we must
929     // have OOM'ed
930     ReportOutOfMemory(cx);
931     return AnyRef::invalid().forCompiledCode();
932   }
933 
934   return FuncRef::fromJSFunction(fun).forCompiledCode();
935 }
936 
937 /* static */ void Instance::preBarrierFiltering(Instance* instance,
938                                                 gc::Cell** location) {
939   MOZ_ASSERT(SASigPreBarrierFiltering.failureMode == FailureMode::Infallible);
940   MOZ_ASSERT(location);
941   gc::PreWriteBarrier(*reinterpret_cast<JSObject**>(location));
942 }
943 
944 /* static */ void Instance::postBarrier(Instance* instance,
945                                         gc::Cell** location) {
946   MOZ_ASSERT(SASigPostBarrier.failureMode == FailureMode::Infallible);
947   MOZ_ASSERT(location);
948   TlsContext.get()->runtime()->gc.storeBuffer().putCell(
949       reinterpret_cast<JSObject**>(location));
950 }
951 
952 /* static */ void Instance::postBarrierFiltering(Instance* instance,
953                                                  gc::Cell** location) {
954   MOZ_ASSERT(SASigPostBarrier.failureMode == FailureMode::Infallible);
955   MOZ_ASSERT(location);
956   if (*location == nullptr || !gc::IsInsideNursery(*location)) {
957     return;
958   }
959   TlsContext.get()->runtime()->gc.storeBuffer().putCell(
960       reinterpret_cast<JSObject**>(location));
961 }
962 
963 // The typeIndex is an index into the rttValues_ table in the instance.
964 // That table holds RttValue objects.
965 //
966 // When we fail to allocate we return a nullptr; the wasm side must check this
967 // and propagate it as an error.
968 
969 /* static */ void* Instance::structNew(Instance* instance, void* structDescr) {
970   MOZ_ASSERT(SASigStructNew.failureMode == FailureMode::FailOnNullPtr);
971   JSContext* cx = TlsContext.get();
972   Rooted<RttValue*> rttValue(cx, (RttValue*)structDescr);
973   MOZ_ASSERT(rttValue);
974   return TypedObject::createStruct(cx, rttValue);
975 }
976 
977 /* static */ void* Instance::arrayNew(Instance* instance, uint32_t length,
978                                       void* arrayDescr) {
979   MOZ_ASSERT(SASigArrayNew.failureMode == FailureMode::FailOnNullPtr);
980   JSContext* cx = TlsContext.get();
981   Rooted<RttValue*> rttValue(cx, (RttValue*)arrayDescr);
982   MOZ_ASSERT(rttValue);
983   return TypedObject::createArray(cx, rttValue, length);
984 }
985 
986 #ifdef ENABLE_WASM_EXCEPTIONS
987 /* static */ void* Instance::exceptionNew(Instance* instance, uint32_t exnIndex,
988                                           uint32_t nbytes) {
989   MOZ_ASSERT(SASigExceptionNew.failureMode == FailureMode::FailOnNullPtr);
990 
991   JSContext* cx = TlsContext.get();
992 
993   SharedExceptionTag tag = instance->exceptionTags()[exnIndex];
994   RootedArrayBufferObject buf(cx, ArrayBufferObject::createZeroed(cx, nbytes));
995 
996   if (!buf) {
997     return nullptr;
998   }
999 
1000   RootedArrayObject refs(cx, NewDenseEmptyArray(cx));
1001 
1002   if (!refs) {
1003     return nullptr;
1004   }
1005 
1006   return AnyRef::fromJSObject(
1007              WasmRuntimeExceptionObject::create(cx, tag, buf, refs))
1008       .forCompiledCode();
1009 }
1010 
1011 /* static */ void* Instance::throwException(Instance* instance, JSObject* exn) {
1012   MOZ_ASSERT(SASigThrowException.failureMode == FailureMode::FailOnNullPtr);
1013 
1014   JSContext* cx = TlsContext.get();
1015   RootedObject exnObj(cx, exn);
1016   RootedValue exnVal(cx);
1017 
1018   if (exnObj->is<WasmJSExceptionObject>()) {
1019     exnVal.set(exnObj.as<WasmJSExceptionObject>()->value());
1020   } else {
1021     exnVal.set(ObjectValue(*exnObj));
1022   }
1023   cx->setPendingException(exnVal, nullptr);
1024 
1025   // By always returning a nullptr, we trigger a wasmTrap(Trap::ThrowReported),
1026   // and use that to trigger the stack walking for this exception.
1027   return nullptr;
1028 }
1029 
1030 /* static */ uint32_t Instance::getLocalExceptionIndex(Instance* instance,
1031                                                        JSObject* exn) {
1032   MOZ_ASSERT(SASigGetLocalExceptionIndex.failureMode ==
1033              FailureMode::Infallible);
1034 
1035   if (exn->is<WasmRuntimeExceptionObject>()) {
1036     ExceptionTag& exnTag = exn->as<WasmRuntimeExceptionObject>().tag();
1037     for (size_t i = 0; i < instance->exceptionTags().length(); i++) {
1038       ExceptionTag& tag = *instance->exceptionTags()[i];
1039       if (&tag == &exnTag) {
1040         return i;
1041       }
1042     }
1043   }
1044 
1045   // Signal an unknown exception tag, e.g., for a non-imported exception or
1046   // JS value.
1047   return UINT32_MAX;
1048 }
1049 
1050 /* static */ int32_t Instance::pushRefIntoExn(Instance* instance, JSObject* exn,
1051                                               JSObject* ref) {
1052   MOZ_ASSERT(SASigPushRefIntoExn.failureMode == FailureMode::FailOnNegI32);
1053 
1054   JSContext* cx = TlsContext.get();
1055 
1056   MOZ_ASSERT(exn->is<WasmRuntimeExceptionObject>());
1057   RootedWasmRuntimeExceptionObject exnObj(
1058       cx, &exn->as<WasmRuntimeExceptionObject>());
1059 
1060   // TODO/AnyRef-boxing: With boxed immediates and strings, this may need to
1061   // handle other kinds of values.
1062   ASSERT_ANYREF_IS_JSOBJECT;
1063 
1064   RootedValue refVal(cx, ObjectOrNullValue(ref));
1065   RootedArrayObject arr(cx, &exnObj->refs());
1066 
1067   if (!NewbornArrayPush(cx, arr, refVal)) {
1068     return -1;
1069   }
1070 
1071   return 0;
1072 }
1073 #endif
1074 
1075 /* static */ int32_t Instance::refTest(Instance* instance, void* refPtr,
1076                                        void* rttPtr) {
1077   MOZ_ASSERT(SASigRefTest.failureMode == FailureMode::Infallible);
1078 
1079   if (!refPtr) {
1080     return 0;
1081   }
1082 
1083   JSContext* cx = TlsContext.get();
1084 
1085   ASSERT_ANYREF_IS_JSOBJECT;
1086   RootedTypedObject ref(
1087       cx, (TypedObject*)AnyRef::fromCompiledCode(refPtr).asJSObject());
1088   RootedRttValue rtt(
1089       cx, &AnyRef::fromCompiledCode(rttPtr).asJSObject()->as<RttValue>());
1090   return int32_t(ref->isRuntimeSubtype(rtt));
1091 }
1092 
1093 /* static */ void* Instance::rttSub(Instance* instance, void* rttPtr) {
1094   MOZ_ASSERT(SASigRttSub.failureMode == FailureMode::FailOnNullPtr);
1095   JSContext* cx = TlsContext.get();
1096 
1097   ASSERT_ANYREF_IS_JSOBJECT;
1098   RootedRttValue parentRtt(
1099       cx, &AnyRef::fromCompiledCode(rttPtr).asJSObject()->as<RttValue>());
1100   RootedRttValue subRtt(cx, RttValue::createFromParent(cx, parentRtt));
1101   return AnyRef::fromJSObject(subRtt.get()).forCompiledCode();
1102 }
1103 
1104 // Note, dst must point into nonmoveable storage that is not in the nursery,
1105 // this matters for the write barriers.  Furthermore, for pointer types the
1106 // current value of *dst must be null so that only a post-barrier is required.
1107 //
1108 // Regarding the destination not being in the nursery, we have these cases.
1109 // Either the written location is in the global data section in the
1110 // WasmInstanceObject, or the Cell of a WasmGlobalObject:
1111 //
1112 // - WasmInstanceObjects are always tenured and u.ref_ may point to a
1113 //   nursery object, so we need a post-barrier since the global data of an
1114 //   instance is effectively a field of the WasmInstanceObject.
1115 //
1116 // - WasmGlobalObjects are always tenured, and they have a Cell field, so a
1117 //   post-barrier may be needed for the same reason as above.
1118 
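// As an illustrative sketch of the indirect-global case handled in
// Instance::init() below: the instance's global area holds only a pointer to
// the owning WasmGlobalObject's Cell, and CopyValPostBarriered writes the
// value through that Cell so the post-barrier covers the right location:
//
//   void* address = (void*)&globalObjs[i]->val().get().cell();
//   *(void**)globalAddr = address;                 // plain pointer store
//   CopyValPostBarriered((uint8_t*)address, val.get());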
1119 void CopyValPostBarriered(uint8_t* dst, const Val& src) {
1120   switch (src.type().kind()) {
1121     case ValType::I32: {
1122       int32_t x = src.i32();
1123       memcpy(dst, &x, sizeof(x));
1124       break;
1125     }
1126     case ValType::I64: {
1127       int64_t x = src.i64();
1128       memcpy(dst, &x, sizeof(x));
1129       break;
1130     }
1131     case ValType::F32: {
1132       float x = src.f32();
1133       memcpy(dst, &x, sizeof(x));
1134       break;
1135     }
1136     case ValType::F64: {
1137       double x = src.f64();
1138       memcpy(dst, &x, sizeof(x));
1139       break;
1140     }
1141     case ValType::V128: {
1142       V128 x = src.v128();
1143       memcpy(dst, &x, sizeof(x));
1144       break;
1145     }
1146     case ValType::Rtt:
1147     case ValType::Ref: {
1148       // TODO/AnyRef-boxing: With boxed immediates and strings, the write
1149       // barrier is going to have to be more complicated.
1150       ASSERT_ANYREF_IS_JSOBJECT;
1151       MOZ_ASSERT(*(void**)dst == nullptr,
1152                  "should be null so no need for a pre-barrier");
1153       AnyRef x = src.ref();
1154       memcpy(dst, x.asJSObjectAddress(), sizeof(*x.asJSObjectAddress()));
1155       if (!x.isNull()) {
1156         JSObject::postWriteBarrier((JSObject**)dst, nullptr, x.asJSObject());
1157       }
1158       break;
1159     }
1160   }
1161 }
1162 
1163 Instance::Instance(JSContext* cx, Handle<WasmInstanceObject*> object,
1164                    SharedCode code, UniqueTlsData tlsDataIn,
1165                    HandleWasmMemoryObject memory,
1166                    SharedExceptionTagVector&& exceptionTags,
1167                    SharedTableVector&& tables, UniqueDebugState maybeDebug)
1168     : realm_(cx->realm()),
1169       object_(object),
1170       jsJitArgsRectifier_(
1171           cx->runtime()->jitRuntime()->getArgumentsRectifier().value),
1172       jsJitExceptionHandler_(
1173           cx->runtime()->jitRuntime()->getExceptionTail().value),
1174       preBarrierCode_(
1175           cx->runtime()->jitRuntime()->preBarrier(MIRType::Object).value),
1176       code_(std::move(code)),
1177       tlsData_(std::move(tlsDataIn)),
1178       memory_(memory),
1179       exceptionTags_(std::move(exceptionTags)),
1180       tables_(std::move(tables)),
1181       maybeDebug_(std::move(maybeDebug))
1182 #ifdef ENABLE_WASM_GC
1183       ,
1184       hasGcTypes_(false)
1185 #endif
1186 {
1187 }
1188 
1189 bool Instance::init(JSContext* cx, const JSFunctionVector& funcImports,
1190                     const ValVector& globalImportValues,
1191                     const WasmGlobalObjectVector& globalObjs,
1192                     const DataSegmentVector& dataSegments,
1193                     const ElemSegmentVector& elemSegments) {
1194   MOZ_ASSERT(!!maybeDebug_ == metadata().debugEnabled);
1195 #ifdef ENABLE_WASM_EXCEPTIONS
1196   // Currently the only events are exceptions.
1197   MOZ_ASSERT(exceptionTags_.length() == metadata().events.length());
1198 #else
1199   MOZ_ASSERT(exceptionTags_.length() == 0);
1200 #endif
1201 
1202 #ifdef DEBUG
1203   for (auto t : code_->tiers()) {
1204     MOZ_ASSERT(funcImports.length() == metadata(t).funcImports.length());
1205   }
1206 #endif
1207   MOZ_ASSERT(tables_.length() == metadata().tables.length());
1208 
1209   tlsData()->memoryBase =
1210       memory_ ? memory_->buffer().dataPointerEither().unwrap() : nullptr;
1211   size_t limit = memory_ ? memory_->boundsCheckLimit() : 0;
1212 #if !defined(JS_64BIT) || defined(ENABLE_WASM_CRANELIFT)
1213   // We assume that the limit is a 32-bit quantity
1214   MOZ_ASSERT(limit <= UINT32_MAX);
1215 #endif
1216   tlsData()->boundsCheckLimit = limit;
1217   tlsData()->instance = this;
1218   tlsData()->realm = realm_;
1219   tlsData()->cx = cx;
1220   tlsData()->valueBoxClass = &WasmValueBox::class_;
1221   tlsData()->resetInterrupt(cx);
1222   tlsData()->jumpTable = code_->tieringJumpTable();
1223   tlsData()->addressOfNeedsIncrementalBarrier =
1224       (uint8_t*)cx->compartment()->zone()->addressOfNeedsIncrementalBarrier();
1225 
1226   // Initialize function imports in the tls data
1227   Tier callerTier = code_->bestTier();
1228   for (size_t i = 0; i < metadata(callerTier).funcImports.length(); i++) {
1229     JSFunction* f = funcImports[i];
1230     const FuncImport& fi = metadata(callerTier).funcImports[i];
1231     FuncImportTls& import = funcImportTls(fi);
1232     import.fun = f;
1233     if (!isAsmJS() && IsWasmExportedFunction(f)) {
1234       WasmInstanceObject* calleeInstanceObj =
1235           ExportedFunctionToInstanceObject(f);
1236       Instance& calleeInstance = calleeInstanceObj->instance();
1237       Tier calleeTier = calleeInstance.code().bestTier();
1238       const CodeRange& codeRange =
1239           calleeInstanceObj->getExportedFunctionCodeRange(f, calleeTier);
1240       import.tls = calleeInstance.tlsData();
1241       import.realm = f->realm();
1242       import.code = calleeInstance.codeBase(calleeTier) +
1243                     codeRange.funcUncheckedCallEntry();
1244     } else if (void* thunk = MaybeGetBuiltinThunk(f, fi.funcType())) {
1245       import.tls = tlsData();
1246       import.realm = f->realm();
1247       import.code = thunk;
1248     } else {
1249       import.tls = tlsData();
1250       import.realm = f->realm();
1251       import.code = codeBase(callerTier) + fi.interpExitCodeOffset();
1252     }
1253   }
1254 
1255   // Initialize tables in the tls data
1256   for (size_t i = 0; i < tables_.length(); i++) {
1257     const TableDesc& td = metadata().tables[i];
1258     TableTls& table = tableTls(td);
1259     table.length = tables_[i]->length();
1260     table.functionBase = tables_[i]->functionBase();
1261   }
1262 
1263   // Initialize globals in the tls data
1264   for (size_t i = 0; i < metadata().globals.length(); i++) {
1265     const GlobalDesc& global = metadata().globals[i];
1266 
1267     // Constants are baked into the code, never stored in the global area.
1268     if (global.isConstant()) {
1269       continue;
1270     }
1271 
1272     uint8_t* globalAddr = globalData() + global.offset();
1273     switch (global.kind()) {
1274       case GlobalKind::Import: {
1275         size_t imported = global.importIndex();
1276         if (global.isIndirect()) {
1277           *(void**)globalAddr =
1278               (void*)&globalObjs[imported]->val().get().cell();
1279         } else {
1280           CopyValPostBarriered(globalAddr, globalImportValues[imported]);
1281         }
1282         break;
1283       }
1284       case GlobalKind::Variable: {
1285         RootedVal val(cx);
1286         const InitExpr& init = global.initExpr();
1287         RootedWasmInstanceObject instanceObj(cx, object());
1288         if (!init.evaluate(cx, globalImportValues, instanceObj, &val)) {
1289           return false;
1290         }
1291 
1292         if (global.isIndirect()) {
1293           void* address = (void*)&globalObjs[i]->val().get().cell();
1294           *(void**)globalAddr = address;
1295           CopyValPostBarriered((uint8_t*)address, val.get());
1296         } else {
1297           CopyValPostBarriered(globalAddr, val.get());
1298         }
1299         break;
1300       }
1301       case GlobalKind::Constant: {
1302         MOZ_CRASH("skipped at the top");
1303       }
1304     }
1305   }
1306 
1307   // Add observer if our memory base may grow
1308   if (memory_ && memory_->movingGrowable() &&
1309       !memory_->addMovingGrowObserver(cx, object_)) {
1310     return false;
1311   }
1312 
1313   // Add observers if our tables may grow
1314   for (const SharedTable& table : tables_) {
1315     if (table->movingGrowable() && !table->addMovingGrowObserver(cx, object_)) {
1316       return false;
1317     }
1318   }
1319 
1320   // Allocate in the global type sets for structural type checks
1321   if (!metadata().types.empty()) {
1322     // Transfer and allocate type objects for the struct types in the module
1323     if (GcAvailable(cx)) {
1324       uint32_t baseIndex = 0;
1325       if (!cx->wasm().typeContext->transferTypes(metadata().types,
1326                                                  &baseIndex)) {
1327         return false;
1328       }
1329 
1330       for (uint32_t typeIndex = 0; typeIndex < metadata().types.length();
1331            typeIndex++) {
1332         const TypeDefWithId& typeDef = metadata().types[typeIndex];
1333         if (!typeDef.isStructType() && !typeDef.isArrayType()) {
1334           continue;
1335         }
1336 #ifndef ENABLE_WASM_GC
1337         MOZ_CRASH("Should not have seen any gc types");
1338 #else
1339         uint32_t globalTypeIndex = baseIndex + typeIndex;
1340         Rooted<RttValue*> rttValue(
1341             cx, RttValue::createFromHandle(cx, TypeHandle(globalTypeIndex)));
1342 
1343         if (!rttValue) {
1344           return false;
1345         }
1346         *((GCPtrObject*)addressOfTypeId(typeDef.id)) = rttValue;
1347         hasGcTypes_ = true;
1348 #endif
1349       }
1350     }
1351 
1352     // Handle functions specially (for now) as they're guaranteed to be
1353     // acyclical and can use simpler hash-consing logic.
1354     ExclusiveData<FuncTypeIdSet>::Guard lockedFuncTypeIdSet =
1355         funcTypeIdSet.lock();
1356 
1357     for (const TypeDefWithId& typeDef : metadata().types) {
1358       switch (typeDef.kind()) {
1359         case TypeDefKind::Func: {
1360           const FuncType& funcType = typeDef.funcType();
1361           const void* funcTypeId;
1362           if (!lockedFuncTypeIdSet->allocateFuncTypeId(cx, funcType,
1363                                                        &funcTypeId)) {
1364             return false;
1365           }
1366           *addressOfTypeId(typeDef.id) = funcTypeId;
1367           break;
1368         }
1369         case TypeDefKind::Struct:
1370         case TypeDefKind::Array:
1371           continue;
1372         default:
1373           MOZ_CRASH();
1374       }
1375     }
1376   }
1377 
1378   // Take references to the passive data segments
1379   if (!passiveDataSegments_.resize(dataSegments.length())) {
1380     return false;
1381   }
1382   for (size_t i = 0; i < dataSegments.length(); i++) {
1383     if (!dataSegments[i]->active()) {
1384       passiveDataSegments_[i] = dataSegments[i];
1385     }
1386   }
1387 
1388   // Take references to the passive element segments
1389   if (!passiveElemSegments_.resize(elemSegments.length())) {
1390     return false;
1391   }
1392   for (size_t i = 0; i < elemSegments.length(); i++) {
1393     if (elemSegments[i]->kind != ElemSegment::Kind::Active) {
1394       passiveElemSegments_[i] = elemSegments[i];
1395     }
1396   }
1397 
1398   return true;
1399 }
1400 
1401 Instance::~Instance() {
1402   realm_->wasm.unregisterInstance(*this);
1403 
1404   if (!metadata().types.empty()) {
1405     ExclusiveData<FuncTypeIdSet>::Guard lockedFuncTypeIdSet =
1406         funcTypeIdSet.lock();
1407 
1408     for (const TypeDefWithId& typeDef : metadata().types) {
1409       if (!typeDef.isFuncType()) {
1410         continue;
1411       }
1412       const FuncType& funcType = typeDef.funcType();
1413       if (const void* funcTypeId = *addressOfTypeId(typeDef.id)) {
1414         lockedFuncTypeIdSet->deallocateFuncTypeId(funcType, funcTypeId);
1415       }
1416     }
1417   }
1418 }
1419 
1420 size_t Instance::memoryMappedSize() const {
1421   return memory_->buffer().wasmMappedSize();
1422 }
1423 
1424 bool Instance::memoryAccessInGuardRegion(const uint8_t* addr,
1425                                          unsigned numBytes) const {
1426   MOZ_ASSERT(numBytes > 0);
1427 
1428   if (!metadata().usesMemory()) {
1429     return false;
1430   }
1431 
1432   uint8_t* base = memoryBase().unwrap(/* comparison */);
1433   if (addr < base) {
1434     return false;
1435   }
1436 
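  // The access lies in the guard region if its last byte is at or beyond the
  // current (volatile) memory length but still inside the mapped region,
  // i.e. within the trailing guard pages.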
1437   size_t lastByteOffset = addr - base + (numBytes - 1);
1438   return lastByteOffset >= memory()->volatileMemoryLength() &&
1439          lastByteOffset < memoryMappedSize();
1440 }
1441 
1442 void Instance::tracePrivate(JSTracer* trc) {
1443   // This method is only called from WasmInstanceObject, so the only reason
1444   // TraceEdge is called here is to allow the pointer to be updated during a
1445   // moving GC.
1446   MOZ_ASSERT_IF(trc->isMarkingTracer(), gc::IsMarked(trc->runtime(), &object_));
1447   TraceEdge(trc, &object_, "wasm instance object");
1448 
1449   // OK to just do one tier here; though the tiers have different funcImports
1450   // tables, they share the tls object.
1451   for (const FuncImport& fi : metadata(code().stableTier()).funcImports) {
1452     TraceNullableEdge(trc, &funcImportTls(fi).fun, "wasm import");
1453   }
1454 
1455   for (const SharedTable& table : tables_) {
1456     table->trace(trc);
1457   }
1458 
1459   for (const GlobalDesc& global : code().metadata().globals) {
1460     // Indirect reference globals get traced by the owning WebAssembly.Global.
1461     if (!global.type().isReference() || global.isConstant() ||
1462         global.isIndirect()) {
1463       continue;
1464     }
1465     GCPtrObject* obj = (GCPtrObject*)(globalData() + global.offset());
1466     TraceNullableEdge(trc, obj, "wasm reference-typed global");
1467   }
1468 
1469   TraceNullableEdge(trc, &memory_, "wasm buffer");
1470 #ifdef ENABLE_WASM_GC
1471   if (hasGcTypes_) {
1472     for (const TypeDefWithId& typeDef : metadata().types) {
1473       if (!typeDef.isStructType() && !typeDef.isArrayType()) {
1474         continue;
1475       }
1476       TraceNullableEdge(trc, ((GCPtrObject*)addressOfTypeId(typeDef.id)),
1477                         "wasm rtt value");
1478     }
1479   }
1480 #endif
1481 
1482   if (maybeDebug_) {
1483     maybeDebug_->trace(trc);
1484   }
1485 }
1486 
1487 void Instance::trace(JSTracer* trc) {
1488   // Technically, instead of having this method, the caller could use
1489   // Instance::object() to get the owning WasmInstanceObject to mark,
1490   // but this method is simpler and more efficient. The trace hook of
1491   // WasmInstanceObject will call Instance::tracePrivate at which point we
1492   // can mark the rest of the children.
1493   TraceEdge(trc, &object_, "wasm instance object");
1494 }
1495 
1496 uintptr_t Instance::traceFrame(JSTracer* trc, const wasm::WasmFrameIter& wfi,
1497                                uint8_t* nextPC,
1498                                uintptr_t highestByteVisitedInPrevFrame) {
1499   const StackMap* map = code().lookupStackMap(nextPC);
1500   if (!map) {
1501     return 0;
1502   }
1503 
1504   Frame* frame = wfi.frame();
1505 
1506   // |frame| points somewhere in the middle of the area described by |map|.
1507   // We have to calculate |scanStart|, the lowest address that is described by
1508   // |map|, by consulting |map->frameOffsetFromTop|.
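  //
  // As an illustrative example (made-up numbers): on a 64-bit target with
  // map->numMappedWords == 6 and map->frameOffsetFromTop == 2, we get
  // numMappedBytes == 48 and scanStart == uintptr_t(frame) + 16 - 48, so the
  // map describes the 48-byte range [frame - 32, frame + 16).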
1509 
1510   const size_t numMappedBytes = map->numMappedWords * sizeof(void*);
1511   const uintptr_t scanStart = uintptr_t(frame) +
1512                               (map->frameOffsetFromTop * sizeof(void*)) -
1513                               numMappedBytes;
1514   MOZ_ASSERT(0 == scanStart % sizeof(void*));
1515 
1516   // Do what we can to assert that, for consecutive wasm frames, their stack
1517   // maps also abut exactly.  This is a useful sanity check on the sizing of
1518   // stackmaps.
1519   //
1520   // In debug builds, the stackmap construction machinery goes to considerable
1521   // efforts to ensure that the stackmaps for consecutive frames abut exactly.
1522   // This is so as to ensure there are no areas of stack inadvertently ignored
1523   // by a stackmap, nor covered by two stackmaps.  Hence any failure of this
1524   // assertion is serious and should be investigated.
1525 
1526   // This condition doesn't hold for Cranelift-compiled code
1527   // (https://github.com/bytecodealliance/wasmtime/issues/2281), but it is OK
1528   // to disable the assertion in that case because when Cranelift compiles a
1529   // function, its prologue (in generated code) copies all of the in-memory
1530   // arguments into registers, so none of the in-memory argument words are
1531   // actually live.
1532 #ifndef JS_CODEGEN_ARM64
1533   MOZ_ASSERT_IF(highestByteVisitedInPrevFrame != 0,
1534                 highestByteVisitedInPrevFrame + 1 == scanStart);
1535 #endif
1536 
1537   uintptr_t* stackWords = (uintptr_t*)scanStart;
1538 
1539   // If we have some exit stub words, this means the map also covers an area
1540   // created by an exit stub, and so the highest word of that should be a
1541   // constant created by (code created by) GenerateTrapExit.
1542   MOZ_ASSERT_IF(
1543       map->numExitStubWords > 0,
1544       stackWords[map->numExitStubWords - 1 - TrapExitDummyValueOffsetFromTop] ==
1545           TrapExitDummyValue);
1546 
1547   // And actually hand them off to the GC.
1548   for (uint32_t i = 0; i < map->numMappedWords; i++) {
1549     if (map->getBit(i) == 0) {
1550       continue;
1551     }
1552 
1553     // TODO/AnyRef-boxing: With boxed immediates and strings, the value may
1554     // not be a traceable JSObject*.
1555     ASSERT_ANYREF_IS_JSOBJECT;
1556 
1557     // This assertion seems at least moderately effective in detecting
1558     // discrepancies or misalignments between the map and reality.
1559     MOZ_ASSERT(js::gc::IsCellPointerValidOrNull((const void*)stackWords[i]));
1560 
1561     if (stackWords[i]) {
1562       TraceRoot(trc, (JSObject**)&stackWords[i],
1563                 "Instance::traceWasmFrame: normal word");
1564     }
1565   }
1566 
1567   // Finally, deal with any GC-managed fields in the DebugFrame, if it is
1568   // present.
1569   if (map->hasDebugFrame) {
1570     DebugFrame* debugFrame = DebugFrame::from(frame);
1571     char* debugFrameP = (char*)debugFrame;
1572 
1573     // TODO/AnyRef-boxing: With boxed immediates and strings, the value may
1574     // not be a traceable JSObject*.
1575     ASSERT_ANYREF_IS_JSOBJECT;
1576 
1577     for (size_t i = 0; i < MaxRegisterResults; i++) {
1578       if (debugFrame->hasSpilledRegisterRefResult(i)) {
1579         char* resultRefP = debugFrameP + DebugFrame::offsetOfRegisterResult(i);
1580         TraceNullableRoot(
1581             trc, (JSObject**)resultRefP,
1582             "Instance::traceWasmFrame: DebugFrame::resultResults_");
1583       }
1584     }
1585 
1586     if (debugFrame->hasCachedReturnJSValue()) {
1587       char* cachedReturnJSValueP =
1588           debugFrameP + DebugFrame::offsetOfCachedReturnJSValue();
1589       TraceRoot(trc, (js::Value*)cachedReturnJSValueP,
1590                 "Instance::traceWasmFrame: DebugFrame::cachedReturnJSValue_");
1591     }
1592   }
1593 
1594   return scanStart + numMappedBytes - 1;
1595 }
1596 
1597 WasmMemoryObject* Instance::memory() const { return memory_; }
1598 
1599 SharedMem<uint8_t*> Instance::memoryBase() const {
1600   MOZ_ASSERT(metadata().usesMemory());
1601   MOZ_ASSERT(tlsData()->memoryBase == memory_->buffer().dataPointerEither());
1602   return memory_->buffer().dataPointerEither();
1603 }
1604 
1605 SharedArrayRawBuffer* Instance::sharedMemoryBuffer() const {
1606   MOZ_ASSERT(memory_->isShared());
1607   return memory_->sharedArrayRawBuffer();
1608 }
1609 
1610 WasmInstanceObject* Instance::objectUnbarriered() const {
1611   return object_.unbarrieredGet();
1612 }
1613 
1614 WasmInstanceObject* Instance::object() const { return object_; }
1615 
1616 static bool EnsureEntryStubs(const Instance& instance, uint32_t funcIndex,
1617                              const FuncExport** funcExport,
1618                              void** interpEntry) {
1619   Tier tier = instance.code().bestTier();
1620 
1621   size_t funcExportIndex;
1622   *funcExport =
1623       &instance.metadata(tier).lookupFuncExport(funcIndex, &funcExportIndex);
1624 
1625   const FuncExport& fe = **funcExport;
1626   if (fe.hasEagerStubs()) {
1627     *interpEntry = instance.codeBase(tier) + fe.eagerInterpEntryOffset();
1628     return true;
1629   }
1630 
1631   MOZ_ASSERT(!instance.isAsmJS(), "only wasm can lazily export functions");
1632 
1633   // If the best tier is Ion, life is simple: background compilation has
1634   // already completed and has been committed, so there's no risk of race
1635   // conditions here.
1636   //
1637   // If the best tier is Baseline, there could be a background compilation
1638   // happening at the same time. The background compilation will lock the
1639   // first tier lazy stubs first to stop new baseline stubs from being
1640   // generated, then the second tier stubs to generate them.
1641   //
1642   // - either we take the tier1 lazy stub lock before the background
1643   // compilation gets it, then we generate the lazy stub for tier1. When the
1644   // background thread gets the tier1 lazy stub lock, it will see it has a
1645   // lazy stub and will recompile it for tier2.
1646   // - or we don't take the lock here first. Background compilation won't
1647   // find a lazy stub for this function, thus won't generate it. So we'll do
1648   // it ourselves after taking the tier2 lock.
1649   //
1650   // Also see doc block for stubs in WasmJS.cpp.
1651 
1652   auto stubs = instance.code(tier).lazyStubs().lock();
1653   *interpEntry = stubs->lookupInterpEntry(fe.funcIndex());
1654   if (*interpEntry) {
1655     return true;
1656   }
1657 
1658   // The best tier might have changed after we've taken the lock.
1659   Tier prevTier = tier;
1660   tier = instance.code().bestTier();
1661   const CodeTier& codeTier = instance.code(tier);
1662   if (tier == prevTier) {
1663     if (!stubs->createOne(funcExportIndex, codeTier)) {
1664       return false;
1665     }
1666 
1667     *interpEntry = stubs->lookupInterpEntry(fe.funcIndex());
1668     MOZ_ASSERT(*interpEntry);
1669     return true;
1670   }
1671 
1672   MOZ_RELEASE_ASSERT(prevTier == Tier::Baseline && tier == Tier::Optimized);
1673   auto stubs2 = instance.code(tier).lazyStubs().lock();
1674 
1675   // If it didn't have a stub in the first tier, background compilation
1676   // shouldn't have made one in the second tier.
1677   MOZ_ASSERT(!stubs2->hasStub(fe.funcIndex()));
1678 
1679   if (!stubs2->createOne(funcExportIndex, codeTier)) {
1680     return false;
1681   }
1682 
1683   *interpEntry = stubs2->lookupInterpEntry(fe.funcIndex());
1684   MOZ_ASSERT(*interpEntry);
1685   return true;
1686 }
1687 
1688 static bool GetInterpEntryAndEnsureStubs(JSContext* cx, Instance& instance,
1689                                          uint32_t funcIndex, CallArgs args,
1690                                          void** interpEntry,
1691                                          const FuncType** funcType) {
1692   const FuncExport* funcExport;
1693   if (!EnsureEntryStubs(instance, funcIndex, &funcExport, interpEntry)) {
1694     return false;
1695   }
1696 
1697 #ifdef DEBUG
1698   // EnsureEntryStubs() has ensured that proper jit-entry stubs have been
1699   // created and installed in funcIndex's JumpTable entry, so check against
1700   // the presence of the provisional lazy stub.  See also
1701   // WasmInstanceObject::getExportedFunction().
1702   if (!funcExport->hasEagerStubs() && funcExport->canHaveJitEntry()) {
1703     if (!EnsureBuiltinThunksInitialized()) {
1704       return false;
1705     }
1706     JSFunction& callee = args.callee().as<JSFunction>();
1707     void* provisionalLazyJitEntryStub = ProvisionalLazyJitEntryStub();
1708     MOZ_ASSERT(provisionalLazyJitEntryStub);
1709     MOZ_ASSERT(callee.isWasmWithJitEntry());
1710     MOZ_ASSERT(*callee.wasmJitEntry() != provisionalLazyJitEntryStub);
1711   }
1712 #endif
1713 
1714   *funcType = &funcExport->funcType();
1715   return true;
1716 }
1717 
1718 bool wasm::ResultsToJSValue(JSContext* cx, ResultType type,
1719                             void* registerResultLoc,
1720                             Maybe<char*> stackResultsLoc,
1721                             MutableHandleValue rval, CoercionLevel level) {
1722   if (type.empty()) {
1723     // No results: set to undefined, and we're done.
1724     rval.setUndefined();
1725     return true;
1726   }
1727 
1728   // If we added support for multiple register results, we'd need to establish a
1729   // convention for how to store them to memory in registerResultLoc.  For now
1730   // we can punt.
1731   static_assert(MaxRegisterResults == 1);
1732 
1733   // Stack results written to stackResultsLoc; register result written
1734   // to registerResultLoc.
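  //
  // For example (illustrative): with MaxRegisterResults == 1, a two-result
  // function delivers one value via registerResultLoc and the other via the
  // stack-results area; both are converted below and handed back to JS as a
  // dense array.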
1735 
1736   // First, convert the register return value, and prepare to iterate in
1737   // push order.  Note that if the register result is a reference type,
1738   // it may be unrooted, so ToJSValue_anyref must not GC in that case.
1739   ABIResultIter iter(type);
1740   DebugOnly<bool> usedRegisterResult = false;
1741   for (; !iter.done(); iter.next()) {
1742     if (iter.cur().inRegister()) {
1743       MOZ_ASSERT(!usedRegisterResult);
1744       if (!ToJSValue<DebugCodegenVal>(cx, registerResultLoc, iter.cur().type(),
1745                                       rval, level)) {
1746         return false;
1747       }
1748       usedRegisterResult = true;
1749     }
1750   }
1751   MOZ_ASSERT(usedRegisterResult);
1752 
1753   MOZ_ASSERT((stackResultsLoc.isSome()) == (iter.count() > 1));
1754   if (!stackResultsLoc) {
1755     // A single result: we're done.
1756     return true;
1757   }
1758 
1759   // Otherwise, collect results in an array, in push order.
1760   Rooted<ArrayObject*> array(cx, NewDenseEmptyArray(cx));
1761   if (!array) {
1762     return false;
1763   }
1764   RootedValue tmp(cx);
1765   for (iter.switchToPrev(); !iter.done(); iter.prev()) {
1766     const ABIResult& result = iter.cur();
1767     if (result.onStack()) {
1768       char* loc = stackResultsLoc.value() + result.stackOffset();
1769       if (!ToJSValue<DebugCodegenVal>(cx, loc, result.type(), &tmp, level)) {
1770         return false;
1771       }
1772       if (!NewbornArrayPush(cx, array, tmp)) {
1773         return false;
1774       }
1775     } else {
1776       if (!NewbornArrayPush(cx, array, rval)) {
1777         return false;
1778       }
1779     }
1780   }
1781   rval.set(ObjectValue(*array));
1782   return true;
1783 }
1784 
1785 class MOZ_RAII ReturnToJSResultCollector {
1786   class MOZ_RAII StackResultsRooter : public JS::CustomAutoRooter {
1787     ReturnToJSResultCollector& collector_;
1788 
1789    public:
1790     StackResultsRooter(JSContext* cx, ReturnToJSResultCollector& collector)
1791         : JS::CustomAutoRooter(cx), collector_(collector) {}
1792 
1793     void trace(JSTracer* trc) final {
1794       for (ABIResultIter iter(collector_.type_); !iter.done(); iter.next()) {
1795         const ABIResult& result = iter.cur();
1796         if (result.onStack() && result.type().isReference()) {
1797           char* loc = collector_.stackResultsArea_.get() + result.stackOffset();
1798           JSObject** refLoc = reinterpret_cast<JSObject**>(loc);
1799           TraceNullableRoot(trc, refLoc, "StackResultsRooter::trace");
1800         }
1801       }
1802     }
1803   };
1804   friend class StackResultsRooter;
1805 
1806   ResultType type_;
1807   UniquePtr<char[], JS::FreePolicy> stackResultsArea_;
1808   Maybe<StackResultsRooter> rooter_;
1809 
1810  public:
1811   explicit ReturnToJSResultCollector(const ResultType& type) : type_(type){};
1812   bool init(JSContext* cx) {
1813     bool needRooter = false;
1814     ABIResultIter iter(type_);
1815     for (; !iter.done(); iter.next()) {
1816       const ABIResult& result = iter.cur();
1817       if (result.onStack() && result.type().isReference()) {
1818         needRooter = true;
1819       }
1820     }
1821     uint32_t areaBytes = iter.stackBytesConsumedSoFar();
1822     MOZ_ASSERT_IF(needRooter, areaBytes > 0);
1823     if (areaBytes > 0) {
1824       // It is necessary to zero storage for ref results, and it doesn't
1825       // hurt to do so for other POD results.
1826       stackResultsArea_ = cx->make_zeroed_pod_array<char>(areaBytes);
1827       if (!stackResultsArea_) {
1828         return false;
1829       }
1830       if (needRooter) {
1831         rooter_.emplace(cx, *this);
1832       }
1833     }
1834     return true;
1835   }
1836 
1837   void* stackResultsArea() {
1838     MOZ_ASSERT(stackResultsArea_);
1839     return stackResultsArea_.get();
1840   }
1841 
1842   bool collect(JSContext* cx, void* registerResultLoc, MutableHandleValue rval,
1843                CoercionLevel level) {
1844     Maybe<char*> stackResultsLoc =
1845         stackResultsArea_ ? Some(stackResultsArea_.get()) : Nothing();
1846     return ResultsToJSValue(cx, type_, registerResultLoc, stackResultsLoc, rval,
1847                             level);
1848   }
1849 };
1850 
1851 bool Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args,
1852                           CoercionLevel level) {
1853   if (memory_) {
1854     // If there has been a moving grow, this Instance should have been notified.
1855     MOZ_RELEASE_ASSERT(memory_->buffer().dataPointerEither() == memoryBase());
1856   }
1857 
1858   void* interpEntry;
1859   const FuncType* funcType;
1860   if (!GetInterpEntryAndEnsureStubs(cx, *this, funcIndex, args, &interpEntry,
1861                                     &funcType)) {
1862     return false;
1863   }
1864 
1865   // Lossless coercions can handle unexposable arguments or returns. This is
1866   // only available in testing code.
1867   if (level != CoercionLevel::Lossless && funcType->hasUnexposableArgOrRet()) {
1868     JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
1869                              JSMSG_WASM_BAD_VAL_TYPE);
1870     return false;
1871   }
1872 
1873   ArgTypeVector argTypes(*funcType);
1874   ResultType resultType(ResultType::Vector(funcType->results()));
1875   ReturnToJSResultCollector results(resultType);
1876   if (!results.init(cx)) {
1877     return false;
1878   }
1879 
1880   // The calling convention for an external call into wasm is to pass an
1881   // array of 16-byte values where each value contains either a coerced int32
1882   // (in the low word), or a double value (in the low dword), with the
1883   // coercions specified by the wasm signature. The external entry point
1884   // unpacks this array into the system-ABI-specified registers and stack
1885   // memory and then calls into the internal entry point. The return value is
1886   // stored in the first element of the array (which, therefore, must have
1887   // length >= 1).
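  //
  // Illustrative sketch (not normative): for an export of type
  // (i32, f64) -> i32, |exportArgs| is sized to 2; exportArgs[0]'s low word
  // receives the coerced int32 argument, exportArgs[1]'s low dword receives
  // the double, and after the call exportArgs[0] holds the i32 return value.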
1888   Vector<ExportArg, 8> exportArgs(cx);
1889   if (!exportArgs.resize(
1890           std::max<size_t>(1, argTypes.lengthWithStackResults()))) {
1891     return false;
1892   }
1893 
1894   ASSERT_ANYREF_IS_JSOBJECT;
1895   Rooted<GCVector<JSObject*, 8, SystemAllocPolicy>> refs(cx);
1896 
1897   DebugCodegen(DebugChannel::Function, "wasm-function[%d] arguments [",
1898                funcIndex);
1899   RootedValue v(cx);
1900   for (size_t i = 0; i < argTypes.lengthWithStackResults(); ++i) {
1901     void* rawArgLoc = &exportArgs[i];
1902     if (argTypes.isSyntheticStackResultPointerArg(i)) {
1903       *reinterpret_cast<void**>(rawArgLoc) = results.stackResultsArea();
1904       continue;
1905     }
1906     size_t naturalIdx = argTypes.naturalIndex(i);
1907     v = naturalIdx < args.length() ? args[naturalIdx] : UndefinedValue();
1908     ValType type = funcType->arg(naturalIdx);
1909     if (!ToWebAssemblyValue<DebugCodegenVal>(cx, v, type, rawArgLoc, true,
1910                                              level)) {
1911       return false;
1912     }
1913     if (type.isReference()) {
1914       void* ptr = *reinterpret_cast<void**>(rawArgLoc);
1915       // Store in rooted array until no more GC is possible.
1916       switch (type.refTypeKind()) {
1917         case RefType::Func: {
1918           RootedFunction ref(cx, FuncRef::fromCompiledCode(ptr).asJSFunction());
1919           if (!refs.emplaceBack(ref)) {
1920             return false;
1921           }
1922           break;
1923         }
1924         case RefType::Extern:
1925         case RefType::Eq: {
1926           RootedAnyRef ref(cx, AnyRef::fromCompiledCode(ptr));
1927           ASSERT_ANYREF_IS_JSOBJECT;
1928           if (!refs.emplaceBack(ref.get().asJSObject())) {
1929             return false;
1930           }
1931           break;
1932         }
1933         case RefType::TypeIndex:
1934           MOZ_CRASH("temporarily unsupported Ref type in callExport");
1935       }
1936       DebugCodegen(DebugChannel::Function, "/(#%d)", int(refs.length() - 1));
1937     }
1938   }
1939 
1940   // Copy over reference values from the rooted array, if any.
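  // Coercing the later arguments above may have triggered a GC; the raw
  // pointers previously produced for reference arguments would not have been
  // updated by such a GC, whereas the rooted |refs| vector was, so the
  // pointers are written into |exportArgs| only now, when no further GC can
  // happen before the call.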
1941   if (refs.length() > 0) {
1942     DebugCodegen(DebugChannel::Function, "; ");
1943     size_t nextRef = 0;
1944     for (size_t i = 0; i < argTypes.lengthWithStackResults(); ++i) {
1945       if (argTypes.isSyntheticStackResultPointerArg(i)) {
1946         continue;
1947       }
1948       size_t naturalIdx = argTypes.naturalIndex(i);
1949       ValType type = funcType->arg(naturalIdx);
1950       if (type.isReference()) {
1951         void** rawArgLoc = (void**)&exportArgs[i];
1952         *rawArgLoc = refs[nextRef++];
1953         DebugCodegen(DebugChannel::Function, " ref(#%d) := %p ",
1954                      int(nextRef - 1), *rawArgLoc);
1955       }
1956     }
1957     refs.clear();
1958   }
1959 
1960   DebugCodegen(DebugChannel::Function, "]\n");
1961 
1962   {
1963     JitActivation activation(cx);
1964 
1965     // Call the per-exported-function trampoline created by GenerateEntry.
1966     auto funcPtr = JS_DATA_TO_FUNC_PTR(ExportFuncPtr, interpEntry);
1967     if (!CALL_GENERATED_2(funcPtr, exportArgs.begin(), tlsData())) {
1968       return false;
1969     }
1970   }
1971 
1972   if (isAsmJS() && args.isConstructing()) {
1973     // By spec, when a JS function is called as a constructor and this
1974     // function returns a primitive value, which is the case for all asm.js
1975     // exported functions, the returned value is discarded and an empty
1976     // object is returned instead.
1977     PlainObject* obj = NewBuiltinClassInstance<PlainObject>(cx);
1978     if (!obj) {
1979       return false;
1980     }
1981     args.rval().set(ObjectValue(*obj));
1982     return true;
1983   }
1984 
1985   // Note that we're not rooting the register result, if any; we depend
1986   // on ReturnToJSResultCollector::collect to root the value on our behalf,
1987   // before causing any GC.
1988   void* registerResultLoc = &exportArgs[0];
1989   DebugCodegen(DebugChannel::Function, "wasm-function[%d]; results [",
1990                funcIndex);
1991   if (!results.collect(cx, registerResultLoc, args.rval(), level)) {
1992     return false;
1993   }
1994   DebugCodegen(DebugChannel::Function, "]\n");
1995 
1996   return true;
1997 }
1998 
1999 JSAtom* Instance::getFuncDisplayAtom(JSContext* cx, uint32_t funcIndex) const {
2000   // The "display name" of a function is primarily shown in Error.stack which
2001   // also includes location, so use getFuncNameBeforeLocation.
2002   UTF8Bytes name;
2003   if (!metadata().getFuncNameBeforeLocation(funcIndex, &name)) {
2004     return nullptr;
2005   }
2006 
2007   return AtomizeUTF8Chars(cx, name.begin(), name.length());
2008 }
2009 
2010 void Instance::ensureProfilingLabels(bool profilingEnabled) const {
2011   return code_->ensureProfilingLabels(profilingEnabled);
2012 }
2013 
2014 void Instance::onMovingGrowMemory() {
2015   MOZ_ASSERT(!isAsmJS());
2016   MOZ_ASSERT(!memory_->isShared());
2017 
2018   ArrayBufferObject& buffer = memory_->buffer().as<ArrayBufferObject>();
2019   tlsData()->memoryBase = buffer.dataPointer();
2020   size_t limit = memory_->boundsCheckLimit();
2021 #if !defined(JS_64BIT) || defined(ENABLE_WASM_CRANELIFT)
2022   // We assume that the limit is a 32-bit quantity
2023   MOZ_ASSERT(limit <= UINT32_MAX);
2024 #endif
2025   tlsData()->boundsCheckLimit = limit;
2026 }
2027 
2028 void Instance::onMovingGrowTable(const Table* theTable) {
2029   MOZ_ASSERT(!isAsmJS());
2030 
2031   // `theTable` has grown and we must update cached data for it.  Importantly,
2032   // we may have cached those data in more than one location: we'll have
2033   // cached them once for each time the table was imported into this instance.
2034   //
2035   // When an instance is registered as an observer of a table it is only
2036   // registered once, regardless of how many times the table was imported.
2037   // Thus when a table is grown, onMovingGrowTable() is only invoked once for
2038   // the table.
2039   //
2040   // Ergo we must go through the entire list of tables in the instance here
2041   // and check for the table in all the cached-data slots; we can't exit after
2042   // the first hit.
2043 
2044   for (uint32_t i = 0; i < tables_.length(); i++) {
2045     if (tables_[i] == theTable) {
2046       TableTls& table = tableTls(metadata().tables[i]);
2047       table.length = tables_[i]->length();
2048       table.functionBase = tables_[i]->functionBase();
2049     }
2050   }
2051 }
2052 
2053 JSString* Instance::createDisplayURL(JSContext* cx) {
2054   // In the best case, we simply have a URL, from a streaming compilation of a
2055   // fetched Response.
2056 
2057   if (metadata().filenameIsURL) {
2058     return NewStringCopyZ<CanGC>(cx, metadata().filename.get());
2059   }
2060 
2061   // Otherwise, build the wasm module URL from the following parts:
2062   // - "wasm:" as protocol;
2063   // - URI-encoded filename from metadata (if it can be encoded), plus ":";
2064   // - 64-bit hash of the module bytes (as hex dump).
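  //
  // For example (purely illustrative), a debug-enabled module loaded from
  // "demo.wasm" might end up with a display URL like
  // "wasm:demo.wasm:0f1e2d3c4b5a6978" (16 hex digits for the 64-bit hash).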
2065 
2066   JSStringBuilder result(cx);
2067   if (!result.append("wasm:")) {
2068     return nullptr;
2069   }
2070 
2071   if (const char* filename = metadata().filename.get()) {
2072     // EncodeURI can fail due to invalid chars or OOM -- keep the pending
2073     // exception only for OOM; otherwise clear it.
2074     JSString* filenamePrefix = EncodeURI(cx, filename, strlen(filename));
2075     if (!filenamePrefix) {
2076       if (cx->isThrowingOutOfMemory()) {
2077         return nullptr;
2078       }
2079 
2080       MOZ_ASSERT(!cx->isThrowingOverRecursed());
2081       cx->clearPendingException();
2082       return nullptr;
2083     }
2084 
2085     if (!result.append(filenamePrefix)) {
2086       return nullptr;
2087     }
2088   }
2089 
2090   if (metadata().debugEnabled) {
2091     if (!result.append(":")) {
2092       return nullptr;
2093     }
2094 
2095     const ModuleHash& hash = metadata().debugHash;
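    // Each byte is emitted as two lowercase hex digits, most significant
    // nibble first; e.g. 0x3f becomes "3f".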
2096     for (unsigned char byte : hash) {
2097       char digit1 = byte / 16, digit2 = byte % 16;
2098       if (!result.append(
2099               (char)(digit1 < 10 ? digit1 + '0' : digit1 + 'a' - 10))) {
2100         return nullptr;
2101       }
2102       if (!result.append(
2103               (char)(digit2 < 10 ? digit2 + '0' : digit2 + 'a' - 10))) {
2104         return nullptr;
2105       }
2106     }
2107   }
2108 
2109   return result.finishString();
2110 }
2111 
2112 WasmBreakpointSite* Instance::getOrCreateBreakpointSite(JSContext* cx,
2113                                                         uint32_t offset) {
2114   MOZ_ASSERT(debugEnabled());
2115   return debug().getOrCreateBreakpointSite(cx, this, offset);
2116 }
2117 
2118 void Instance::destroyBreakpointSite(JSFreeOp* fop, uint32_t offset) {
2119   MOZ_ASSERT(debugEnabled());
2120   return debug().destroyBreakpointSite(fop, this, offset);
2121 }
2122 
2123 void Instance::disassembleExport(JSContext* cx, uint32_t funcIndex, Tier tier,
2124                                  PrintCallback printString) const {
2125   const MetadataTier& metadataTier = metadata(tier);
2126   const FuncExport& funcExport = metadataTier.lookupFuncExport(funcIndex);
2127   const CodeRange& range = metadataTier.codeRange(funcExport);
2128   const CodeTier& codeTier = code(tier);
2129   const ModuleSegment& segment = codeTier.segment();
2130 
2131   MOZ_ASSERT(range.begin() < segment.length());
2132   MOZ_ASSERT(range.end() < segment.length());
2133 
2134   uint8_t* functionCode = segment.base() + range.begin();
2135   jit::Disassemble(functionCode, range.end() - range.begin(), printString);
2136 }
2137 
2138 void Instance::addSizeOfMisc(MallocSizeOf mallocSizeOf,
2139                              Metadata::SeenSet* seenMetadata,
2140                              Code::SeenSet* seenCode,
2141                              Table::SeenSet* seenTables, size_t* code,
2142                              size_t* data) const {
2143   *data += mallocSizeOf(this);
2144   *data += mallocSizeOf(tlsData_.get());
2145   for (const SharedTable& table : tables_) {
2146     *data += table->sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenTables);
2147   }
2148 
2149   if (maybeDebug_) {
2150     maybeDebug_->addSizeOfMisc(mallocSizeOf, seenMetadata, seenCode, code,
2151                                data);
2152   }
2153 
2154   code_->addSizeOfMiscIfNotSeen(mallocSizeOf, seenMetadata, seenCode, code,
2155                                 data);
2156 }
2157