/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 *
 * Copyright 2015 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef asmjs_wasm_h
#define asmjs_wasm_h

#include "mozilla/HashFunctions.h"
#include "mozilla/PodOperations.h"

#include "ds/LifoAlloc.h"
#include "jit/IonTypes.h"
#include "js/Utility.h"
#include "js/Vector.h"

namespace js {
namespace wasm {

using mozilla::Move;

// The ValType enum represents a WebAssembly "value type", which is used to
// specify the types of locals and parameters.

// FIXME: uint8_t would make more sense for the underlying storage class, but
// causes miscompilations in GCC (fixed in 4.8.5 and 4.9.3).
enum class ValType
{
    I32,
    I64,
    F32,
    F64,
    I32x4,
    F32x4
};

static inline bool
IsSimdType(ValType vt)
{
    return vt == ValType::I32x4 || vt == ValType::F32x4;
}

static inline jit::MIRType
ToMIRType(ValType vt)
{
    switch (vt) {
      case ValType::I32: return jit::MIRType_Int32;
      case ValType::I64: MOZ_CRASH("NYI");
      case ValType::F32: return jit::MIRType_Float32;
      case ValType::F64: return jit::MIRType_Double;
      case ValType::I32x4: return jit::MIRType_Int32x4;
      case ValType::F32x4: return jit::MIRType_Float32x4;
    }
    MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad type");
}
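
// Illustrative sketch (not part of the original header): how these helpers fit
// together. Only the two 128-bit SIMD types satisfy IsSimdType(), and each
// scalar value type maps to the corresponding scalar MIR type.
//
//   MOZ_ASSERT(!IsSimdType(ValType::F64));
//   MOZ_ASSERT(IsSimdType(ValType::I32x4));
//   MOZ_ASSERT(ToMIRType(ValType::F32) == jit::MIRType_Float32);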

// The Val class represents a single WebAssembly value of a given value type,
// mostly for the purpose of numeric literals and initializers. A Val does not
// directly map to a JS value since there is not (currently) a precise
// representation of i64 values. A Val may contain non-canonical NaNs since,
// within WebAssembly, floats are not canonicalized. Canonicalization must
// happen at the JS boundary.

class Val
{
  public:
    typedef int32_t I32x4[4];
    typedef float F32x4[4];

  private:
    ValType type_;
    union {
        uint32_t i32_;
        uint64_t i64_;
        float f32_;
        double f64_;
        I32x4 i32x4_;
        F32x4 f32x4_;
    } u;

  public:
    Val() = default;

    explicit Val(uint32_t i32) : type_(ValType::I32) { u.i32_ = i32; }
    explicit Val(uint64_t i64) : type_(ValType::I64) { u.i64_ = i64; }
    explicit Val(float f32) : type_(ValType::F32) { u.f32_ = f32; }
    explicit Val(double f64) : type_(ValType::F64) { u.f64_ = f64; }
    explicit Val(const I32x4& i32x4) : type_(ValType::I32x4) { memcpy(u.i32x4_, i32x4, sizeof(u.i32x4_)); }
    explicit Val(const F32x4& f32x4) : type_(ValType::F32x4) { memcpy(u.f32x4_, f32x4, sizeof(u.f32x4_)); }

    ValType type() const { return type_; }
    bool isSimd() const { return IsSimdType(type()); }

    uint32_t i32() const { MOZ_ASSERT(type_ == ValType::I32); return u.i32_; }
    uint64_t i64() const { MOZ_ASSERT(type_ == ValType::I64); return u.i64_; }
    float f32() const { MOZ_ASSERT(type_ == ValType::F32); return u.f32_; }
    double f64() const { MOZ_ASSERT(type_ == ValType::F64); return u.f64_; }
    const I32x4& i32x4() const { MOZ_ASSERT(type_ == ValType::I32x4); return u.i32x4_; }
    const F32x4& f32x4() const { MOZ_ASSERT(type_ == ValType::F32x4); return u.f32x4_; }
};
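
// Illustrative sketch (not part of the original header): constructing and
// reading back a Val. The accessor must match the constructor's value type,
// otherwise the MOZ_ASSERT in the accessor fires.
//
//   Val i(uint32_t(42));    // an i32 literal
//   Val d(3.14);            // an f64 literal
//   MOZ_ASSERT(i.type() == ValType::I32 && i.i32() == 42);
//   MOZ_ASSERT(d.type() == ValType::F64 && !d.isSimd());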

// The ExprType enum represents the type of a WebAssembly expression or return
// value and may either be a value type or void. A future WebAssembly extension
// may generalize expression types to instead be a list of value types (with
// void represented by the empty list). For now it's easier to have a flat enum
// and be explicit about conversions to/from value types.

enum class ExprType : uint8_t
{
    I32 = uint8_t(ValType::I32),
    I64 = uint8_t(ValType::I64),
    F32 = uint8_t(ValType::F32),
    F64 = uint8_t(ValType::F64),
    I32x4 = uint8_t(ValType::I32x4),
    F32x4 = uint8_t(ValType::F32x4),
    Void
};

static inline bool
IsVoid(ExprType et)
{
    return et == ExprType::Void;
}

static inline ValType
NonVoidToValType(ExprType et)
{
    MOZ_ASSERT(!IsVoid(et));
    return ValType(et);
}

static inline ExprType
ToExprType(ValType vt)
{
    return ExprType(vt);
}

static inline bool
IsSimdType(ExprType et)
{
    return IsVoid(et) ? false : IsSimdType(ValType(et));
}

static inline jit::MIRType
ToMIRType(ExprType et)
{
    return IsVoid(et) ? jit::MIRType_None : ToMIRType(ValType(et));
}
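
// Illustrative sketch (not part of the original header): round-tripping
// between ValType and ExprType. Every ValType converts to a non-void ExprType
// and back; only ExprType::Void has no ValType counterpart.
//
//   ExprType et = ToExprType(ValType::F32);
//   MOZ_ASSERT(!IsVoid(et));
//   MOZ_ASSERT(NonVoidToValType(et) == ValType::F32);
//   MOZ_ASSERT(ToMIRType(ExprType::Void) == jit::MIRType_None);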

// The Sig class represents a WebAssembly function signature which takes a list
// of value types and returns an expression type. The engine uses two
// allocation strategies for the argument Vector's memory (when the elements do
// not fit inline): normal malloc allocation (via SystemAllocPolicy) and
// allocation in a LifoAlloc (via LifoAllocPolicy). The former Sig objects can
// have any lifetime since they own their memory. The latter Sig objects must
// not outlive the associated LifoAlloc mark/release interval (which is
// currently the duration of module validation+compilation). Thus, long-lived
// objects like WasmModule must use malloced allocation.

template <class AllocPolicy>
class Sig
{
  public:
    typedef Vector<ValType, 4, AllocPolicy> ArgVector;

  private:
    ArgVector args_;
    ExprType ret_;

  protected:
    explicit Sig(AllocPolicy alloc = AllocPolicy()) : args_(alloc) {}
    Sig(Sig&& rhs) : args_(Move(rhs.args_)), ret_(rhs.ret_) {}
    Sig(ArgVector&& args, ExprType ret) : args_(Move(args)), ret_(ret) {}

  public:
    void init(ArgVector&& args, ExprType ret) {
        MOZ_ASSERT(args_.empty());
        args_ = Move(args);
        ret_ = ret;
    }

    ValType arg(unsigned i) const { return args_[i]; }
    const ArgVector& args() const { return args_; }
    const ExprType& ret() const { return ret_; }

    HashNumber hash() const {
        HashNumber hn = HashNumber(ret_);
        for (unsigned i = 0; i < args_.length(); i++)
            hn = mozilla::AddToHash(hn, HashNumber(args_[i]));
        return hn;
    }

    template <class AllocPolicy2>
    bool operator==(const Sig<AllocPolicy2>& rhs) const {
        if (ret() != rhs.ret())
            return false;
        if (args().length() != rhs.args().length())
            return false;
        for (unsigned i = 0; i < args().length(); i++) {
            if (arg(i) != rhs.arg(i))
                return false;
        }
        return true;
    }

    template <class AllocPolicy2>
    bool operator!=(const Sig<AllocPolicy2>& rhs) const {
        return !(*this == rhs);
    }
};

class MallocSig : public Sig<SystemAllocPolicy>
{
    typedef Sig<SystemAllocPolicy> BaseSig;

  public:
    MallocSig() = default;
    MallocSig(MallocSig&& rhs) : BaseSig(Move(rhs)) {}
    MallocSig(ArgVector&& args, ExprType ret) : BaseSig(Move(args), ret) {}
};

class LifoSig : public Sig<LifoAllocPolicy<Fallible>>
{
    typedef Sig<LifoAllocPolicy<Fallible>> BaseSig;
    LifoSig(ArgVector&& args, ExprType ret) : BaseSig(Move(args), ret) {}

  public:
    static LifoSig* new_(LifoAlloc& lifo, const MallocSig& src) {
        void* mem = lifo.alloc(sizeof(LifoSig));
        if (!mem)
            return nullptr;
        ArgVector args(lifo);
        if (!args.appendAll(src.args()))
            return nullptr;
        return new (mem) LifoSig(Move(args), src.ret());
    }
};
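
// Illustrative sketch (not part of the original header): building a MallocSig
// for (i32, f64) -> i32 and then copying it into a LifoAlloc-backed LifoSig
// for the duration of validation/compilation. 'lifo' is a hypothetical
// LifoAlloc owned by the caller; the vector appends are fallible and must be
// checked, as in this fragment of some fallible function:
//
//   MallocSig::ArgVector args;
//   if (!args.append(ValType::I32) || !args.append(ValType::F64))
//       return false;
//   MallocSig sig(Move(args), ExprType::I32);
//   LifoSig* lifoSig = LifoSig::new_(lifo, sig);
//   if (!lifoSig)
//       return false;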

// While the frame-pointer chain allows the stack to be unwound without
// metadata, Error.stack still needs to know the line/column of every call in
// the chain. A CallSiteDesc describes a single callsite to which CallSite adds
// the metadata necessary to walk up to the next frame. Lastly CallSiteAndTarget
// adds the function index of the callee.

class CallSiteDesc
{
    uint32_t line_;
    uint32_t column_ : 31;
    uint32_t kind_ : 1;
  public:
    enum Kind {
        Relative,  // pc-relative call
        Register   // call *register
    };
    CallSiteDesc() {}
    explicit CallSiteDesc(Kind kind)
      : line_(0), column_(0), kind_(kind)
    {}
    CallSiteDesc(uint32_t line, uint32_t column, Kind kind)
      : line_(line), column_(column), kind_(kind)
    {
        MOZ_ASSERT(column_ == column, "column must fit in 31 bits");
    }
    uint32_t line() const { return line_; }
    uint32_t column() const { return column_; }
    Kind kind() const { return Kind(kind_); }
};

class CallSite : public CallSiteDesc
{
    uint32_t returnAddressOffset_;
    uint32_t stackDepth_;

  public:
    CallSite() {}

    CallSite(CallSiteDesc desc, uint32_t returnAddressOffset, uint32_t stackDepth)
      : CallSiteDesc(desc),
        returnAddressOffset_(returnAddressOffset),
        stackDepth_(stackDepth)
    { }

    void setReturnAddressOffset(uint32_t r) { returnAddressOffset_ = r; }
    void offsetReturnAddressBy(int32_t o) { returnAddressOffset_ += o; }
    uint32_t returnAddressOffset() const { return returnAddressOffset_; }

    // The stackDepth measures the amount of stack space pushed since the
    // function was called. In particular, this includes the pushed return
    // address on all archs, whether the call instruction pushes the return
    // address (x86/x64) or the prologue does (ARM/MIPS).
    uint32_t stackDepth() const { return stackDepth_; }
};

class CallSiteAndTarget : public CallSite
{
    uint32_t targetIndex_;

  public:
    CallSiteAndTarget(CallSite cs, uint32_t targetIndex)
      : CallSite(cs), targetIndex_(targetIndex)
    { }

    static const uint32_t NOT_INTERNAL = UINT32_MAX;

    bool isInternal() const { return targetIndex_ != NOT_INTERNAL; }
    uint32_t targetIndex() const { MOZ_ASSERT(isInternal()); return targetIndex_; }
};

typedef Vector<CallSite, 0, SystemAllocPolicy> CallSiteVector;
typedef Vector<CallSiteAndTarget, 0, SystemAllocPolicy> CallSiteAndTargetVector;
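
// Illustrative sketch (not part of the original header): recording a
// pc-relative call at line 10, column 4, whose return address is 0x40 bytes
// into the function's code with 0x20 bytes of stack pushed since entry. The
// NOT_INTERNAL sentinel marks a call that does not target an internal
// function, so isInternal() is false.
//
//   CallSiteDesc desc(10, 4, CallSiteDesc::Relative);
//   CallSite site(desc, 0x40, 0x20);
//   CallSiteAndTarget cs(site, CallSiteAndTarget::NOT_INTERNAL);
//   MOZ_ASSERT(!cs.isInternal());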

// Summarizes a heap access made by wasm code that needs to be patched later
// and/or looked up by the wasm signal handlers. Different architectures need
// to know different things (x64: offset and length, ARM: where to patch in
// heap length, x86: where to patch in heap length and base).

#if defined(JS_CODEGEN_X86)
class HeapAccess
{
    uint32_t insnOffset_;
    uint8_t opLength_;  // the length of the load/store instruction
    uint8_t cmpDelta_;  // the number of bytes from the cmp to the load/store instruction

  public:
    HeapAccess() = default;
    static const uint32_t NoLengthCheck = UINT32_MAX;

    // If 'cmp' equals 'insnOffset' or if it is not supplied then the
    // cmpDelta_ is zero indicating that there is no length to patch.
    HeapAccess(uint32_t insnOffset, uint32_t after, uint32_t cmp = NoLengthCheck) {
        mozilla::PodZero(this);  // zero padding for Valgrind
        insnOffset_ = insnOffset;
        opLength_ = after - insnOffset;
        cmpDelta_ = cmp == NoLengthCheck ? 0 : insnOffset - cmp;
    }

    uint32_t insnOffset() const { return insnOffset_; }
    void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
    void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
    void* patchHeapPtrImmAt(uint8_t* code) const { return code + (insnOffset_ + opLength_); }
    bool hasLengthCheck() const { return cmpDelta_ > 0; }
    void* patchLengthAt(uint8_t* code) const {
        MOZ_ASSERT(hasLengthCheck());
        return code + (insnOffset_ - cmpDelta_);
    }
};
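
// Illustrative sketch (not part of the original header): on x86 the patch
// points fall out of the recorded offsets. For a length-checked access whose
// cmp is at offset 0x10 and whose load/store spans [0x14, 0x1a):
//
//   HeapAccess access(0x14, 0x1a, 0x10);
//   MOZ_ASSERT(access.hasLengthCheck());
//   // access.patchLengthAt(code)     == code + 0x10 (back at the cmp)
//   // access.patchHeapPtrImmAt(code) == code + 0x1a (just past the load/store)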
#elif defined(JS_CODEGEN_X64)
class HeapAccess
{
  public:
    enum WhatToDoOnOOB {
        CarryOn, // loads return undefined, stores do nothing.
        Throw    // throw a RangeError
    };

  private:
    uint32_t insnOffset_;
    uint8_t offsetWithinWholeSimdVector_; // if this is, e.g., the Z of an XYZ
    bool throwOnOOB_;                     // should we throw on OOB?
    uint8_t cmpDelta_;                    // the number of bytes from the cmp to the load/store instruction

  public:
    HeapAccess() = default;
    static const uint32_t NoLengthCheck = UINT32_MAX;

    // If 'cmp' equals 'insnOffset' or if it is not supplied then the
    // cmpDelta_ is zero indicating that there is no length to patch.
    HeapAccess(uint32_t insnOffset, WhatToDoOnOOB oob,
               uint32_t cmp = NoLengthCheck,
               uint32_t offsetWithinWholeSimdVector = 0)
    {
        mozilla::PodZero(this);  // zero padding for Valgrind
        insnOffset_ = insnOffset;
        offsetWithinWholeSimdVector_ = offsetWithinWholeSimdVector;
        throwOnOOB_ = oob == Throw;
        cmpDelta_ = cmp == NoLengthCheck ? 0 : insnOffset - cmp;
        MOZ_ASSERT(offsetWithinWholeSimdVector_ == offsetWithinWholeSimdVector);
    }

    uint32_t insnOffset() const { return insnOffset_; }
    void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
    void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
    bool throwOnOOB() const { return throwOnOOB_; }
    uint32_t offsetWithinWholeSimdVector() const { return offsetWithinWholeSimdVector_; }
    bool hasLengthCheck() const { return cmpDelta_ > 0; }
    void* patchLengthAt(uint8_t* code) const {
        MOZ_ASSERT(hasLengthCheck());
        return code + (insnOffset_ - cmpDelta_);
    }
};
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
      defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
class HeapAccess
{
    uint32_t insnOffset_;
  public:
    HeapAccess() = default;
    explicit HeapAccess(uint32_t insnOffset) : insnOffset_(insnOffset) {}
    uint32_t insnOffset() const { return insnOffset_; }
    void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
    void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
};
#elif defined(JS_CODEGEN_NONE)
class HeapAccess {
  public:
    void offsetInsnOffsetBy(uint32_t) { MOZ_CRASH(); }
    uint32_t insnOffset() const { MOZ_CRASH(); }
};
#endif

typedef Vector<HeapAccess, 0, SystemAllocPolicy> HeapAccessVector;

// A wasm::Builtin represents a function implemented by the engine that is
// called directly from wasm code and should show up in the callstack.

enum class Builtin : uint16_t
{
    ToInt32,
#if defined(JS_CODEGEN_ARM)
    aeabi_idivmod,
    aeabi_uidivmod,
    AtomicCmpXchg,
    AtomicXchg,
    AtomicFetchAdd,
    AtomicFetchSub,
    AtomicFetchAnd,
    AtomicFetchOr,
    AtomicFetchXor,
#endif
    ModD,
    SinD,
    CosD,
    TanD,
    ASinD,
    ACosD,
    ATanD,
    CeilD,
    CeilF,
    FloorD,
    FloorF,
    ExpD,
    LogD,
    PowD,
    ATan2D,
    Limit
};

// A wasm::SymbolicAddress represents a pointer to a well-known function or
// object that is embedded in wasm code. Since wasm code is serialized and
// later deserialized into a different address space, symbolic addresses must be
// used for *all* pointers into the address space. The MacroAssembler records a
// list of all SymbolicAddresses and the offsets of their use in the code for
// later patching during static linking.

enum class SymbolicAddress
{
    ToInt32         = unsigned(Builtin::ToInt32),
#if defined(JS_CODEGEN_ARM)
    aeabi_idivmod   = unsigned(Builtin::aeabi_idivmod),
    aeabi_uidivmod  = unsigned(Builtin::aeabi_uidivmod),
    AtomicCmpXchg   = unsigned(Builtin::AtomicCmpXchg),
    AtomicXchg      = unsigned(Builtin::AtomicXchg),
    AtomicFetchAdd  = unsigned(Builtin::AtomicFetchAdd),
    AtomicFetchSub  = unsigned(Builtin::AtomicFetchSub),
    AtomicFetchAnd  = unsigned(Builtin::AtomicFetchAnd),
    AtomicFetchOr   = unsigned(Builtin::AtomicFetchOr),
    AtomicFetchXor  = unsigned(Builtin::AtomicFetchXor),
#endif
    ModD            = unsigned(Builtin::ModD),
    SinD            = unsigned(Builtin::SinD),
    CosD            = unsigned(Builtin::CosD),
    TanD            = unsigned(Builtin::TanD),
    ASinD           = unsigned(Builtin::ASinD),
    ACosD           = unsigned(Builtin::ACosD),
    ATanD           = unsigned(Builtin::ATanD),
    CeilD           = unsigned(Builtin::CeilD),
    CeilF           = unsigned(Builtin::CeilF),
    FloorD          = unsigned(Builtin::FloorD),
    FloorF          = unsigned(Builtin::FloorF),
    ExpD            = unsigned(Builtin::ExpD),
    LogD            = unsigned(Builtin::LogD),
    PowD            = unsigned(Builtin::PowD),
    ATan2D          = unsigned(Builtin::ATan2D),
    Runtime,
    RuntimeInterruptUint32,
    StackLimit,
    ReportOverRecursed,
    OnDetached,
    OnOutOfBounds,
    OnImpreciseConversion,
    HandleExecutionInterrupt,
    InvokeFromAsmJS_Ignore,
    InvokeFromAsmJS_ToInt32,
    InvokeFromAsmJS_ToNumber,
    CoerceInPlace_ToInt32,
    CoerceInPlace_ToNumber,
    Limit
};

static inline SymbolicAddress
BuiltinToImmediate(Builtin b)
{
    return SymbolicAddress(b);
}

static inline bool
ImmediateIsBuiltin(SymbolicAddress imm, Builtin* builtin)
{
    if (uint32_t(imm) < uint32_t(Builtin::Limit)) {
        *builtin = Builtin(imm);
        return true;
    }
    return false;
}
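
// Illustrative sketch (not part of the original header): the Builtin values
// occupy the low end of the SymbolicAddress range, so a SymbolicAddress can be
// mapped back to its Builtin when, and only when, it encodes one.
//
//   SymbolicAddress imm = BuiltinToImmediate(Builtin::SinD);
//   Builtin b;
//   MOZ_ASSERT(ImmediateIsBuiltin(imm, &b) && b == Builtin::SinD);
//   MOZ_ASSERT(!ImmediateIsBuiltin(SymbolicAddress::StackLimit, &b));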

// An ExitReason describes the possible reasons for leaving compiled wasm code
// or the state of not having left compiled wasm code (ExitReason::None).

class ExitReason
{
  public:
    // List of reasons for execution leaving compiled wasm code (or None, if
    // control hasn't exited).
    enum Kind
    {
        None,       // default state, the pc is in wasm code
        Jit,        // fast-path exit to JIT code
        Slow,       // general case exit to C++ Invoke
        Interrupt,  // executing an interrupt callback
        Builtin     // calling into a builtin (native) function
    };

  private:
    Kind kind_;
    wasm::Builtin builtin_;

  public:
    ExitReason() = default;
    MOZ_IMPLICIT ExitReason(Kind kind) : kind_(kind) { MOZ_ASSERT(kind != Builtin); }
    MOZ_IMPLICIT ExitReason(wasm::Builtin builtin) : kind_(Builtin), builtin_(builtin) {}
    Kind kind() const { return kind_; }
    wasm::Builtin builtin() const { MOZ_ASSERT(kind_ == Builtin); return builtin_; }

    uint32_t pack() const {
        static_assert(sizeof(wasm::Builtin) == 2, "fits");
        return uint16_t(kind_) | (uint16_t(builtin_) << 16);
    }
    static ExitReason unpack(uint32_t u32) {
        static_assert(sizeof(wasm::Builtin) == 2, "fits");
        ExitReason r;
        r.kind_ = Kind(uint16_t(u32));
        r.builtin_ = wasm::Builtin(uint16_t(u32 >> 16));
        return r;
    }
};
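
// Illustrative sketch (not part of the original header): an ExitReason packs
// into a single uint32_t (the kind in the low 16 bits, the builtin in the high
// 16 bits) and round-trips through pack()/unpack().
//
//   ExitReason r(Builtin::ModD);
//   ExitReason s = ExitReason::unpack(r.pack());
//   MOZ_ASSERT(s.kind() == ExitReason::Builtin && s.builtin() == Builtin::ModD);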

// A hoisting of constants that would otherwise require #including WasmModule.h
// everywhere. Values are asserted in WasmModule.h.

static const unsigned ActivationGlobalDataOffset = 0;
static const unsigned HeapGlobalDataOffset = sizeof(void*);
static const unsigned NaN64GlobalDataOffset = 2 * sizeof(void*);
static const unsigned NaN32GlobalDataOffset = 2 * sizeof(void*) + sizeof(double);
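
// For orientation only (not from the original header): assuming a 64-bit
// target, where sizeof(void*) == 8 and sizeof(double) == 8, these constants
// evaluate to the following global-data offsets:
//
//   ActivationGlobalDataOffset == 0
//   HeapGlobalDataOffset       == 8
//   NaN64GlobalDataOffset      == 16
//   NaN32GlobalDataOffset      == 24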

} // namespace wasm
} // namespace js

#endif // asmjs_wasm_h