1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 *
4 * Copyright 2015 Mozilla Foundation
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19 #ifndef wasm_types_h
20 #define wasm_types_h
21
22 #include "mozilla/EnumeratedArray.h"
23 #include "mozilla/HashFunctions.h"
24 #include "mozilla/Maybe.h"
25 #include "mozilla/Move.h"
26 #include "mozilla/RefCounted.h"
27 #include "mozilla/RefPtr.h"
28 #include "mozilla/Unused.h"
29
30 #include "NamespaceImports.h"
31
32 #include "ds/LifoAlloc.h"
33 #include "jit/IonTypes.h"
34 #include "js/UniquePtr.h"
35 #include "js/Utility.h"
36 #include "js/Vector.h"
37 #include "vm/MallocProvider.h"
38 #include "wasm/WasmBinaryConstants.h"
39
40 namespace js {
41
42 class PropertyName;
43 namespace jit { struct BaselineScript; }
44
45 // This is a widespread header, so lets keep out the core wasm impl types.
46
47 class WasmMemoryObject;
48 typedef GCPtr<WasmMemoryObject*> GCPtrWasmMemoryObject;
49 typedef Rooted<WasmMemoryObject*> RootedWasmMemoryObject;
50 typedef Handle<WasmMemoryObject*> HandleWasmMemoryObject;
51 typedef MutableHandle<WasmMemoryObject*> MutableHandleWasmMemoryObject;
52
53 class WasmModuleObject;
54 typedef Rooted<WasmModuleObject*> RootedWasmModuleObject;
55 typedef Handle<WasmModuleObject*> HandleWasmModuleObject;
56 typedef MutableHandle<WasmModuleObject*> MutableHandleWasmModuleObject;
57
58 class WasmInstanceObject;
59 typedef GCVector<WasmInstanceObject*> WasmInstanceObjectVector;
60 typedef Rooted<WasmInstanceObject*> RootedWasmInstanceObject;
61 typedef Handle<WasmInstanceObject*> HandleWasmInstanceObject;
62 typedef MutableHandle<WasmInstanceObject*> MutableHandleWasmInstanceObject;
63
64 class WasmTableObject;
65 typedef Rooted<WasmTableObject*> RootedWasmTableObject;
66 typedef Handle<WasmTableObject*> HandleWasmTableObject;
67 typedef MutableHandle<WasmTableObject*> MutableHandleWasmTableObject;
68
69 namespace wasm {
70
71 using mozilla::DebugOnly;
72 using mozilla::EnumeratedArray;
73 using mozilla::Maybe;
74 using mozilla::Move;
75 using mozilla::MallocSizeOf;
76 using mozilla::Nothing;
77 using mozilla::PodZero;
78 using mozilla::PodCopy;
79 using mozilla::PodEqual;
80 using mozilla::RefCounted;
81 using mozilla::Some;
82 using mozilla::Unused;
83
84 typedef Vector<uint32_t, 0, SystemAllocPolicy> Uint32Vector;
85 typedef Vector<uint8_t, 0, SystemAllocPolicy> Bytes;
86
87 typedef int8_t I8x16[16];
88 typedef int16_t I16x8[8];
89 typedef int32_t I32x4[4];
90 typedef float F32x4[4];
91
92 class Code;
93 class CodeRange;
94 class Memory;
95 class Module;
96 class Instance;
97 class Table;
98
99 // To call Vector::podResizeToFit, a type must specialize mozilla::IsPod
100 // which is pretty verbose to do within js::wasm, so factor that process out
101 // into a macro.
102
// Temporarily close js::wasm, specialize mozilla::IsPod for the given type,
// reopen js::wasm and declare the Vector. The IsPod specialization is what
// enables Vector::podResizeToFit and memcpy-based growth.
#define WASM_DECLARE_POD_VECTOR(Type, VectorName)                              \
} } namespace mozilla {                                                         \
template <> struct IsPod<js::wasm::Type> : TrueType {};                         \
} namespace js { namespace wasm {                                               \
typedef Vector<Type, 0, SystemAllocPolicy> VectorName;

// A wasm Module and everything it contains must support serialization and
// deserialization. Some data can be simply copied as raw bytes and,
// as a convention, is stored in an inline CacheablePod struct. Everything else
// should implement the below methods which are called recusively by the
// containing Module.
//
// serializedSize() returns the number of bytes serialize() will write;
// serialize()/deserialize() return the cursor advanced past the written/read
// bytes; sizeOfExcludingThis() supports about:memory reporting.
#define WASM_DECLARE_SERIALIZABLE(Type)                                         \
    size_t serializedSize() const;                                              \
    uint8_t* serialize(uint8_t* cursor) const;                                  \
    const uint8_t* deserialize(const uint8_t* cursor);                          \
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;

// Same contract, declared virtual for use in a polymorphic base class.
#define WASM_DECLARE_SERIALIZABLE_VIRTUAL(Type)                                 \
    virtual size_t serializedSize() const;                                      \
    virtual uint8_t* serialize(uint8_t* cursor) const;                          \
    virtual const uint8_t* deserialize(const uint8_t* cursor);                  \
    virtual size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;

// Same contract, overriding the virtual declarations above.
#define WASM_DECLARE_SERIALIZABLE_OVERRIDE(Type)                                \
    size_t serializedSize() const override;                                     \
    uint8_t* serialize(uint8_t* cursor) const override;                         \
    const uint8_t* deserialize(const uint8_t* cursor) override;                 \
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const override;
132
133 // This reusable base class factors out the logic for a resource that is shared
134 // by multiple instances/modules but should only be counted once when computing
135 // about:memory stats.
136
137 template <class T>
138 struct ShareableBase : RefCounted<T>
139 {
140 using SeenSet = HashSet<const T*, DefaultHasher<const T*>, SystemAllocPolicy>;
141
sizeOfIncludingThisIfNotSeenShareableBase142 size_t sizeOfIncludingThisIfNotSeen(MallocSizeOf mallocSizeOf, SeenSet* seen) const {
143 const T* self = static_cast<const T*>(this);
144 typename SeenSet::AddPtr p = seen->lookupForAdd(self);
145 if (p)
146 return 0;
147 bool ok = seen->add(p, self);
148 (void)ok; // oh well
149 return mallocSizeOf(self) + self->sizeOfExcludingThis(mallocSizeOf);
150 }
151 };
152
153 // ValType utilities
154
155 static inline bool
IsSimdType(ValType vt)156 IsSimdType(ValType vt)
157 {
158 switch (vt) {
159 case ValType::I8x16:
160 case ValType::I16x8:
161 case ValType::I32x4:
162 case ValType::F32x4:
163 case ValType::B8x16:
164 case ValType::B16x8:
165 case ValType::B32x4:
166 return true;
167 default:
168 return false;
169 }
170 }
171
172 static inline uint32_t
NumSimdElements(ValType vt)173 NumSimdElements(ValType vt)
174 {
175 MOZ_ASSERT(IsSimdType(vt));
176 switch (vt) {
177 case ValType::I8x16:
178 case ValType::B8x16:
179 return 16;
180 case ValType::I16x8:
181 case ValType::B16x8:
182 return 8;
183 case ValType::I32x4:
184 case ValType::F32x4:
185 case ValType::B32x4:
186 return 4;
187 default:
188 MOZ_CRASH("Unhandled SIMD type");
189 }
190 }
191
192 static inline ValType
SimdElementType(ValType vt)193 SimdElementType(ValType vt)
194 {
195 MOZ_ASSERT(IsSimdType(vt));
196 switch (vt) {
197 case ValType::I8x16:
198 case ValType::I16x8:
199 case ValType::I32x4:
200 return ValType::I32;
201 case ValType::F32x4:
202 return ValType::F32;
203 case ValType::B8x16:
204 case ValType::B16x8:
205 case ValType::B32x4:
206 return ValType::I32;
207 default:
208 MOZ_CRASH("Unhandled SIMD type");
209 }
210 }
211
212 static inline ValType
SimdBoolType(ValType vt)213 SimdBoolType(ValType vt)
214 {
215 MOZ_ASSERT(IsSimdType(vt));
216 switch (vt) {
217 case ValType::I8x16:
218 case ValType::B8x16:
219 return ValType::B8x16;
220 case ValType::I16x8:
221 case ValType::B16x8:
222 return ValType::B16x8;
223 case ValType::I32x4:
224 case ValType::F32x4:
225 case ValType::B32x4:
226 return ValType::B32x4;
227 default:
228 MOZ_CRASH("Unhandled SIMD type");
229 }
230 }
231
232 static inline bool
IsSimdBoolType(ValType vt)233 IsSimdBoolType(ValType vt)
234 {
235 return vt == ValType::B8x16 || vt == ValType::B16x8 || vt == ValType::B32x4;
236 }
237
static inline jit::MIRType
ToMIRType(ValType vt)
{
    // Map each wasm value type to the Ion MIR type that represents it in
    // compiled code. No default case: the compiler can then warn if a new
    // ValType is added without a mapping.
    switch (vt) {
      case ValType::I32: return jit::MIRType::Int32;
      case ValType::I64: return jit::MIRType::Int64;
      case ValType::F32: return jit::MIRType::Float32;
      case ValType::F64: return jit::MIRType::Double;
      case ValType::I8x16: return jit::MIRType::Int8x16;
      case ValType::I16x8: return jit::MIRType::Int16x8;
      case ValType::I32x4: return jit::MIRType::Int32x4;
      case ValType::F32x4: return jit::MIRType::Float32x4;
      case ValType::B8x16: return jit::MIRType::Bool8x16;
      case ValType::B16x8: return jit::MIRType::Bool16x8;
      case ValType::B32x4: return jit::MIRType::Bool32x4;
    }
    MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad type");
}
256
257 // The ExprType enum represents the type of a WebAssembly expression or return
258 // value and may either be a value type or void. Soon, expression types will be
259 // generalized to a list of ValType and this enum will go away, replaced,
260 // wherever it is used, by a varU32 + list of ValType.
261
enum class ExprType
{
    // The type of a block or expression producing no value.
    Void  = uint8_t(TypeCode::BlockVoid),

    // Value-producing types; each is numerically identical to the
    // corresponding ValType/TypeCode, so conversions are simple casts.
    I32   = uint8_t(TypeCode::I32),
    I64   = uint8_t(TypeCode::I64),
    F32   = uint8_t(TypeCode::F32),
    F64   = uint8_t(TypeCode::F64),

    // 128-bit SIMD vector types (asm.js SIMD).
    I8x16 = uint8_t(TypeCode::I8x16),
    I16x8 = uint8_t(TypeCode::I16x8),
    I32x4 = uint8_t(TypeCode::I32x4),
    F32x4 = uint8_t(TypeCode::F32x4),
    B8x16 = uint8_t(TypeCode::B8x16),
    B16x8 = uint8_t(TypeCode::B16x8),
    B32x4 = uint8_t(TypeCode::B32x4),

    // Sentinel; not a real type.
    Limit = uint8_t(TypeCode::Limit)
};
281
282 static inline bool
IsVoid(ExprType et)283 IsVoid(ExprType et)
284 {
285 return et == ExprType::Void;
286 }
287
288 static inline ValType
NonVoidToValType(ExprType et)289 NonVoidToValType(ExprType et)
290 {
291 MOZ_ASSERT(!IsVoid(et));
292 return ValType(et);
293 }
294
295 static inline ExprType
ToExprType(ValType vt)296 ToExprType(ValType vt)
297 {
298 return ExprType(vt);
299 }
300
301 static inline bool
IsSimdType(ExprType et)302 IsSimdType(ExprType et)
303 {
304 return IsVoid(et) ? false : IsSimdType(ValType(et));
305 }
306
307 static inline jit::MIRType
ToMIRType(ExprType et)308 ToMIRType(ExprType et)
309 {
310 return IsVoid(et) ? jit::MIRType::None : ToMIRType(ValType(et));
311 }
312
static inline const char*
ToCString(ExprType type)
{
    // Human-readable type name, for diagnostics and error messages.
    switch (type) {
      case ExprType::Void:  return "void";
      case ExprType::I32:   return "i32";
      case ExprType::I64:   return "i64";
      case ExprType::F32:   return "f32";
      case ExprType::F64:   return "f64";
      case ExprType::I8x16: return "i8x16";
      case ExprType::I16x8: return "i16x8";
      case ExprType::I32x4: return "i32x4";
      case ExprType::F32x4: return "f32x4";
      case ExprType::B8x16: return "b8x16";
      case ExprType::B16x8: return "b16x8";
      case ExprType::B32x4: return "b32x4";
      case ExprType::Limit:;  // Limit is a sentinel, not a type: crash below.
    }
    MOZ_CRASH("bad expression type");
}
333
334 static inline const char*
ToCString(ValType type)335 ToCString(ValType type)
336 {
337 return ToCString(ToExprType(type));
338 }
339
340 // Because WebAssembly allows one to define the payload of a NaN value,
341 // including the signal/quiet bit (highest order bit of payload), another
342 // represenation of floating-point values is required: on some platforms (x86
343 // without SSE2), passing a floating-point argument to a function call may use
344 // the x87 stack, which has the side-effect of clearing the signal/quiet bit.
345 // Because the signal/quiet bit must be preserved (by spec), we use the raw
346 // punned integer representation of floating points instead, in function calls.
347 //
348 // When we leave the WebAssembly sandbox back to JS, NaNs are canonicalized, so
349 // this isn't observable from JS.
350
template<class T>
class Raw
{
    // The unsigned integer type with the same bit width as T.
    typedef typename mozilla::FloatingPoint<T>::Bits Bits;
    Bits value_;

  public:
    // All-zero bit pattern (i.e. positive zero).
    Raw() : value_(0) {}

    // Capture the exact bit pattern of 'value', preserving NaN payloads and
    // the signal/quiet bit.
    explicit Raw(T value)
      : value_(mozilla::BitwiseCast<Bits>(value))
    {}

    // Forbid implicit construction from any other type (e.g. a double being
    // silently narrowed into a Raw<float>).
    template<class U> MOZ_IMPLICIT Raw(U) = delete;

    // Wrap an already-punned integer bit pattern.
    static Raw fromBits(Bits bits) { Raw r; r.value_ = bits; return r; }

    Bits bits() const { return value_; }

    // Reinterpret the stored bits back as a floating-point value.
    T fp() const { return mozilla::BitwiseCast<T>(value_); }
};
371
372 using RawF64 = Raw<double>;
373 using RawF32 = Raw<float>;
374
375 // The Val class represents a single WebAssembly value of a given value type,
376 // mostly for the purpose of numeric literals and initializers. A Val does not
377 // directly map to a JS value since there is not (currently) a precise
378 // representation of i64 values. A Val may contain non-canonical NaNs since,
379 // within WebAssembly, floats are not canonicalized. Canonicalization must
380 // happen at the JS boundary.
381
class Val
{
    // Discriminant for the union below.
    ValType type_;

    // Untagged storage for all possible payloads. The empty constructor
    // leaves the union uninitialized; the active member is set by Val's
    // constructors.
    union U {
        uint32_t i32_;
        uint64_t i64_;
        RawF32 f32_;
        RawF64 f64_;
        I8x16 i8x16_;
        I16x8 i16x8_;
        I32x4 i32x4_;
        F32x4 f32x4_;
        U() {}
    } u;

  public:
    // Default-constructed Val has an indeterminate type and payload.
    Val() = default;

    explicit Val(uint32_t i32) : type_(ValType::I32) { u.i32_ = i32; }
    explicit Val(uint64_t i64) : type_(ValType::I64) { u.i64_ = i64; }

    // Floats are accepted only in Raw form so NaN bit patterns survive
    // (see the Raw comment above); plain float/double are rejected.
    explicit Val(RawF32 f32) : type_(ValType::F32) { u.f32_ = f32; }
    explicit Val(RawF64 f64) : type_(ValType::F64) { u.f64_ = f64; }
    MOZ_IMPLICIT Val(float) = delete;
    MOZ_IMPLICIT Val(double) = delete;

    // SIMD constructors: the same lane storage backs both the integer and
    // boolean vector types, so the intended type is passed explicitly.
    explicit Val(const I8x16& i8x16, ValType type = ValType::I8x16) : type_(type) {
        MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
        memcpy(u.i8x16_, i8x16, sizeof(u.i8x16_));
    }
    explicit Val(const I16x8& i16x8, ValType type = ValType::I16x8) : type_(type) {
        MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
        memcpy(u.i16x8_, i16x8, sizeof(u.i16x8_));
    }
    explicit Val(const I32x4& i32x4, ValType type = ValType::I32x4) : type_(type) {
        MOZ_ASSERT(type_ == ValType::I32x4 || type_ == ValType::B32x4);
        memcpy(u.i32x4_, i32x4, sizeof(u.i32x4_));
    }
    explicit Val(const F32x4& f32x4) : type_(ValType::F32x4) {
        memcpy(u.f32x4_, f32x4, sizeof(u.f32x4_));
    }

    ValType type() const { return type_; }
    bool isSimd() const { return IsSimdType(type()); }

    // Typed accessors; each asserts the matching union member is active.
    uint32_t i32() const { MOZ_ASSERT(type_ == ValType::I32); return u.i32_; }
    uint64_t i64() const { MOZ_ASSERT(type_ == ValType::I64); return u.i64_; }
    RawF32 f32() const { MOZ_ASSERT(type_ == ValType::F32); return u.f32_; }
    RawF64 f64() const { MOZ_ASSERT(type_ == ValType::F64); return u.f64_; }

    const I8x16& i8x16() const {
        MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
        return u.i8x16_;
    }
    const I16x8& i16x8() const {
        MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
        return u.i16x8_;
    }
    const I32x4& i32x4() const {
        MOZ_ASSERT(type_ == ValType::I32x4 || type_ == ValType::B32x4);
        return u.i32x4_;
    }
    const F32x4& f32x4() const {
        MOZ_ASSERT(type_ == ValType::F32x4);
        return u.f32x4_;
    }

    // Writes the raw payload bytes to 'dst' (defined out of line).
    void writePayload(uint8_t* dst) const;
};
451
452 typedef Vector<Val, 0, SystemAllocPolicy> ValVector;
453
454 // The Sig class represents a WebAssembly function signature which takes a list
455 // of value types and returns an expression type. The engine uses two in-memory
456 // representations of the argument Vector's memory (when elements do not fit
457 // inline): normal malloc allocation (via SystemAllocPolicy) and allocation in
458 // a LifoAlloc (via LifoAllocPolicy). The former Sig objects can have any
459 // lifetime since they own the memory. The latter Sig objects must not outlive
460 // the associated LifoAlloc mark/release interval (which is currently the
461 // duration of module validation+compilation). Thus, long-lived objects like
462 // WasmModule must use malloced allocation.
463
class Sig
{
    // Argument value types, in parameter order.
    ValTypeVector args_;
    // Return type; ExprType::Void for no return value.
    ExprType ret_;

  public:
    Sig() : args_(), ret_(ExprType::Void) {}
    Sig(ValTypeVector&& args, ExprType ret) : args_(Move(args)), ret_(ret) {}

    // Deep-copies 'rhs' into this (required to be empty) Sig.
    // Returns false on OOM.
    MOZ_MUST_USE bool clone(const Sig& rhs) {
        ret_ = rhs.ret_;
        MOZ_ASSERT(args_.empty());
        return args_.appendAll(rhs.args_);
    }

    ValType arg(unsigned i) const { return args_[i]; }
    const ValTypeVector& args() const { return args_; }
    const ExprType& ret() const { return ret_; }

    // Structural hash over return type and argument list; consistent with
    // operator== below (used by SigHashPolicy).
    HashNumber hash() const {
        return AddContainerToHash(args_, HashNumber(ret_));
    }
    // Structural equality: same return type and same argument list.
    bool operator==(const Sig& rhs) const {
        return ret() == rhs.ret() && EqualContainers(args(), rhs.args());
    }
    bool operator!=(const Sig& rhs) const {
        return !(*this == rhs);
    }

    WASM_DECLARE_SERIALIZABLE(Sig)
};
495
// HashPolicy for hash tables keyed on Sig*, compared structurally so that
// lookups can be performed with a Sig reference without allocating.
struct SigHashPolicy
{
    typedef const Sig& Lookup;
    static HashNumber hash(Lookup sig) { return sig.hash(); }
    static bool match(const Sig* lhs, Lookup rhs) { return *lhs == rhs; }
};
502
503 // An InitExpr describes a deferred initializer expression, used to initialize
504 // a global or a table element offset. Such expressions are created during
505 // decoding and actually executed on module instantiation.
506
class InitExpr
{
  public:
    enum class Kind {
        Constant,   // a literal Val
        GetGlobal   // the value of an (imported) global, read at instantiation
    };

  private:
    Kind kind_;
    // Payload: a constant value or a global's index+type, selected by kind_.
    union U {
        Val val_;
        struct {
            uint32_t index_;
            ValType type_;
        } global;
        U() {}
    } u;

  public:
    // Default-constructed InitExpr has indeterminate kind and payload.
    InitExpr() = default;

    explicit InitExpr(Val val) : kind_(Kind::Constant) {
        u.val_ = val;
    }

    explicit InitExpr(uint32_t globalIndex, ValType type) : kind_(Kind::GetGlobal) {
        u.global.index_ = globalIndex;
        u.global.type_ = type;
    }

    Kind kind() const { return kind_; }

    bool isVal() const { return kind() == Kind::Constant; }
    Val val() const { MOZ_ASSERT(isVal()); return u.val_; }

    uint32_t globalIndex() const { MOZ_ASSERT(kind() == Kind::GetGlobal); return u.global.index_; }

    // The value type this initializer produces, whichever kind it is.
    ValType type() const {
        switch (kind()) {
          case Kind::Constant: return u.val_.type();
          case Kind::GetGlobal: return u.global.type_;
        }
        MOZ_CRASH("unexpected initExpr type");
    }
};
553
554 // CacheableChars is used to cacheably store UniqueChars.
555
// A UniqueChars that additionally supports the module serialization protocol
// (see WASM_DECLARE_SERIALIZABLE above).
struct CacheableChars : UniqueChars
{
    CacheableChars() = default;
    // Takes ownership of 'ptr'.
    explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
    MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs) : UniqueChars(Move(rhs)) {}
    WASM_DECLARE_SERIALIZABLE(CacheableChars)
};
563
564 typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
565
566 // Import describes a single wasm import. An ImportVector describes all
567 // of a single module's imports.
568 //
569 // ImportVector is built incrementally by ModuleGenerator and then stored
570 // immutably by Module.
571
struct Import
{
    // Import module name, e.g. the "m" in (import "m" "f").
    CacheableChars module;
    // Import field name within that module, e.g. the "f" above.
    CacheableChars field;
    // What is being imported: function, table, memory or global.
    DefinitionKind kind;

    Import() = default;
    Import(UniqueChars&& module, UniqueChars&& field, DefinitionKind kind)
      : module(Move(module)), field(Move(field)), kind(kind)
    {}

    WASM_DECLARE_SERIALIZABLE(Import)
};
585
586 typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
587
588 // A GlobalDesc describes a single global variable. Currently, asm.js and wasm
589 // exposes mutable and immutable private globals, but can't import nor export
590 // mutable globals.
591
enum class GlobalKind
{
    Import,     // value comes from an imported global
    Constant,   // immutable, initialized with a literal value
    Variable    // mutable, or initialized by a deferred InitExpr
};
598
class GlobalDesc
{
    // Storage layout, discriminated by kind_:
    //  - Constant: u.cst_ holds the value;
    //  - Variable: u.var.val.initial_ holds the InitExpr;
    //  - Import:   u.var.val.import holds the type and import index.
    // Variables and imports also record mutability and a global-data offset.
    union V {
        struct {
            union U {
                InitExpr initial_;
                struct {
                    ValType type_;
                    uint32_t index_;
                } import;
                U() {}
            } val;
            unsigned offset_;
            bool isMutable_;
        } var;
        Val cst_;
        V() {}
    } u;
    GlobalKind kind_;

  public:
    GlobalDesc() = default;

    // A mutable global, or one whose initializer is not a plain constant,
    // becomes a Variable; otherwise it is folded into a Constant.
    explicit GlobalDesc(InitExpr initial, bool isMutable)
      : kind_((isMutable || !initial.isVal()) ? GlobalKind::Variable : GlobalKind::Constant)
    {
        if (isVariable()) {
            u.var.val.initial_ = initial;
            u.var.isMutable_ = isMutable;
            // UINT32_MAX marks "offset not yet assigned"; see setOffset().
            u.var.offset_ = UINT32_MAX;
        } else {
            u.cst_ = initial.val();
        }
    }

    explicit GlobalDesc(ValType type, bool isMutable, uint32_t importIndex)
      : kind_(GlobalKind::Import)
    {
        u.var.val.import.type_ = type;
        u.var.val.import.index_ = importIndex;
        u.var.isMutable_ = isMutable;
        u.var.offset_ = UINT32_MAX;
    }

    // Assigns the global-data offset exactly once (asserted via the
    // UINT32_MAX sentinel). Constants have no storage and thus no offset.
    void setOffset(unsigned offset) {
        MOZ_ASSERT(!isConstant());
        MOZ_ASSERT(u.var.offset_ == UINT32_MAX);
        u.var.offset_ = offset;
    }
    unsigned offset() const {
        MOZ_ASSERT(!isConstant());
        MOZ_ASSERT(u.var.offset_ != UINT32_MAX);
        return u.var.offset_;
    }

    GlobalKind kind() const { return kind_; }
    bool isVariable() const { return kind_ == GlobalKind::Variable; }
    bool isConstant() const { return kind_ == GlobalKind::Constant; }
    bool isImport() const { return kind_ == GlobalKind::Import; }

    bool isMutable() const { return !isConstant() && u.var.isMutable_; }
    Val constantValue() const { MOZ_ASSERT(isConstant()); return u.cst_; }
    const InitExpr& initExpr() const { MOZ_ASSERT(isVariable()); return u.var.val.initial_; }
    uint32_t importIndex() const { MOZ_ASSERT(isImport()); return u.var.val.import.index_; }

    // The global's value type, regardless of kind.
    ValType type() const {
        switch (kind_) {
          case GlobalKind::Import:   return u.var.val.import.type_;
          case GlobalKind::Variable: return u.var.val.initial_.type();
          case GlobalKind::Constant: return u.cst_.type();
        }
        MOZ_CRASH("unexpected global kind");
    }
};
673
674 typedef Vector<GlobalDesc, 0, SystemAllocPolicy> GlobalDescVector;
675
676 // DataSegment describes the offset of a data segment in the bytecode that is
677 // to be copied at a given offset into linear memory upon instantiation.
678
struct DataSegment
{
    // Destination offset into linear memory, evaluated at instantiation.
    InitExpr offset;
    // Location of the segment's payload within the module bytecode.
    uint32_t bytecodeOffset;
    // Payload length in bytes.
    uint32_t length;
};
685
686 typedef Vector<DataSegment, 0, SystemAllocPolicy> DataSegmentVector;
687
688 // SigIdDesc describes a signature id that can be used by call_indirect and
689 // table-entry prologues to structurally compare whether the caller and callee's
690 // signatures *structurally* match. To handle the general case, a Sig is
691 // allocated and stored in a process-wide hash table, so that pointer equality
692 // implies structural equality. As an optimization for the 99% case where the
693 // Sig has a small number of parameters, the Sig is bit-packed into a uint32
694 // immediate value so that integer equality implies structural equality. Both
695 // cases can be handled with a single comparison by always setting the LSB for
696 // the immediates (the LSB is necessarily 0 for allocated Sig pointers due to
697 // alignment).
698
class SigIdDesc
{
  public:
    enum class Kind { None, Immediate, Global };
    // Immediates always have this bit set; allocated Sig pointers never do
    // (due to alignment), so one comparison handles both encodings.
    static const uintptr_t ImmediateBit = 0x1;

  private:
    Kind kind_;
    // Either the bit-packed immediate or the global-data offset, per kind_.
    size_t bits_;

    SigIdDesc(Kind kind, size_t bits) : kind_(kind), bits_(bits) {}

  public:
    Kind kind() const { return kind_; }
    // Whether 'sig' is too big to bit-pack and must use the global encoding.
    static bool isGlobal(const Sig& sig);

    SigIdDesc() : kind_(Kind::None), bits_(0) {}
    static SigIdDesc global(const Sig& sig, uint32_t globalDataOffset);
    static SigIdDesc immediate(const Sig& sig);

    bool isGlobal() const { return kind_ == Kind::Global; }

    size_t immediate() const { MOZ_ASSERT(kind_ == Kind::Immediate); return bits_; }
    uint32_t globalDataOffset() const { MOZ_ASSERT(kind_ == Kind::Global); return bits_; }
};
724
725 // SigWithId pairs a Sig with SigIdDesc, describing either how to compile code
726 // that compares this signature's id or, at instantiation what signature ids to
727 // allocate in the global hash and where to put them.
728
struct SigWithId : Sig
{
    SigIdDesc id;

    SigWithId() = default;
    explicit SigWithId(Sig&& sig, SigIdDesc id) : Sig(Move(sig)), id(id) {}
    // Assigning a plain Sig deliberately leaves 'id' untouched.
    void operator=(Sig&& rhs) { Sig::operator=(Move(rhs)); }

    WASM_DECLARE_SERIALIZABLE(SigWithId)
};
739
740 typedef Vector<SigWithId, 0, SystemAllocPolicy> SigWithIdVector;
741 typedef Vector<const SigWithId*, 0, SystemAllocPolicy> SigWithIdPtrVector;
742
743 // The (,Profiling,Func)Offsets classes are used to record the offsets of
744 // different key points in a CodeRange during compilation.
745
struct Offsets
{
    explicit Offsets(uint32_t begin = 0, uint32_t end = 0)
      : begin(begin), end(end)
    {}

    // These define a [begin, end) contiguous range of instructions compiled
    // into a CodeRange.
    uint32_t begin;
    uint32_t end;

    // Shift the whole range by 'offset', e.g. when the compiled code is
    // placed at its final position within the module's code segment.
    void offsetBy(uint32_t offset) {
        begin += offset;
        end += offset;
    }
};
762
// Offsets for code ranges that additionally have a profiling entry/return.
struct ProfilingOffsets : Offsets
{
    MOZ_IMPLICIT ProfilingOffsets(uint32_t profilingReturn = 0)
      : Offsets(), profilingReturn(profilingReturn)
    {}

    // For CodeRanges with ProfilingOffsets, 'begin' is the offset of the
    // profiling entry.
    uint32_t profilingEntry() const { return begin; }

    // The profiling return is the offset of the return instruction, which
    // precedes the 'end' by a variable number of instructions due to
    // out-of-line codegen.
    uint32_t profilingReturn;

    // Shift all recorded offsets, including the base range.
    void offsetBy(uint32_t offset) {
        Offsets::offsetBy(offset);
        profilingReturn += offset;
    }
};
783
// Offsets specific to compiled wasm functions: table entry, non-profiling
// entry and the patchable profiling jump/epilogue, on top of ProfilingOffsets.
struct FuncOffsets : ProfilingOffsets
{
    MOZ_IMPLICIT FuncOffsets()
      : ProfilingOffsets(),
        tableEntry(0),
        tableProfilingJump(0),
        nonProfilingEntry(0),
        profilingJump(0),
        profilingEpilogue(0)
    {}

    // Function CodeRanges have a table entry which takes an extra signature
    // argument which is checked against the callee's signature before falling
    // through to the normal prologue. When profiling is enabled, a nop on the
    // fallthrough is patched to instead jump to the profiling epilogue.
    uint32_t tableEntry;
    uint32_t tableProfilingJump;

    // Function CodeRanges have an additional non-profiling entry that comes
    // after the profiling entry and a non-profiling epilogue that comes before
    // the profiling epilogue.
    uint32_t nonProfilingEntry;

    // When profiling is enabled, the 'nop' at offset 'profilingJump' is
    // overwritten to be a jump to 'profilingEpilogue'.
    uint32_t profilingJump;
    uint32_t profilingEpilogue;

    // Shift all recorded offsets, including the inherited ones.
    void offsetBy(uint32_t offset) {
        ProfilingOffsets::offsetBy(offset);
        tableEntry += offset;
        tableProfilingJump += offset;
        nonProfilingEntry += offset;
        profilingJump += offset;
        profilingEpilogue += offset;
    }
};
821
822 // A wasm::Trap represents a wasm-defined trap that can occur during execution
823 // which triggers a WebAssembly.RuntimeError. Generated code may jump to a Trap
824 // symbolically, passing the bytecode offset to report as the trap offset. The
825 // generated jump will be bound to a tiny stub which fills the offset and
826 // then jumps to a per-Trap shared stub at the end of the module.
827
enum class Trap
{
    // The Unreachable opcode has been executed.
    Unreachable,
    // An integer arithmetic operation led to an overflow.
    IntegerOverflow,
    // Trying to coerce NaN to an integer.
    InvalidConversionToInteger,
    // Integer division by zero.
    IntegerDivideByZero,
    // Out of bounds on wasm memory accesses and asm.js SIMD/atomic accesses.
    OutOfBounds,
    // call_indirect to null.
    IndirectCallToNull,
    // call_indirect signature mismatch.
    IndirectCallBadSig,

    // (asm.js only) SIMD float to int conversion failed because the input
    // wasn't in bounds.
    ImpreciseSimdConversion,

    // The internal stack space was exhausted. For compatibility, this throws
    // the same over-recursed error as JS.
    StackOverflow,

    // Sentinel: the number of trap kinds, not a real trap.
    Limit
};
855
856 // A wrapper around the bytecode offset of a wasm instruction within a whole
857 // module. Trap offsets should refer to the first byte of the instruction that
858 // triggered the trap and should ultimately derive from OpIter::trapOffset.
859
struct TrapOffset
{
    // Offset of the trapping instruction's first byte within the module
    // bytecode.
    uint32_t bytecodeOffset;

    // Default-constructed TrapOffset leaves the offset uninitialized.
    TrapOffset() = default;
    explicit TrapOffset(uint32_t bytecodeOffset) : bytecodeOffset(bytecodeOffset) {}
};
867
868 // While the frame-pointer chain allows the stack to be unwound without
869 // metadata, Error.stack still needs to know the line/column of every call in
870 // the chain. A CallSiteDesc describes a single callsite to which CallSite adds
871 // the metadata necessary to walk up to the next frame. Lastly CallSiteAndTarget
872 // adds the function index of the callee.
873
class CallSiteDesc
{
    // Source line or bytecode offset of the call, packed into 30 bits.
    uint32_t lineOrBytecode_ : 30;
    // One of the Kind values below, packed into 2 bits.
    uint32_t kind_ : 2;
  public:
    enum Kind {
        Func,       // pc-relative call to a specific function
        Dynamic,    // dynamic callee called via register
        Symbolic,   // call to a single symbolic callee
        TrapExit    // call to a trap exit
    };
    // Leaves both bitfields uninitialized — presumably required since
    // CallSite is stored in POD vectors (see WASM_DECLARE_POD_VECTOR below);
    // callers must assign before use. TODO(review): confirm.
    CallSiteDesc() {}
    explicit CallSiteDesc(Kind kind)
      : lineOrBytecode_(0), kind_(kind)
    {
        // Assert 'kind' survived truncation into the 2-bit field.
        MOZ_ASSERT(kind == Kind(kind_));
    }
    CallSiteDesc(uint32_t lineOrBytecode, Kind kind)
      : lineOrBytecode_(lineOrBytecode), kind_(kind)
    {
        MOZ_ASSERT(kind == Kind(kind_));
        // Assert 'lineOrBytecode' fit into the 30-bit field.
        MOZ_ASSERT(lineOrBytecode == lineOrBytecode_);
    }
    uint32_t lineOrBytecode() const { return lineOrBytecode_; }
    Kind kind() const { return Kind(kind_); }
};
900
class CallSite : public CallSiteDesc
{
    // Code offset of the instruction following the call (the return address).
    uint32_t returnAddressOffset_;
    uint32_t stackDepth_;

  public:
    // Default constructor leaves all fields uninitialized (POD vector use).
    CallSite() {}

    CallSite(CallSiteDesc desc, uint32_t returnAddressOffset, uint32_t stackDepth)
      : CallSiteDesc(desc),
        returnAddressOffset_(returnAddressOffset),
        stackDepth_(stackDepth)
    { }

    void setReturnAddressOffset(uint32_t r) { returnAddressOffset_ = r; }
    void offsetReturnAddressBy(int32_t o) { returnAddressOffset_ += o; }
    uint32_t returnAddressOffset() const { return returnAddressOffset_; }

    // The stackDepth measures the amount of stack space pushed since the
    // function was called. In particular, this includes the pushed return
    // address on all archs (whether or not the call instruction pushes the
    // return address (x86/x64) or the prologue does (ARM/MIPS)).
    uint32_t stackDepth() const { return stackDepth_; }
};
925
WASM_DECLARE_POD_VECTOR(CallSite,CallSiteVector)926 WASM_DECLARE_POD_VECTOR(CallSite, CallSiteVector)
927
class CallSiteAndTarget : public CallSite
{
    // Callee identifier: a function index for Func kind, a Trap value for
    // TrapExit kind, and intentionally uninitialized for other kinds.
    uint32_t index_;

  public:
    explicit CallSiteAndTarget(CallSite cs)
      : CallSite(cs)
    {
        MOZ_ASSERT(cs.kind() != Func);
    }
    CallSiteAndTarget(CallSite cs, uint32_t funcIndex)
      : CallSite(cs), index_(funcIndex)
    {
        MOZ_ASSERT(cs.kind() == Func);
    }
    CallSiteAndTarget(CallSite cs, Trap trap)
      : CallSite(cs),
        index_(uint32_t(trap))
    {
        MOZ_ASSERT(cs.kind() == TrapExit);
    }

    uint32_t funcIndex() const { MOZ_ASSERT(kind() == Func); return index_; }
    Trap trap() const { MOZ_ASSERT(kind() == TrapExit); return Trap(index_); }
};
953
// Vector of call sites paired with their targets.
typedef Vector<CallSiteAndTarget, 0, SystemAllocPolicy> CallSiteAndTargetVector;
955
956 // A wasm::SymbolicAddress represents a pointer to a well-known function or
957 // object that is embedded in wasm code. Since wasm code is serialized and
958 // later deserialized into a different address space, symbolic addresses must be
959 // used for *all* pointers into the address space. The MacroAssembler records a
960 // list of all SymbolicAddresses and the offsets of their use in the code for
961 // later patching during static linking.
962
enum class SymbolicAddress
{
    // Double -> int32 coercion helper called from generated code.
    ToInt32,
#if defined(JS_CODEGEN_ARM)
    // ARM EABI software-division helpers and atomic-operation helpers.
    aeabi_idivmod,
    aeabi_uidivmod,
    AtomicCmpXchg,
    AtomicXchg,
    AtomicFetchAdd,
    AtomicFetchSub,
    AtomicFetchAnd,
    AtomicFetchOr,
    AtomicFetchXor,
#endif
    // Out-of-line floating-point math routines.
    ModD,
    SinD,
    CosD,
    TanD,
    ASinD,
    ACosD,
    ATanD,
    CeilD,
    CeilF,
    FloorD,
    FloorF,
    TruncD,
    TruncF,
    NearbyIntD,
    NearbyIntF,
    ExpD,
    LogD,
    PowD,
    ATan2D,
    // Runtime/VM entry points: interrupt handling, error reporting, import
    // calls and in-place coercions.
    Context,
    InterruptUint32,
    ReportOverRecursed,
    HandleExecutionInterrupt,
    ReportTrap,
    ReportOutOfBounds,
    ReportUnalignedAccess,
    CallImport_Void,
    CallImport_I32,
    CallImport_I64,
    CallImport_F64,
    CoerceInPlace_ToInt32,
    CoerceInPlace_ToNumber,
    // 64-bit integer arithmetic and int64<->float conversion helpers.
    DivI64,
    UDivI64,
    ModI64,
    UModI64,
    TruncateDoubleToInt64,
    TruncateDoubleToUint64,
    Uint64ToFloatingPoint,
    Int64ToFloatingPoint,
    // Memory instance operations (grow_memory / current_memory).
    GrowMemory,
    CurrentMemory,
    // Sentinel: the number of SymbolicAddress values, not a real address.
    Limit
};
1021
// Translate a SymbolicAddress to its actual address in the current process,
// for use when (static-)linking code in this address space.
void*
AddressOf(SymbolicAddress imm, ExclusiveContext* cx);
1024
1025 // Assumptions captures ambient state that must be the same when compiling and
1026 // deserializing a module for the compiled code to be valid. If it's not, then
1027 // the module must be recompiled from scratch.
1028
struct Assumptions
{
    // CPU identification captured at compile time; part of the ambient state
    // that must match for deserialized code to be valid.
    uint32_t cpuId;
    // The embedding's build id; a different build invalidates
    // previously-compiled modules.
    JS::BuildIdCharVector buildId;

    // Construct with a known build id.
    explicit Assumptions(JS::BuildIdCharVector&& buildId);

    // If Assumptions is constructed without arguments, initBuildIdFromContext()
    // must be called to complete initialization.
    Assumptions();
    bool initBuildIdFromContext(ExclusiveContext* cx);

    // Copy 'other' into *this. NOTE(review): the bool return presumably
    // reports allocation failure — confirm in the .cpp.
    bool clone(const Assumptions& other);

    bool operator==(const Assumptions& rhs) const;
    bool operator!=(const Assumptions& rhs) const { return !(*this == rhs); }

    // Serialization, used when caching compiled modules (see struct comment).
    size_t serializedSize() const;
    uint8_t* serialize(uint8_t* cursor) const;
    const uint8_t* deserialize(const uint8_t* cursor, size_t limit);
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
};
1051
1052 // A Module can either be asm.js or wasm.
1053
enum ModuleKind
{
    // A WebAssembly module.
    Wasm,
    // An asm.js module compiled through the wasm pipeline.
    AsmJS
};
1059
1060 // Represents the resizable limits of memories and tables.
1061
struct Limits
{
    // Initial size (units depend on what is described: elements for tables,
    // pages for memories — see callers).
    uint32_t initial;
    // Optional declared maximum; Nothing() means no maximum was declared.
    Maybe<uint32_t> maximum;
};
1067
1068 // TableDesc describes a table as well as the offset of the table's base pointer
1069 // in global memory. Currently, wasm only has "any function" and asm.js only
1070 // "typed function".
1071
enum class TableKind
{
    // wasm "any function" table: elements may have heterogeneous signatures.
    AnyFunction,
    // asm.js table: all elements share a single signature.
    TypedFunction
};
1077
1078 struct TableDesc
1079 {
1080 TableKind kind;
1081 bool external;
1082 uint32_t globalDataOffset;
1083 Limits limits;
1084
1085 TableDesc() = default;
TableDescTableDesc1086 TableDesc(TableKind kind, Limits limits)
1087 : kind(kind),
1088 external(false),
1089 globalDataOffset(UINT32_MAX),
1090 limits(limits)
1091 {}
1092 };
1093
// Vector of a module's table descriptors.
typedef Vector<TableDesc, 0, SystemAllocPolicy> TableDescVector;
1095
1096 // ExportArg holds the unboxed operands to the wasm entry trampoline which can
1097 // be called through an ExportFuncPtr.
1098
struct ExportArg
{
    uint64_t lo;
    // Second 64-bit word. NOTE(review): presumably needed for values wider
    // than 64 bits — confirm against the entry trampoline's argument packing.
    uint64_t hi;
};
1104
1105 // TLS data for a single module instance.
1106 //
1107 // Every WebAssembly function expects to be passed a hidden TLS pointer argument
1108 // in WasmTlsReg. The TLS pointer argument points to a TlsData struct.
1109 // Compiled functions expect that the TLS pointer does not change for the
1110 // lifetime of the thread.
1111 //
1112 // There is a TlsData per module instance per thread, so inter-module calls need
1113 // to pass the TLS pointer appropriate for the callee module.
1114 //
1115 // After the TlsData struct follows the module's declared TLS variables.
1116
struct TlsData
{
    // Pointer to the JSContext that contains this TLS data.
    JSContext* cx;

    // Pointer to the Instance that contains this TLS data.
    Instance* instance;

    // Pointer to the global data for this Instance.
    uint8_t* globalData;

    // Pointer to the base of the default memory (or null if there is none).
    uint8_t* memoryBase;

    // Stack limit for the current thread. This limit is checked against the
    // stack pointer in the prologue of functions that allocate stack space. See
    // `CodeGenerator::generateWasm`.
    void* stackLimit;

    // NOTE(review): field order appears to be JIT-visible (fields are loaded
    // at fixed offsets from WasmTlsReg) — audit codegen before reordering.
};
1136
// Calling convention of the wasm entry trampoline: the unboxed arguments (see
// ExportArg above) plus the instance's TLS data.
typedef int32_t (*ExportFuncPtr)(ExportArg* args, TlsData* tls);
1138
1139 // FuncImportTls describes the region of wasm global memory allocated in the
1140 // instance's thread-local storage for a function import. This is accessed
1141 // directly from JIT code and mutated by Instance as exits become optimized and
1142 // deoptimized.
1143
struct FuncImportTls
{
    // The code to call at an import site: a wasm callee, a thunk into C++, or a
    // thunk into JIT code.
    void* code;

    // The callee's TlsData pointer, which must be loaded to WasmTlsReg (along
    // with any pinned registers) before calling 'code'.
    TlsData* tls;

    // If 'code' points into a JIT code thunk, the BaselineScript of the callee,
    // for bidirectional registration purposes.
    // NOTE(review): presumably null otherwise — confirm in Instance.
    jit::BaselineScript* baselineScript;

    // A GC pointer which keeps the callee alive. For imported wasm functions,
    // this points to the wasm function's WasmInstanceObject. For all other
    // imported functions, 'obj' points to the JSFunction.
    GCPtrObject obj;
    // JIT code loads/stores 'obj' as a plain pointer, so its size must match.
    static_assert(sizeof(GCPtrObject) == sizeof(void*), "for JIT access");
};
1164
1165 // TableTls describes the region of wasm global memory allocated in the
1166 // instance's thread-local storage which is accessed directly from JIT code
1167 // to bounds-check and index the table.
1168
struct TableTls
{
    // Length of the table in number of elements (not bytes).
    uint32_t length;

    // Pointer to the array of elements (of type either ExternalTableElem or
    // void*).
    void* base;

    // Note: CalleeDesc::tableLengthGlobalDataOffset() and
    // tableBaseGlobalDataOffset() compute offsetof() into this struct, so the
    // field layout is JIT-visible.
};
1178
1179 // When a table can contain functions from other instances (it is "external"),
1180 // the internal representation is an array of ExternalTableElem instead of just
1181 // an array of code pointers.
1182
struct ExternalTableElem
{
    // The code to call when calling this element. The table ABI is the system
    // ABI with the additional ABI requirements that:
    //  - WasmTlsReg and any pinned registers have been loaded appropriately
    //  - if this is a heterogeneous table that requires a signature check,
    //    WasmTableCallSigReg holds the signature id.
    void* code;

    // The pointer to the callee's instance's TlsData. This must be loaded into
    // WasmTlsReg before calling 'code'.
    // (This (code, tls) pair mirrors the first two fields of FuncImportTls.)
    TlsData* tls;
};
1196
1197 // CalleeDesc describes how to compile one of the variety of asm.js/wasm calls.
1198 // This is hoisted into WasmTypes.h for sharing between Ion and Baseline.
1199
1200 class CalleeDesc
1201 {
1202 public:
1203 enum Which {
1204 // Calls a function defined in the same module by its index.
1205 Func,
1206
1207 // Calls the import identified by the offset of its FuncImportTls in
1208 // thread-local data.
1209 Import,
1210
1211 // Calls a WebAssembly table (heterogeneous, index must be bounds
1212 // checked, callee instance depends on TableDesc).
1213 WasmTable,
1214
1215 // Calls an asm.js table (homogeneous, masked index, same-instance).
1216 AsmJSTable,
1217
1218 // Call a C++ function identified by SymbolicAddress.
1219 Builtin,
1220
1221 // Like Builtin, but automatically passes Instance* as first argument.
1222 BuiltinInstanceMethod
1223 };
1224
1225 private:
1226 Which which_;
1227 union U {
U()1228 U() {}
1229 uint32_t funcIndex_;
1230 struct {
1231 uint32_t globalDataOffset_;
1232 } import;
1233 struct {
1234 uint32_t globalDataOffset_;
1235 bool external_;
1236 SigIdDesc sigId_;
1237 } table;
1238 SymbolicAddress builtin_;
1239 } u;
1240
1241 public:
CalleeDesc()1242 CalleeDesc() {}
function(uint32_t funcIndex)1243 static CalleeDesc function(uint32_t funcIndex) {
1244 CalleeDesc c;
1245 c.which_ = Func;
1246 c.u.funcIndex_ = funcIndex;
1247 return c;
1248 }
import(uint32_t globalDataOffset)1249 static CalleeDesc import(uint32_t globalDataOffset) {
1250 CalleeDesc c;
1251 c.which_ = Import;
1252 c.u.import.globalDataOffset_ = globalDataOffset;
1253 return c;
1254 }
wasmTable(const TableDesc & desc,SigIdDesc sigId)1255 static CalleeDesc wasmTable(const TableDesc& desc, SigIdDesc sigId) {
1256 CalleeDesc c;
1257 c.which_ = WasmTable;
1258 c.u.table.globalDataOffset_ = desc.globalDataOffset;
1259 c.u.table.external_ = desc.external;
1260 c.u.table.sigId_ = sigId;
1261 return c;
1262 }
asmJSTable(const TableDesc & desc)1263 static CalleeDesc asmJSTable(const TableDesc& desc) {
1264 CalleeDesc c;
1265 c.which_ = AsmJSTable;
1266 c.u.table.globalDataOffset_ = desc.globalDataOffset;
1267 return c;
1268 }
builtin(SymbolicAddress callee)1269 static CalleeDesc builtin(SymbolicAddress callee) {
1270 CalleeDesc c;
1271 c.which_ = Builtin;
1272 c.u.builtin_ = callee;
1273 return c;
1274 }
builtinInstanceMethod(SymbolicAddress callee)1275 static CalleeDesc builtinInstanceMethod(SymbolicAddress callee) {
1276 CalleeDesc c;
1277 c.which_ = BuiltinInstanceMethod;
1278 c.u.builtin_ = callee;
1279 return c;
1280 }
which()1281 Which which() const {
1282 return which_;
1283 }
funcIndex()1284 uint32_t funcIndex() const {
1285 MOZ_ASSERT(which_ == Func);
1286 return u.funcIndex_;
1287 }
importGlobalDataOffset()1288 uint32_t importGlobalDataOffset() const {
1289 MOZ_ASSERT(which_ == Import);
1290 return u.import.globalDataOffset_;
1291 }
isTable()1292 bool isTable() const {
1293 return which_ == WasmTable || which_ == AsmJSTable;
1294 }
tableLengthGlobalDataOffset()1295 uint32_t tableLengthGlobalDataOffset() const {
1296 MOZ_ASSERT(isTable());
1297 return u.table.globalDataOffset_ + offsetof(TableTls, length);
1298 }
tableBaseGlobalDataOffset()1299 uint32_t tableBaseGlobalDataOffset() const {
1300 MOZ_ASSERT(isTable());
1301 return u.table.globalDataOffset_ + offsetof(TableTls, base);
1302 }
wasmTableIsExternal()1303 bool wasmTableIsExternal() const {
1304 MOZ_ASSERT(which_ == WasmTable);
1305 return u.table.external_;
1306 }
wasmTableSigId()1307 SigIdDesc wasmTableSigId() const {
1308 MOZ_ASSERT(which_ == WasmTable);
1309 return u.table.sigId_;
1310 }
builtin()1311 SymbolicAddress builtin() const {
1312 MOZ_ASSERT(which_ == Builtin || which_ == BuiltinInstanceMethod);
1313 return u.builtin_;
1314 }
1315 };
1316
1317 // Because ARM has a fixed-width instruction encoding, ARM can only express a
1318 // limited subset of immediates (in a single instruction).
1319
// Return whether 'i' is expressible as an ARM instruction immediate.
extern bool
IsValidARMImmediate(uint32_t i);

// Return the smallest value >= 'i' that IsValidARMImmediate.
extern uint32_t
RoundUpToNextValidARMImmediate(uint32_t i);

// The WebAssembly spec hard-codes the virtual page size to be 64KiB and
// requires the size of linear memory to always be a multiple of 64KiB.

static const unsigned PageSize = 64 * 1024;

// Bounds checks always compare the base of the memory access with the bounds
// check limit. If the memory access is unaligned, this means that, even if the
// bounds check succeeds, a few bytes of the access can extend past the end of
// memory. To guard against this, extra space is included in the guard region to
// catch the overflow. MaxMemoryAccessSize is a conservative approximation of
// the maximum guard space needed to catch all unaligned overflows.

static const unsigned MaxMemoryAccessSize = sizeof(Val);
1339
#ifdef JS_CODEGEN_X64

// All other code should use WASM_HUGE_MEMORY instead of JS_CODEGEN_X64 so that
// it is easy to use the huge-mapping optimization for other 64-bit platforms in
// the future.
# define WASM_HUGE_MEMORY

// On WASM_HUGE_MEMORY platforms, every asm.js or WebAssembly memory
// unconditionally allocates a huge region of virtual memory of size
// wasm::HugeMappedSize. This allows all memory resizing to work without
// reallocation and provides enough guard space for all offsets to be folded
// into memory accesses.

// Any 32-bit index is in-range of the mapping.
static const uint64_t IndexRange = uint64_t(UINT32_MAX) + 1;
// Any folded constant offset below 2^31 lands in the guard region.
static const uint64_t OffsetGuardLimit = uint64_t(INT32_MAX) + 1;
// One extra page catches the spill-over of unaligned accesses.
static const uint64_t UnalignedGuardPage = PageSize;
static const uint64_t HugeMappedSize = IndexRange + OffsetGuardLimit + UnalignedGuardPage;

static_assert(MaxMemoryAccessSize <= UnalignedGuardPage, "rounded up to static page size");

#else // !WASM_HUGE_MEMORY

// On !WASM_HUGE_MEMORY platforms:
//  - To avoid OOM in ArrayBuffer::prepareForAsmJS, asm.js continues to use the
//    original ArrayBuffer allocation which has no guard region at all.
//  - For WebAssembly memories, an additional GuardSize is mapped after the
//    accessible region of the memory to catch folded (base+offset) accesses
//    where `offset < OffsetGuardLimit` as well as the overflow from unaligned
//    accesses, as described above for MaxMemoryAccessSize.

static const size_t OffsetGuardLimit = PageSize - MaxMemoryAccessSize;
static const size_t GuardSize = PageSize;

// Return whether the given immediate satisfies the constraints of the platform
// (viz. that, on ARM, IsValidARMImmediate).

extern bool
IsValidBoundsCheckImmediate(uint32_t i);

// For a given WebAssembly/asm.js max size, return the number of bytes to
// map which will necessarily be a multiple of the system page size and greater
// than maxSize. For a returned mappedSize:
//   boundsCheckLimit = mappedSize - GuardSize
//   IsValidBoundsCheckImmediate(boundsCheckLimit)

extern size_t
ComputeMappedSize(uint32_t maxSize);

#endif // WASM_HUGE_MEMORY
1389
1390 // Metadata for bounds check instructions that are patched at runtime with the
1391 // appropriate bounds check limit. On WASM_HUGE_MEMORY platforms for wasm (and
1392 // SIMD/Atomic) bounds checks, no BoundsCheck is created: the signal handler
1393 // catches everything. On !WASM_HUGE_MEMORY, a BoundsCheck is created for each
1394 // memory access (except when statically eliminated by optimizations) so that
1395 // the length can be patched in as an immediate. This requires that the bounds
1396 // check limit IsValidBoundsCheckImmediate.
1397
class BoundsCheck
{
  public:
    BoundsCheck() = default;

    explicit BoundsCheck(uint32_t cmpOffset) : cmpOffset_(cmpOffset) { }

    // Address of the bounds-check comparison to patch: 'code' base plus the
    // recorded instruction offset.
    uint8_t* patchAt(uint8_t* code) const { return code + cmpOffset_; }

    // Relocate the recorded offset when code is shifted by 'delta' bytes.
    void offsetBy(uint32_t delta) { cmpOffset_ += delta; }

  private:
    uint32_t cmpOffset_;
};
1413
// Declares BoundsCheckVector, a Vector of the POD type BoundsCheck.
WASM_DECLARE_POD_VECTOR(BoundsCheck, BoundsCheckVector)
1415
1416 // Metadata for memory accesses. On WASM_HUGE_MEMORY platforms, only
1417 // (non-SIMD/Atomic) asm.js loads and stores create a MemoryAccess so that the
1418 // signal handler can implement the semantically-correct wraparound logic; the
1419 // rest simply redirect to the out-of-bounds stub in the signal handler. On x86,
1420 // the base address of memory is baked into each memory access instruction so
1421 // the MemoryAccess records the location of each for patching. On all other
1422 // platforms, no MemoryAccess is created.
1423
1424 class MemoryAccess
1425 {
1426 uint32_t insnOffset_;
1427 uint32_t trapOutOfLineOffset_;
1428
1429 public:
1430 MemoryAccess() = default;
1431 explicit MemoryAccess(uint32_t insnOffset, uint32_t trapOutOfLineOffset = UINT32_MAX)
1432 : insnOffset_(insnOffset),
1433 trapOutOfLineOffset_(trapOutOfLineOffset)
1434 {}
1435
1436 uint32_t insnOffset() const {
1437 return insnOffset_;
1438 }
1439 bool hasTrapOutOfLineCode() const {
1440 return trapOutOfLineOffset_ != UINT32_MAX;
1441 }
1442 uint8_t* trapOutOfLineCode(uint8_t* code) const {
1443 MOZ_ASSERT(hasTrapOutOfLineCode());
1444 return code + trapOutOfLineOffset_;
1445 }
1446
1447 void offsetBy(uint32_t delta) {
1448 insnOffset_ += delta;
1449 if (hasTrapOutOfLineCode())
1450 trapOutOfLineOffset_ += delta;
1451 }
1452 };
1453
// Declares MemoryAccessVector, a Vector of the POD type MemoryAccess.
WASM_DECLARE_POD_VECTOR(MemoryAccess, MemoryAccessVector)
1455
1456 // Metadata for the offset of an instruction to patch with the base address of
1457 // memory. In practice, this is only used for x86 where the offset points to the
1458 // *end* of the instruction (which is a non-fixed offset from the beginning of
1459 // the instruction). As part of the move away from code patching, this should be
1460 // removed.
1461
struct MemoryPatch
{
    // Code offset to patch with the base address of memory.
    uint32_t offset;

    MemoryPatch() = default;
    explicit MemoryPatch(uint32_t offset) : offset(offset) {}

    // Relocate the recorded offset when code is shifted by 'amount' bytes.
    void offsetBy(uint32_t amount) { offset += amount; }
};
1473
// Declares MemoryPatchVector, a Vector of the POD type MemoryPatch.
WASM_DECLARE_POD_VECTOR(MemoryPatch, MemoryPatchVector)
1475
1476 // Constants:
1477
// Offsets into global data reserved at instantiation. NOTE(review): the NaN
// slots presumably hold canonical NaN constants loaded by generated code —
// confirm where they are initialized.
static const unsigned NaN64GlobalDataOffset = 0;
static const unsigned NaN32GlobalDataOffset = NaN64GlobalDataOffset + sizeof(double);
static const unsigned InitialGlobalDataBytes = NaN32GlobalDataOffset + sizeof(float);

// Implementation limits on various module properties.
static const unsigned MaxSigs = 4 * 1024;
static const unsigned MaxFuncs = 512 * 1024;
static const unsigned MaxGlobals = 4 * 1024;
static const unsigned MaxLocals = 64 * 1024;
static const unsigned MaxImports = 64 * 1024;
static const unsigned MaxExports = 64 * 1024;
static const unsigned MaxTables = 4 * 1024;
static const unsigned MaxTableElems = 1024 * 1024;
static const unsigned MaxDataSegments = 64 * 1024;
static const unsigned MaxElemSegments = 64 * 1024;
static const unsigned MaxArgsPerFunc = 4 * 1024;
static const unsigned MaxBrTableElems = 4 * 1024 * 1024;

// To be able to assign function indices during compilation while the number of
// imports is still unknown, asm.js sets a maximum number of imports so it can
// immediately start handing out function indices starting at the maximum + 1.
// This means that there is a "hole" between the last import and the first
// definition, but that's fine.

static const unsigned AsmJSMaxImports = 4 * 1024;
static const unsigned AsmJSFirstDefFuncIndex = AsmJSMaxImports + 1;

static_assert(AsmJSMaxImports <= MaxImports, "conservative");
static_assert(AsmJSFirstDefFuncIndex < MaxFuncs, "conservative");
1506
1507 } // namespace wasm
1508 } // namespace js
1509
1510 #endif // wasm_types_h
1511