/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2016 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef wasm_code_h
#define wasm_code_h

#include "jit/shared/Assembler-shared.h"
#include "js/HashTable.h"
#include "threading/ExclusiveData.h"
#include "util/Memory.h"
#include "vm/MutexIDs.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmTypes.h"

namespace js {

struct AsmJSMetadata;

namespace wasm {

struct MetadataTier;
struct Metadata;

// LinkData contains all the metadata necessary to patch all the locations
// that depend on the absolute address of a ModuleSegment. This happens in a
// "linking" step after compilation and after the module's code is serialized.
// The LinkData is serialized along with the Module but does not (normally, see
// Module::debugLinkData_ comment) persist after (de)serialization, which
// distinguishes it from Metadata, which is stored in the Code object.

struct LinkDataCacheablePod {
  uint32_t trapOffset = 0;

  LinkDataCacheablePod() = default;
};

struct LinkData : LinkDataCacheablePod {
  const Tier tier;

  explicit LinkData(Tier tier) : tier(tier) {}

  LinkDataCacheablePod& pod() { return *this; }
  const LinkDataCacheablePod& pod() const { return *this; }

  struct InternalLink {
    uint32_t patchAtOffset;
    uint32_t targetOffset;
#ifdef JS_CODELABEL_LINKMODE
    uint32_t mode;
#endif
  };
  typedef Vector<InternalLink, 0, SystemAllocPolicy> InternalLinkVector;

  struct SymbolicLinkArray
      : EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, Uint32Vector> {
    WASM_DECLARE_SERIALIZABLE(SymbolicLinkArray)
  };

  InternalLinkVector internalLinks;
  SymbolicLinkArray symbolicLinks;

  WASM_DECLARE_SERIALIZABLE(LinkData)
};

using UniqueLinkData = UniquePtr<LinkData>;
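
// Illustrative sketch (not part of this header): conceptually, linking walks
// the InternalLinkVector and, for each entry, patches the code at
// `patchAtOffset` so that it refers to `base + targetOffset`, where `base` is
// the absolute address of the ModuleSegment's code. The helper below is a
// hypothetical illustration of that idea only; the real patching is done by
// the linking code using the assembler's patching primitives (and needs
// <cstring> for memcpy).
//
//   static void ApplyInternalLinksSketch(uint8_t* base, const LinkData& link) {
//     for (const LinkData::InternalLink& l : link.internalLinks) {
//       // Write the absolute target address at the patch location.
//       uintptr_t target = uintptr_t(base) + l.targetOffset;
//       memcpy(base + l.patchAtOffset, &target, sizeof(target));
//     }
//   }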

// Executable code must be deallocated specially.

struct FreeCode {
  uint32_t codeLength;
  FreeCode() : codeLength(0) {}
  explicit FreeCode(uint32_t codeLength) : codeLength(codeLength) {}
  void operator()(uint8_t* codeBytes);
};

using UniqueCodeBytes = UniquePtr<uint8_t, FreeCode>;

class Code;
class CodeTier;
class ModuleSegment;
class LazyStubSegment;

// CodeSegment contains common helpers for determining the base and length of a
// code segment and whether a pc belongs to this segment. It is inherited by:
// - ModuleSegment, i.e. the code segment of a Module, generated
// eagerly when a Module is instantiated.
// - LazyStubSegment, i.e. the code segment of entry stubs that are lazily
// generated.

class CodeSegment {
 protected:
  static UniqueCodeBytes AllocateCodeBytes(uint32_t codeLength);

  enum class Kind { LazyStubs, Module };

  CodeSegment(UniqueCodeBytes bytes, uint32_t length, Kind kind)
      : bytes_(std::move(bytes)),
        length_(length),
        kind_(kind),
        codeTier_(nullptr),
        unregisterOnDestroy_(false) {}

  bool initialize(const CodeTier& codeTier);

 private:
  const UniqueCodeBytes bytes_;
  const uint32_t length_;
  const Kind kind_;
  const CodeTier* codeTier_;
  bool unregisterOnDestroy_;

 public:
  bool initialized() const { return !!codeTier_; }
  ~CodeSegment();

  bool isLazyStubs() const { return kind_ == Kind::LazyStubs; }
  bool isModule() const { return kind_ == Kind::Module; }
  const ModuleSegment* asModule() const {
    MOZ_ASSERT(isModule());
    return (ModuleSegment*)this;
  }
  const LazyStubSegment* asLazyStub() const {
    MOZ_ASSERT(isLazyStubs());
    return (LazyStubSegment*)this;
  }

  uint8_t* base() const { return bytes_.get(); }
  uint32_t length() const {
    MOZ_ASSERT(length_ != UINT32_MAX);
    return length_;
  }

  bool containsCodePC(const void* pc) const {
    return pc >= base() && pc < (base() + length_);
  }

  const CodeTier& codeTier() const {
    MOZ_ASSERT(initialized());
    return *codeTier_;
  }
  const Code& code() const;

  void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const;
};
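
// Illustrative sketch (not part of this header): code that has a pc in hand
// (e.g. a profiler sample) can first test segment membership with
// containsCodePC() and then dispatch on the segment kind to the appropriate
// subclass, conceptually:
//
//   const CodeRange* LookupRangeSketch(const CodeSegment& segment,
//                                      const void* pc) {
//     if (!segment.containsCodePC(pc)) {
//       return nullptr;
//     }
//     return segment.isModule() ? segment.asModule()->lookupRange(pc)
//                               : segment.asLazyStub()->lookupRange(pc);
//   }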

// A wasm ModuleSegment owns the allocated executable code for a wasm module.

using UniqueModuleSegment = UniquePtr<ModuleSegment>;

class ModuleSegment : public CodeSegment {
  const Tier tier_;
  uint8_t* const trapCode_;

 public:
  ModuleSegment(Tier tier, UniqueCodeBytes codeBytes, uint32_t codeLength,
                const LinkData& linkData);

  static UniqueModuleSegment create(Tier tier, jit::MacroAssembler& masm,
                                    const LinkData& linkData);
  static UniqueModuleSegment create(Tier tier, const Bytes& unlinkedBytes,
                                    const LinkData& linkData);

  bool initialize(const CodeTier& codeTier, const LinkData& linkData,
                  const Metadata& metadata, const MetadataTier& metadataTier);

  Tier tier() const { return tier_; }

  // Pointers to stubs to which PC is redirected from the signal-handler.

  uint8_t* trapCode() const { return trapCode_; }

  // Structured clone support:

  size_t serializedSize() const;
  uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
  static const uint8_t* deserialize(const uint8_t* cursor,
                                    const LinkData& linkData,
                                    UniqueModuleSegment* segment);

  const CodeRange* lookupRange(const void* pc) const;

  void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code,
                     size_t* data) const;
};

// A FuncExport represents a single function definition inside a wasm Module
// that has been exported one or more times. It provides an internal entry
// point that can be called via function definition index by
// Instance::callExport(). To allow O(log(n)) lookup of a FuncExport by
// function definition index, the FuncExportVector is stored sorted by
// function definition index.

class FuncExport {
  FuncType funcType_;
  MOZ_INIT_OUTSIDE_CTOR struct CacheablePod {
    uint32_t funcIndex_;
    uint32_t eagerInterpEntryOffset_;  // Machine code offset
    bool hasEagerStubs_;
  } pod;

 public:
  FuncExport() = default;
  explicit FuncExport(FuncType&& funcType, uint32_t funcIndex,
                      bool hasEagerStubs)
      : funcType_(std::move(funcType)) {
    pod.funcIndex_ = funcIndex;
    pod.eagerInterpEntryOffset_ = UINT32_MAX;
    pod.hasEagerStubs_ = hasEagerStubs;
  }
  void initEagerInterpEntryOffset(uint32_t entryOffset) {
    MOZ_ASSERT(pod.eagerInterpEntryOffset_ == UINT32_MAX);
    MOZ_ASSERT(hasEagerStubs());
    pod.eagerInterpEntryOffset_ = entryOffset;
  }

  bool hasEagerStubs() const { return pod.hasEagerStubs_; }
  const FuncType& funcType() const { return funcType_; }
  uint32_t funcIndex() const { return pod.funcIndex_; }
  uint32_t eagerInterpEntryOffset() const {
    MOZ_ASSERT(pod.eagerInterpEntryOffset_ != UINT32_MAX);
    MOZ_ASSERT(hasEagerStubs());
    return pod.eagerInterpEntryOffset_;
  }

  bool canHaveJitEntry() const {
    return
#ifdef ENABLE_WASM_SIMD
        !funcType_.hasV128ArgOrRet() &&
#endif
        !funcType_.temporarilyUnsupportedReftypeForEntry() &&
        !funcType_.temporarilyUnsupportedResultCountForJitEntry() &&
        JitOptions.enableWasmJitEntry;
  }

  bool clone(const FuncExport& src) {
    mozilla::PodAssign(&pod, &src.pod);
    return funcType_.clone(src.funcType_);
  }

  WASM_DECLARE_SERIALIZABLE(FuncExport)
};

typedef Vector<FuncExport, 0, SystemAllocPolicy> FuncExportVector;
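
// Illustrative sketch (not part of this header): because FuncExportVector is
// sorted by function definition index, a lookup such as
// MetadataTier::lookupFuncExport() can use binary search rather than a linear
// scan. Conceptually (using <algorithm>):
//
//   const FuncExport* FindFuncExportSketch(const FuncExportVector& exports,
//                                          uint32_t funcIndex) {
//     auto it = std::lower_bound(exports.begin(), exports.end(), funcIndex,
//                                [](const FuncExport& fe, uint32_t index) {
//                                  return fe.funcIndex() < index;
//                                });
//     return (it != exports.end() && it->funcIndex() == funcIndex) ? &*it
//                                                                  : nullptr;
//   }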

// A FuncImport contains the runtime metadata needed to implement a call to an
// imported function. Each function import has two call stubs: an optimized
// path into JIT code and a slow path into the generic C++ js::Invoke; the
// offsets of these stubs are stored so that function-import callsites can be
// dynamically patched at runtime.

class FuncImport {
  FuncType funcType_;
  struct CacheablePod {
    uint32_t tlsDataOffset_;
    uint32_t interpExitCodeOffset_;  // Machine code offset
    uint32_t jitExitCodeOffset_;     // Machine code offset
  } pod;

 public:
  FuncImport() { memset(&pod, 0, sizeof(CacheablePod)); }

  FuncImport(FuncType&& funcType, uint32_t tlsDataOffset)
      : funcType_(std::move(funcType)) {
    pod.tlsDataOffset_ = tlsDataOffset;
    pod.interpExitCodeOffset_ = 0;
    pod.jitExitCodeOffset_ = 0;
  }

  void initInterpExitOffset(uint32_t off) {
    MOZ_ASSERT(!pod.interpExitCodeOffset_);
    pod.interpExitCodeOffset_ = off;
  }
  void initJitExitOffset(uint32_t off) {
    MOZ_ASSERT(!pod.jitExitCodeOffset_);
    pod.jitExitCodeOffset_ = off;
  }

  const FuncType& funcType() const { return funcType_; }
  uint32_t tlsDataOffset() const { return pod.tlsDataOffset_; }
  uint32_t interpExitCodeOffset() const { return pod.interpExitCodeOffset_; }
  uint32_t jitExitCodeOffset() const { return pod.jitExitCodeOffset_; }

  bool clone(const FuncImport& src) {
    mozilla::PodAssign(&pod, &src.pod);
    return funcType_.clone(src.funcType_);
  }

  WASM_DECLARE_SERIALIZABLE(FuncImport)
};

typedef Vector<FuncImport, 0, SystemAllocPolicy> FuncImportVector;
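
// Illustrative sketch (not part of this header): the two exit-code offsets are
// machine-code offsets relative to the start of the tier's code segment, so
// the absolute stub addresses for a given import can conceptually be recovered
// as `codeBase + offset`:
//
//   void* InterpExitSketch(uint8_t* codeBase, const FuncImport& fi) {
//     return codeBase + fi.interpExitCodeOffset();
//   }
//   void* JitExitSketch(uint8_t* codeBase, const FuncImport& fi) {
//     return codeBase + fi.jitExitCodeOffset();
//   }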

// Metadata holds all the data that is needed to describe compiled wasm code
// at runtime (as opposed to data that is only used to statically link or
// instantiate a module).
//
// Metadata is built incrementally by ModuleGenerator and then shared immutably
// between modules.
//
// The Metadata structure is split into tier-invariant and tier-variant parts;
// the former points to instances of the latter.  Additionally, the asm.js
// subsystem subclasses Metadata, adding more tier-invariant data, some of
// which is serialized.  See AsmJS.cpp.

struct MetadataCacheablePod {
  ModuleKind kind;
  MemoryUsage memoryUsage;
  uint32_t minMemoryLength;
  uint32_t globalDataLength;
  Maybe<uint32_t> maxMemoryLength;
  Maybe<uint32_t> startFuncIndex;
  Maybe<uint32_t> nameCustomSectionIndex;
  bool filenameIsURL;
  bool v128Enabled;
  bool omitsBoundsChecks;

  explicit MetadataCacheablePod(ModuleKind kind)
      : kind(kind),
        memoryUsage(MemoryUsage::None),
        minMemoryLength(0),
        globalDataLength(0),
        filenameIsURL(false),
        v128Enabled(false),
        omitsBoundsChecks(false) {}
};

typedef uint8_t ModuleHash[8];
typedef Vector<ValTypeVector, 0, SystemAllocPolicy> FuncArgTypesVector;
typedef Vector<ValTypeVector, 0, SystemAllocPolicy> FuncReturnTypesVector;

struct Metadata : public ShareableBase<Metadata>, public MetadataCacheablePod {
  FuncTypeWithIdVector funcTypeIds;
  GlobalDescVector globals;
  TableDescVector tables;
  CacheableChars filename;
  CacheableChars sourceMapURL;

  // namePayload points at the name section's CustomSection::payload so that
  // the Names (which use payload-relative offsets) can be used independently
  // of the Module without duplicating the name section.
  SharedBytes namePayload;
  Maybe<Name> moduleName;
  NameVector funcNames;

  // Debug-enabled code is not serialized.
  bool debugEnabled;
  FuncArgTypesVector debugFuncArgTypes;
  FuncReturnTypesVector debugFuncReturnTypes;
  ModuleHash debugHash;

  explicit Metadata(ModuleKind kind = ModuleKind::Wasm)
      : MetadataCacheablePod(kind), debugEnabled(false), debugHash() {}
  virtual ~Metadata() = default;

  MetadataCacheablePod& pod() { return *this; }
  const MetadataCacheablePod& pod() const { return *this; }

  bool usesMemory() const { return memoryUsage != MemoryUsage::None; }
  bool usesSharedMemory() const { return memoryUsage == MemoryUsage::Shared; }

  // Invariant: The result of getFuncResultType can only be used as long as
  // the Metadata is live, because the returned ResultType may encode a
  // pointer to debugFuncReturnTypes.
  ResultType getFuncResultType(uint32_t funcIndex) const {
    return ResultType::Vector(debugFuncReturnTypes[funcIndex]);
  };

  // AsmJSMetadata derives from Metadata iff isAsmJS(). Mostly this distinction
  // is encapsulated within AsmJS.cpp, but the additional virtual functions
  // allow asm.js to override wasm behavior in the handful of cases that can't
  // be easily encapsulated by AsmJS.cpp.

  bool isAsmJS() const { return kind == ModuleKind::AsmJS; }
  const AsmJSMetadata& asAsmJS() const {
    MOZ_ASSERT(isAsmJS());
    return *(const AsmJSMetadata*)this;
  }
  virtual bool mutedErrors() const { return false; }
  virtual const char16_t* displayURL() const { return nullptr; }
  virtual ScriptSource* maybeScriptSource() const { return nullptr; }

  // The Developer-Facing Display Conventions section of the WebAssembly Web
  // API spec defines two cases for displaying a wasm function name:
  //  1. the function name stands alone
  //  2. the function name precedes the location

  enum NameContext { Standalone, BeforeLocation };

  virtual bool getFuncName(NameContext ctx, uint32_t funcIndex,
                           UTF8Bytes* name) const;

  bool getFuncNameStandalone(uint32_t funcIndex, UTF8Bytes* name) const {
    return getFuncName(NameContext::Standalone, funcIndex, name);
  }
  bool getFuncNameBeforeLocation(uint32_t funcIndex, UTF8Bytes* name) const {
    return getFuncName(NameContext::BeforeLocation, funcIndex, name);
  }

  WASM_DECLARE_SERIALIZABLE(Metadata);
};

using MutableMetadata = RefPtr<Metadata>;
using SharedMetadata = RefPtr<const Metadata>;

struct MetadataTier {
  explicit MetadataTier(Tier tier) : tier(tier) {}

  const Tier tier;

  Uint32Vector funcToCodeRange;
  CodeRangeVector codeRanges;
  CallSiteVector callSites;
  TrapSiteVectorArray trapSites;
  FuncImportVector funcImports;
  FuncExportVector funcExports;
  StackMaps stackMaps;

  // Debug information, not serialized.
  Uint32Vector debugTrapFarJumpOffsets;

  FuncExport& lookupFuncExport(uint32_t funcIndex,
                               size_t* funcExportIndex = nullptr);
  const FuncExport& lookupFuncExport(uint32_t funcIndex,
                                     size_t* funcExportIndex = nullptr) const;

  const CodeRange& codeRange(const FuncExport& funcExport) const {
    return codeRanges[funcToCodeRange[funcExport.funcIndex()]];
  }

  bool clone(const MetadataTier& src);

  WASM_DECLARE_SERIALIZABLE(MetadataTier);
};

using UniqueMetadataTier = UniquePtr<MetadataTier>;

// LazyStubSegment is a code segment lazily generated for function entry stubs
// (both interpreter and jit ones).
//
// Because a stub is usually small (a few KiB) and an executable code segment
// isn't (64KiB), a given stub segment can contain entry stubs for many
// functions.

using UniqueLazyStubSegment = UniquePtr<LazyStubSegment>;
using LazyStubSegmentVector =
    Vector<UniqueLazyStubSegment, 0, SystemAllocPolicy>;

class LazyStubSegment : public CodeSegment {
  CodeRangeVector codeRanges_;
  size_t usedBytes_;

 public:
  LazyStubSegment(UniqueCodeBytes bytes, size_t length)
      : CodeSegment(std::move(bytes), length, CodeSegment::Kind::LazyStubs),
        usedBytes_(0) {}

  static UniqueLazyStubSegment create(const CodeTier& codeTier,
                                      size_t codeLength);

  static size_t AlignBytesNeeded(size_t bytes) {
    return AlignBytes(bytes, gc::SystemPageSize());
  }

  bool hasSpace(size_t bytes) const;
  bool addStubs(size_t codeLength, const Uint32Vector& funcExportIndices,
                const FuncExportVector& funcExports,
                const CodeRangeVector& codeRanges, uint8_t** codePtr,
                size_t* indexFirstInsertedCodeRange);

  const CodeRangeVector& codeRanges() const { return codeRanges_; }
  const CodeRange* lookupRange(const void* pc) const;

  void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                     size_t* data) const;
};

// LazyFuncExport helps to efficiently look up a CodeRange from a given
// function index. It is inserted into a vector sorted by function index so
// that it can later be found by binary search.

struct LazyFuncExport {
  size_t funcIndex;
  size_t lazyStubSegmentIndex;
  size_t funcCodeRangeIndex;
  LazyFuncExport(size_t funcIndex, size_t lazyStubSegmentIndex,
                 size_t funcCodeRangeIndex)
      : funcIndex(funcIndex),
        lazyStubSegmentIndex(lazyStubSegmentIndex),
        funcCodeRangeIndex(funcCodeRangeIndex) {}
};

using LazyFuncExportVector = Vector<LazyFuncExport, 0, SystemAllocPolicy>;

// LazyStubTier contains all the necessary information for lazy function entry
// stubs that are generated at runtime. None of its data is ever serialized.
//
// It must be protected by a lock, because the main thread can both read and
// write lazy stubs at any time while a background thread can regenerate lazy
// stubs for tier2 at any time.

class LazyStubTier {
  LazyStubSegmentVector stubSegments_;
  LazyFuncExportVector exports_;
  size_t lastStubSegmentIndex_;

  bool createMany(const Uint32Vector& funcExportIndices,
                  const CodeTier& codeTier, size_t* stubSegmentIndex);

 public:
  LazyStubTier() : lastStubSegmentIndex_(0) {}

  bool empty() const { return stubSegments_.empty(); }
  bool hasStub(uint32_t funcIndex) const;

  // Returns a pointer to the raw interpreter entry of a given function for
  // which stubs have been lazily generated.
  void* lookupInterpEntry(uint32_t funcIndex) const;

  // Creates one lazy stub for the exported function, for which the jit entry
  // will be set to the lazily-generated one.
  bool createOne(uint32_t funcExportIndex, const CodeTier& codeTier);

  // Creates lazy stubs for all the functions in funcExportIndices, putting
  // them in a single stub segment. Jit entries won't be used until
  // setJitEntries() is actually called, after the Code owner has committed
  // tier2.
  bool createTier2(const Uint32Vector& funcExportIndices,
                   const CodeTier& codeTier, Maybe<size_t>* stubSegmentIndex);
  void setJitEntries(const Maybe<size_t>& stubSegmentIndex, const Code& code);

  void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                     size_t* data) const;
};
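
// Illustrative sketch (not part of this header): a LazyStubTier is only
// reachable through CodeTier::lazyStubs(), which wraps it in an
// ExclusiveData<LazyStubTier>, so every reader and writer takes the lock
// first. Assuming the usual ExclusiveData::lock() guard API, a lookup might
// conceptually look like:
//
//   void* LookupInterpEntrySketch(const CodeTier& codeTier,
//                                 uint32_t funcIndex) {
//     auto stubs = codeTier.lazyStubs().lock();
//     return stubs->lookupInterpEntry(funcIndex);
//   }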

// CodeTier contains all the data related to a given compilation tier. It is
// built during module generation and then immutably stored in a Code.

using UniqueCodeTier = UniquePtr<CodeTier>;
using UniqueConstCodeTier = UniquePtr<const CodeTier>;

class CodeTier {
  const Code* code_;

  // Serialized information.
  const UniqueMetadataTier metadata_;
  const UniqueModuleSegment segment_;

  // Lazy stubs, not serialized.
  ExclusiveData<LazyStubTier> lazyStubs_;

  static const MutexId& mutexForTier(Tier tier) {
    if (tier == Tier::Baseline) {
      return mutexid::WasmLazyStubsTier1;
    }
    MOZ_ASSERT(tier == Tier::Optimized);
    return mutexid::WasmLazyStubsTier2;
  }

 public:
  CodeTier(UniqueMetadataTier metadata, UniqueModuleSegment segment)
      : code_(nullptr),
        metadata_(std::move(metadata)),
        segment_(std::move(segment)),
        lazyStubs_(mutexForTier(segment_->tier())) {}

  bool initialized() const { return !!code_ && segment_->initialized(); }
  bool initialize(const Code& code, const LinkData& linkData,
                  const Metadata& metadata);

  Tier tier() const { return segment_->tier(); }
  const ExclusiveData<LazyStubTier>& lazyStubs() const { return lazyStubs_; }
  const MetadataTier& metadata() const { return *metadata_.get(); }
  const ModuleSegment& segment() const { return *segment_.get(); }
  const Code& code() const {
    MOZ_ASSERT(initialized());
    return *code_;
  }

  const CodeRange* lookupRange(const void* pc) const;

  size_t serializedSize() const;
  uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
  static const uint8_t* deserialize(const uint8_t* cursor,
                                    const LinkData& linkData,
                                    UniqueCodeTier* codeTier);
  void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                     size_t* data) const;
};

// Jump tables that take tiering into account when calling either from wasm to
// wasm (through rabaldr) or from jit to wasm (jit entry).

class JumpTables {
  using TablePointer = mozilla::UniquePtr<void*[], JS::FreePolicy>;

  CompileMode mode_;
  TablePointer tiering_;
  TablePointer jit_;
  size_t numFuncs_;

 public:
  bool init(CompileMode mode, const ModuleSegment& ms,
            const CodeRangeVector& codeRanges);

  void setJitEntry(size_t i, void* target) const {
    // Make sure that write is atomic; see comment in wasm::Module::finishTier2
    // to that effect.
    MOZ_ASSERT(i < numFuncs_);
    jit_.get()[i] = target;
  }
  void** getAddressOfJitEntry(size_t i) const {
    MOZ_ASSERT(i < numFuncs_);
    MOZ_ASSERT(jit_.get()[i]);
    return &jit_.get()[i];
  }
  size_t funcIndexFromJitEntry(void** target) const {
    MOZ_ASSERT(target >= &jit_.get()[0]);
    MOZ_ASSERT(target <= &(jit_.get()[numFuncs_ - 1]));
    return (intptr_t*)target - (intptr_t*)&jit_.get()[0];
  }

  void setTieringEntry(size_t i, void* target) const {
    MOZ_ASSERT(i < numFuncs_);
    // See comment in wasm::Module::finishTier2.
    if (mode_ == CompileMode::Tier1) {
      tiering_.get()[i] = target;
    }
  }
  void** tiering() const { return tiering_.get(); }

  size_t sizeOfMiscExcludingThis() const {
    // 2 words per function for the jit entry table, plus maybe 1 per
    // function if we're tiering.
    return sizeof(void*) * (2 + (tiering_ ? 1 : 0)) * numFuncs_;
  }
};
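
// Illustrative worked example (not part of this header): for a hypothetical
// Tier1 (tiering) module with 1000 functions on a 64-bit target,
// sizeOfMiscExcludingThis() reports sizeof(void*) * (2 + 1) * 1000 =
// 8 * 3 * 1000 = 24000 bytes; without a tiering table the same module would
// report 8 * 2 * 1000 = 16000 bytes.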

// Code objects own executable code and the metadata that describes it. A
// single Code object is normally shared between a module and all its
// instances.
//
// profilingLabels_ is lazily initialized, but behind a lock.

using SharedCode = RefPtr<const Code>;
using MutableCode = RefPtr<Code>;

class Code : public ShareableBase<Code> {
  UniqueCodeTier tier1_;
  mutable UniqueConstCodeTier tier2_;  // Access only when hasTier2() is true
  mutable Atomic<bool> hasTier2_;
  SharedMetadata metadata_;
  ExclusiveData<CacheableCharsVector> profilingLabels_;
  JumpTables jumpTables_;
  StructTypeVector structTypes_;

 public:
  Code(UniqueCodeTier tier1, const Metadata& metadata,
       JumpTables&& maybeJumpTables, StructTypeVector&& structTypes);
  bool initialized() const { return tier1_->initialized(); }

  bool initialize(const LinkData& linkData);

  void setTieringEntry(size_t i, void* target) const {
    jumpTables_.setTieringEntry(i, target);
  }
  void** tieringJumpTable() const { return jumpTables_.tiering(); }

  void setJitEntry(size_t i, void* target) const {
    jumpTables_.setJitEntry(i, target);
  }
  void** getAddressOfJitEntry(size_t i) const {
    return jumpTables_.getAddressOfJitEntry(i);
  }
  uint32_t getFuncIndex(JSFunction* fun) const;

  bool setTier2(UniqueCodeTier tier2, const LinkData& linkData) const;
  void commitTier2() const;

  bool hasTier2() const { return hasTier2_; }
  Tiers tiers() const;
  bool hasTier(Tier t) const;

  Tier stableTier() const;  // This is stable during a run
  Tier bestTier() const;  // May transition from Baseline -> Ion at any time

  const CodeTier& codeTier(Tier tier) const;
  const Metadata& metadata() const { return *metadata_; }
  const StructTypeVector& structTypes() const { return structTypes_; }

  const ModuleSegment& segment(Tier iter) const {
    return codeTier(iter).segment();
  }
  const MetadataTier& metadata(Tier iter) const {
    return codeTier(iter).metadata();
  }

  // Metadata lookup functions:

  const CallSite* lookupCallSite(void* returnAddress) const;
  const CodeRange* lookupFuncRange(void* pc) const;
  const StackMap* lookupStackMap(uint8_t* nextPC) const;
  bool containsCodePC(const void* pc) const;
  bool lookupTrap(void* pc, Trap* trap, BytecodeOffset* bytecode) const;

  // To save memory, profilingLabels_ are generated lazily when profiling mode
  // is enabled.

  void ensureProfilingLabels(bool profilingEnabled) const;
  const char* profilingLabel(uint32_t funcIndex) const;

  // about:memory reporting:

  void addSizeOfMiscIfNotSeen(MallocSizeOf mallocSizeOf,
                              Metadata::SeenSet* seenMetadata,
                              Code::SeenSet* seenCode, size_t* code,
                              size_t* data) const;

  // A Code object is serialized as the length and bytes of the machine code
  // after statically unlinking it; the Code is then later recreated from the
  // machine code and other parts.

  size_t serializedSize() const;
  uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
  static const uint8_t* deserialize(const uint8_t* cursor,
                                    const LinkData& linkData,
                                    Metadata& metadata, SharedCode* code);
};
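
// Illustrative sketch (not part of this header): a caller serializing a Code
// object is expected to size the buffer with serializedSize() and then write
// into it with serialize(); assuming Bytes supports resize()/begin()/length(),
// the pattern is conceptually:
//
//   bool SerializeCodeSketch(const Code& code, const LinkData& linkData,
//                            Bytes* bytes) {
//     if (!bytes->resize(code.serializedSize())) {
//       return false;
//     }
//     uint8_t* end = code.serialize(bytes->begin(), linkData);
//     return end == bytes->begin() + bytes->length();
//   }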

void PatchDebugSymbolicAccesses(uint8_t* codeBase, jit::MacroAssembler& masm);

}  // namespace wasm
}  // namespace js

#endif  // wasm_code_h