/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_JitCompartment_h
#define jit_JitCompartment_h

#include "mozilla/Array.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/MemoryReporting.h"

#include "builtin/TypedObject.h"
#include "jit/CompileInfo.h"
#include "jit/ICStubSpace.h"
#include "jit/IonCode.h"
#include "jit/IonControlFlow.h"
#include "jit/JitFrames.h"
#include "jit/shared/Assembler-shared.h"
#include "js/GCHashTable.h"
#include "js/Value.h"
#include "vm/Stack.h"

namespace js {
namespace jit {

class FrameSizeClass;

struct EnterJitData {
  explicit EnterJitData(JSContext* cx) : envChain(cx), result(cx) {}

  uint8_t* jitcode;
  InterpreterFrame* osrFrame;

  void* calleeToken;

  Value* maxArgv;
  unsigned maxArgc;
  unsigned numActualArgs;
  unsigned osrNumStackValues;

  RootedObject envChain;
  RootedValue result;

  bool constructing;
};

typedef void (*EnterJitCode)(void* code, unsigned argc, Value* argv,
                             InterpreterFrame* fp, CalleeToken calleeToken,
                             JSObject* envChain, size_t numStackValues,
                             Value* vp);
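
// Illustrative sketch only (the engine's real call sites live in the EnterJit
// machinery, e.g. EnterBaseline/EnterIon): an EnterJitData instance is
// marshaled into an EnterJitCode invocation roughly field for field. The
// helper name and exact argument packing below are assumptions for
// exposition, not the actual implementation.
//
//   void callJitSketch(EnterJitCode enterJit, EnterJitData& data) {
//     enterJit(data.jitcode, data.maxArgc, data.maxArgv, data.osrFrame,
//              data.calleeToken, data.envChain, data.osrNumStackValues,
//              data.result.address());
//   }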

class JitcodeGlobalTable;

// Information about a loop backedge in the runtime, which can be set to
// point to either the loop header or to an OOL interrupt checking stub,
// if signal handlers are being used to implement interrupts.
class PatchableBackedge : public InlineListNode<PatchableBackedge> {
  friend class JitZoneGroup;

  CodeLocationJump backedge;
  CodeLocationLabel loopHeader;
  CodeLocationLabel interruptCheck;

 public:
  PatchableBackedge(CodeLocationJump backedge, CodeLocationLabel loopHeader,
                    CodeLocationLabel interruptCheck)
      : backedge(backedge),
        loopHeader(loopHeader),
        interruptCheck(interruptCheck) {}
};

class JitRuntime {
 private:
  friend class JitCompartment;

  // Executable allocator for all code except wasm code and Ion code with
  // patchable backedges (see below).
  ActiveThreadData<ExecutableAllocator> execAlloc_;

  // Executable allocator for Ion scripts with patchable backedges.
  ActiveThreadData<ExecutableAllocator> backedgeExecAlloc_;

  // Shared exception-handler tail.
  ExclusiveAccessLockWriteOnceData<uint32_t> exceptionTailOffset_;

  // Shared post-bailout-handler tail.
  ExclusiveAccessLockWriteOnceData<uint32_t> bailoutTailOffset_;

  // Shared profiler exit frame tail.
  ExclusiveAccessLockWriteOnceData<uint32_t> profilerExitFrameTailOffset_;

  // Trampoline for entering JIT code.
  ExclusiveAccessLockWriteOnceData<uint32_t> enterJITOffset_;

  // Vector mapping frame class sizes to bailout tables.
  struct BailoutTable {
    uint32_t startOffset;
    uint32_t size;
    BailoutTable(uint32_t startOffset, uint32_t size)
        : startOffset(startOffset), size(size) {}
  };
  typedef Vector<BailoutTable, 4, SystemAllocPolicy> BailoutTableVector;
  ExclusiveAccessLockWriteOnceData<BailoutTableVector> bailoutTables_;

  // Generic bailout table; used if the bailout table overflows.
  ExclusiveAccessLockWriteOnceData<uint32_t> bailoutHandlerOffset_;

  // Argument-rectifying thunk, in the case of insufficient arguments passed
  // to a function call site.
  ExclusiveAccessLockWriteOnceData<uint32_t> argumentsRectifierOffset_;
  ExclusiveAccessLockWriteOnceData<uint32_t> argumentsRectifierReturnOffset_;

  // Thunk that invalidates an (Ion compiled) caller on the Ion stack.
  ExclusiveAccessLockWriteOnceData<uint32_t> invalidatorOffset_;

  // Thunk that calls the GC pre barrier.
  ExclusiveAccessLockWriteOnceData<uint32_t> valuePreBarrierOffset_;
  ExclusiveAccessLockWriteOnceData<uint32_t> stringPreBarrierOffset_;
  ExclusiveAccessLockWriteOnceData<uint32_t> objectPreBarrierOffset_;
  ExclusiveAccessLockWriteOnceData<uint32_t> shapePreBarrierOffset_;
  ExclusiveAccessLockWriteOnceData<uint32_t> objectGroupPreBarrierOffset_;

  // Thunk to call malloc/free.
  ExclusiveAccessLockWriteOnceData<uint32_t> mallocStubOffset_;
  ExclusiveAccessLockWriteOnceData<uint32_t> freeStubOffset_;

  // Thunk called to finish compilation of an IonScript.
  ExclusiveAccessLockWriteOnceData<uint32_t> lazyLinkStubOffset_;

  // Thunk to enter the interpreter from JIT code.
  ExclusiveAccessLockWriteOnceData<uint32_t> interpreterStubOffset_;

  // Thunk used by the debugger for breakpoint and step mode.
  ExclusiveAccessLockWriteOnceData<JitCode*> debugTrapHandler_;

  // Thunk used to fix up on-stack recompile of baseline scripts.
  ExclusiveAccessLockWriteOnceData<JitCode*> baselineDebugModeOSRHandler_;
  ExclusiveAccessLockWriteOnceData<void*>
      baselineDebugModeOSRHandlerNoFrameRegPopAddr_;

  // Code for trampolines and VMFunction wrappers.
  ExclusiveAccessLockWriteOnceData<JitCode*> trampolineCode_;

  // Map VMFunction addresses to the offset of the wrapper in
  // trampolineCode_.
  using VMWrapperMap = HashMap<const VMFunction*, uint32_t, VMFunction>;
  ExclusiveAccessLockWriteOnceData<VMWrapperMap*> functionWrappers_;

  // If true, the signal handler to interrupt Ion code should not attempt to
  // patch backedges, as some thread is busy modifying data structures.
  mozilla::Atomic<bool> preventBackedgePatching_;

  // Global table of jitcode native address => bytecode address mappings.
  UnprotectedData<JitcodeGlobalTable*> jitcodeGlobalTable_;

 private:
  void generateLazyLinkStub(MacroAssembler& masm);
  void generateInterpreterStub(MacroAssembler& masm);
  void generateProfilerExitFrameTailStub(MacroAssembler& masm,
                                         Label* profilerExitTail);
  void generateExceptionTailStub(MacroAssembler& masm, void* handler,
                                 Label* profilerExitTail);
  void generateBailoutTailStub(MacroAssembler& masm, Label* bailoutTail);
  void generateEnterJIT(JSContext* cx, MacroAssembler& masm);
  void generateArgumentsRectifier(MacroAssembler& masm);
  BailoutTable generateBailoutTable(MacroAssembler& masm, Label* bailoutTail,
                                    uint32_t frameClass);
  void generateBailoutHandler(MacroAssembler& masm, Label* bailoutTail);
  void generateInvalidator(MacroAssembler& masm, Label* bailoutTail);
  uint32_t generatePreBarrier(JSContext* cx, MacroAssembler& masm,
                              MIRType type);
  void generateMallocStub(MacroAssembler& masm);
  void generateFreeStub(MacroAssembler& masm);
  JitCode* generateDebugTrapHandler(JSContext* cx);
  JitCode* generateBaselineDebugModeOSRHandler(
      JSContext* cx, uint32_t* noFrameRegPopOffsetOut);
  bool generateVMWrapper(JSContext* cx, MacroAssembler& masm,
                         const VMFunction& f);

  bool generateTLEventVM(MacroAssembler& masm, const VMFunction& f, bool enter);

  inline bool generateTLEnterVM(MacroAssembler& masm, const VMFunction& f) {
    return generateTLEventVM(masm, f, /* enter = */ true);
  }
  inline bool generateTLExitVM(MacroAssembler& masm, const VMFunction& f) {
    return generateTLEventVM(masm, f, /* enter = */ false);
  }

  uint32_t startTrampolineCode(MacroAssembler& masm);

  TrampolinePtr trampolineCode(uint32_t offset) const {
    MOZ_ASSERT(offset > 0);
    MOZ_ASSERT(offset < trampolineCode_->instructionsSize());
    return TrampolinePtr(trampolineCode_->raw() + offset);
  }

 public:
  explicit JitRuntime(JSRuntime* rt);
  ~JitRuntime();
  MOZ_MUST_USE bool initialize(JSContext* cx,
                               js::AutoLockForExclusiveAccess& lock);

  static void Trace(JSTracer* trc, js::AutoLockForExclusiveAccess& lock);
  static void TraceJitcodeGlobalTableForMinorGC(JSTracer* trc);
  static MOZ_MUST_USE bool MarkJitcodeGlobalTableIteratively(GCMarker* marker);
  static void SweepJitcodeGlobalTable(JSRuntime* rt);

  ExecutableAllocator& execAlloc() { return execAlloc_.ref(); }
  ExecutableAllocator& backedgeExecAlloc() { return backedgeExecAlloc_.ref(); }

  class AutoPreventBackedgePatching {
    mozilla::DebugOnly<JSRuntime*> rt_;
    JitRuntime* jrt_;
    bool prev_;

   public:
    // This two-arg constructor is provided for JSRuntime::createJitRuntime,
    // where we have a JitRuntime but didn't set rt->jitRuntime_ yet.
    AutoPreventBackedgePatching(JSRuntime* rt, JitRuntime* jrt)
        : rt_(rt),
          jrt_(jrt),
          prev_(false)  // silence GCC warning
    {
      if (jrt_) {
        prev_ = jrt_->preventBackedgePatching_;
        jrt_->preventBackedgePatching_ = true;
      }
    }
    explicit AutoPreventBackedgePatching(JSRuntime* rt)
        : AutoPreventBackedgePatching(rt, rt->jitRuntime()) {}
    ~AutoPreventBackedgePatching() {
      MOZ_ASSERT(jrt_ == rt_->jitRuntime());
      if (jrt_) {
        MOZ_ASSERT(jrt_->preventBackedgePatching_);
        jrt_->preventBackedgePatching_ = prev_;
      }
    }
  };
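
  // Usage sketch (illustrative, not a real call site): the guard saves and
  // restores the previous flag value, so scopes may nest. Code that mutates
  // backedge data structures or patches backedge jumps is expected to hold
  // the guard for the duration of the mutation:
  //
  //   {
  //     JitRuntime::AutoPreventBackedgePatching apbp(cx->runtime());
  //     // ... mutate backedge lists / patch jumps here ...
  //   }  // flag restored to its previous value on destruction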

  bool preventBackedgePatching() const { return preventBackedgePatching_; }

  TrampolinePtr getVMWrapper(const VMFunction& f) const;
  JitCode* debugTrapHandler(JSContext* cx);
  JitCode* getBaselineDebugModeOSRHandler(JSContext* cx);
  void* getBaselineDebugModeOSRHandlerAddress(JSContext* cx, bool popFrameReg);

  TrampolinePtr getGenericBailoutHandler() const {
    return trampolineCode(bailoutHandlerOffset_);
  }

  TrampolinePtr getExceptionTail() const {
    return trampolineCode(exceptionTailOffset_);
  }

  TrampolinePtr getBailoutTail() const {
    return trampolineCode(bailoutTailOffset_);
  }

  TrampolinePtr getProfilerExitFrameTail() const {
    return trampolineCode(profilerExitFrameTailOffset_);
  }

  TrampolinePtr getBailoutTable(const FrameSizeClass& frameClass) const;
  uint32_t getBailoutTableSize(const FrameSizeClass& frameClass) const;

  TrampolinePtr getArgumentsRectifier() const {
    return trampolineCode(argumentsRectifierOffset_);
  }

  TrampolinePtr getArgumentsRectifierReturnAddr() const {
    return trampolineCode(argumentsRectifierReturnOffset_);
  }

  TrampolinePtr getInvalidationThunk() const {
    return trampolineCode(invalidatorOffset_);
  }

  EnterJitCode enterJit() const {
    return JS_DATA_TO_FUNC_PTR(EnterJitCode,
                               trampolineCode(enterJITOffset_).value);
  }

  TrampolinePtr preBarrier(MIRType type) const {
    switch (type) {
      case MIRType::Value:
        return trampolineCode(valuePreBarrierOffset_);
      case MIRType::String:
        return trampolineCode(stringPreBarrierOffset_);
      case MIRType::Object:
        return trampolineCode(objectPreBarrierOffset_);
      case MIRType::Shape:
        return trampolineCode(shapePreBarrierOffset_);
      case MIRType::ObjectGroup:
        return trampolineCode(objectGroupPreBarrierOffset_);
      default:
        MOZ_CRASH();
    }
  }

  TrampolinePtr mallocStub() const { return trampolineCode(mallocStubOffset_); }

  TrampolinePtr freeStub() const { return trampolineCode(freeStubOffset_); }

  TrampolinePtr lazyLinkStub() const {
    return trampolineCode(lazyLinkStubOffset_);
  }
  TrampolinePtr interpreterStub() const {
    return trampolineCode(interpreterStubOffset_);
  }

  bool hasJitcodeGlobalTable() const { return jitcodeGlobalTable_ != nullptr; }

  JitcodeGlobalTable* getJitcodeGlobalTable() {
    MOZ_ASSERT(hasJitcodeGlobalTable());
    return jitcodeGlobalTable_;
  }

  bool isProfilerInstrumentationEnabled(JSRuntime* rt) {
    return rt->geckoProfiler().enabled();
  }

  bool isOptimizationTrackingEnabled(ZoneGroup* group) {
    return isProfilerInstrumentationEnabled(group->runtime);
  }
};

class JitZoneGroup {
 public:
  enum BackedgeTarget { BackedgeLoopHeader, BackedgeInterruptCheck };

 private:
  // Whether patchable backedges currently jump to the loop header or the
  // interrupt check.
  ZoneGroupData<BackedgeTarget> backedgeTarget_;

  // List of all backedges in all Ion code. The backedge list is accessed
  // asynchronously when the active thread is paused and
  // preventBackedgePatching_ is false. Thus, the list must only be mutated
  // while preventBackedgePatching_ is true.
  ZoneGroupData<InlineList<PatchableBackedge>> backedgeList_;
  InlineList<PatchableBackedge>& backedgeList() { return backedgeList_.ref(); }

 public:
  explicit JitZoneGroup(ZoneGroup* group);

  BackedgeTarget backedgeTarget() const { return backedgeTarget_; }
  void addPatchableBackedge(JitRuntime* jrt, PatchableBackedge* backedge) {
    MOZ_ASSERT(jrt->preventBackedgePatching());
    backedgeList().pushFront(backedge);
  }
  void removePatchableBackedge(JitRuntime* jrt, PatchableBackedge* backedge) {
    MOZ_ASSERT(jrt->preventBackedgePatching());
    backedgeList().remove(backedge);
  }

  void patchIonBackedges(JSContext* cx, BackedgeTarget target);
};
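
// Illustrative sketch (not the engine's actual code): registering a backedge
// must happen under AutoPreventBackedgePatching, as asserted by
// addPatchableBackedge()/removePatchableBackedge() above; patchIonBackedges()
// later retargets every registered backedge to either its loop header or its
// interrupt check.
//
//   void registerBackedgeSketch(JSContext* cx, JitZoneGroup* jzg,
//                               PatchableBackedge* backedge) {
//     JitRuntime::AutoPreventBackedgePatching apbp(cx->runtime());
//     jzg->addPatchableBackedge(cx->runtime()->jitRuntime(), backedge);
//   }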

enum class CacheKind : uint8_t;
class CacheIRStubInfo;

enum class ICStubEngine : uint8_t {
  // Baseline IC, see SharedIC.h and BaselineIC.h.
  Baseline = 0,

  // Ion IC that reuses Baseline IC code, see SharedIC.h.
  IonSharedIC,

  // Ion IC, see IonIC.h.
  IonIC
};

struct CacheIRStubKey : public DefaultHasher<CacheIRStubKey> {
  struct Lookup {
    CacheKind kind;
    ICStubEngine engine;
    const uint8_t* code;
    uint32_t length;

    Lookup(CacheKind kind, ICStubEngine engine, const uint8_t* code,
           uint32_t length)
        : kind(kind), engine(engine), code(code), length(length) {}
  };

  static HashNumber hash(const Lookup& l);
  static bool match(const CacheIRStubKey& entry, const Lookup& l);

  UniquePtr<CacheIRStubInfo, JS::FreePolicy> stubInfo;

  explicit CacheIRStubKey(CacheIRStubInfo* info) : stubInfo(info) {}
  CacheIRStubKey(CacheIRStubKey&& other) : stubInfo(Move(other.stubInfo)) {}

  void operator=(CacheIRStubKey&& other) { stubInfo = Move(other.stubInfo); }
};
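
// Lookup-construction sketch (illustrative; the real callers live in the
// CacheIR stub-attach code): a stub's identity is the (kind, engine, CacheIR
// byte sequence) triple, so a Lookup is built from the emitted CacheIR bytes
// and used to probe the per-zone caches declared below. The buffer names are
// placeholders, not real identifiers.
//
//   CacheIRStubKey::Lookup lookup(kind, ICStubEngine::Baseline,
//                                 cacheIRBytes, cacheIRLength);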

template <typename Key>
struct IcStubCodeMapGCPolicy {
  static bool needsSweep(Key*, ReadBarrieredJitCode* value) {
    return IsAboutToBeFinalized(value);
  }
};

class JitZone {
  // Allocated space for optimized baseline stubs.
  OptimizedICStubSpace optimizedStubSpace_;
  // Allocated space for cached cfg.
  CFGSpace cfgSpace_;

  // Set of CacheIRStubInfo instances used by Ion stubs in this Zone.
  using IonCacheIRStubInfoSet =
      HashSet<CacheIRStubKey, CacheIRStubKey, SystemAllocPolicy>;
  IonCacheIRStubInfoSet ionCacheIRStubInfoSet_;

  // Map CacheIRStubKey to shared JitCode objects.
  using BaselineCacheIRStubCodeMap =
      GCHashMap<CacheIRStubKey, ReadBarrieredJitCode, CacheIRStubKey,
                SystemAllocPolicy, IcStubCodeMapGCPolicy<CacheIRStubKey>>;
  BaselineCacheIRStubCodeMap baselineCacheIRStubCodes_;

 public:
  MOZ_MUST_USE bool init(JSContext* cx);
  void sweep();

  void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                              size_t* jitZone, size_t* baselineStubsOptimized,
                              size_t* cachedCFG) const;

  OptimizedICStubSpace* optimizedStubSpace() { return &optimizedStubSpace_; }
  CFGSpace* cfgSpace() { return &cfgSpace_; }

  JitCode* getBaselineCacheIRStubCode(const CacheIRStubKey::Lookup& key,
                                      CacheIRStubInfo** stubInfo) {
    auto p = baselineCacheIRStubCodes_.lookup(key);
    if (p) {
      *stubInfo = p->key().stubInfo.get();
      return p->value();
    }
    *stubInfo = nullptr;
    return nullptr;
  }
  MOZ_MUST_USE bool putBaselineCacheIRStubCode(
      const CacheIRStubKey::Lookup& lookup, CacheIRStubKey& key,
      JitCode* stubCode) {
    auto p = baselineCacheIRStubCodes_.lookupForAdd(lookup);
    MOZ_ASSERT(!p);
    return baselineCacheIRStubCodes_.add(p, Move(key), stubCode);
  }
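
  // Caching-pattern sketch (illustrative; the real caller is the Baseline
  // CacheIR stub-attach path): consult the cache first and only compile and
  // register new stub code on a miss. putBaselineCacheIRStubCode() asserts
  // the key is absent, so a successful get must short-circuit the put.
  //
  //   CacheIRStubInfo* stubInfo = nullptr;
  //   if (JitCode* code = zone->getBaselineCacheIRStubCode(lookup, &stubInfo))
  //     return code;                 // cache hit: reuse the shared stub code
  //   // ... compile `newStubCode` and build `key` (hypothetical locals) ...
  //   if (!zone->putBaselineCacheIRStubCode(lookup, key, newStubCode))
  //     return nullptr;              // OOM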

  CacheIRStubInfo* getIonCacheIRStubInfo(const CacheIRStubKey::Lookup& key) {
    if (!ionCacheIRStubInfoSet_.initialized()) return nullptr;
    IonCacheIRStubInfoSet::Ptr p = ionCacheIRStubInfoSet_.lookup(key);
    return p ? p->stubInfo.get() : nullptr;
  }
  MOZ_MUST_USE bool putIonCacheIRStubInfo(const CacheIRStubKey::Lookup& lookup,
                                          CacheIRStubKey& key) {
    if (!ionCacheIRStubInfoSet_.initialized() && !ionCacheIRStubInfoSet_.init())
      return false;
    IonCacheIRStubInfoSet::AddPtr p =
        ionCacheIRStubInfoSet_.lookupForAdd(lookup);
    MOZ_ASSERT(!p);
    return ionCacheIRStubInfoSet_.add(p, Move(key));
  }
  void purgeIonCacheIRStubInfo() { ionCacheIRStubInfoSet_.finish(); }
};

enum class BailoutReturnStub {
  GetProp,
  GetPropSuper,
  SetProp,
  Call,
  New,
  Count
};

class JitCompartment {
  friend class JitActivation;

  // Map ICStub keys to ICStub shared code objects.
  using ICStubCodeMap =
      GCHashMap<uint32_t, ReadBarrieredJitCode, DefaultHasher<uint32_t>,
                ZoneAllocPolicy, IcStubCodeMapGCPolicy<uint32_t>>;
  ICStubCodeMap* stubCodes_;

  // Keep track of the offset into various baseline stubs' code at the return
  // point from the called script.
  struct BailoutReturnStubInfo {
    void* addr;
    uint32_t key;

    BailoutReturnStubInfo() : addr(nullptr), key(0) {}
    BailoutReturnStubInfo(void* addr_, uint32_t key_)
        : addr(addr_), key(key_) {}
  };
  mozilla::EnumeratedArray<BailoutReturnStub, BailoutReturnStub::Count,
                           BailoutReturnStubInfo>
      bailoutReturnStubInfo_;

  // The JitCompartment stores stubs to concatenate strings inline and perform
  // RegExp calls inline.  These bake in zone and compartment specific
  // pointers and can't be stored in JitRuntime.
  //
  // These are weak pointers, but they can be accessed during off-thread Ion
  // compilation and therefore can't use the usual read barrier. Instead, we
  // record which stubs have been read and perform the appropriate barriers in
  // CodeGenerator::link().

  enum StubIndex : uint32_t {
    StringConcat = 0,
    RegExpMatcher,
    RegExpSearcher,
    RegExpTester,
    Count
  };

  mozilla::EnumeratedArray<StubIndex, StubIndex::Count, ReadBarrieredJitCode>
      stubs_;

  // The same approach is taken for SIMD template objects.

  mozilla::EnumeratedArray<SimdType, SimdType::Count, ReadBarrieredObject>
      simdTemplateObjects_;

  JitCode* generateStringConcatStub(JSContext* cx);
  JitCode* generateRegExpMatcherStub(JSContext* cx);
  JitCode* generateRegExpSearcherStub(JSContext* cx);
  JitCode* generateRegExpTesterStub(JSContext* cx);

  JitCode* getStubNoBarrier(StubIndex stub,
                            uint32_t* requiredBarriersOut) const {
    MOZ_ASSERT(CurrentThreadIsIonCompiling());
    *requiredBarriersOut |= 1 << uint32_t(stub);
    return stubs_[stub].unbarrieredGet();
  }

 public:
  JSObject* getSimdTemplateObjectFor(JSContext* cx,
                                     Handle<SimdTypeDescr*> descr) {
    ReadBarrieredObject& tpl = simdTemplateObjects_[descr->type()];
    if (!tpl) tpl.set(TypedObject::createZeroed(cx, descr, 0, gc::TenuredHeap));
    return tpl.get();
  }

  JSObject* maybeGetSimdTemplateObjectFor(SimdType type) const {
    // This function is used by the Eager Simd Unbox phase, which can run
    // off-thread, so we cannot use the usual read barrier. For more
    // information, see the comment above
    // CodeGenerator::simdRefreshTemplatesDuringLink_.

    MOZ_ASSERT(CurrentThreadIsIonCompiling());
    return simdTemplateObjects_[type].unbarrieredGet();
  }

  JitCode* getStubCode(uint32_t key) {
    ICStubCodeMap::Ptr p = stubCodes_->lookup(key);
    if (p) return p->value();
    return nullptr;
  }
  MOZ_MUST_USE bool putStubCode(JSContext* cx, uint32_t key,
                                Handle<JitCode*> stubCode) {
    MOZ_ASSERT(stubCode);
    if (!stubCodes_->putNew(key, stubCode.get())) {
      ReportOutOfMemory(cx);
      return false;
    }
    return true;
  }
  void initBailoutReturnAddr(void* addr, uint32_t key, BailoutReturnStub kind) {
    MOZ_ASSERT(bailoutReturnStubInfo_[kind].addr == nullptr);
    bailoutReturnStubInfo_[kind] = BailoutReturnStubInfo{addr, key};
  }
  void* bailoutReturnAddr(BailoutReturnStub kind) {
    MOZ_ASSERT(bailoutReturnStubInfo_[kind].addr);
    return bailoutReturnStubInfo_[kind].addr;
  }

  JitCompartment();
  ~JitCompartment();

  MOZ_MUST_USE bool initialize(JSContext* cx);

  // Initialize code stubs only used by Ion, not Baseline.
  MOZ_MUST_USE bool ensureIonStubsExist(JSContext* cx) {
    if (stubs_[StringConcat]) return true;
    stubs_[StringConcat] = generateStringConcatStub(cx);
    return stubs_[StringConcat];
  }
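
  // Note on the lazy-generation pattern above (also used by the ensureRegExp*
  // methods below): the stub is generated on first use and cached in stubs_;
  // if generation fails, the null result converts to false so the
  // MOZ_MUST_USE bool propagates the OOM. Caller sketch, for illustration
  // only:
  //
  //   if (!cx->compartment()->jitCompartment()->ensureIonStubsExist(cx))
  //     return false;  // failed to generate the string-concat stub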

  void sweep(JSCompartment* compartment);

  void discardStubs() {
    for (ReadBarrieredJitCode& stubRef : stubs_) stubRef = nullptr;
  }

  JitCode* stringConcatStubNoBarrier(uint32_t* requiredBarriersOut) const {
    return getStubNoBarrier(StringConcat, requiredBarriersOut);
  }

  JitCode* regExpMatcherStubNoBarrier(uint32_t* requiredBarriersOut) const {
    return getStubNoBarrier(RegExpMatcher, requiredBarriersOut);
  }

  MOZ_MUST_USE bool ensureRegExpMatcherStubExists(JSContext* cx) {
    if (stubs_[RegExpMatcher]) return true;
    stubs_[RegExpMatcher] = generateRegExpMatcherStub(cx);
    return stubs_[RegExpMatcher];
  }

  JitCode* regExpSearcherStubNoBarrier(uint32_t* requiredBarriersOut) const {
    return getStubNoBarrier(RegExpSearcher, requiredBarriersOut);
  }

  MOZ_MUST_USE bool ensureRegExpSearcherStubExists(JSContext* cx) {
    if (stubs_[RegExpSearcher]) return true;
    stubs_[RegExpSearcher] = generateRegExpSearcherStub(cx);
    return stubs_[RegExpSearcher];
  }

  JitCode* regExpTesterStubNoBarrier(uint32_t* requiredBarriersOut) const {
    return getStubNoBarrier(RegExpTester, requiredBarriersOut);
  }

  MOZ_MUST_USE bool ensureRegExpTesterStubExists(JSContext* cx) {
    if (stubs_[RegExpTester]) return true;
    stubs_[RegExpTester] = generateRegExpTesterStub(cx);
    return stubs_[RegExpTester];
  }

  // Perform the necessary read barriers on stubs and SIMD template objects
  // described by the bitmasks passed in. These methods can only be called
  // from the active thread.
  //
  // The stub and template object pointers must still be valid by the time
  // these methods are called. This is arranged by cancelling off-thread Ion
  // compilation at the start of GC and at the start of sweeping.
  void performStubReadBarriers(uint32_t stubsToBarrier) const;
  void performSIMDTemplateReadBarriers(uint32_t simdTemplatesToBarrier) const;
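
  // Barrier-recording sketch (illustrative; per the comment above stubs_, the
  // real consumer is CodeGenerator::link()): the off-thread compiler reads
  // stubs through the *NoBarrier accessors, which record one bit per stub
  // read, and the active thread later performs the deferred barriers for
  // exactly those stubs.
  //
  //   // Off-thread, during Ion compilation:
  //   uint32_t stubsToBarrier = 0;
  //   JitCode* matcher =
  //       jitCompartment->regExpMatcherStubNoBarrier(&stubsToBarrier);
  //   // ... bake `matcher` into the generated code ...
  //
  //   // Active thread, at link time:
  //   jitCompartment->performStubReadBarriers(stubsToBarrier);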

  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;

  bool stringsCanBeInNursery;
};

// Called from JSCompartment::discardJitCode().
void InvalidateAll(FreeOp* fop, JS::Zone* zone);
void FinishInvalidation(FreeOp* fop, JSScript* script);

// On Windows systems, really large frames need to be incrementally touched.
// The following constant defines the minimum increment of the touch.
#ifdef XP_WIN
const unsigned WINDOWS_BIG_FRAME_TOUCH_INCREMENT = 4096 - 1;
#endif

// If NON_WRITABLE_JIT_CODE is enabled, this class will ensure
// JIT code is writable (has RW permissions) in its scope.
// Otherwise it's a no-op.
class MOZ_STACK_CLASS AutoWritableJitCode {
  // Backedge patching from the signal handler will change memory protection
  // flags, so don't allow it in an AutoWritableJitCode scope.
  JitRuntime::AutoPreventBackedgePatching preventPatching_;
  JSRuntime* rt_;
  void* addr_;
  size_t size_;

 public:
  AutoWritableJitCode(JSRuntime* rt, void* addr, size_t size)
      : preventPatching_(rt), rt_(rt), addr_(addr), size_(size) {
    rt_->toggleAutoWritableJitCodeActive(true);
    if (!ExecutableAllocator::makeWritable(addr_, size_)) MOZ_CRASH();
  }
  AutoWritableJitCode(void* addr, size_t size)
      : AutoWritableJitCode(TlsContext.get()->runtime(), addr, size) {}
  explicit AutoWritableJitCode(JitCode* code)
      : AutoWritableJitCode(code->runtimeFromActiveCooperatingThread(),
                            code->raw(), code->bufferSize()) {}
  ~AutoWritableJitCode() {
    if (!ExecutableAllocator::makeExecutable(addr_, size_)) MOZ_CRASH();
    rt_->toggleAutoWritableJitCodeActive(false);
  }
};
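
// Usage sketch (illustrative only): when non-writable JIT code is in effect
// (see the class comment above), the RAII guard makes the target range
// writable in its constructor and re-protects it in its destructor, so any
// in-place patching of JIT code is scoped to the guard's lifetime.
//
//   {
//     AutoWritableJitCode awjc(code);  // `code` is the JitCode* to patch
//     // ... patch code->raw() while the range is writable ...
//   }  // the range is made executable (non-writable) again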

class MOZ_STACK_CLASS MaybeAutoWritableJitCode {
  mozilla::Maybe<AutoWritableJitCode> awjc_;

 public:
  MaybeAutoWritableJitCode(void* addr, size_t size, ReprotectCode reprotect) {
    if (reprotect) awjc_.emplace(addr, size);
  }
  MaybeAutoWritableJitCode(JitCode* code, ReprotectCode reprotect) {
    if (reprotect) awjc_.emplace(code);
  }
};

}  // namespace jit
}  // namespace js

#endif /* jit_JitCompartment_h */