/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef vm_Activation_h
#define vm_Activation_h

#include "mozilla/Assertions.h"  // MOZ_ASSERT
#include "mozilla/Attributes.h"  // MOZ_RAII
#include "mozilla/Maybe.h"       // mozilla::Maybe
#include "mozilla/Variant.h"     // mozilla::Variant

#include <stddef.h>  // size_t
#include <stdint.h>  // uint8_t, uint32_t

#include "jstypes.h"  // JS_PUBLIC_API

#include "jit/CalleeToken.h"  // js::jit::CalleeToken
#include "js/CallArgs.h"      // JS::CallArgs
#include "js/RootingAPI.h"    // JS::Handle, JS::Rooted
#include "js/TypeDecls.h"     // jsbytecode
#include "js/UniquePtr.h"     // js::UniquePtr
#include "js/Value.h"         // JS::Value
#include "vm/SavedFrame.h"    // js::SavedFrame
#include "vm/Stack.h"         // js::InterpreterRegs

struct JS_PUBLIC_API JSContext;

class JSFunction;
class JSObject;
class JSScript;

namespace JS {

class JS_PUBLIC_API Compartment;

namespace dbg {
class JS_PUBLIC_API AutoEntryMonitor;
}  // namespace dbg

}  // namespace JS

namespace js {

class InterpreterActivation;
class InterpreterFrame;

namespace jit {
class JitActivation;
class JitFrameLayout;
}  // namespace jit

// This class is separate from Activation because it calls Compartment::wrap(),
// which can GC and walk the stack. It's not safe to do that within the
// JitActivation constructor.
class MOZ_RAII ActivationEntryMonitor {
  JSContext* cx_;

  // The entry point monitor that was set on cx_->runtime() when this
  // ActivationEntryMonitor was created.
  JS::dbg::AutoEntryMonitor* entryMonitor_;

  explicit inline ActivationEntryMonitor(JSContext* cx);

  ActivationEntryMonitor(const ActivationEntryMonitor& other) = delete;
  void operator=(const ActivationEntryMonitor& other) = delete;

  void init(JSContext* cx, jit::CalleeToken entryToken);
  void init(JSContext* cx, InterpreterFrame* entryFrame);

  JS::Value asyncStack(JSContext* cx);

 public:
  inline ActivationEntryMonitor(JSContext* cx, InterpreterFrame* entryFrame);
  inline ActivationEntryMonitor(JSContext* cx, jit::CalleeToken entryToken);
  inline ~ActivationEntryMonitor();
};

// [SMDOC] LiveSavedFrameCache: SavedFrame caching to minimize stack walking
//
// Since each SavedFrame object includes a 'parent' pointer to the SavedFrame
// for its caller, if we could easily find the right SavedFrame for a given
// stack frame, we wouldn't need to walk the rest of the stack. Traversing deep
// stacks can be expensive, and when we're profiling or instrumenting code, we
// may want to capture JavaScript stacks frequently, so such cases would benefit
// if we could avoid walking the entire stack.
//
// We could have a cache mapping frame addresses to their SavedFrame objects,
// but invalidating its entries would be a challenge. Popping a stack frame is
// extremely performance-sensitive, and SpiderMonkey stack frames can be OSR'd,
// thrown, rematerialized, and perhaps meet other fates; we would rather our
// cache not depend on handling so many tricky cases.
//
// It turns out that we can keep the cache accurate by reserving a single bit in
// the stack frame, which must be clear on any newly pushed frame. When we
// insert an entry into the cache mapping a given frame address to its
// SavedFrame, we set the bit in the frame. Then, we take care to probe the
// cache only for frames whose bit is set; the bit tells us that the frame has
// never left the stack, so its cache entry must be accurate, at least about
// which function the frame is executing (the line may have changed; more about
// that below). The code refers to this bit as the 'hasCachedSavedFrame' flag.
//
// We could manage such a cache by replacing least-recently used entries, but
// we can do better than that: the cache can be a stack, of which we need to
// examine only the entries at the top.
//
// First, observe that stacks are walked from the youngest frame to the oldest,
// but SavedFrame chains are built from oldest to youngest, to ensure common
// tails are shared. This means that capturing a stack is necessarily a
// two-phase process: walk the stack, and then build the SavedFrames.
//
// Naturally, the first time we capture the stack, the cache is empty, and we
// must traverse the entire stack. As we build each SavedFrame, we push an entry
// associating the frame's address to its SavedFrame on the cache, and set the
// frame's bit. At the end, every frame has its bit set and an entry in the
// cache.
//
// Then the program runs some more. Some, none, or all of the frames are popped.
// Any new frames are pushed with their bit clear. Any frame with its bit set
// has never left the stack. The cache is left untouched.
//
// For the next capture, we walk the stack up to the first frame with its bit
// set, if there is one. Call it F; it must have a cache entry. We pop entries
// from the cache - all invalid, because they are above F's entry, and hence
// younger - until we find the entry matching F's address. Since F's bit is set,
// we know it never left the stack, and hence that no younger frame could have
// had a colliding address. And since the frame's bit was set when we pushed the
// cache entry, we know the entry is still valid.
//
// F's cache entry's SavedFrame covers the rest of the stack, so we don't need
// to walk the stack any further. Now we begin building SavedFrame objects for
// the new frames, pushing cache entries, and setting bits on the frames. By the
// end, the cache again covers the full stack, and every frame's bit is set.
//
// If we walk the stack to the end, and find no frame with its bit set, then the
// entire cache is invalid. At this point, it must be emptied, so that the new
// entries we are about to push are the only frames in the cache.
//
// For example, suppose we have the following stack (let 'A > B' mean "A called
// B", so the frames are listed oldest first):
//
//     P  > Q  > R  > S          Initial stack, bits not set.
//     P* > Q* > R* > S*         Capture a SavedFrame stack, set bits.
//                               The cache now holds: P > Q > R > S.
//     P* > Q* > R*              Return from S.
//     P* > Q*                   Return from R.
//     P* > Q* > T  > U          Call T and U. New frames have clear bits.
//
// If we capture the stack now, the cache still holds:
//
//     P  > Q  > R  > S
//
// As we traverse the stack, we'll cross U and T, and then find Q with its bit
// set. We pop entries from the cache until we find the entry for Q; this
// removes entries R and S, which were indeed invalid. In Q's cache entry, we
// find the SavedFrame representing the stack P > Q. Now we build SavedFrames
// for the new portion of the stack, pushing an entry for T and setting the bit
// on the frame, and then doing the same for U. In the end, the call stack again
// has bits set on all its frames:
//
//     P* > Q* > T* > U*         All frames are now in the cache.
//
// And the cache again holds entries for the entire stack:
//
//     P  > Q  > T  > U
//
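// In rough pseudocode, one capture that consults the cache therefore looks
// like this (an illustrative sketch, not the real capture code; Frame,
// buildSavedFrame, and Reversed are placeholder names):
//
//     // Phase 1: walk from youngest to oldest until a frame's bit is set.
//     Vector<Frame> newFrames;
//     Frame f = youngestFrame;
//     while (f && !f.hasCachedSavedFrame()) {
//       newFrames.append(f);
//       f = f.older();
//     }
//
//     // Phase 2: take the cached parent (popping entries younger than f),
//     // or clear the stale cache if no frame had its bit set. Then build
//     // SavedFrames oldest-first, caching each one and setting its bit.
//     SavedFrame* parent = f ? cache.find(f) : (cache.clear(), nullptr);
//     for (Frame g : Reversed(newFrames)) {
//       parent = buildSavedFrame(g, parent);
//       cache.insert(g, g.pc(), parent);
//       g.setHasCachedSavedFrame();
//     }
//
// (This sketch glosses over the pc-mismatch and cross-compartment cases
// described under "Details" below.)
//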
// Details:
//
// - When we find a cache entry whose frame address matches our frame F, we know
//   that F has never left the stack, but it may certainly be the case that
//   execution took place in that frame, and that the current source position
//   within F's function has changed. This means that the entry's SavedFrame,
//   which records the source line and column as well as the function, is not
//   correct. To detect this case, when we push a cache entry, we record the
//   frame's pc. When consulting the cache, if a frame's address matches but its
//   pc does not, then we pop the cache entry, clear the frame's bit, and
//   continue walking the stack. The next stack frame will definitely hit: since
//   its callee frame never left the stack, the calling frame never got the
//   chance to execute.
//
// - Generators, at least conceptually, have long-lived stack frames that
//   disappear from the stack when the generator yields, and reappear on the
//   stack when the generator's 'next' method is called. When a generator's
//   frame is placed again atop the stack, its bit must be cleared - for the
//   purposes of the cache, treating the frame as a new frame - to respect the
//   invariants we used to justify the algorithm above. Async function
//   activations usually appear atop empty stacks, since they are invoked as a
//   promise callback, but the same rule applies.
//
// - SpiderMonkey has many types of stack frames, and not all have a place to
//   store a bit indicating a cached SavedFrame. But as long as we don't create
//   cache entries for frames we can't mark, simply omitting them from the cache
//   is harmless. Uncacheable frame types include inlined Ion frames and
//   non-Debug wasm frames. The LiveSavedFrameCache::FramePtr type represents
//   only pointers to frames that can be cached, so if you have a FramePtr, you
//   don't need to further check the frame for cacheability. FramePtr provides
//   access to the hasCachedSavedFrame bit.
//
// - We actually break up the cache into one cache per Activation. Popping an
//   activation invalidates all its cache entries, simply by freeing the cache
//   altogether.
//
// - The entire chain of SavedFrames for a given stack capture is created in the
//   compartment of the code that requested the capture, *not* in that of the
//   frames it represents, so in general, different compartments may have
//   different SavedFrame objects representing the same actual stack frame. The
//   LiveSavedFrameCache simply records whichever SavedFrames were used in the
//   most recent captures. When we find a cache hit, we check the entry's
//   SavedFrame's compartment against the current compartment; if they do not
//   match, we clear the entire cache.
//
//   This means that it is not always true that, if a frame's
//   hasCachedSavedFrame bit is set, it must have an entry in the cache. The
//   actual invariant is: either the cache is completely empty, or the frames'
//   bits are trustworthy. This invariant holds even though capture can be
//   interrupted at many places by OOM failures. Clearing the cache is a single,
//   uninterruptible step. When we try to look up a frame whose bit is set and
//   find an empty cache, we clear the frame's bit. And we only add the first
//   frame to an empty cache once we've walked the stack all the way, so we know
//   that all frames' bits are cleared by that point.
//
// - When the Debugger API evaluates an expression in some frame (the 'target
//   frame'), it's SpiderMonkey's convention that the target frame be treated as
//   the parent of the eval frame. In reality, of course, the eval frame is
//   pushed on the top of the stack like any other frame, but stack captures
//   simply jump straight over the intervening frames, so that the '.parent'
//   property of a SavedFrame for the eval is the SavedFrame for the target.
//   This is arranged by giving the eval frame an 'evalInFramePrev' link
//   pointing to the target, which an ordinary FrameIter will notice and
//   respect.
//
//   If the LiveSavedFrameCache were presented with stack traversals that
//   skipped frames in this way, it would cause havoc. First, with no debugger
//   eval frames present, capture the stack, populating the cache. Then push a
//   debugger eval frame and capture again; the skipped frames appear to be
//   absent from the stack. Now pop the debugger eval frame, and capture a third
//   time: the no-longer-skipped frames seem to reappear on the stack, with
//   their cached bits still set.
//
//   The LiveSavedFrameCache assumes that the stack it sees is used in a
//   stack-like fashion: if a frame has its bit set, it has never left the
//   stack. To support this assumption, when the cache is in use, we do not skip
//   the frames between a debugger eval frame and its target; we always traverse
//   the entire stack, invalidating and populating the cache in the usual way.
//   Instead, when we construct a SavedFrame for a debugger eval frame, we
//   select the appropriate parent at that point: rather than the next-older
//   frame, we find the SavedFrame for the eval's target frame. The skip appears
//   in the SavedFrame chains, even as the traversal covers all the frames.
//
// - Rematerialized frames (see ../jit/RematerializedFrame.h) are always created
//   with their hasCachedSavedFrame bits clear: although there may be extant
//   SavedFrames built from the original IonMonkey frame, the Rematerialized
//   frames will not have cache entries for them until they are traversed in a
//   capture themselves.
//
//   This means that, oddly, it is not always true that, once we reach a frame
//   with its hasCachedSavedFrame bit set, all its parents will have the bit set
//   as well. However, clear bits under younger set bits will only occur on
//   Rematerialized frames.
class LiveSavedFrameCache {
 public:
  // The address of a live frame for which we can cache SavedFrames: it has a
  // 'hasCachedSavedFrame' bit we can examine and set, and can be converted to
  // a Key to index the cache.
  class FramePtr {
    // We use jit::CommonFrameLayout for both Baseline frames and Ion
    // physical frames.
    using Ptr = mozilla::Variant<InterpreterFrame*, jit::CommonFrameLayout*,
                                 jit::RematerializedFrame*, wasm::DebugFrame*>;

    Ptr ptr;

    template <typename Frame>
    explicit FramePtr(Frame ptr) : ptr(ptr) {}

    struct HasCachedMatcher;
    struct SetHasCachedMatcher;
    struct ClearHasCachedMatcher;

   public:
    // If iter's frame is of a type that can be cached, construct a FramePtr
    // for its frame. Otherwise, return Nothing.
    static inline mozilla::Maybe<FramePtr> create(const FrameIter& iter);

    inline bool hasCachedSavedFrame() const;
    inline void setHasCachedSavedFrame();
    inline void clearHasCachedSavedFrame();

    // Return true if this FramePtr refers to an interpreter frame.
    inline bool isInterpreterFrame() const {
      return ptr.is<InterpreterFrame*>();
    }

    // If this FramePtr is an interpreter frame, return a pointer to it.
    inline InterpreterFrame& asInterpreterFrame() const {
      return *ptr.as<InterpreterFrame*>();
    }

    // Return true if this FramePtr refers to a rematerialized frame.
    inline bool isRematerializedFrame() const {
      return ptr.is<jit::RematerializedFrame*>();
    }

    bool operator==(const FramePtr& rhs) const { return rhs.ptr == this->ptr; }
    bool operator!=(const FramePtr& rhs) const { return !(rhs == *this); }
  };

 private:
  // A key in the cache: the address of a frame, live or dead, for which we
  // can cache SavedFrames. Since the pointer may not be live, the only
  // operation this type permits is comparison.
  class Key {
    FramePtr framePtr;

   public:
    MOZ_IMPLICIT Key(const FramePtr& framePtr) : framePtr(framePtr) {}

    bool operator==(const Key& rhs) const {
      return rhs.framePtr == this->framePtr;
    }
    bool operator!=(const Key& rhs) const { return !(rhs == *this); }
  };

  struct Entry {
    const Key key;
    const jsbytecode* pc;
    HeapPtr<SavedFrame*> savedFrame;

    Entry(const Key& key, const jsbytecode* pc, SavedFrame* savedFrame)
        : key(key), pc(pc), savedFrame(savedFrame) {}
  };

  using EntryVector = Vector<Entry, 0, SystemAllocPolicy>;
  EntryVector* frames;

  LiveSavedFrameCache(const LiveSavedFrameCache&) = delete;
  LiveSavedFrameCache& operator=(const LiveSavedFrameCache&) = delete;

 public:
  explicit LiveSavedFrameCache() : frames(nullptr) {}

  LiveSavedFrameCache(LiveSavedFrameCache&& rhs) : frames(rhs.frames) {
    MOZ_ASSERT(this != &rhs, "self-move disallowed");
    rhs.frames = nullptr;
  }

  ~LiveSavedFrameCache() {
    if (frames) {
      js_delete(frames);
      frames = nullptr;
    }
  }

  bool initialized() const { return !!frames; }
  bool init(JSContext* cx) {
    frames = js_new<EntryVector>();
    if (!frames) {
      JS_ReportOutOfMemory(cx);
      return false;
    }
    return true;
  }

  void trace(JSTracer* trc);

  // Set |frame| to the cached SavedFrame corresponding to |framePtr| at |pc|.
  // |framePtr|'s hasCachedSavedFrame bit must be set. Remove all cache
  // entries for frames younger than that one.
  //
  // This may set |frame| to nullptr if |pc| is different from the pc supplied
  // when the cache entry was inserted. In this case, the cached SavedFrame
  // (probably) has the wrong source position. Entries for younger frames are
  // still removed. The next frame, if any, will be a cache hit.
  //
  // This may also set |frame| to nullptr if the cache was populated with
  // SavedFrame objects for a different compartment than cx's current
  // compartment. In this case, the entire cache is flushed.
  //
  // (An illustrative usage sketch follows this class definition.)
  void find(JSContext* cx, FramePtr& framePtr, const jsbytecode* pc,
            MutableHandleSavedFrame frame) const;

  // Search the cache for a frame matching |framePtr|, without removing any
  // entries. Return the matching saved frame, or nullptr if none is found.
  // This is used for resolving |evalInFramePrev| links.
  void findWithoutInvalidation(const FramePtr& framePtr,
                               MutableHandleSavedFrame frame) const;

  // Push a cache entry mapping |framePtr| and |pc| to |savedFrame| on the top
  // of the cache's stack. You must insert entries for frames from oldest to
  // youngest. They must all be younger than the frame that the |find| method
  // found a hit for; or you must have cleared the entire cache with the
  // |clear| method.
  bool insert(JSContext* cx, FramePtr&& framePtr, const jsbytecode* pc,
              HandleSavedFrame savedFrame);

  // Remove all entries from the cache.
  void clear() {
    if (frames) frames->clear();
  }
};
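
// An illustrative sketch of how a stack walker might use |find| and |insert|
// above. This is not the real capture code: |iter|, |cache|, |pc|, and
// |savedFrame| are assumed to be set up elsewhere, and error handling is
// omitted.
//
//   JS::Rooted<SavedFrame*> cached(cx);
//   if (auto framePtr = LiveSavedFrameCache::FramePtr::create(iter)) {
//     if (framePtr->hasCachedSavedFrame()) {
//       cache->find(cx, *framePtr, iter.pc(), &cached);
//       if (cached) {
//         // Hit: |cached| describes this frame and everything older, so the
//         // walk can stop here.
//       } else {
//         // The pc changed since the entry was made, or the cache held
//         // SavedFrames from another compartment; keep walking.
//       }
//     }
//     // Later, when building SavedFrames oldest-first:
//     //   if (!cache->insert(cx, std::move(*framePtr), pc, savedFrame))
//     //     return false;
//     //   framePtr->setHasCachedSavedFrame();
//   }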

static_assert(
    sizeof(LiveSavedFrameCache) == sizeof(uintptr_t),
    "Every js::Activation has a LiveSavedFrameCache, so we need to be pretty "
    "careful about avoiding bloat. If you're adding members to "
    "LiveSavedFrameCache, maybe you should consider figuring out a way to "
    "make js::Activation have a LiveSavedFrameCache* instead of a "
    "Rooted<LiveSavedFrameCache>.");

class Activation {
 protected:
  JSContext* cx_;
  JS::Compartment* compartment_;
  Activation* prev_;
  Activation* prevProfiling_;

  // Counter incremented by JS::HideScriptedCaller and decremented by
  // JS::UnhideScriptedCaller. If > 0 for the top activation,
  // DescribeScriptedCaller will return null instead of querying that
  // activation, which should prompt the caller to consult embedding-specific
  // data structures instead.
  size_t hideScriptedCallerCount_;

  // The cache of SavedFrame objects we have already captured when walking
  // this activation's stack.
  JS::Rooted<LiveSavedFrameCache> frameCache_;

  // Youngest saved frame of an async stack that will be iterated during stack
  // capture in place of the actual stack of previous activations. Note that
  // the stack of this activation is captured entirely before this is used.
  //
  // Usually this is nullptr, meaning that normal stack capture will occur.
  // When this is set, the stack of any previous activation is ignored.
  JS::Rooted<SavedFrame*> asyncStack_;

  // Value of asyncCause to be attached to asyncStack_.
  const char* asyncCause_;

  // True if the async call was explicitly requested, e.g. via
  // callFunctionWithAsyncStack.
  bool asyncCallIsExplicit_;

  enum Kind { Interpreter, Jit };
  Kind kind_;

  inline Activation(JSContext* cx, Kind kind);
  inline ~Activation();

 public:
  JSContext* cx() const { return cx_; }
  JS::Compartment* compartment() const { return compartment_; }
  Activation* prev() const { return prev_; }
  Activation* prevProfiling() const { return prevProfiling_; }
  inline Activation* mostRecentProfiling();

  bool isInterpreter() const { return kind_ == Interpreter; }
  bool isJit() const { return kind_ == Jit; }
  inline bool hasWasmExitFP() const;

  inline bool isProfiling() const;
  void registerProfiling();
  void unregisterProfiling();

  InterpreterActivation* asInterpreter() const {
    MOZ_ASSERT(isInterpreter());
    return (InterpreterActivation*)this;
  }
  jit::JitActivation* asJit() const {
    MOZ_ASSERT(isJit());
    return (jit::JitActivation*)this;
  }

  void hideScriptedCaller() { hideScriptedCallerCount_++; }
  void unhideScriptedCaller() {
    MOZ_ASSERT(hideScriptedCallerCount_ > 0);
    hideScriptedCallerCount_--;
  }
  bool scriptedCallerIsHidden() const { return hideScriptedCallerCount_ > 0; }

  SavedFrame* asyncStack() { return asyncStack_; }

  const char* asyncCause() const { return asyncCause_; }

  bool asyncCallIsExplicit() const { return asyncCallIsExplicit_; }

  inline LiveSavedFrameCache* getLiveSavedFrameCache(JSContext* cx);
  void clearLiveSavedFrameCache() { frameCache_.get().clear(); }

 private:
  Activation(const Activation& other) = delete;
  void operator=(const Activation& other) = delete;
};

// This variable holds a special opcode value which is greater than all normal
// opcodes, and is chosen such that the bitwise or of this value with any
// opcode is this value.
constexpr jsbytecode EnableInterruptsPseudoOpcode = -1;

static_assert(EnableInterruptsPseudoOpcode >= JSOP_LIMIT,
              "EnableInterruptsPseudoOpcode must be greater than any opcode");
static_assert(
    EnableInterruptsPseudoOpcode == jsbytecode(-1),
    "EnableInterruptsPseudoOpcode must be the maximum jsbytecode value");
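
// Since jsbytecode is an unsigned byte type, EnableInterruptsPseudoOpcode is
// 0xFF, so for any opcode value |op|:
//
//   (op | EnableInterruptsPseudoOpcode) == EnableInterruptsPseudoOpcode
//
// OR-ing an activation's opMask() into each fetched opcode therefore either
// leaves the opcode unchanged (mask 0) or forces dispatch to the
// pseudo-opcode, whose handler performs the interrupt check; see js::Interpret.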

class InterpreterFrameIterator;
class RunState;

class InterpreterActivation : public Activation {
  friend class js::InterpreterFrameIterator;

  InterpreterRegs regs_;
  InterpreterFrame* entryFrame_;
  size_t opMask_;  // For debugger interrupts, see js::Interpret.

#ifdef DEBUG
  size_t oldFrameCount_;
#endif

 public:
  inline InterpreterActivation(RunState& state, JSContext* cx,
                               InterpreterFrame* entryFrame);
  inline ~InterpreterActivation();

  inline bool pushInlineFrame(const JS::CallArgs& args,
                              JS::Handle<JSScript*> script,
                              MaybeConstruct constructing);
  inline void popInlineFrame(InterpreterFrame* frame);

  inline bool resumeGeneratorFrame(JS::Handle<JSFunction*> callee,
                                   JS::Handle<JSObject*> envChain);

  InterpreterFrame* current() const { return regs_.fp(); }
  InterpreterRegs& regs() { return regs_; }
  InterpreterFrame* entryFrame() const { return entryFrame_; }
  size_t opMask() const { return opMask_; }

  bool isProfiling() const { return false; }

  // If this js::Interpret frame is running |script|, enable interrupts.
  void enableInterruptsIfRunning(JSScript* script) {
    if (regs_.fp()->script() == script) {
      enableInterruptsUnconditionally();
    }
  }
  void enableInterruptsUnconditionally() {
    opMask_ = EnableInterruptsPseudoOpcode;
  }
  void clearInterruptsMask() { opMask_ = 0; }
};

// Iterates over a thread's activation list.
class ActivationIterator {
 protected:
  Activation* activation_;

 public:
  explicit ActivationIterator(JSContext* cx);

  ActivationIterator& operator++();

  Activation* operator->() const { return activation_; }
  Activation* activation() const { return activation_; }
  bool done() const { return activation_ == nullptr; }
};
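
// A minimal usage sketch (illustrative only), assuming |cx| is a valid
// JSContext for the current thread:
//
//   for (ActivationIterator iter(cx); !iter.done(); ++iter) {
//     if (iter->isInterpreter()) {
//       // e.g. iter.activation()->asInterpreter()->entryFrame() ...
//     }
//   }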

}  // namespace js

#endif  // vm_Activation_h