1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2  * vim: set ts=8 sts=4 et sw=4 tw=99:
3  *
4  * Copyright 2014 Mozilla Foundation
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  *     http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18 
19 #include "wasm/WasmFrameIter.h"
20 
21 #include "wasm/WasmInstance.h"
22 #include "wasm/WasmStubs.h"
23 
24 #include "jit/MacroAssembler-inl.h"
25 
26 using namespace js;
27 using namespace js::jit;
28 using namespace js::wasm;
29 
30 using mozilla::DebugOnly;
31 using mozilla::Maybe;
32 
33 /*****************************************************************************/
34 // WasmFrameIter implementation
35 
// Begin iterating the wasm frames of |activation|, starting at |fp| if
// non-null, otherwise at the activation's recorded wasm exit frame pointer.
// Unwinding defaults to Unwind::False (observation only).
WasmFrameIter::WasmFrameIter(JitActivation* activation, wasm::Frame* fp)
    : activation_(activation),
      code_(nullptr),
      codeRange_(nullptr),
      lineOrBytecode_(0),
      fp_(fp ? fp : activation->wasmExitFP()),
      unwoundIonCallerFP_(nullptr),
      unwind_(Unwind::False) {
  MOZ_ASSERT(fp_);

  // When the stack is captured during a trap (viz., to create the .stack
  // for an Error object), use the pc/bytecode information captured by the
  // signal handler in the runtime.

  if (activation->isWasmTrapping()) {
    // The trapping frame itself is included in the iteration, using the
    // trap pc recorded on the activation to find its code and range.
    code_ = &fp_->tls->instance->code();
    MOZ_ASSERT(code_ == LookupCode(activation->wasmTrapPC()));

    codeRange_ = code_->lookupFuncRange(activation->wasmTrapPC());
    MOZ_ASSERT(codeRange_);

    lineOrBytecode_ = activation->wasmTrapBytecodeOffset();

    MOZ_ASSERT(!done());
    return;
  }

  // When asynchronously interrupted, exitFP is set to the interrupted frame
  // itself and so we do not want to skip it. Instead, we can recover the
  // Code and CodeRange from the JitActivation, which are set when control
  // flow was interrupted. There is no CallSite (b/c the interrupt was
  // async), but this is fine because CallSite is only used for line number
  // for which we can use the beginning of the function from the CodeRange
  // instead.

  if (activation->isWasmInterrupted()) {
    code_ = &fp_->tls->instance->code();
    MOZ_ASSERT(code_ == LookupCode(activation->wasmInterruptUnwindPC()));

    codeRange_ = code_->lookupFuncRange(activation->wasmInterruptUnwindPC());
    MOZ_ASSERT(codeRange_);

    // No CallSite is available for an async interrupt; fall back to the
    // function's first line/bytecode offset.
    lineOrBytecode_ = codeRange_->funcLineOrBytecode();

    MOZ_ASSERT(!done());
    return;
  }

  // Otherwise, execution exits wasm code via an exit stub which sets exitFP
  // to the exit stub's frame. Thus, in this case, we want to start iteration
  // at the caller of the exit frame, whose Code, CodeRange and CallSite are
  // indicated by the returnAddress of the exit stub's frame. If the caller
  // was Ion, we can just skip the wasm frames.

  popFrame();
  MOZ_ASSERT(!done() || unwoundIonCallerFP_);
}
93 
done() const94 bool WasmFrameIter::done() const {
95   MOZ_ASSERT(!!fp_ == !!code_);
96   MOZ_ASSERT(!!fp_ == !!codeRange_);
97   return !fp_;
98 }
99 
// Advance to the caller's frame, optionally unwinding (releasing) the
// frame being left behind when unwind_ == Unwind::True.
void WasmFrameIter::operator++() {
  MOZ_ASSERT(!done());

  // When the iterator is set to unwind, each time the iterator pops a frame,
  // the JitActivation is updated so that the just-popped frame is no longer
  // visible. This is necessary since Debugger::onLeaveFrame is called before
  // popping each frame and, once onLeaveFrame is called for a given frame,
  // that frame must not be visible to subsequent stack iteration (or it
  // could be added as a "new" frame just as it becomes garbage).  When the
  // frame is "interrupted", then exitFP is included in the callstack
  // (otherwise, it is skipped, as explained above). So to unwind the
  // innermost frame, we just clear the interrupt state.

  if (unwind_ == Unwind::True) {
    if (activation_->isWasmInterrupted())
      activation_->finishWasmInterrupt();
    else if (activation_->isWasmTrapping())
      activation_->finishWasmTrap();
    // Re-point exitFP at the current frame so it is skipped (not iterated)
    // by any stack walk that happens before the next increment.
    activation_->setWasmExitFP(fp_);
  }

  popFrame();
}
123 
// Step from the current frame to its caller, updating code_/codeRange_/
// lineOrBytecode_ for the caller, and handling the two terminal cases:
// a null caller (interpreter entry) and a JIT (Ion) caller.
void WasmFrameIter::popFrame() {
  Frame* prevFP = fp_;
  fp_ = prevFP->callerFP;
  // The in-frame callerFP link is never tagged; only the activation's
  // packedExitFP carries ExitFpWasmBit.
  MOZ_ASSERT(!(uintptr_t(fp_) & JitActivation::ExitFpWasmBit));

  if (!fp_) {
    code_ = nullptr;
    codeRange_ = nullptr;

    if (unwind_ == Unwind::True) {
      // We're exiting via the interpreter entry; we can safely reset
      // exitFP.
      activation_->setWasmExitFP(nullptr);
      unwoundAddressOfReturnAddress_ = &prevFP->returnAddress;
    }

    MOZ_ASSERT(done());
    return;
  }

  void* returnAddress = prevFP->returnAddress;

  code_ = LookupCode(returnAddress, &codeRange_);
  MOZ_ASSERT(codeRange_);

  if (codeRange_->isJitEntry()) {
    // The caller entered wasm through a jit entry stub: record the JIT
    // caller's fp and terminate wasm iteration here.
    unwoundIonCallerFP_ = (uint8_t*)fp_;

    fp_ = nullptr;
    code_ = nullptr;
    codeRange_ = nullptr;

    if (unwind_ == Unwind::True) {
      activation_->setJSExitFP(unwoundIonCallerFP_);
      unwoundAddressOfReturnAddress_ = &prevFP->returnAddress;
    }

    MOZ_ASSERT(done());
    return;
  }

  // Otherwise the caller is a plain wasm function frame.
  MOZ_ASSERT(code_ == &fp_->tls->instance->code());
  MOZ_ASSERT(codeRange_->kind() == CodeRange::Function);

  const CallSite* callsite = code_->lookupCallSite(returnAddress);
  MOZ_ASSERT(callsite);

  lineOrBytecode_ = callsite->lineOrBytecode();

  MOZ_ASSERT(!done());
}
175 
filename() const176 const char* WasmFrameIter::filename() const {
177   MOZ_ASSERT(!done());
178   return code_->metadata().filename.get();
179 }
180 
displayURL() const181 const char16_t* WasmFrameIter::displayURL() const {
182   MOZ_ASSERT(!done());
183   return code_->metadata().displayURL();
184 }
185 
mutedErrors() const186 bool WasmFrameIter::mutedErrors() const {
187   MOZ_ASSERT(!done());
188   return code_->metadata().mutedErrors();
189 }
190 
functionDisplayAtom() const191 JSAtom* WasmFrameIter::functionDisplayAtom() const {
192   MOZ_ASSERT(!done());
193 
194   JSContext* cx = activation_->cx();
195   JSAtom* atom = instance()->getFuncAtom(cx, codeRange_->funcIndex());
196   if (!atom) {
197     cx->clearPendingException();
198     return cx->names().empty;
199   }
200 
201   return atom;
202 }
203 
// Line number (for asm.js) or bytecode offset (for wasm) of the current
// frame, as established by the constructor or popFrame().
unsigned WasmFrameIter::lineOrBytecode() const {
  MOZ_ASSERT(!done());
  return lineOrBytecode_;
}
208 
// The Instance owning the current frame, read through the frame's tls slot.
Instance* WasmFrameIter::instance() const {
  MOZ_ASSERT(!done());
  return fp_->tls->instance;
}
213 
// After a full unwind (done() && Unwind::True), the stack address of the
// final popped frame's return address, recorded by popFrame().
void** WasmFrameIter::unwoundAddressOfReturnAddress() const {
  MOZ_ASSERT(done());
  MOZ_ASSERT(unwind_ == Unwind::True);
  MOZ_ASSERT(unwoundAddressOfReturnAddress_);
  return unwoundAddressOfReturnAddress_;
}
220 
debugEnabled() const221 bool WasmFrameIter::debugEnabled() const {
222   MOZ_ASSERT(!done());
223 
224   // Only non-imported functions can have debug frames.
225   //
226   // Metadata::debugEnabled is only set if debugging is actually enabled (both
227   // requested, and available via baseline compilation), and Tier::Debug code
228   // will be available.
229   return code_->metadata().debugEnabled &&
230          codeRange_->funcIndex() >=
231              code_->metadata(Tier::Debug).funcImports.length();
232 }
233 
// The DebugFrame laid out adjacent to the current frame; only meaningful
// when debugEnabled() is true.
DebugFrame* WasmFrameIter::debugFrame() const {
  MOZ_ASSERT(!done());
  return DebugFrame::from(fp_);
}
238 
239 /*****************************************************************************/
240 // Prologue/epilogue code generation
241 
// These constants reflect statically-determined offsets in the
// prologue/epilogue. The offsets are dynamically asserted during code
// generation.
//
// PushedRetAddr/PushedTLS/PushedFP/SetFP are byte offsets from the function
// entry to the point just after the corresponding prologue step has
// executed.  PoppedFP/PoppedTLSReg are byte distances measured backwards
// from the epilogue's return instruction to the point just after the
// corresponding value has been restored (see GenerateCallableEpilogue).
#if defined(JS_CODEGEN_X64)
static const unsigned PushedRetAddr = 0;
static const unsigned PushedTLS = 2;
static const unsigned PushedFP = 3;
static const unsigned SetFP = 6;
static const unsigned PoppedFP = 2;
static const unsigned PoppedTLSReg = 0;
#elif defined(JS_CODEGEN_X86)
static const unsigned PushedRetAddr = 0;
static const unsigned PushedTLS = 1;
static const unsigned PushedFP = 2;
static const unsigned SetFP = 4;
static const unsigned PoppedFP = 1;
static const unsigned PoppedTLSReg = 0;
#elif defined(JS_CODEGEN_ARM)
static const unsigned BeforePushRetAddr = 0;
static const unsigned PushedRetAddr = 4;
static const unsigned PushedTLS = 8;
static const unsigned PushedFP = 12;
static const unsigned SetFP = 16;
static const unsigned PoppedFP = 4;
static const unsigned PoppedTLSReg = 0;
#elif defined(JS_CODEGEN_ARM64)
static const unsigned BeforePushRetAddr = 0;
static const unsigned PushedRetAddr = 0;
static const unsigned PushedTLS = 1;
static const unsigned PushedFP = 1;
static const unsigned SetFP = 0;
static const unsigned PoppedFP = 0;
static const unsigned PoppedTLSReg = 0;
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
static const unsigned PushedRetAddr = 8;
static const unsigned PushedTLS = 12;
static const unsigned PushedFP = 16;
static const unsigned SetFP = 20;
static const unsigned PoppedFP = 8;
static const unsigned PoppedTLSReg = 4;
#elif defined(JS_CODEGEN_NONE)
// Synthetic values to satisfy asserts and avoid compiler warnings.
static const unsigned PushedRetAddr = 0;
static const unsigned PushedTLS = 1;
static const unsigned PushedFP = 2;
static const unsigned SetFP = 3;
static const unsigned PoppedFP = 4;
static const unsigned PoppedTLSReg = 5;
#else
#error "Unknown architecture!"
#endif
// Offset from a jit entry's begin to the point just after FramePointer has
// been set; asserted in GenerateJitEntryPrologue.
static constexpr unsigned SetJitEntryFP = PushedRetAddr + SetFP - PushedFP;
294 
// Emit code to load the current JitActivation into |dest|, going through
// TlsData::cx and then the context's activation pointer.
static void LoadActivation(MacroAssembler& masm, const Register& dest) {
  // WasmCall pushes a JitActivation.
  masm.loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), dest);
  masm.loadPtr(Address(dest, JSContext::offsetOfActivation()), dest);
}
300 
// Emit code that records the current FramePointer (tagged with
// ExitFpWasmBit) and the exit reason in the JitActivation, so frame
// iterators can recognize and unwind the exit. |scratch| is clobbered.
void wasm::SetExitFP(MacroAssembler& masm, ExitReason reason,
                     Register scratch) {
  MOZ_ASSERT(!reason.isNone());

  LoadActivation(masm, scratch);

  masm.store32(
      Imm32(reason.encode()),
      Address(scratch, JitActivation::offsetOfEncodedWasmExitReason()));

  // Tag FramePointer itself, store it as the packed exit fp, then untag it
  // again so the register keeps its normal untagged value.
  masm.orPtr(Imm32(JitActivation::ExitFpWasmBit), FramePointer);
  masm.storePtr(FramePointer,
                Address(scratch, JitActivation::offsetOfPackedExitFP()));
  masm.andPtr(Imm32(int32_t(~JitActivation::ExitFpWasmBit)), FramePointer);
}
316 
// Emit code that clears the packed exit fp and exit reason recorded by
// SetExitFP. |scratch| is clobbered.
void wasm::ClearExitFP(MacroAssembler& masm, Register scratch) {
  LoadActivation(masm, scratch);
  masm.storePtr(ImmWord(0x0),
                Address(scratch, JitActivation::offsetOfPackedExitFP()));
  masm.store32(
      Imm32(0x0),
      Address(scratch, JitActivation::offsetOfEncodedWasmExitReason()));
}
325 
// Emit the standard wasm frame-construction sequence (push return address
// where needed, push tls, push caller fp, set fp), recording the entry
// offset in |*entry|. The per-step offsets are asserted against the static
// Pushed*/SetFP constants above.
static void GenerateCallablePrologue(MacroAssembler& masm, uint32_t* entry) {
  masm.setFramePushed(0);

  // ProfilingFrameIterator needs to know the offsets of several key
  // instructions from entry. To save space, we make these offsets static
  // constants and assert that they match the actual codegen below. On ARM,
  // this requires AutoForbidPools to prevent a constant pool from being
  // randomly inserted between two instructions.
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
  *entry = masm.currentOffset();

  // On MIPS the whole Frame is allocated at once and filled with stores
  // rather than a sequence of pushes.
  masm.subFromStackPtr(Imm32(sizeof(Frame)));
  masm.storePtr(ra, Address(StackPointer, offsetof(Frame, returnAddress)));
  MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
  masm.storePtr(WasmTlsReg, Address(StackPointer, offsetof(Frame, tls)));
  MOZ_ASSERT_IF(!masm.oom(), PushedTLS == masm.currentOffset() - *entry);
  masm.storePtr(FramePointer, Address(StackPointer, offsetof(Frame, callerFP)));
  MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
  masm.moveStackPtrTo(FramePointer);
  MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
#else
  {
#if defined(JS_CODEGEN_ARM)
    AutoForbidPools afp(&masm, /* number of instructions in scope = */ 7);

    *entry = masm.currentOffset();

    MOZ_ASSERT(BeforePushRetAddr == 0);
    masm.push(lr);
#else
    *entry = masm.currentOffset();
    // The x86/x64 call instruction pushes the return address.
#endif

    MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
    masm.push(WasmTlsReg);
    MOZ_ASSERT_IF(!masm.oom(), PushedTLS == masm.currentOffset() - *entry);
    masm.push(FramePointer);
    MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
    masm.moveStackPtrTo(FramePointer);
    MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
  }
#endif
}
370 
// Emit the standard wasm frame-teardown sequence (free the local frame,
// optionally clear exit state, restore caller fp and tls, return),
// recording the offset of the return instruction in |*ret|. Pop offsets
// are asserted against the static PoppedFP/PoppedTLSReg constants.
static void GenerateCallableEpilogue(MacroAssembler& masm, unsigned framePushed,
                                     ExitReason reason, uint32_t* ret) {
  if (framePushed) masm.freeStack(framePushed);

  if (!reason.isNone()) ClearExitFP(masm, ABINonArgReturnVolatileReg);

  DebugOnly<uint32_t> poppedFP;
  DebugOnly<uint32_t> poppedTlsReg;

#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)

  masm.loadPtr(Address(StackPointer, offsetof(Frame, callerFP)), FramePointer);
  poppedFP = masm.currentOffset();
  masm.loadPtr(Address(StackPointer, offsetof(Frame, tls)), WasmTlsReg);
  poppedTlsReg = masm.currentOffset();
  masm.loadPtr(Address(StackPointer, offsetof(Frame, returnAddress)), ra);

  *ret = masm.currentOffset();
  masm.as_jr(ra);
  // The stack-pointer adjustment is emitted after the jump so that it
  // executes in the branch delay slot.
  masm.addToStackPtr(Imm32(sizeof(Frame)));

#else
  // Forbid pools for the same reason as described in GenerateCallablePrologue.
#if defined(JS_CODEGEN_ARM)
  AutoForbidPools afp(&masm, /* number of instructions in scope = */ 7);
#endif

  // There is an important ordering constraint here: fp must be repointed to
  // the caller's frame before any field of the frame currently pointed to by
  // fp is popped: asynchronous signal handlers (which use stack space
  // starting at sp) could otherwise clobber these fields while they are still
  // accessible via fp (fp fields are read during frame iteration which is
  // *also* done asynchronously).

  masm.pop(FramePointer);
  poppedFP = masm.currentOffset();

  masm.pop(WasmTlsReg);
  poppedTlsReg = masm.currentOffset();

  *ret = masm.currentOffset();
  masm.ret();

#endif

  MOZ_ASSERT_IF(!masm.oom(), PoppedFP == *ret - poppedFP);
  MOZ_ASSERT_IF(!masm.oom(), PoppedTLSReg == *ret - poppedTlsReg);
}
419 
// Emit a full wasm function prologue: the table entry (signature check),
// the normal entry (standard frame setup), the optional tiering jump, and
// the frame allocation with stack-overflow checks. Records begin/
// normalEntry/tierEntry offsets in |offsets|. |tier1FuncIndex| is Some()
// when emitting Tier-1 code that may later jump into Tier-2 code.
void wasm::GenerateFunctionPrologue(MacroAssembler& masm, uint32_t framePushed,
                                    IsLeaf isLeaf, const SigIdDesc& sigId,
                                    BytecodeOffset trapOffset,
                                    FuncOffsets* offsets,
                                    const Maybe<uint32_t>& tier1FuncIndex) {
  // Flush pending pools so they do not get dumped between the 'begin' and
  // 'normalEntry' offsets since the difference must be less than UINT8_MAX
  // to be stored in CodeRange::funcBeginToNormalEntry_.
  masm.flushBuffer();
  masm.haltingAlign(CodeAlignment);

  // The table entry falls through into the normal entry after it has checked
  // the signature.
  Label normalEntry;

  // Generate table entry. The BytecodeOffset of the trap is fixed up to be
  // the bytecode offset of the callsite by JitActivation::startWasmTrap.
  offsets->begin = masm.currentOffset();
  switch (sigId.kind()) {
    case SigIdDesc::Kind::Global: {
      // Compare the caller's signature id (in WasmTableCallSigReg) against
      // the callee's, loaded from global data.
      Register scratch = WasmTableCallScratchReg;
      masm.loadWasmGlobalPtr(sigId.globalDataOffset(), scratch);
      masm.branchPtr(Assembler::Condition::Equal, WasmTableCallSigReg, scratch,
                     &normalEntry);
      masm.wasmTrap(Trap::IndirectCallBadSig, BytecodeOffset(0));
      break;
    }
    case SigIdDesc::Kind::Immediate: {
      // Small signatures are encoded as an immediate and compared directly.
      masm.branch32(Assembler::Condition::Equal, WasmTableCallSigReg,
                    Imm32(sigId.immediate()), &normalEntry);
      masm.wasmTrap(Trap::IndirectCallBadSig, BytecodeOffset(0));
      break;
    }
    case SigIdDesc::Kind::None:
      break;
  }

  // The table entry might have generated a small constant pool in case of
  // immediate comparison.
  masm.flushBuffer();

  // Generate normal entry:
  masm.nopAlign(CodeAlignment);
  masm.bind(&normalEntry);
  GenerateCallablePrologue(masm, &offsets->normalEntry);

  // Tiering works as follows.  The Code owns a jumpTable, which has one
  // pointer-sized element for each function up to the largest funcIndex in
  // the module.  Each table element is an address into the Tier-1 or the
  // Tier-2 function at that index; the elements are updated when Tier-2 code
  // becomes available.  The Tier-1 function will unconditionally jump to this
  // address.  The table elements are written racily but without tearing when
  // Tier-2 compilation is finished.
  //
  // The address in the table is either to the instruction following the jump
  // in Tier-1 code, or into the function prologue after the standard setup in
  // Tier-2 code.  Effectively, Tier-1 code performs standard frame setup on
  // behalf of whatever code it jumps to, and the target code allocates its
  // own frame in whatever way it wants.
  if (tier1FuncIndex) {
    Register scratch = ABINonArgReg0;
    masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, jumpTable)), scratch);
    masm.jump(Address(scratch, *tier1FuncIndex * sizeof(uintptr_t)));
  }

  offsets->tierEntry = masm.currentOffset();

  // The framePushed value is tier-variant and thus the stack increment must
  // go after the tiering jump/entry.
  if (framePushed > 0) {
    // If the frame is large, don't bump sp until after the stack limit check so
    // that the trap handler isn't called with a wild sp.
    if (framePushed > MAX_UNCHECKED_LEAF_FRAME_SIZE) {
      Label ok;
      Register scratch = ABINonArgReg0;
      masm.moveStackPtrTo(scratch);
      masm.subPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, stackLimit)),
                  scratch);
      masm.branchPtr(Assembler::GreaterThan, scratch, Imm32(framePushed), &ok);
      masm.wasmTrap(wasm::Trap::StackOverflow, trapOffset);
      masm.bind(&ok);
    }

    masm.reserveStack(framePushed);

    // Small frames of non-leaf functions check the stack limit after the
    // bump; leaves within the unchecked-size budget skip the check.
    if (framePushed <= MAX_UNCHECKED_LEAF_FRAME_SIZE && !isLeaf) {
      Label ok;
      masm.branchStackPtrRhs(
          Assembler::Below,
          Address(WasmTlsReg, offsetof(wasm::TlsData, stackLimit)), &ok);
      masm.wasmTrap(wasm::Trap::StackOverflow, trapOffset);
      masm.bind(&ok);
    }
  }

  MOZ_ASSERT(masm.framePushed() == framePushed);
}
517 
// Emit a wasm function epilogue, recording the return-instruction offset
// in |offsets->ret|.
void wasm::GenerateFunctionEpilogue(MacroAssembler& masm, unsigned framePushed,
                                    FuncOffsets* offsets) {
  // Inverse of GenerateFunctionPrologue:
  MOZ_ASSERT(masm.framePushed() == framePushed);
  GenerateCallableEpilogue(masm, framePushed, ExitReason::None(),
                           &offsets->ret);
  MOZ_ASSERT(masm.framePushed() == 0);
}
526 
// Emit the prologue of an exit stub (wasm -> C++): standard frame setup,
// then publish fp and |reason| on the JitActivation for frame iteration.
void wasm::GenerateExitPrologue(MacroAssembler& masm, unsigned framePushed,
                                ExitReason reason, CallableOffsets* offsets) {
  masm.haltingAlign(CodeAlignment);

  GenerateCallablePrologue(masm, &offsets->begin);

  // This frame will be exiting compiled code to C++ so record the fp and
  // reason in the JitActivation so the frame iterators can unwind.
  SetExitFP(masm, reason, ABINonArgReturnVolatileReg);

  MOZ_ASSERT(masm.framePushed() == 0);
  masm.reserveStack(framePushed);
}
540 
// Emit the epilogue of an exit stub; GenerateCallableEpilogue clears the
// exit state because |reason| is non-None.
void wasm::GenerateExitEpilogue(MacroAssembler& masm, unsigned framePushed,
                                ExitReason reason, CallableOffsets* offsets) {
  // Inverse of GenerateExitPrologue:
  MOZ_ASSERT(masm.framePushed() == framePushed);
  GenerateCallableEpilogue(masm, framePushed, reason, &offsets->ret);
  MOZ_ASSERT(masm.framePushed() == 0);
}
548 
// Debug-only: emit a runtime check (breakpoint on failure) that the
// activation's packedExitFP is not wasm-tagged while in the JIT exit stub.
static void AssertNoWasmExitFPInJitExit(MacroAssembler& masm) {
// As a general stack invariant, if Activation::packedExitFP is tagged as
// wasm, it must point to a valid wasm::Frame. The JIT exit stub calls into
// JIT code and thus does not really exit, thus, when entering/leaving the
// JIT exit stub from/to normal wasm code, packedExitFP is not tagged wasm.
#ifdef DEBUG
  Register scratch = ABINonArgReturnReg0;
  LoadActivation(masm, scratch);

  Label ok;
  masm.branchTestPtr(Assembler::Zero,
                     Address(scratch, JitActivation::offsetOfPackedExitFP()),
                     Imm32(uintptr_t(JitActivation::ExitFpWasmBit)), &ok);
  masm.breakpoint();
  masm.bind(&ok);
#endif
}
566 
// Emit the prologue of the wasm -> JIT exit stub. Unlike a C++ exit, no
// exit fp/reason is published (the stub stays "inside" compiled code).
void wasm::GenerateJitExitPrologue(MacroAssembler& masm, unsigned framePushed,
                                   CallableOffsets* offsets) {
  masm.haltingAlign(CodeAlignment);

  GenerateCallablePrologue(masm, &offsets->begin);
  AssertNoWasmExitFPInJitExit(masm);

  MOZ_ASSERT(masm.framePushed() == 0);
  masm.reserveStack(framePushed);
}
577 
// Emit the epilogue of the wasm -> JIT exit stub.
void wasm::GenerateJitExitEpilogue(MacroAssembler& masm, unsigned framePushed,
                                   CallableOffsets* offsets) {
  // Inverse of GenerateJitExitPrologue:
  MOZ_ASSERT(masm.framePushed() == framePushed);
  AssertNoWasmExitFPInJitExit(masm);
  GenerateCallableEpilogue(masm, framePushed, ExitReason::None(),
                           &offsets->ret);
  MOZ_ASSERT(masm.framePushed() == 0);
}
587 
// Emit the prologue of a jit entry stub (JIT -> wasm): push the return
// address where the call instruction does not, and point FramePointer at
// the JIT caller's stack so unwinding from wasm to jit frames is trivial.
// The SetJitEntryFP offset is asserted against this codegen.
void wasm::GenerateJitEntryPrologue(MacroAssembler& masm, Offsets* offsets) {
  masm.haltingAlign(CodeAlignment);

  {
#if defined(JS_CODEGEN_ARM)
    AutoForbidPools afp(&masm, /* number of instructions in scope = */ 2);
    offsets->begin = masm.currentOffset();
    MOZ_ASSERT(BeforePushRetAddr == 0);
    masm.push(lr);
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    offsets->begin = masm.currentOffset();
    masm.push(ra);
#else
    // The x86/x64 call instruction pushes the return address.
    offsets->begin = masm.currentOffset();
#endif
    MOZ_ASSERT_IF(!masm.oom(),
                  PushedRetAddr == masm.currentOffset() - offsets->begin);

    // Save jit frame pointer, so unwinding from wasm to jit frames is trivial.
    masm.moveStackPtrTo(FramePointer);
    MOZ_ASSERT_IF(!masm.oom(),
                  SetJitEntryFP == masm.currentOffset() - offsets->begin);
  }

  masm.setFramePushed(0);
}
615 
616 /*****************************************************************************/
617 // ProfilingFrameIterator
618 
// Default-construct an already-done iterator (no activation to walk).
ProfilingFrameIterator::ProfilingFrameIterator()
    : code_(nullptr),
      codeRange_(nullptr),
      callerFP_(nullptr),
      callerPC_(nullptr),
      stackAddress_(nullptr),
      unwoundIonCallerFP_(nullptr),
      exitReason_(ExitReason::Fixed::None) {
  MOZ_ASSERT(done());
}
629 
// Begin profiling iteration at the activation's recorded wasm exit frame,
// taking the exit reason from the activation.
ProfilingFrameIterator::ProfilingFrameIterator(const JitActivation& activation)
    : code_(nullptr),
      codeRange_(nullptr),
      callerFP_(nullptr),
      callerPC_(nullptr),
      stackAddress_(nullptr),
      unwoundIonCallerFP_(nullptr),
      exitReason_(activation.wasmExitReason()) {
  initFromExitFP(activation.wasmExitFP());
}
640 
// Begin profiling iteration at an explicit exit frame |fp|, assuming an
// ImportJit exit reason.
ProfilingFrameIterator::ProfilingFrameIterator(const JitActivation& activation,
                                               const Frame* fp)
    : code_(nullptr),
      codeRange_(nullptr),
      callerFP_(nullptr),
      callerPC_(nullptr),
      stackAddress_(nullptr),
      unwoundIonCallerFP_(nullptr),
      exitReason_(ExitReason::Fixed::ImportJit) {
  MOZ_ASSERT(fp);
  initFromExitFP(fp);
}
653 
// Debug-only sanity check that (callerPC, callerFP) form a plausible
// caller: the pc must map to known code, and unless it lies in an entry
// stub it must correspond to a registered CallSite.
static inline void AssertMatchesCallSite(void* callerPC, Frame* callerFP) {
#ifdef DEBUG
  const CodeRange* range;
  const Code* code = LookupCode(callerPC, &range);

  MOZ_ASSERT(code);
  MOZ_ASSERT(range);

  if (range->isInterpEntry()) {
    // The interpreter entry has no wasm caller frame.
    MOZ_ASSERT(callerFP == nullptr);
  } else if (range->isJitEntry()) {
    // A jit entry always records the JIT caller's fp.
    MOZ_ASSERT(callerFP != nullptr);
  } else {
    // Any other caller pc must be a registered call site.
    const CallSite* site = code->lookupCallSite(callerPC);
    MOZ_ASSERT(site);
  }
#endif
}
676 
// Initialize iteration state from an exit frame pointer, dispatching on
// the kind of code the exit frame's return address lands in.
void ProfilingFrameIterator::initFromExitFP(const Frame* fp) {
  MOZ_ASSERT(fp);
  stackAddress_ = (void*)fp;

  void* pc = fp->returnAddress;

  code_ = LookupCode(pc, &codeRange_);
  MOZ_ASSERT(code_);
  MOZ_ASSERT(codeRange_);

  // Since we don't have the pc for fp, start unwinding at the caller of fp.
  // This means that the innermost frame is skipped. This is fine because:
  //  - for import exit calls, the innermost frame is a thunk, so the first
  //    frame that shows up is the function calling the import;
  //  - for Math and other builtin calls as well as interrupts, we note the
  //    absence of an exit reason and inject a fake "builtin" frame; and
  //  - for async interrupts, we just accept that we'll lose the innermost
  //    frame.
  switch (codeRange_->kind()) {
    case CodeRange::InterpEntry:
      // Entered directly from the interpreter: synthesize a fake entry
      // frame and stop there.
      callerPC_ = nullptr;
      callerFP_ = nullptr;
      codeRange_ = nullptr;
      exitReason_ = ExitReason(ExitReason::Fixed::FakeInterpEntry);
      break;
    case CodeRange::JitEntry:
      // Entered from JIT code: record the JIT caller's fp and stop.
      callerPC_ = nullptr;
      callerFP_ = nullptr;
      unwoundIonCallerFP_ = (uint8_t*)fp->callerFP;
      break;
    case CodeRange::Function:
      // Ordinary wasm caller: step up one frame and validate it.
      fp = fp->callerFP;
      callerPC_ = fp->returnAddress;
      callerFP_ = fp->callerFP;
      AssertMatchesCallSite(callerPC_, callerFP_);
      break;
    case CodeRange::ImportJitExit:
    case CodeRange::ImportInterpExit:
    case CodeRange::BuiltinThunk:
    case CodeRange::TrapExit:
    case CodeRange::OldTrapExit:
    case CodeRange::DebugTrap:
    case CodeRange::OutOfBoundsExit:
    case CodeRange::UnalignedExit:
    case CodeRange::Throw:
    case CodeRange::Interrupt:
    case CodeRange::FarJumpIsland:
      // An exit frame's return address can never land in stub code.
      MOZ_CRASH("Unexpected CodeRange kind");
  }

  MOZ_ASSERT(!done());
}
729 
StartUnwinding(const RegisterState & registers,UnwindState * unwindState,bool * unwoundCaller)730 bool js::wasm::StartUnwinding(const RegisterState& registers,
731                               UnwindState* unwindState, bool* unwoundCaller) {
732   // Shorthands.
733   uint8_t* const pc = (uint8_t*)registers.pc;
734   void** const sp = (void**)registers.sp;
735 
736   // The frame pointer might be in the process of tagging/untagging; make
737   // sure it's untagged.
738   Frame* const fp =
739       (Frame*)(intptr_t(registers.fp) & ~JitActivation::ExitFpWasmBit);
740 
741   // Get the CodeRange describing pc and the base address to which the
742   // CodeRange is relative. If the pc is not in a wasm module or a builtin
743   // thunk, then execution must be entering from or leaving to the C++ caller
744   // that pushed the JitActivation.
745   const CodeRange* codeRange;
746   uint8_t* codeBase;
747   const Code* code = nullptr;
748 
749   const CodeSegment* codeSegment = LookupCodeSegment(pc, &codeRange);
750   if (codeSegment) {
751     code = &codeSegment->code();
752     codeBase = codeSegment->base();
753     MOZ_ASSERT(codeRange);
754   } else if (!LookupBuiltinThunk(pc, &codeRange, &codeBase)) {
755     return false;
756   }
757 
758   // When the pc is inside the prologue/epilogue, the innermost call's Frame
759   // is not complete and thus fp points to the second-to-innermost call's
760   // Frame. Since fp can only tell you about its caller, naively unwinding
761   // while pc is in the prologue/epilogue would skip the second-to-innermost
762   // call. To avoid this problem, we use the static structure of the code in
763   // the prologue and epilogue to do the Right Thing.
764   uint32_t offsetInCode = pc - codeBase;
765   MOZ_ASSERT(offsetInCode >= codeRange->begin());
766   MOZ_ASSERT(offsetInCode < codeRange->end());
767 
768   // Compute the offset of the pc from the (normal) entry of the code range.
769   // The stack state of the pc for the entire table-entry is equivalent to
770   // that of the first pc of the normal-entry. Thus, we can simplify the below
771   // case analysis by redirecting all pc-in-table-entry cases to the
772   // pc-at-normal-entry case.
773   uint32_t offsetFromEntry;
774   if (codeRange->isFunction()) {
775     if (offsetInCode < codeRange->funcNormalEntry())
776       offsetFromEntry = 0;
777     else
778       offsetFromEntry = offsetInCode - codeRange->funcNormalEntry();
779   } else {
780     offsetFromEntry = offsetInCode - codeRange->begin();
781   }
782 
783   // Most cases end up unwinding to the caller state; not unwinding is the
784   // exception here.
785   *unwoundCaller = true;
786 
787   Frame* fixedFP = nullptr;
788   void* fixedPC = nullptr;
789   switch (codeRange->kind()) {
790     case CodeRange::Function:
791     case CodeRange::FarJumpIsland:
792     case CodeRange::ImportJitExit:
793     case CodeRange::ImportInterpExit:
794     case CodeRange::BuiltinThunk:
795     case CodeRange::OldTrapExit:
796     case CodeRange::DebugTrap:
797 #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
798       if (offsetFromEntry < PushedFP || codeRange->isThunk()) {
        // On MIPS we rely on register state instead of state saved on
800         // stack until the wasm::Frame is completely built.
801         // On entry the return address is in ra (registers.lr) and
802         // fp holds the caller's fp.
803         fixedPC = (uint8_t*)registers.lr;
804         fixedFP = fp;
805         AssertMatchesCallSite(fixedPC, fixedFP);
806       } else
807 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
808       if (offsetFromEntry == BeforePushRetAddr || codeRange->isThunk()) {
809         // The return address is still in lr and fp holds the caller's fp.
810         fixedPC = (uint8_t*)registers.lr;
811         fixedFP = fp;
812         AssertMatchesCallSite(fixedPC, fixedFP);
813       } else
814 #endif
815           if (offsetFromEntry == PushedRetAddr || codeRange->isThunk()) {
816         // The return address has been pushed on the stack but fp still
817         // points to the caller's fp.
818         fixedPC = sp[0];
819         fixedFP = fp;
820         AssertMatchesCallSite(fixedPC, fixedFP);
821       } else if (offsetFromEntry >= PushedTLS && offsetFromEntry < PushedFP) {
822         // The return address and caller's TLS have been pushed on the
823         // stack; fp is still the caller's fp.
824         fixedPC = sp[1];
825         fixedFP = fp;
826         AssertMatchesCallSite(fixedPC, fixedFP);
827       } else if (offsetFromEntry == PushedFP) {
828         // The full Frame has been pushed; fp is still the caller's fp.
829         MOZ_ASSERT(fp == reinterpret_cast<Frame*>(sp)->callerFP);
830         fixedPC = reinterpret_cast<Frame*>(sp)->returnAddress;
831         fixedFP = fp;
832         AssertMatchesCallSite(fixedPC, fixedFP);
833 #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
834       } else if (offsetInCode >= codeRange->ret() - PoppedFP &&
835                  offsetInCode <= codeRange->ret()) {
836         (void)PoppedTLSReg;
837         // The fixedFP field of the Frame has been loaded into fp.
838         // The ra and TLS might also be loaded, but the Frame structure is
        // still on stack, so we can access the ra from there.
840         MOZ_ASSERT(fp == reinterpret_cast<Frame*>(sp)->callerFP);
841         fixedPC = reinterpret_cast<Frame*>(sp)->returnAddress;
842         fixedFP = fp;
843         AssertMatchesCallSite(fixedPC, fixedFP);
844 #else
845       } else if (offsetInCode >= codeRange->ret() - PoppedFP &&
846                  offsetInCode < codeRange->ret() - PoppedTLSReg) {
847         // The fixedFP field of the Frame has been popped into fp.
848         fixedPC = sp[1];
849         fixedFP = fp;
850         AssertMatchesCallSite(fixedPC, fixedFP);
851       } else if (offsetInCode == codeRange->ret()) {
852         // Both the TLS and fixedFP fields have been popped and fp now
853         // points to the caller's frame.
854         fixedPC = sp[0];
855         fixedFP = fp;
856         AssertMatchesCallSite(fixedPC, fixedFP);
857 #endif
858       } else {
859         if (codeRange->kind() == CodeRange::ImportJitExit) {
860           // The jit exit contains a range where the value of FP can't be
861           // trusted. Technically, we could recover fp from sp, but since
862           // the range is so short, for now just drop the stack.
863           if (offsetInCode >= codeRange->jitExitUntrustedFPStart() &&
864               offsetInCode < codeRange->jitExitUntrustedFPEnd()) {
865             return false;
866           }
867         }
868         // Not in the prologue/epilogue.
869         fixedPC = pc;
870         fixedFP = fp;
871         *unwoundCaller = false;
872         AssertMatchesCallSite(fp->returnAddress, fp->callerFP);
873         break;
874       }
875       break;
876     case CodeRange::TrapExit:
877     case CodeRange::OutOfBoundsExit:
878     case CodeRange::UnalignedExit:
879       // These code stubs execute after the prologue/epilogue have completed
880       // so pc/fp contains the right values here.
881       fixedPC = pc;
882       fixedFP = fp;
883       *unwoundCaller = false;
884       AssertMatchesCallSite(fp->returnAddress, fp->callerFP);
885       break;
886     case CodeRange::InterpEntry:
      // The entry trampoline is the final frame in a wasm JitActivation. The
888       // entry trampoline also doesn't GeneratePrologue/Epilogue so we can't
889       // use the general unwinding logic above.
890       break;
891     case CodeRange::JitEntry:
892       // There's a jit frame above the current one; we don't care about pc
893       // since the Jit entry frame is a jit frame which can be considered as
894       // an exit frame.
895 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || \
896     defined(JS_CODEGEN_MIPS64)
897       if (offsetFromEntry < PushedRetAddr) {
898         // We haven't pushed the jit return address yet, thus the jit
899         // frame is incomplete. During profiling frame iteration, it means
900         // that the jit profiling frame iterator won't be able to unwind
901         // this frame; drop it.
902         return false;
903       }
904 #endif
905       fixedFP = offsetFromEntry < SetJitEntryFP ? (Frame*)sp : fp;
906       fixedPC = nullptr;
907 
908       // On the error return path, FP might be set to FailFP. Ignore these
909       // transient frames.
910       if (intptr_t(fixedFP) == (FailFP & ~JitActivation::ExitFpWasmBit))
911         return false;
912       break;
913     case CodeRange::Throw:
914       // The throw stub executes a small number of instructions before popping
915       // the entire activation. To simplify testing, we simply pretend throw
916       // stubs have already popped the entire stack.
917       return false;
918     case CodeRange::Interrupt:
919       // When the PC is in the async interrupt stub, the fp may be garbage and
920       // so we cannot blindly unwind it. Since the percent of time spent in
921       // the interrupt stub is extremely small, just ignore the stack.
922       return false;
923   }
924 
925   unwindState->code = code;
926   unwindState->codeRange = codeRange;
927   unwindState->fp = fixedFP;
928   unwindState->pc = fixedPC;
929   return true;
930 }
931 
// Construct a profiling iterator over a wasm activation from a sampled
// register state (pc/sp/fp/lr). If the activation has a wasm exit FP set, the
// thread is executing an exit stub and the exit FP — not the sampled
// registers — describes the innermost wasm frame.
ProfilingFrameIterator::ProfilingFrameIterator(const JitActivation& activation,
                                               const RegisterState& state)
    : code_(nullptr),
      codeRange_(nullptr),
      callerFP_(nullptr),
      callerPC_(nullptr),
      stackAddress_(nullptr),
      unwoundIonCallerFP_(nullptr),
      exitReason_(ExitReason::Fixed::None) {
  // Let wasmExitFP take precedence over StartUnwinding when it is set since
  // during the body of an exit stub, the register state may not be valid
  // causing StartUnwinding() to abandon unwinding this activation.
  if (activation.hasWasmExitFP()) {
    exitReason_ = activation.wasmExitReason();
    initFromExitFP(activation.wasmExitFP());
    return;
  }

  bool unwoundCaller;
  UnwindState unwindState;
  if (!StartUnwinding(state, &unwindState, &unwoundCaller)) {
    // The sampled state cannot be unwound (e.g. pc is in a stub whose fp
    // cannot be trusted); report an already-done iterator.
    MOZ_ASSERT(done());
    return;
  }

  if (unwoundCaller) {
    // StartUnwinding already stepped past the innermost (incomplete) frame,
    // so the fp/pc it returned describe the caller directly.
    callerFP_ = unwindState.fp;
    callerPC_ = unwindState.pc;
  } else {
    // The returned fp is the innermost frame itself; read its caller fields.
    callerFP_ = unwindState.fp->callerFP;
    callerPC_ = unwindState.fp->returnAddress;
  }

  // A jit entry stub's caller is a jit frame; record its fp so that
  // operator++ terminates wasm iteration at the jit frame boundary.
  if (unwindState.codeRange->isJitEntry())
    unwoundIonCallerFP_ = (uint8_t*)callerFP_;

  // The interp entry trampoline is reported via a fixed exit reason rather
  // than a code range (see also operator++).
  if (unwindState.codeRange->isInterpEntry()) {
    unwindState.codeRange = nullptr;
    exitReason_ = ExitReason(ExitReason::Fixed::FakeInterpEntry);
  }

  code_ = unwindState.code;
  codeRange_ = unwindState.codeRange;
  stackAddress_ = state.sp;
  MOZ_ASSERT(!done());
}
978 
// Advance to the next (outer) frame. The branches below form a state
// machine: a pending exit reason is consumed first, then the virtual
// frames (jit entry, interp entry), and finally real wasm frames linked
// through Frame::callerFP.
void ProfilingFrameIterator::operator++() {
  if (!exitReason_.isNone()) {
    // Consume the exit-reason pseudo-frame. A fake interp-entry reason has
    // no code range and is the outermost frame (the asserts below check
    // that the iterator is done exactly in that case).
    DebugOnly<ExitReason> prevExitReason = exitReason_;
    exitReason_ = ExitReason::None();
    MOZ_ASSERT(!codeRange_ == prevExitReason.value.isInterpEntry());
    MOZ_ASSERT(done() == prevExitReason.value.isInterpEntry());
    return;
  }

  if (unwoundIonCallerFP_) {
    // The current frame is a jit entry stub whose caller is a jit frame;
    // wasm iteration ends at this boundary.
    MOZ_ASSERT(codeRange_->isJitEntry());
    callerPC_ = nullptr;
    callerFP_ = nullptr;
    codeRange_ = nullptr;
    MOZ_ASSERT(done());
    return;
  }

  if (!callerPC_) {
    // No caller pc: the previous frame was the outermost one.
    MOZ_ASSERT(!callerFP_);
    codeRange_ = nullptr;
    MOZ_ASSERT(done());
    return;
  }

  if (!callerFP_) {
    // A valid pc with a null fp means the caller is the interp entry
    // trampoline, which is reported via a fixed exit reason rather than a
    // code range (see the constructor).
    MOZ_ASSERT(LookupCode(callerPC_, &codeRange_) == code_);
    MOZ_ASSERT(codeRange_->kind() == CodeRange::InterpEntry);
    exitReason_ = ExitReason(ExitReason::Fixed::FakeInterpEntry);
    codeRange_ = nullptr;
    callerPC_ = nullptr;
    MOZ_ASSERT(!done());
    return;
  }

  code_ = LookupCode(callerPC_, &codeRange_);
  MOZ_ASSERT(codeRange_);

  if (codeRange_->isJitEntry()) {
    // Entered wasm from jit code: remember the jit caller's fp so the next
    // increment terminates wasm iteration.
    unwoundIonCallerFP_ = (uint8_t*)callerFP_;
    MOZ_ASSERT(!done());
    return;
  }

  MOZ_ASSERT(code_ == &callerFP_->tls->instance->code());

  // Ordinary wasm frame: step to the caller recorded in the Frame.
  switch (codeRange_->kind()) {
    case CodeRange::Function:
    case CodeRange::ImportJitExit:
    case CodeRange::ImportInterpExit:
    case CodeRange::BuiltinThunk:
    case CodeRange::TrapExit:
    case CodeRange::OldTrapExit:
    case CodeRange::DebugTrap:
    case CodeRange::OutOfBoundsExit:
    case CodeRange::UnalignedExit:
    case CodeRange::FarJumpIsland:
      stackAddress_ = callerFP_;
      callerPC_ = callerFP_->returnAddress;
      AssertMatchesCallSite(callerPC_, callerFP_->callerFP);
      callerFP_ = callerFP_->callerFP;
      break;
    case CodeRange::InterpEntry:
      MOZ_CRASH("should have had null caller fp");
    case CodeRange::JitEntry:
      MOZ_CRASH("should have been guarded above");
    case CodeRange::Interrupt:
    case CodeRange::Throw:
      MOZ_CRASH("code range doesn't have frame");
  }

  MOZ_ASSERT(!done());
}
1052 
// Return a human-readable profiling label for a call to the given thunked
// native. Only SymbolicAddresses for which NeedsBuiltinThunk() is true may
// reach this function. The switch intentionally has no default so that the
// compiler flags any SymbolicAddress added without a description here.
static const char* ThunkedNativeToDescription(SymbolicAddress func) {
  MOZ_ASSERT(NeedsBuiltinThunk(func));
  switch (func) {
    // These addresses are never thunked; the contradictory assert below is
    // unreachable given the assert above and exists to catch this switch
    // falling out of sync with NeedsBuiltinThunk().
    case SymbolicAddress::HandleExecutionInterrupt:
    case SymbolicAddress::HandleDebugTrap:
    case SymbolicAddress::HandleThrow:
    case SymbolicAddress::ReportTrap:
    case SymbolicAddress::OldReportTrap:
    case SymbolicAddress::ReportOutOfBounds:
    case SymbolicAddress::ReportUnalignedAccess:
    case SymbolicAddress::CallImport_Void:
    case SymbolicAddress::CallImport_I32:
    case SymbolicAddress::CallImport_I64:
    case SymbolicAddress::CallImport_F64:
    case SymbolicAddress::CoerceInPlace_ToInt32:
    case SymbolicAddress::CoerceInPlace_ToNumber:
      MOZ_ASSERT(!NeedsBuiltinThunk(func),
                 "not in sync with NeedsBuiltinThunk");
      break;
    case SymbolicAddress::ToInt32:
      return "call to asm.js native ToInt32 coercion (in wasm)";
    case SymbolicAddress::DivI64:
      return "call to native i64.div_s (in wasm)";
    case SymbolicAddress::UDivI64:
      return "call to native i64.div_u (in wasm)";
    case SymbolicAddress::ModI64:
      return "call to native i64.rem_s (in wasm)";
    case SymbolicAddress::UModI64:
      return "call to native i64.rem_u (in wasm)";
    case SymbolicAddress::TruncateDoubleToUint64:
      return "call to native i64.trunc_u/f64 (in wasm)";
    case SymbolicAddress::TruncateDoubleToInt64:
      return "call to native i64.trunc_s/f64 (in wasm)";
    case SymbolicAddress::SaturatingTruncateDoubleToUint64:
      return "call to native i64.trunc_u:sat/f64 (in wasm)";
    case SymbolicAddress::SaturatingTruncateDoubleToInt64:
      return "call to native i64.trunc_s:sat/f64 (in wasm)";
    case SymbolicAddress::Uint64ToDouble:
      return "call to native f64.convert_u/i64 (in wasm)";
    case SymbolicAddress::Uint64ToFloat32:
      return "call to native f32.convert_u/i64 (in wasm)";
    case SymbolicAddress::Int64ToDouble:
      return "call to native f64.convert_s/i64 (in wasm)";
    case SymbolicAddress::Int64ToFloat32:
      return "call to native f32.convert_s/i64 (in wasm)";
#if defined(JS_CODEGEN_ARM)
    // ARM EABI runtime helpers for 32-bit integer division.
    case SymbolicAddress::aeabi_idivmod:
      return "call to native i32.div_s (in wasm)";
    case SymbolicAddress::aeabi_uidivmod:
      return "call to native i32.div_u (in wasm)";
#endif
    case SymbolicAddress::ModD:
      return "call to asm.js native f64 % (mod)";
    case SymbolicAddress::SinD:
      return "call to asm.js native f64 Math.sin";
    case SymbolicAddress::CosD:
      return "call to asm.js native f64 Math.cos";
    case SymbolicAddress::TanD:
      return "call to asm.js native f64 Math.tan";
    case SymbolicAddress::ASinD:
      return "call to asm.js native f64 Math.asin";
    case SymbolicAddress::ACosD:
      return "call to asm.js native f64 Math.acos";
    case SymbolicAddress::ATanD:
      return "call to asm.js native f64 Math.atan";
    case SymbolicAddress::CeilD:
      return "call to native f64.ceil (in wasm)";
    case SymbolicAddress::CeilF:
      return "call to native f32.ceil (in wasm)";
    case SymbolicAddress::FloorD:
      return "call to native f64.floor (in wasm)";
    case SymbolicAddress::FloorF:
      return "call to native f32.floor (in wasm)";
    case SymbolicAddress::TruncD:
      return "call to native f64.trunc (in wasm)";
    case SymbolicAddress::TruncF:
      return "call to native f32.trunc (in wasm)";
    case SymbolicAddress::NearbyIntD:
      return "call to native f64.nearest (in wasm)";
    case SymbolicAddress::NearbyIntF:
      return "call to native f32.nearest (in wasm)";
    case SymbolicAddress::ExpD:
      return "call to asm.js native f64 Math.exp";
    case SymbolicAddress::LogD:
      return "call to asm.js native f64 Math.log";
    case SymbolicAddress::PowD:
      return "call to asm.js native f64 Math.pow";
    case SymbolicAddress::ATan2D:
      return "call to asm.js native f64 Math.atan2";
    case SymbolicAddress::GrowMemory:
      return "call to native grow_memory (in wasm)";
    case SymbolicAddress::CurrentMemory:
      return "call to native current_memory (in wasm)";
    case SymbolicAddress::WaitI32:
      return "call to native i32.wait (in wasm)";
    case SymbolicAddress::WaitI64:
      return "call to native i64.wait (in wasm)";
    case SymbolicAddress::Wake:
      return "call to native wake (in wasm)";
    case SymbolicAddress::CoerceInPlace_JitEntry:
      return "out-of-line coercion for jit entry arguments (in wasm)";
    case SymbolicAddress::ReportInt64JSCall:
      return "jit call to int64 wasm function";
#if defined(JS_CODEGEN_MIPS32)
    case SymbolicAddress::js_jit_gAtomic64Lock:
      MOZ_CRASH();
#endif
    case SymbolicAddress::Limit:
      break;
  }
  // Fallback for addresses with no specific description.
  return "?";
}
1165 
label() const1166 const char* ProfilingFrameIterator::label() const {
1167   MOZ_ASSERT(!done());
1168 
1169   // Use the same string for both time inside and under so that the two
1170   // entries will be coalesced by the profiler.
1171   // Must be kept in sync with /tools/profiler/tests/test_asm.js
1172   static const char* importJitDescription = "fast exit trampoline (in wasm)";
1173   static const char* importInterpDescription = "slow exit trampoline (in wasm)";
1174   static const char* builtinNativeDescription =
1175       "fast exit trampoline to native (in wasm)";
1176   static const char* trapDescription = "trap handling (in wasm)";
1177   static const char* debugTrapDescription = "debug trap handling (in wasm)";
1178 
1179   if (!exitReason_.isFixed())
1180     return ThunkedNativeToDescription(exitReason_.symbolic());
1181 
1182   switch (exitReason_.fixed()) {
1183     case ExitReason::Fixed::None:
1184       break;
1185     case ExitReason::Fixed::ImportJit:
1186       return importJitDescription;
1187     case ExitReason::Fixed::ImportInterp:
1188       return importInterpDescription;
1189     case ExitReason::Fixed::BuiltinNative:
1190       return builtinNativeDescription;
1191     case ExitReason::Fixed::Trap:
1192       return trapDescription;
1193     case ExitReason::Fixed::DebugTrap:
1194       return debugTrapDescription;
1195     case ExitReason::Fixed::FakeInterpEntry:
1196       return "slow entry trampoline (in wasm)";
1197   }
1198 
1199   switch (codeRange_->kind()) {
1200     case CodeRange::Function:
1201       return code_->profilingLabel(codeRange_->funcIndex());
1202     case CodeRange::InterpEntry:
1203       MOZ_CRASH("should be an ExitReason");
1204     case CodeRange::JitEntry:
1205       return "fast entry trampoline (in wasm)";
1206     case CodeRange::ImportJitExit:
1207       return importJitDescription;
1208     case CodeRange::BuiltinThunk:
1209       return builtinNativeDescription;
1210     case CodeRange::ImportInterpExit:
1211       return importInterpDescription;
1212     case CodeRange::TrapExit:
1213       return trapDescription;
1214     case CodeRange::OldTrapExit:
1215       return trapDescription;
1216     case CodeRange::DebugTrap:
1217       return debugTrapDescription;
1218     case CodeRange::OutOfBoundsExit:
1219       return "out-of-bounds stub (in wasm)";
1220     case CodeRange::UnalignedExit:
1221       return "unaligned trap stub (in wasm)";
1222     case CodeRange::FarJumpIsland:
1223       return "interstitial (in wasm)";
1224     case CodeRange::Throw:
1225       MOZ_FALLTHROUGH;
1226     case CodeRange::Interrupt:
1227       MOZ_CRASH("does not have a frame");
1228   }
1229 
1230   MOZ_CRASH("bad code range kind");
1231 }
1232 
LookupFaultingInstance(const ModuleSegment & codeSegment,void * pc,void * fp)1233 Instance* wasm::LookupFaultingInstance(const ModuleSegment& codeSegment,
1234                                        void* pc, void* fp) {
1235   // Assume bug-caused faults can be raised at any PC and apply the logic of
1236   // ProfilingFrameIterator to reject any pc outside the (post-prologue,
1237   // pre-epilogue) body of a wasm function. This is exhaustively tested by the
1238   // simulators which call this function at every load/store before even
1239   // knowing whether there is a fault.
1240 
1241   const CodeRange* codeRange = codeSegment.code().lookupFuncRange(pc);
1242   if (!codeRange) return nullptr;
1243 
1244   size_t offsetInModule = ((uint8_t*)pc) - codeSegment.base();
1245   if ((offsetInModule >= codeRange->funcNormalEntry() &&
1246        offsetInModule < codeRange->funcNormalEntry() + SetFP) ||
1247       (offsetInModule >= codeRange->ret() - PoppedFP &&
1248        offsetInModule <= codeRange->ret())) {
1249     return nullptr;
1250   }
1251 
1252   Instance* instance = reinterpret_cast<Frame*>(fp)->tls->instance;
1253 
1254   // TODO: In the special case of a cross-instance indirect call bad-signature
1255   // fault, fp can point to the caller frame which is in a different
1256   // instance/module than pc. This special case should go away when old-style
1257   // traps go away and signal handling is reworked.
1258   // MOZ_RELEASE_ASSERT(&instance->code() == &codeSegment.code());
1259 
1260   return instance;
1261 }
1262 
InCompiledCode(void * pc)1263 bool wasm::InCompiledCode(void* pc) {
1264   if (LookupCodeSegment(pc)) return true;
1265 
1266   const CodeRange* codeRange;
1267   uint8_t* codeBase;
1268   return LookupBuiltinThunk(pc, &codeRange, &codeBase);
1269 }
1270