1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/Bailouts.h"
8 #include "jit/JitCompartment.h"
9 #include "jit/JitFrames.h"
10 #include "jit/Linker.h"
11 #ifdef JS_ION_PERF
12 #include "jit/PerfSpewer.h"
13 #endif
14 #include "jit/arm64/SharedICHelpers-arm64.h"
15 #include "jit/VMFunctions.h"
16
17 #include "jit/MacroAssembler-inl.h"
18 #include "jit/SharedICHelpers-inl.h"
19
20 using namespace js;
21 using namespace js::jit;
22
23 // All registers to save and restore. This includes the stack pointer, since we
24 // use the ability to reference register values on the stack by index.
25 static const LiveRegisterSet AllRegs = LiveRegisterSet(
26 GeneralRegisterSet(Registers::AllMask &
27 ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28)),
28 FloatRegisterSet(FloatRegisters::AllMask));
29
/* This method generates a trampoline on ARM64 for a C++ function with
 * a signature of the form:
 *    bool enterJit(void* code, int argc, Value* argv,
 *                  JSObject* scopeChain, Value* vp)
 * ...using the standard AArch64 calling convention.
 * (See the reg_* aliases at the top of the function for the full,
 * eight-argument EnterJitData layout actually consumed.)
 */
generateEnterJIT(JSContext * cx,MacroAssembler & masm)36 void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
37 enterJITOffset_ = startTrampolineCode(masm);
38
39 const Register reg_code = IntArgReg0; // EnterJitData::jitcode.
40 const Register reg_argc = IntArgReg1; // EnterJitData::maxArgc.
41 const Register reg_argv = IntArgReg2; // EnterJitData::maxArgv.
42 const Register reg_osrFrame = IntArgReg3; // EnterJitData::osrFrame.
43 const Register reg_callee = IntArgReg4; // EnterJitData::calleeToken.
44 const Register reg_scope = IntArgReg5; // EnterJitData::scopeChain.
45 const Register reg_osrNStack =
46 IntArgReg6; // EnterJitData::osrNumStackValues.
47 const Register reg_vp = IntArgReg7; // Address of EnterJitData::result.
48
49 MOZ_ASSERT(OsrFrameReg == IntArgReg3);
50
51 // During the pushes below, use the normal stack pointer.
52 masm.SetStackPointer64(sp);
53
54 // Save old frame pointer and return address; set new frame pointer.
55 masm.push(r29, r30);
56 masm.moveStackPtrTo(r29);
57
58 // Save callee-save integer registers.
59 // Also save x7 (reg_vp) and x30 (lr), for use later.
60 masm.push(r19, r20, r21, r22);
61 masm.push(r23, r24, r25, r26);
62 masm.push(r27, r28, r7, r30);
63
64 // Save callee-save floating-point registers.
65 // AArch64 ABI specifies that only the lower 64 bits must be saved.
66 masm.push(d8, d9, d10, d11);
67 masm.push(d12, d13, d14, d15);
68
69 #ifdef DEBUG
70 // Emit stack canaries.
71 masm.movePtr(ImmWord(0xdeadd00d), r23);
72 masm.movePtr(ImmWord(0xdeadd11d), r24);
73 masm.push(r23, r24);
74 #endif
75
76 // Common code below attempts to push single registers at a time,
77 // which breaks the stack pointer's 16-byte alignment requirement.
78 // Note that movePtr() is invalid because StackPointer is treated as xzr.
79 //
80 // FIXME: After testing, this entire function should be rewritten to not
81 // use the PseudoStackPointer: since the amount of data pushed is
82 // precalculated, we can just allocate the whole frame header at once and
83 // index off sp. This will save a significant number of instructions where
84 // Push() updates sp.
85 masm.Mov(PseudoStackPointer64, sp);
86 masm.SetStackPointer64(PseudoStackPointer64);
87
88 // Save the stack pointer at this point for Baseline OSR.
89 masm.moveStackPtrTo(BaselineFrameReg);
90 // Remember stack depth without padding and arguments.
91 masm.moveStackPtrTo(r19);
92
93 // If constructing, include newTarget in argument vector.
94 {
95 Label noNewTarget;
96 Imm32 constructingToken(CalleeToken_FunctionConstructing);
97 masm.branchTest32(Assembler::Zero, reg_callee, constructingToken,
98 &noNewTarget);
99 masm.add32(Imm32(1), reg_argc);
100 masm.bind(&noNewTarget);
101 }
102
103 // JitFrameLayout is as follows (higher is higher in memory):
104 // N*8 - [ JS argument vector ] (base 16-byte aligned)
105 // 8 - numActualArgs
106 // 8 - calleeToken (16-byte aligned)
107 // 8 - frameDescriptor
108 // 8 - returnAddress (16-byte aligned, pushed by callee)
109
110 // Push the argument vector onto the stack.
111 // WARNING: destructively modifies reg_argv
112 {
113 vixl::UseScratchRegisterScope temps(&masm.asVIXL());
114
115 const ARMRegister tmp_argc = temps.AcquireX();
116 const ARMRegister tmp_sp = temps.AcquireX();
117
118 Label noArguments;
119 Label loopHead;
120
121 masm.movePtr(reg_argc, tmp_argc.asUnsized());
122
123 // sp -= 8
124 // Since we're using PostIndex Str below, this is necessary to avoid
125 // overwriting the Gecko Profiler mark pushed above.
126 masm.subFromStackPtr(Imm32(8));
127
128 // sp -= 8 * argc
129 masm.Sub(PseudoStackPointer64, PseudoStackPointer64,
130 Operand(tmp_argc, vixl::SXTX, 3));
131
132 // Give sp 16-byte alignment and sync stack pointers.
133 masm.andToStackPtr(Imm32(~0xff));
134 masm.moveStackPtrTo(tmp_sp.asUnsized());
135
136 masm.branchTestPtr(Assembler::Zero, reg_argc, reg_argc, &noArguments);
137
138 // Begin argument-pushing loop.
139 // This could be optimized using Ldp and Stp.
140 {
141 masm.bind(&loopHead);
142
143 // Load an argument from argv, then increment argv by 8.
144 masm.Ldr(x24, MemOperand(ARMRegister(reg_argv, 64), Operand(8),
145 vixl::PostIndex));
146
147 // Store the argument to tmp_sp, then increment tmp_sp by 8.
148 masm.Str(x24, MemOperand(tmp_sp, Operand(8), vixl::PostIndex));
149
150 // Set the condition codes for |cmp tmp_argc, 2| (using the old value).
151 masm.Subs(tmp_argc, tmp_argc, Operand(1));
152
153 // Branch if arguments remain.
154 masm.B(&loopHead, vixl::Condition::ge);
155 }
156
157 masm.bind(&noArguments);
158 }
159 masm.checkStackAlignment();
160
161 // Push the number of actual arguments and the calleeToken.
162 // The result address is used to store the actual number of arguments
163 // without adding an argument to EnterJIT.
164 masm.unboxInt32(Address(reg_vp, 0x0), ip0);
165 masm.push(ip0, reg_callee);
166 masm.checkStackAlignment();
167
168 // Calculate the number of bytes pushed so far.
169 masm.subStackPtrFrom(r19);
170
171 // Push the frameDescriptor.
172 masm.makeFrameDescriptor(r19, JitFrame_CppToJSJit, JitFrameLayout::Size());
173 masm.Push(r19);
174
175 Label osrReturnPoint;
176 {
177 // Check for Interpreter -> Baseline OSR.
178 Label notOsr;
179 masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, ¬Osr);
180
181 // Push return address and previous frame pointer.
182 masm.Adr(ScratchReg2_64, &osrReturnPoint);
183 masm.push(ScratchReg2, BaselineFrameReg);
184
185 // Reserve frame.
186 masm.subFromStackPtr(Imm32(BaselineFrame::Size()));
187 masm.moveStackPtrTo(BaselineFrameReg);
188
189 // Reserve space for locals and stack values.
190 masm.Lsl(w19, ARMRegister(reg_osrNStack, 32),
191 3); // w19 = num_stack_values * sizeof(Value).
192 masm.subFromStackPtr(r19);
193
194 // Enter exit frame.
195 masm.addPtr(
196 Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), r19);
197 masm.makeFrameDescriptor(r19, JitFrame_BaselineJS, ExitFrameLayout::Size());
198 masm.asVIXL().Push(x19, xzr); // Push xzr for a fake return address.
199 // No GC things to mark: push a bare token.
200 masm.loadJSContext(r19);
201 masm.enterFakeExitFrame(r19, r19, ExitFrameType::Bare);
202
203 masm.push(BaselineFrameReg, reg_code);
204
205 // Initialize the frame, including filling in the slots.
206 masm.setupUnalignedABICall(r19);
207 masm.passABIArg(BaselineFrameReg); // BaselineFrame.
208 masm.passABIArg(reg_osrFrame); // InterpreterFrame.
209 masm.passABIArg(reg_osrNStack);
210 masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::InitBaselineFrameForOsr),
211 MoveOp::GENERAL,
212 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
213
214 masm.pop(r19, BaselineFrameReg);
215 MOZ_ASSERT(r19 != ReturnReg);
216
217 masm.addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));
218 masm.addPtr(Imm32(BaselineFrame::Size()), BaselineFrameReg);
219
220 Label error;
221 masm.branchIfFalseBool(ReturnReg, &error);
222
223 masm.jump(r19);
224
225 // OOM: load error value, discard return address and previous frame
226 // pointer, and return.
227 masm.bind(&error);
228 masm.Add(masm.GetStackPointer64(), BaselineFrameReg64,
229 Operand(2 * sizeof(uintptr_t)));
230 masm.syncStackPtr();
231 masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
232 masm.B(&osrReturnPoint);
233
234 masm.bind(¬Osr);
235 masm.movePtr(reg_scope, R1_);
236 }
237
238 // Call function.
239 // Since AArch64 doesn't have the pc register available, the callee must push
240 // lr.
241 masm.callJitNoProfiler(reg_code);
242
243 // Interpreter -> Baseline OSR will return here.
244 masm.bind(&osrReturnPoint);
245
246 // Return back to SP.
247 masm.Pop(r19);
248 masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(),
249 Operand(x19, vixl::LSR, FRAMESIZE_SHIFT));
250 masm.syncStackPtr();
251 masm.SetStackPointer64(sp);
252
253 #ifdef DEBUG
254 // Check that canaries placed on function entry are still present.
255 masm.pop(r24, r23);
256 Label x23OK, x24OK;
257
258 masm.branchPtr(Assembler::Equal, r23, ImmWord(0xdeadd00d), &x23OK);
259 masm.breakpoint();
260 masm.bind(&x23OK);
261
262 masm.branchPtr(Assembler::Equal, r24, ImmWord(0xdeadd11d), &x24OK);
263 masm.breakpoint();
264 masm.bind(&x24OK);
265 #endif
266
267 // Restore callee-save floating-point registers.
268 masm.pop(d15, d14, d13, d12);
269 masm.pop(d11, d10, d9, d8);
270
271 // Restore callee-save integer registers.
272 // Also restore x7 (reg_vp) and x30 (lr).
273 masm.pop(r30, r7, r28, r27);
274 masm.pop(r26, r25, r24, r23);
275 masm.pop(r22, r21, r20, r19);
276
277 // Store return value (in JSReturnReg = x2 to just-popped reg_vp).
278 masm.storeValue(JSReturnOperand, Address(reg_vp, 0));
279
280 // Restore old frame pointer.
281 masm.pop(r30, r29);
282
283 // Return using the value popped into x30.
284 masm.abiret();
285
286 // Reset stack pointer.
287 masm.SetStackPointer64(PseudoStackPointer64);
288 }
289
// Generate the invalidation thunk: snapshots every register into an
// InvalidationBailoutStack, calls InvalidationBailout(), discards the
// invalidated Ion frame, and jumps to the shared bailout tail (which expects
// the BaselineBailoutInfo pointer in r2 — see generateBailoutTailStub).
void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
  invalidatorOffset_ = startTrampolineCode(masm);

  // NOTE(review): r0-r3 appear to be pushed as placeholder words sitting
  // between the Ion frame and the register snapshot — confirm against
  // InvalidationBailoutStack's layout.
  masm.push(r0, r1, r2, r3);

  // Spill all registers (including SP) so they can be inspected by index.
  masm.PushRegsInMask(AllRegs);
  // r0 <- pointer to the snapshot just built (first ABI argument).
  masm.moveStackPtrTo(r0);

  // Carve out two outparam slots below the snapshot:
  //   x1 -> frame-size outparam, x2 -> BaselineBailoutInfo* outparam.
  masm.Sub(x1, masm.GetStackPointer64(), Operand(sizeof(size_t)));
  masm.Sub(x2, masm.GetStackPointer64(),
           Operand(sizeof(size_t) + sizeof(void*)));
  // Move sp below both slots so the callee may not clobber them.
  masm.moveToStackPtr(r2);

  masm.setupUnalignedABICall(r10);
  masm.passABIArg(r0);
  masm.passABIArg(r1);
  masm.passABIArg(r2);

  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout),
                   MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);

  // Pop the outparams back: r2 = bailout info, r1 = size of the bailed frame.
  masm.pop(r2, r1);

  // Discard the register snapshot and the invalidated frame (r1 bytes).
  masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(), x1);
  masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(),
           Operand(sizeof(InvalidationBailoutStack)));
  masm.syncStackPtr();

  // Jump to the shared bailout tail with the bailout info in r2.
  masm.jump(bailoutTail);
}
320
// Generate the arguments rectifier: invoked when a function is called with
// fewer actual arguments than its |nargs|. Pads the missing formals with
// |undefined|, re-copies the actuals plus |this| (and new.target when
// constructing), pushes a JitFrame_Rectifier frame, and calls the target's
// JIT code.
void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm) {
  argumentsRectifierOffset_ = startTrampolineCode(masm);

  // Save the return address for later.
  masm.push(lr);

  // Load the information that the rectifier needs from the stack:
  // w0 = number of actual arguments, x1 = callee token.
  masm.Ldr(w0, MemOperand(masm.GetStackPointer64(),
                          RectifierFrameLayout::offsetOfNumActualArgs()));
  masm.Ldr(x1, MemOperand(masm.GetStackPointer64(),
                          RectifierFrameLayout::offsetOfCalleeToken()));

  // Extract a JSFunction pointer from the callee token and keep the
  // intermediary to avoid later recalculation.
  masm.And(x5, x1, Operand(CalleeTokenMask));

  // Get the arguments from the function object: x6 = fun->nargs.
  masm.Ldrh(x6, MemOperand(x5, JSFunction::offsetOfNargs()));

  static_assert(CalleeToken_FunctionConstructing == 0x1,
                "Constructing must be low-order bit");
  // x4 = 1 when constructing (new.target must also be copied), else 0.
  masm.And(x4, x1, Operand(CalleeToken_FunctionConstructing));
  // x7 = number of Values the rectified frame needs (nargs + constructing).
  masm.Add(x7, x6, x4);

  // Copy the number of actual arguments into r8.
  masm.mov(r0, r8);

  // Calculate the position that our arguments are at before sp gets modified.
  masm.Add(x3, masm.GetStackPointer64(), Operand(x8, vixl::LSL, 3));
  masm.Add(x3, x3, Operand(sizeof(RectifierFrameLayout)));

  // Pad to a multiple of 16 bytes. This neglects the |this| value,
  // which will also be pushed, because the rest of the frame will
  // round off that value. See pushes of |argc|, |callee| and |desc| below.
  Label noPadding;
  masm.Tbnz(x7, 0, &noPadding);
  masm.asVIXL().Push(xzr);
  masm.Add(x7, x7, Operand(1));
  masm.bind(&noPadding);

  {
    Label notConstructing;
    masm.Cbz(x4, &notConstructing);

    // new.target lives at the end of the pushed args
    // NB: The arg vector holder starts at the beginning of the last arg,
    //     add a value to get to argv[argc]
    masm.loadPtr(Address(r3, sizeof(Value)), r4);
    masm.Push(r4);

    masm.bind(&notConstructing);
  }

  // Calculate the number of undefineds that need to be pushed
  // (w2 = nargs - numActualArgs).
  masm.Sub(w2, w6, w8);

  // Put an undefined in a register so it can be pushed.
  masm.moveValue(UndefinedValue(), ValueOperand(r4));

  // Push undefined N times.
  {
    Label undefLoopTop;
    masm.bind(&undefLoopTop);
    masm.Push(r4);
    masm.Subs(w2, w2, Operand(1));
    masm.B(&undefLoopTop, Assembler::NonZero);
  }

  // Arguments copy loop. Copy for x8 >= 0 to include |this|.
  {
    Label copyLoopTop;
    masm.bind(&copyLoopTop);
    // Read the highest remaining argument, then step x3 down by one Value.
    masm.Ldr(x4, MemOperand(x3, -sizeof(Value), vixl::PostIndex));
    masm.Push(r4);
    masm.Subs(x8, x8, Operand(1));
    masm.B(&copyLoopTop, Assembler::NotSigned);
  }

  // Fix up the size of the stack frame. +1 accounts for |this|.
  masm.Add(x6, x7, Operand(1));
  masm.Lsl(x6, x6, 3);

  // Make that into a frame descriptor.
  masm.makeFrameDescriptor(r6, JitFrame_Rectifier, JitFrameLayout::Size());

  masm.push(r0,  // Number of actual arguments.
            r1,  // Callee token.
            r6); // Frame descriptor.

  // Load the address of the code that is getting called.
  masm.loadJitCodeRaw(r5, r3);
  argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(r3);

  // Clean up!
  // Pop descriptor, callee token and argc (3 words) in one post-indexed load,
  // keeping the descriptor in x4.
  masm.Ldr(x4, MemOperand(masm.GetStackPointer64(), 24, vixl::PostIndex));

  // Now that the size of the stack frame sans the fixed frame has been loaded,
  // add that onto the stack pointer.
  masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(),
           Operand(x4, vixl::LSR, FRAMESIZE_SHIFT));

  // Pop the return address from earlier and branch.
  masm.ret();
}
426
// Build a bailout frame on the stack: a snapshot of all physical GP
// registers, all physical FP registers (low 64 bits), then lr (x30) and the
// frame class. A pointer to the resulting structure is left in |spArg|.
static void PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass,
                             Register spArg) {
  // the stack should look like:
  // [IonFrame]
  // bailoutFrame.registersnapshot
  // bailoutFrame.fpsnapshot
  // bailoutFrame.snapshotOffset
  // bailoutFrame.frameSize

  // STEP 1a: Save our register sets to the stack so Bailout() can read
  // everything.
  // sp % 8 == 0

  // We don't have to push everything, but this is likely easier.
  // Setting regs_.
  masm.subFromStackPtr(Imm32(Registers::TotalPhys * sizeof(void*)));
  for (uint32_t i = 0; i < Registers::TotalPhys; i += 2) {
    // Store registers two at a time with Stp, at fixed offsets from sp.
    masm.Stp(ARMRegister::XRegFromCode(i), ARMRegister::XRegFromCode(i + 1),
             MemOperand(masm.GetStackPointer64(), i * sizeof(void*)));
  }

  // Since our datastructures for stack inspection are compile-time fixed,
  // if there are only 16 double registers, then we need to reserve
  // space on the stack for the missing 16.
  masm.subFromStackPtr(Imm32(FloatRegisters::TotalPhys * sizeof(double)));
  for (uint32_t i = 0; i < FloatRegisters::TotalPhys; i += 2) {
    masm.Stp(ARMFPRegister::DRegFromCode(i), ARMFPRegister::DRegFromCode(i + 1),
             MemOperand(masm.GetStackPointer64(), i * sizeof(void*)));
  }

  // STEP 1b: Push both the "return address" of the code that bailed out
  //          (the address of the instruction after the call that got us
  //          here — on AArch64 this is still in lr/x30) and the frame
  //          class onto the stack.

  // Now place the frameClass onto the stack, via a register.
  masm.Mov(x9, frameClass);

  // And onto the stack. Since the stack is full, we need to put this one past
  // the end of the current stack. Sadly, the ABI says that we need to always
  // point to the lowest place that has been written. The OS is free to do
  // whatever it wants below sp.
  masm.push(r30, r9);
  masm.moveStackPtrTo(spArg);
}
474
// Finish a bailout: snapshot the register state (via PushBailoutFrame), call
// Bailout() to build a BaselineBailoutInfo, pop the bailed-out frame, and
// jump to the shared bailout tail.
static void GenerateBailoutThunk(MacroAssembler& masm, uint32_t frameClass,
                                 Label* bailoutTail) {
  PushBailoutFrame(masm, frameClass, r0);

  // STEP 1c: Call the bailout function, giving a pointer to the
  //          structure we just blitted onto the stack.
  // Make space for the BaselineBailoutInfo* outparam. Two pointer-sized
  // slots are reserved (keeping sp 16-byte aligned); only the first slot
  // is read back below.
  const int sizeOfBailoutInfo = sizeof(void*) * 2;
  masm.reserveStack(sizeOfBailoutInfo);
  masm.moveStackPtrTo(r1);

  masm.setupUnalignedABICall(r2);
  masm.passABIArg(r0);
  masm.passABIArg(r1);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout), MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckOther);

  // r2 <- the BaselineBailoutInfo* written by Bailout(); then release the
  // outparam slots.
  masm.Ldr(x2, MemOperand(masm.GetStackPointer64(), 0));
  masm.addToStackPtr(Imm32(sizeOfBailoutInfo));

  // Size of the register snapshot pushed by PushBailoutFrame.
  static const uint32_t BailoutDataSize =
      sizeof(void*) * Registers::Total +
      sizeof(double) * FloatRegisters::TotalPhys;

  if (frameClass == NO_FRAME_SIZE_CLASS_ID) {
    vixl::UseScratchRegisterScope temps(&masm.asVIXL());
    const ARMRegister scratch64 = temps.AcquireX();

    // NOTE(review): this reads the word one slot above sp (the x30 slot
    // pushed by PushBailoutFrame) as the bailed frame's size — confirm
    // against the layout Ion emits before jumping to the bailout handler.
    masm.Ldr(scratch64,
             MemOperand(masm.GetStackPointer64(), sizeof(uintptr_t)));
    masm.addToStackPtr(Imm32(BailoutDataSize + 32));
    masm.addToStackPtr(scratch64.asUnsized());
  } else {
    // Fixed-size frame class: the frame size is known statically.
    uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize();
    masm.addToStackPtr(Imm32(frameSize + BailoutDataSize + sizeof(void*)));
  }

  // Jump to shared bailout tail. The BailoutInfo pointer has to be in r2
  // (loaded above; consumed by generateBailoutTail(r1, r2)).
  masm.jump(bailoutTail);
}
516
// ARM64 routes every bailout through the NO_FRAME_SIZE_CLASS_ID thunk (see
// generateBailoutHandler), so bailout tables are never generated here.
JitRuntime::BailoutTable JitRuntime::generateBailoutTable(MacroAssembler& masm,
                                                          Label* bailoutTail,
                                                          uint32_t frameClass) {
  MOZ_CRASH("arm64 does not use bailout tables");
}
522
// Generate the single shared bailout handler: all ARM64 bailouts use the
// NO_FRAME_SIZE_CLASS_ID path, where the frame size is read from the stack
// rather than derived from a bailout-table frame class.
void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
                                        Label* bailoutTail) {
  bailoutHandlerOffset_ = startTrampolineCode(masm);

  GenerateBailoutThunk(masm, NO_FRAME_SIZE_CLASS_ID, bailoutTail);
}
529
// Generate a trampoline that adapts a call from JIT code to the C++ VM
// function described by |f|: link up an exit frame, marshal the explicit
// arguments from the stack into ABI argument registers, reserve and later
// read back any outparam, call the function, test for failure, and return
// past the exit frame and arguments. The wrapper's code offset is recorded
// in functionWrappers_.
bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
                                   const VMFunction& f) {
  MOZ_ASSERT(functionWrappers_);
  MOZ_ASSERT(functionWrappers_->initialized());

  uint32_t wrapperOffset = startTrampolineCode(masm);

  // Avoid conflicts with argument registers while discarding the result after
  // the function call.
  AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);

  static_assert(
      (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
      "Wrapper register set must be a superset of the Volatile register set.");

  // Unlike on other platforms, it is the responsibility of the VM *callee* to
  // push the return address, while the caller must ensure that the address
  // is stored in lr on entry. This allows the VM wrapper to work with both
  // direct calls and tail calls.
  masm.push(lr);

  // First argument is the JSContext.
  Register reg_cx = IntArgReg0;
  regs.take(reg_cx);

  // Stack is:
  //    ... frame ...
  //  +12 [args]
  //  +8  descriptor
  //  +0  returnAddress (pushed by this function, caller sets as lr)
  //
  //  We're aligned to an exit frame, so link it up.
  masm.loadJSContext(reg_cx);
  masm.enterExitFrame(reg_cx, regs.getAny(), &f);

  // Save the current stack pointer as the base for copying arguments.
  Register argsBase = InvalidReg;
  if (f.explicitArgs) {
    // argsBase can't be an argument register. Bad things would happen if
    // the MoveResolver didn't throw an assertion failure first.
    argsBase = r8;
    regs.take(argsBase);
    // argsBase = sp + exit frame size = address of the first explicit arg.
    masm.Add(ARMRegister(argsBase, 64), masm.GetStackPointer64(),
             Operand(ExitFrameLayout::SizeWithFooter()));
  }

  // Reserve space for any outparameter; |outReg| points at the reserved slot.
  Register outReg = InvalidReg;
  switch (f.outParam) {
    case Type_Value:
      outReg = regs.takeAny();
      masm.reserveStack(sizeof(Value));
      masm.moveStackPtrTo(outReg);
      break;

    case Type_Handle:
      outReg = regs.takeAny();
      masm.PushEmptyRooted(f.outParamRootType);
      masm.moveStackPtrTo(outReg);
      break;

    case Type_Int32:
    case Type_Bool:
      outReg = regs.takeAny();
      // A full 8 bytes are reserved even for 32-bit outparams.
      masm.reserveStack(sizeof(int64_t));
      masm.moveStackPtrTo(outReg);
      break;

    case Type_Double:
      outReg = regs.takeAny();
      masm.reserveStack(sizeof(double));
      masm.moveStackPtrTo(outReg);
      break;

    case Type_Pointer:
      outReg = regs.takeAny();
      masm.reserveStack(sizeof(uintptr_t));
      masm.moveStackPtrTo(outReg);
      break;

    default:
      MOZ_ASSERT(f.outParam == Type_Void);
      break;
  }

  if (!generateTLEnterVM(masm, f)) return false;

  masm.setupUnalignedABICall(regs.getAny());
  masm.passABIArg(reg_cx);

  size_t argDisp = 0;

  // Copy arguments from the stack (at argsBase + argDisp) into ABI position.
  for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
    MoveOperand from;
    switch (f.argProperties(explicitArg)) {
      case VMFunction::WordByValue:
        masm.passABIArg(MoveOperand(argsBase, argDisp),
                        (f.argPassedInFloatReg(explicitArg) ? MoveOp::DOUBLE
                                                            : MoveOp::GENERAL));
        argDisp += sizeof(void*);
        break;

      case VMFunction::WordByRef:
        // Pass the address of the stack slot rather than its contents.
        masm.passABIArg(
            MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
            MoveOp::GENERAL);
        argDisp += sizeof(void*);
        break;

      case VMFunction::DoubleByValue:
      case VMFunction::DoubleByRef:
        MOZ_CRASH("NYI: AArch64 callVM should not be used with 128bit values.");
    }
  }

  // Copy the semi-implicit outparam, if any.
  // It is not a C++-abi outparam, which would get passed in the
  // outparam register, but a real parameter to the function, which
  // was stack-allocated above.
  if (outReg != InvalidReg) masm.passABIArg(outReg);

  masm.callWithABI(f.wrapped, MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (!generateTLExitVM(masm, f)) return false;

  // SP is used to transfer stack across call boundaries.
  if (!masm.GetStackPointer64().Is(vixl::sp))
    masm.Mov(masm.GetStackPointer64(), vixl::sp);

  // Test for failure; on failure, jump to the shared exception tail.
  switch (f.failType()) {
    case Type_Object:
      masm.branchTestPtr(Assembler::Zero, r0, r0, masm.failureLabel());
      break;
    case Type_Bool:
      masm.branchIfFalseBool(r0, masm.failureLabel());
      break;
    case Type_Void:
      break;
    default:
      MOZ_CRASH("unknown failure kind");
  }

  // Load the outparam and free any allocated stack.
  switch (f.outParam) {
    case Type_Value:
      masm.Ldr(ARMRegister(JSReturnReg, 64),
               MemOperand(masm.GetStackPointer64()));
      masm.freeStack(sizeof(Value));
      break;

    case Type_Handle:
      masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
      break;

    case Type_Int32:
      masm.Ldr(ARMRegister(ReturnReg, 32),
               MemOperand(masm.GetStackPointer64()));
      masm.freeStack(sizeof(int64_t));
      break;

    case Type_Bool:
      masm.Ldrb(ARMRegister(ReturnReg, 32),
                MemOperand(masm.GetStackPointer64()));
      masm.freeStack(sizeof(int64_t));
      break;

    case Type_Double:
      MOZ_ASSERT(cx->runtime()->jitSupportsFloatingPoint);
      masm.Ldr(ARMFPRegister(ReturnDoubleReg, 64),
               MemOperand(masm.GetStackPointer64()));
      masm.freeStack(sizeof(double));
      break;

    case Type_Pointer:
      masm.Ldr(ARMRegister(ReturnReg, 64),
               MemOperand(masm.GetStackPointer64()));
      masm.freeStack(sizeof(uintptr_t));
      break;

    default:
      MOZ_ASSERT(f.outParam == Type_Void);
      break;
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (f.returnsData() && JitOptions.spectreJitToCxxCalls)
    masm.speculationBarrier();

  masm.leaveExitFrame();
  // Return, popping the exit frame, the explicit argument slots, and any
  // extra Values the function asks to be popped.
  masm.retn(Imm32(sizeof(ExitFrameLayout) +
                  f.explicitStackSlots() * sizeof(void*) +
                  f.extraValuesToPop * sizeof(Value)));

  return functionWrappers_->putNew(&f, wrapperOffset);
}
729
// Generate the pre-write barrier trampoline for |type|: a fast path
// (emitPreBarrierFastPath) filters out cells that do not need marking; the
// slow path saves all volatile registers and calls the C++ marking function
// with (runtime, cell-in-PreBarrierReg).
uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
                                        MIRType type) {
  uint32_t offset = startTrampolineCode(masm);

  MOZ_ASSERT(PreBarrierReg == r1);
  Register temp1 = r2;
  Register temp2 = r3;
  Register temp3 = r4;
  masm.push(temp1);
  masm.push(temp2);
  masm.push(temp3);

  // Branches to |noBarrier| when marking is unnecessary.
  Label noBarrier;
  masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
                              &noBarrier);

  // Call into C++ to mark this GC thing.
  masm.pop(temp3);
  masm.pop(temp2);
  masm.pop(temp1);

  LiveRegisterSet regs =
      LiveRegisterSet(GeneralRegisterSet(Registers::VolatileMask),
                      FloatRegisterSet(FloatRegisters::VolatileMask));

  // Also preserve the return address.
  regs.add(lr);

  masm.PushRegsInMask(regs);

  masm.movePtr(ImmPtr(cx->runtime()), r3);

  masm.setupUnalignedABICall(r0);
  masm.passABIArg(r3);
  masm.passABIArg(PreBarrierReg);
  masm.callWithABI(JitMarkFunction(type));

  // Pop the volatile regs and restore LR.
  masm.PopRegsInMask(regs);
  masm.abiret();

  // Fast-path exit: nothing to mark, restore temps and return.
  masm.bind(&noBarrier);
  masm.pop(temp3);
  masm.pop(temp2);
  masm.pop(temp1);
  masm.abiret();

  return offset;
}
779
780 typedef bool (*HandleDebugTrapFn)(JSContext*, BaselineFrame*, uint8_t*, bool*);
781 static const VMFunction HandleDebugTrapInfo =
782 FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap, "HandleDebugTrap");
783
// Generate the debug trap handler, invoked from Baseline code for debugger
// traps. Calls the HandleDebugTrap VM function; depending on its boolean
// result, either returns to the trapping code or performs a forced return
// from the current JS frame.
JitCode* JitRuntime::generateDebugTrapHandler(JSContext* cx) {
  MacroAssembler masm(cx);
#ifndef JS_USE_LINK_REGISTER
  // The first value contains the return address,
  // which we pull into ICTailCallReg for tail calls.
  masm.setFramePushed(sizeof(intptr_t));
#endif

  Register scratch1 = r0;
  Register scratch2 = r1;

  // Load BaselineFrame pointer into scratch1.
  masm.Sub(ARMRegister(scratch1, 64), BaselineFrameReg64,
           Operand(BaselineFrame::Size()));

  // Enter a stub frame and call the HandleDebugTrap VM function. Ensure the
  // stub frame has a nullptr ICStub pointer, since this pointer is marked
  // during GC.
  masm.movePtr(ImmPtr(nullptr), ICStubReg);
  EmitBaselineEnterStubFrame(masm, scratch2);

  TrampolinePtr code =
      cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
  // Push lr and the BaselineFrame pointer — presumably the pc and frame
  // arguments of HandleDebugTrapFn, marshalled by the VM wrapper; confirm
  // ordering against the wrapper's argument copying.
  masm.asVIXL().Push(vixl::lr, ARMRegister(scratch1, 64));
  EmitBaselineCallVM(code, masm);

  EmitBaselineLeaveStubFrame(masm);

  // If the stub returns |true|, we have to perform a forced return (return
  // from the JS frame). If the stub returns |false|, just return from the
  // trap stub so that execution continues at the current pc.
  Label forcedReturn;
  masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);
  masm.abiret();

  masm.bind(&forcedReturn);
  // Load the frame's return value and unwind the stack back to the frame.
  masm.loadValue(
      Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
      JSReturnOperand);
  masm.Mov(masm.GetStackPointer64(), BaselineFrameReg64);

  masm.pop(BaselineFrameReg, lr);
  masm.syncStackPtr();
  masm.abiret();

  Linker linker(masm);
  AutoFlushICache afc("DebugTrapHandler");
  JitCode* codeDbg = linker.newCode(cx, CodeKind::Other);

#ifdef JS_ION_PERF
  writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
#endif

  return codeDbg;
}
839
// Generate the exception tail: binds masm.failureLabel() (the target of the
// VM wrappers' failure branches) and forwards to the C++ failure handler.
void JitRuntime::generateExceptionTailStub(MacroAssembler& masm, void* handler,
                                           Label* profilerExitTail) {
  exceptionTailOffset_ = startTrampolineCode(masm);

  masm.bind(masm.failureLabel());
  masm.handleFailureWithHandlerTail(handler, profilerExitTail);
}
847
// Generate the shared bailout tail, jumped to by the invalidator and the
// bailout thunk; both leave the BaselineBailoutInfo pointer in r2 before
// jumping here.
void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
                                         Label* bailoutTail) {
  bailoutTailOffset_ = startTrampolineCode(masm);
  masm.bind(bailoutTail);

  masm.generateBailoutTail(r1, r2);
}
855
generateProfilerExitFrameTailStub(MacroAssembler & masm,Label * profilerExitTail)856 void JitRuntime::generateProfilerExitFrameTailStub(MacroAssembler& masm,
857 Label* profilerExitTail) {
858 profilerExitFrameTailOffset_ = startTrampolineCode(masm);
859 masm.bind(profilerExitTail);
860
861 Register scratch1 = r8;
862 Register scratch2 = r9;
  // Additional scratch registers for this tail stub. (|scratch1| and
  // |scratch2| are declared earlier in this function, before this excerpt.)
  Register scratch3 = r10;
  Register scratch4 = r11;

  //
  // The code generated below expects that the current stack pointer points
  // to an Ion or Baseline frame, at the state it would be immediately
  // before a ret(). Thus, after this stub's business is done, it executes
  // a ret() and returns directly to the caller script, on behalf of the
  // callee script that jumped to this code.
  //
  // Thus the expected stack is:
  //
  //                                   StackPointer ----+
  //                                                    v
  // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr
  // MEM-HI                                       MEM-LOW
  //
  //
  // The generated jitcode is responsible for overwriting the
  // jitActivation->lastProfilingFrame field with a pointer to the previous
  // Ion or Baseline jit-frame that was pushed before this one. It is also
  // responsible for overwriting jitActivation->lastProfilingCallSite with
  // the return address into that frame. The frame could either be an
  // immediate "caller" frame, or it could be a frame in a previous
  // JitActivation (if the current frame was entered from C++, and the C++
  // was entered by some caller jit-frame further down the stack).
  //
  // So this jitcode is responsible for "walking up" the jit stack, finding
  // the previous Ion or Baseline JS frame, and storing its address and the
  // return address into the appropriate fields on the current jitActivation.
  //
  // There are a fixed number of different path types that can lead to the
  // current frame, which is either a baseline or ion frame:
  //
  // <Baseline-Or-Ion>
  // ^
  // |
  // ^--- Ion
  // |
  // ^--- Baseline Stub <---- Baseline
  // |
  // ^--- Argument Rectifier
  // |    ^
  // |    |
  // |    ^--- Ion
  // |    |
  // |    ^--- Baseline Stub <---- Baseline
  // |
  // ^--- Entry Frame (From C++)
  //
  // Load the current JitActivation (JSContext::profilingActivation_) into
  // |actReg|; the two profiling fields live at fixed offsets from it.
  Register actReg = scratch4;
  masm.loadJSContext(actReg);
  masm.loadPtr(Address(actReg, offsetof(JSContext, profilingActivation_)),
               actReg);

  Address lastProfilingFrame(actReg,
                             JitActivation::offsetOfLastProfilingFrame());
  Address lastProfilingCallSite(actReg,
                                JitActivation::offsetOfLastProfilingCallSite());

#ifdef DEBUG
  // Ensure that frame we are exiting is current lastProfilingFrame:
  // the stored value must be either null or equal to the current stack
  // pointer, otherwise the profiler's frame bookkeeping is out of sync.
  {
    masm.loadPtr(lastProfilingFrame, scratch1);
    Label checkOk;
    masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk);
    masm.branchStackPtr(Assembler::Equal, scratch1, &checkOk);
    masm.assumeUnreachable(
        "Mismatch between stored lastProfilingFrame and current stack "
        "pointer.");
    masm.bind(&checkOk);
  }
#endif
936
  // Load the frame descriptor into |scratch1|, figure out what to do depending
  // on its type.
  masm.loadPtr(
      Address(masm.getStackPointer(), JitFrameLayout::offsetOfDescriptor()),
      scratch1);

  // Going into the conditionals, we will have:
  //      FrameDescriptor.size in scratch1
  //      FrameDescriptor.type in scratch2
  // The type occupies the low FRAMETYPE_BITS bits; the size is the
  // descriptor shifted right by FRAMESIZE_SHIFT.
  masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1, scratch2);
  masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);

  // Handling of each case is dependent on FrameDescriptor.type
  Label handle_IonJS;
  Label handle_BaselineStub;
  Label handle_Rectifier;
  Label handle_IonICCall;
  Label handle_Entry;
  Label end;

  masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonJS),
                &handle_IonJS);
  // BaselineJS callers take the same path as IonJS callers.
  masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineJS),
                &handle_IonJS);
  masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineStub),
                &handle_BaselineStub);
  masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Rectifier),
                &handle_Rectifier);
  masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonICCall),
                &handle_IonICCall);
  masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_CppToJSJit),
                &handle_Entry);

  // The WasmToJSJit is just another kind of entry.
  masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_WasmToJSJit),
                &handle_Entry);

  masm.assumeUnreachable(
      "Invalid caller frame type when exiting from Ion frame.");
976
  //
  // JitFrame_IonJS
  //
  // Stack layout:
  //                  ...
  //                  Ion-Descriptor
  //     Prev-FP ---> Ion-ReturnAddr
  //                  ... previous frame data ... |- Descriptor.Size
  //                  ... arguments ...           |
  //                  ActualArgc          |
  //                  CalleeToken         |- JitFrameLayout::Size()
  //                  Descriptor          |
  //        FP -----> ReturnAddr          |
  //
  masm.bind(&handle_IonJS);
  {
    // |scratch1| contains Descriptor.size

    // returning directly to an IonJS frame. Store return addr to frame
    // in lastProfilingCallSite.
    masm.loadPtr(Address(masm.getStackPointer(),
                         JitFrameLayout::offsetOfReturnAddress()),
                 scratch2);
    masm.storePtr(scratch2, lastProfilingCallSite);

    // Store return frame in lastProfilingFrame.
    // scratch2 := masm.getStackPointer() + Descriptor.size*1 +
    //             JitFrameLayout::Size();
    masm.Add(ARMRegister(scratch2, 64), masm.GetStackPointer64(),
             ARMRegister(scratch1, 64));
    // Keep the real SP coherent after the raw vixl-level Add above.
    masm.syncStackPtr();
    masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2, scratch2);
    masm.storePtr(scratch2, lastProfilingFrame);
    masm.ret();
  }
1012
  //
  // JitFrame_BaselineStub
  //
  // Look past the stub and store the frame pointer to
  // the baselineJS frame prior to it.
  //
  // Stack layout:
  //              ...
  //              BL-Descriptor
  // Prev-FP ---> BL-ReturnAddr
  //      +-----> BL-PrevFramePointer
  //      |       ... BL-FrameData ...
  //      |       BLStub-Descriptor
  //      |       BLStub-ReturnAddr
  //      |       BLStub-StubPointer          |
  //      +------ BLStub-SavedFramePointer    |- Descriptor.Size
  //              ... arguments ...           |
  //              ActualArgc          |
  //              CalleeToken         |- JitFrameLayout::Size()
  //              Descriptor          |
  //    FP -----> ReturnAddr          |
  //
  // We take advantage of the fact that the stub frame saves the frame
  // pointer pointing to the baseline frame, so a bunch of calculation can
  // be avoided.
  //
  masm.bind(&handle_BaselineStub);
  {
    // scratch3 := SP + Descriptor.size == address of the BaselineStub frame.
    masm.Add(ARMRegister(scratch3, 64), masm.GetStackPointer64(),
             ARMRegister(scratch1, 64));
    masm.syncStackPtr();
    Address stubFrameReturnAddr(
        scratch3, JitFrameLayout::Size() +
                      BaselineStubFrameLayout::offsetOfReturnAddress());
    masm.loadPtr(stubFrameReturnAddr, scratch2);
    masm.storePtr(scratch2, lastProfilingCallSite);

    // The saved frame pointer sits two words below the stub frame's
    // JitFrameLayout portion; it points at BL-PrevFramePointer.
    Address stubFrameSavedFramePtr(
        scratch3, JitFrameLayout::Size() - (2 * sizeof(void*)));
    masm.loadPtr(stubFrameSavedFramePtr, scratch2);
    masm.addPtr(Imm32(sizeof(void*)), scratch2);  // Skip past BL-PrevFramePtr.
    masm.storePtr(scratch2, lastProfilingFrame);
    masm.ret();
  }
1057
1058 //
1059 // JitFrame_Rectifier
1060 //
1061 // The rectifier frame can be preceded by either an IonJS, a BaselineStub,
1062 // or a CppToJSJit/WasmToJSJit frame.
1063 //
1064 // Stack layout if caller of rectifier was Ion or CppToJSJit/WasmToJSJit:
1065 //
1066 // Ion-Descriptor
1067 // Ion-ReturnAddr
1068 // ... ion frame data ... |- Rect-Descriptor.Size
1069 // < COMMON LAYOUT >
1070 //
1071 // Stack layout if caller of rectifier was Baseline:
1072 //
1073 // BL-Descriptor
1074 // Prev-FP ---> BL-ReturnAddr
1075 // +-----> BL-SavedFramePointer
1076 // | ... baseline frame data ...
1077 // | BLStub-Descriptor
1078 // | BLStub-ReturnAddr
1079 // | BLStub-StubPointer |
1080 // +------ BLStub-SavedFramePointer |- Rect-Descriptor.Size
1081 // ... args to rectifier ... |
1082 // < COMMON LAYOUT >
1083 //
1084 // Common stack layout:
1085 //
1086 // ActualArgc |
1087 // CalleeToken |- IonRectitiferFrameLayout::Size()
1088 // Rect-Descriptor |
1089 // Rect-ReturnAddr |
1090 // ... rectifier data & args ... |- Descriptor.Size
1091 // ActualArgc |
1092 // CalleeToken |- JitFrameLayout::Size()
1093 // Descriptor |
1094 // FP -----> ReturnAddr |
1095 //
1096 masm.bind(&handle_Rectifier);
1097 {
1098 // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
1099 masm.Add(ARMRegister(scratch2, 64), masm.GetStackPointer64(),
1100 ARMRegister(scratch1, 64));
1101 masm.syncStackPtr();
1102 masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2);
1103 masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()),
1104 scratch3);
1105 masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3, scratch1);
1106 masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3);
1107
1108 // Now |scratch1| contains Rect-Descriptor.Size
1109 // and |scratch2| points to Rectifier frame
1110 // and |scratch3| contains Rect-Descriptor.Type
1111
1112 masm.assertRectifierFrameParentType(scratch3);
1113
1114 // Check for either Ion or BaselineStub frame.
1115 Label notIonFrame;
1116 masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS),
1117 ¬IonFrame);
1118
1119 // Handle Rectifier <- IonJS
1120 // scratch3 := RectFrame[ReturnAddr]
1121 masm.loadPtr(
1122 Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()),
1123 scratch3);
1124 masm.storePtr(scratch3, lastProfilingCallSite);
1125
1126 // scratch3 := RectFrame + Rect-Descriptor.Size +
1127 // RectifierFrameLayout::Size()
1128 masm.addPtr(scratch2, scratch1, scratch3);
1129 masm.add32(Imm32(RectifierFrameLayout::Size()), scratch3);
1130 masm.storePtr(scratch3, lastProfilingFrame);
1131 masm.ret();
1132
1133 masm.bind(¬IonFrame);
1134
1135 // Check for either BaselineStub or a CppToJSJit/WasmToJSJit entry
1136 // frame.
1137 masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_BaselineStub),
1138 &handle_Entry);
1139
1140 // Handle Rectifier <- BaselineStub <- BaselineJS
1141 masm.addPtr(scratch2, scratch1, scratch3);
1142 Address stubFrameReturnAddr(
1143 scratch3, RectifierFrameLayout::Size() +
1144 BaselineStubFrameLayout::offsetOfReturnAddress());
1145 masm.loadPtr(stubFrameReturnAddr, scratch2);
1146 masm.storePtr(scratch2, lastProfilingCallSite);
1147
1148 Address stubFrameSavedFramePtr(
1149 scratch3, RectifierFrameLayout::Size() - (2 * sizeof(void*)));
1150 masm.loadPtr(stubFrameSavedFramePtr, scratch2);
1151 masm.addPtr(Imm32(sizeof(void*)), scratch2);
1152 masm.storePtr(scratch2, lastProfilingFrame);
1153 masm.ret();
1154 }
1155
  // JitFrame_IonICCall
  //
  // The caller is always an IonJS frame.
  //
  //              Ion-Descriptor
  //              Ion-ReturnAddr
  //              ... ion frame data ... |- CallFrame-Descriptor.Size
  //              StubCode               |
  //              ICCallFrame-Descriptor |- IonICCallFrameLayout::Size()
  //              ICCallFrame-ReturnAddr |
  //              ... call frame data & args ... |- Descriptor.Size
  //              ActualArgc |
  //              CalleeToken |- JitFrameLayout::Size()
  //              Descriptor |
  //    FP -----> ReturnAddr |
  masm.bind(&handle_IonICCall);
  {
    // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
    masm.Add(ARMRegister(scratch2, 64), masm.GetStackPointer64(),
             ARMRegister(scratch1, 64));
    // Keep the real SP coherent after the raw vixl-level Add above.
    masm.syncStackPtr();
    masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2);

    // scratch3 := ICCallFrame-Descriptor.Size
    masm.loadPtr(Address(scratch2, IonICCallFrameLayout::offsetOfDescriptor()),
                 scratch3);
#ifdef DEBUG
    // Assert previous frame is an IonJS frame.
    masm.movePtr(scratch3, scratch1);
    masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1);
    {
      Label checkOk;
      masm.branch32(Assembler::Equal, scratch1, Imm32(JitFrame_IonJS),
                    &checkOk);
      masm.assumeUnreachable("IonICCall frame must be preceded by IonJS frame");
      masm.bind(&checkOk);
    }
#endif
    // Drop the type bits; scratch3 now holds only the descriptor's size.
    masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3);

    // lastProfilingCallSite := ICCallFrame-ReturnAddr
    masm.loadPtr(
        Address(scratch2, IonICCallFrameLayout::offsetOfReturnAddress()),
        scratch1);
    masm.storePtr(scratch1, lastProfilingCallSite);

    // lastProfilingFrame := ICCallFrame + ICCallFrame-Descriptor.Size +
    //                       IonICCallFrameLayout::Size()
    masm.addPtr(scratch2, scratch3, scratch1);
    masm.addPtr(Imm32(IonICCallFrameLayout::Size()), scratch1);
    masm.storePtr(scratch1, lastProfilingFrame);
    masm.ret();
  }
1209
  //
  // JitFrame_CppToJSJit / JitFrame_WasmToJSJit
  //
  // If at an entry frame, store null into both fields.
  // A fast-path wasm->jit transition frame is an entry frame from the point
  // of view of the JIT.
  //
  masm.bind(&handle_Entry);
  {
    // Null out both profiling fields: there is no jit caller frame to record.
    masm.movePtr(ImmPtr(nullptr), scratch1);
    masm.storePtr(scratch1, lastProfilingCallSite);
    masm.storePtr(scratch1, lastProfilingFrame);
    masm.ret();
  }
1224 }
1225