/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jscompartment.h"
8
9 #include "jit/arm/SharedICHelpers-arm.h"
10 #include "jit/Bailouts.h"
11 #include "jit/JitCompartment.h"
12 #include "jit/JitFrames.h"
13 #include "jit/JitSpewer.h"
14 #include "jit/Linker.h"
15 #ifdef JS_ION_PERF
16 # include "jit/PerfSpewer.h"
17 #endif
18 #include "jit/VMFunctions.h"
19
20 #include "jit/MacroAssembler-inl.h"
21
22 using namespace js;
23 using namespace js::jit;
24
// The VFP double registers (d8-d15) that the ARM EABI requires callees to
// preserve. generateEnterJIT spills this set in its prologue and
// GenerateReturn reloads it in the epilogue.
static const FloatRegisterSet NonVolatileFloatRegs =
    FloatRegisterSet((1ULL << FloatRegisters::d8) |
                     (1ULL << FloatRegisters::d9) |
                     (1ULL << FloatRegisters::d10) |
                     (1ULL << FloatRegisters::d11) |
                     (1ULL << FloatRegisters::d12) |
                     (1ULL << FloatRegisters::d13) |
                     (1ULL << FloatRegisters::d14) |
                     (1ULL << FloatRegisters::d15));
34
// Emit the epilogue of the EnterJIT trampoline: restore the non-volatile
// float and general-purpose registers pushed by generateEnterJIT's prologue,
// place |returnCode| in r0, and return by popping the saved lr slot directly
// into pc.
// NOTE(review): |prof| is currently unused by this function.
static void
GenerateReturn(MacroAssembler& masm, int returnCode, SPSProfiler* prof)
{
    // Restore non-volatile floating point registers.
    masm.transferMultipleByRuns(NonVolatileFloatRegs, IsLoad, StackPointer, IA);

    // Get rid of padding word.
    masm.addPtr(Imm32(sizeof(void*)), sp);

    // Set up return value
    masm.ma_mov(Imm32(returnCode), r0);

    // Pop and return
    masm.startDataTransferM(IsLoad, sp, IA, WriteBack);
    masm.transferReg(r4);
    masm.transferReg(r5);
    masm.transferReg(r6);
    masm.transferReg(r7);
    masm.transferReg(r8);
    masm.transferReg(r9);
    masm.transferReg(r10);
    masm.transferReg(r11);
    // r12 isn't saved, so it shouldn't be restored.
    // The prologue pushed lr into this slot; popping it into pc performs the
    // actual return to the C++ caller.
    masm.transferReg(pc);
    masm.finishDataTransfer();
    masm.flushBuffer();
}
62
// Mirror of the stack layout built by generateEnterJIT's prologue (pushes of
// d8-d15, a padding word, r4-r11 and lr) followed by the stack-passed C++
// arguments. offsetof() into this struct is used to locate the spilled
// arguments (token, scopeChain, numStackValues, vp) relative to sp, so the
// field order here must match the push order exactly.
struct EnterJITStack
{
    double d8;
    double d9;
    double d10;
    double d11;
    double d12;
    double d13;
    double d14;
    double d15;

    // Padding.
    void* padding;

    // Non-volatile registers.
    void* r4;
    void* r5;
    void* r6;
    void* r7;
    void* r8;
    void* r9;
    void* r10;
    void* r11;
    // The abi does not expect r12 (ip) to be preserved
    void* lr;

    // Arguments.
    // code == r0
    // argc == r1
    // argv == r2
    // frame == r3
    CalleeToken token;
    JSObject* scopeChain;
    size_t numStackValues;
    Value* vp;
};
99
/*
 * This method generates a trampoline for a c++ function with the following
 * signature:
 *   void enter(void* code, int argc, Value* argv, InterpreterFrame* fp,
 *              CalleeToken calleeToken, JSObject* scopeChain, Value* vp)
 * ...using standard EABI calling convention
 */
JitCode*
JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
{
    // Stack slots of the stack-passed C++ arguments, addressed relative to sp
    // right after the prologue pushes; layout must match EnterJITStack.
    const Address slot_token(sp, offsetof(EnterJITStack, token));
    const Address slot_vp(sp, offsetof(EnterJITStack, vp));

    MOZ_ASSERT(OsrFrameReg == r3);

    MacroAssembler masm(cx);
    Assembler* aasm = &masm;

    // Save non-volatile registers. These must be saved by the trampoline,
    // rather than the JIT'd code, because they are scanned by the conservative
    // scanner.
    masm.startDataTransferM(IsStore, sp, DB, WriteBack);
    masm.transferReg(r4);  // [sp,0]
    masm.transferReg(r5);  // [sp,4]
    masm.transferReg(r6);  // [sp,8]
    masm.transferReg(r7);  // [sp,12]
    masm.transferReg(r8);  // [sp,16]
    masm.transferReg(r9);  // [sp,20]
    masm.transferReg(r10); // [sp,24]
    masm.transferReg(r11); // [sp,28]
    // The abi does not expect r12 (ip) to be preserved
    masm.transferReg(lr);  // [sp,32]
    // The 5th argument is located at [sp, 36]
    masm.finishDataTransfer();

    // Add padding word.
    masm.subPtr(Imm32(sizeof(void*)), sp);

    // Push the float registers.
    masm.transferMultipleByRuns(NonVolatileFloatRegs, IsStore, sp, DB);

    // Save stack pointer into r8; used below to compute the frame descriptor.
    masm.movePtr(sp, r8);

    // Load calleeToken into r9.
    masm.loadPtr(slot_token, r9);

    // Save stack pointer.
    if (type == EnterJitBaseline)
        masm.movePtr(sp, r11);

    // Load the number of actual arguments into r10.
    masm.loadPtr(slot_vp, r10);
    masm.unboxInt32(Address(r10, 0), r10);

    {
        // Constructing calls pass an extra |newTarget| Value after the
        // explicit arguments; include it in the argc (r1) used below for
        // reserving stack space and for the argument-copy loop.
        Label noNewTarget;
        masm.branchTest32(Assembler::Zero, r9, Imm32(CalleeToken_FunctionConstructing),
                          &noNewTarget);

        masm.add32(Imm32(1), r1);

        masm.bind(&noNewTarget);
    }

    // Guarantee stack alignment of Jit frames.
    //
    // This code moves the stack pointer to the location where it should be when
    // we enter the Jit frame.  It moves the stack pointer such that we have
    // enough space reserved for pushing the arguments, and the JitFrameLayout.
    // The stack pointer is also aligned on the alignment expected by the Jit
    // frames.
    //
    // At the end the register r4, is a pointer to the stack where the first
    // argument is expected by the Jit frame.
    //
    aasm->as_sub(r4, sp, O2RegImmShift(r1, LSL, 3)); // r4 = sp - argc*8
    masm.ma_and(Imm32(~(JitStackAlignment - 1)), r4, r4);
    // r4 is now the aligned on the bottom of the list of arguments.
    static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
      "No need to consider the JitFrameLayout for aligning the stack");
    // sp' = ~(JitStackAlignment - 1) & (sp - argc * sizeof(Value)) - sizeof(JitFrameLayout)
    aasm->as_sub(sp, r4, Imm8(sizeof(JitFrameLayout)));

    // Get a copy of the number of args to use as a decrement counter, also set
    // the zero condition code.
    aasm->as_mov(r5, O2Reg(r1), SetCC);

    // Loop over arguments, copying them from an unknown buffer onto the Ion
    // stack so they can be accessed from JIT'ed code.
    {
        Label header, footer;
        // If there aren't any arguments, don't do anything.
        aasm->as_b(&footer, Assembler::Zero);
        // Get the top of the loop.
        masm.bind(&header);
        aasm->as_sub(r5, r5, Imm8(1), SetCC);
        // We could be more awesome, and unroll this, using a loadm
        // (particularly since the offset is effectively 0) but that seems more
        // error prone, and complex.
        // BIG FAT WARNING: this loads both r6 and r7.
        aasm->as_extdtr(IsLoad,  64, true, PostIndex, r6, EDtrAddr(r2, EDtrOffImm(8)));
        aasm->as_extdtr(IsStore, 64, true, PostIndex, r6, EDtrAddr(r4, EDtrOffImm(8)));
        aasm->as_b(&header, Assembler::NonZero);
        masm.bind(&footer);
    }

    // r8 = the distance between the sp saved above and the current sp, i.e.
    // the size of this frame; encode it into a frame descriptor.
    masm.ma_sub(r8, sp, r8);
    masm.makeFrameDescriptor(r8, JitFrame_Entry);

    // Lay out the JitFrameLayout header above sp; [sp] itself is left for the
    // return address (written later, or by the call).
    masm.startDataTransferM(IsStore, sp, IB, NoWriteBack);
    // [sp] = return address (written later)
    masm.transferReg(r8);  // [sp',4] = descriptor, argc*8+20
    masm.transferReg(r9);  // [sp',8] = callee token
    masm.transferReg(r10); // [sp',12] = actual arguments
    masm.finishDataTransfer();

    Label returnLabel;
    if (type == EnterJitBaseline) {
        // Handle OSR.
        AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
        regs.take(JSReturnOperand);
        regs.takeUnchecked(OsrFrameReg);
        regs.take(r11);
        regs.take(ReturnReg);

        const Address slot_numStackValues(r11, offsetof(EnterJITStack, numStackValues));

        Label notOsr;
        masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, &notOsr);

        Register scratch = regs.takeAny();

        Register numStackValues = regs.takeAny();
        masm.load32(slot_numStackValues, numStackValues);

        // Write return address. On ARM, CodeLabel is only used for tableswitch,
        // so we can't use it here to get the return address. Instead, we use pc
        // + a fixed offset to a jump to returnLabel. The pc register holds pc +
        // 8, so we add the size of 2 instructions to skip the instructions
        // emitted by storePtr and jump(&skipJump).
        {
            // Pool insertion here would break the pc-relative offset math above.
            AutoForbidPools afp(&masm, 5);
            Label skipJump;
            masm.mov(pc, scratch);
            masm.addPtr(Imm32(2 * sizeof(uint32_t)), scratch);
            masm.storePtr(scratch, Address(sp, 0));
            masm.jump(&skipJump);
            masm.jump(&returnLabel);
            masm.bind(&skipJump);
        }

        // Push previous frame pointer.
        masm.push(r11);

        // Reserve frame.
        Register framePtr = r11;
        masm.subPtr(Imm32(BaselineFrame::Size()), sp);
        masm.mov(sp, framePtr);

#ifdef XP_WIN
        // Can't push large frames blindly on windows. Touch frame memory
        // incrementally.
        masm.ma_lsl(Imm32(3), numStackValues, scratch);
        masm.subPtr(scratch, framePtr);
        {
            masm.ma_sub(sp, Imm32(WINDOWS_BIG_FRAME_TOUCH_INCREMENT), scratch);

            Label touchFrameLoop;
            Label touchFrameLoopEnd;
            masm.bind(&touchFrameLoop);
            masm.branchPtr(Assembler::Below, scratch, framePtr, &touchFrameLoopEnd);
            masm.store32(Imm32(0), Address(scratch, 0));
            masm.subPtr(Imm32(WINDOWS_BIG_FRAME_TOUCH_INCREMENT), scratch);
            masm.jump(&touchFrameLoop);
            masm.bind(&touchFrameLoopEnd);
        }
        masm.mov(sp, framePtr);
#endif

        // Reserve space for locals and stack values.
        masm.ma_lsl(Imm32(3), numStackValues, scratch);
        masm.ma_sub(sp, scratch, sp);

        // Enter exit frame.
        masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), scratch);
        masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS);
        masm.push(scratch);
        masm.push(Imm32(0)); // Fake return address.
        // No GC things to mark on the stack, push a bare token.
        masm.enterFakeExitFrame(ExitFrameLayoutBareToken);

        masm.push(framePtr); // BaselineFrame
        masm.push(r0); // jitcode

        masm.setupUnalignedABICall(scratch);
        masm.passABIArg(r11); // BaselineFrame
        masm.passABIArg(OsrFrameReg); // InterpreterFrame
        masm.passABIArg(numStackValues);
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::InitBaselineFrameForOsr));

        // Recover the jitcode pointer and frame pointer pushed before the call.
        Register jitcode = regs.takeAny();
        masm.pop(jitcode);
        masm.pop(framePtr);

        MOZ_ASSERT(jitcode != ReturnReg);

        Label error;
        masm.addPtr(Imm32(ExitFrameLayout::SizeWithFooter()), sp);
        masm.addPtr(Imm32(BaselineFrame::Size()), framePtr);
        masm.branchIfFalseBool(ReturnReg, &error);

        // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
        // if profiler instrumentation is enabled.
        {
            Label skipProfilingInstrumentation;
            Register realFramePtr = numStackValues;
            AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
            masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
                          &skipProfilingInstrumentation);
            masm.ma_add(framePtr, Imm32(sizeof(void*)), realFramePtr);
            masm.profilerEnterFrame(realFramePtr, scratch);
            masm.bind(&skipProfilingInstrumentation);
        }

        masm.jump(jitcode);

        // OOM: Load error value, discard return address and previous frame
        // pointer and return.
        masm.bind(&error);
        masm.mov(framePtr, sp);
        masm.addPtr(Imm32(2 * sizeof(uintptr_t)), sp);
        masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
        masm.jump(&returnLabel);

        masm.bind(&notOsr);
        // Load the scope chain in R1.
        MOZ_ASSERT(R1.scratchReg() != r0);
        masm.loadPtr(Address(r11, offsetof(EnterJITStack, scopeChain)), R1.scratchReg());
    }

    // The Data transfer is pushing 4 words, which already account for the
    // return address space of the Jit frame. We have to undo what the data
    // transfer did before making the call.
    masm.addPtr(Imm32(sizeof(uintptr_t)), sp);

    // The callee will push the return address on the stack, thus we check that
    // the stack would be aligned once the call is complete.
    masm.assertStackAlignment(JitStackAlignment, sizeof(uintptr_t));

    // Call the function.
    masm.callJitNoProfiler(r0);

    if (type == EnterJitBaseline) {
        // Baseline OSR will return here.
        masm.bind(&returnLabel);
    }

    // The top of the stack now points to the address of the field following the
    // return address because the return address is popped for the return, so we
    // need to remove the size of the return address field.
    aasm->as_sub(sp, sp, Imm8(4));

    // Load off of the stack the size of our local stack.
    masm.loadPtr(Address(sp, JitFrameLayout::offsetOfDescriptor()), r5);
    aasm->as_add(sp, sp, lsr(r5, FRAMESIZE_SHIFT));

    // Store the returned value into the slot_vp
    masm.loadPtr(slot_vp, r5);
    masm.storeValue(JSReturnOperand, Address(r5, 0));

    // :TODO: Optimize storeValue with:
    // We're using a load-double here. In order for that to work, the data needs
    // to be stored in two consecutive registers, make sure this is the case
    //   MOZ_ASSERT(JSReturnReg_Type.code() == JSReturnReg_Data.code()+1);
    //   aasm->as_extdtr(IsStore, 64, true, Offset,
    //                   JSReturnReg_Data, EDtrAddr(r5, EDtrOffImm(0)));

    // Restore non-volatile registers and return.
    GenerateReturn(masm, true, &cx->runtime()->spsProfiler);

    Linker linker(masm);
    AutoFlushICache afc("EnterJIT");
    JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "EnterJIT");
#endif

    return code;
}
391
// Generate the thunk run when Ion code is invalidated: dump the full machine
// state, call InvalidationBailout(), unwind the invalidated frame, and jump
// to the shared bailout tail with the BailoutInfo* in r2.
JitCode*
JitRuntime::generateInvalidator(JSContext* cx)
{
    // See large comment in x86's JitRuntime::generateInvalidator.
    MacroAssembler masm(cx);
    // At this point, one of two things has happened:
    // 1) Execution has just returned from C code, which left the stack aligned
    // 2) Execution has just returned from Ion code, which left the stack unaligned.
    // The old return address should not matter, but we still want the stack to
    // be aligned, and there is no good reason to automatically align it with a
    // call to setupUnalignedABICall.
    masm.ma_and(Imm32(~7), sp, sp);
    masm.startDataTransferM(IsStore, sp, DB, WriteBack);
    // We don't have to push everything, but this is likely easier.
    // Setting regs_.
    for (uint32_t i = 0; i < Registers::Total; i++)
        masm.transferReg(Register::FromCode(i));
    masm.finishDataTransfer();

    // Since our datastructures for stack inspection are compile-time fixed,
    // if there are only 16 double registers, then we need to reserve
    // space on the stack for the missing 16.
    if (FloatRegisters::ActualTotalPhys() != FloatRegisters::TotalPhys) {
        int missingRegs = FloatRegisters::TotalPhys - FloatRegisters::ActualTotalPhys();
        masm.ma_sub(Imm32(missingRegs * sizeof(double)), sp);
    }

    masm.startFloatTransferM(IsStore, sp, DB, WriteBack);
    for (uint32_t i = 0; i < FloatRegisters::ActualTotalPhys(); i++)
        masm.transferFloatReg(FloatRegister(i, FloatRegister::Double));
    masm.finishFloatTransfer();

    // r0 = base of the register dump just written (the InvalidationBailoutStack).
    masm.ma_mov(sp, r0);
    // Reserve space for the frame-size outparam (r1); doubled to keep sp
    // 8-byte aligned.
    const int sizeOfRetval = sizeof(size_t)*2;
    masm.reserveStack(sizeOfRetval);
    masm.mov(sp, r1);
    // Reserve space for the BailoutInfo* outparam (r2); likewise padded for
    // alignment.
    const int sizeOfBailoutInfo = sizeof(void*)*2;
    masm.reserveStack(sizeOfBailoutInfo);
    masm.mov(sp, r2);
    masm.setupAlignedABICall();
    masm.passABIArg(r0);
    masm.passABIArg(r1);
    masm.passABIArg(r2);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout));

    // Load the outparams written by the call: r2 = BailoutInfo*, r1 = size of
    // the invalidated frame.
    masm.ma_ldr(Address(sp, 0), r2);
    masm.ma_ldr(Address(sp, sizeOfBailoutInfo), r1);
    // Remove the return address, the IonScript, the register state
    // (InvalidationBailoutStack) and the space that was allocated for the
    // return value.
    masm.ma_add(sp, Imm32(sizeof(InvalidationBailoutStack) + sizeOfRetval + sizeOfBailoutInfo), sp);
    // Remove the space that this frame was using before the bailout (computed
    // by InvalidationBailout)
    masm.ma_add(sp, r1, sp);

    // Jump to shared bailout tail. The BailoutInfo pointer has to be in r2.
    JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
    masm.branch(bailoutTail);

    Linker linker(masm);
    AutoFlushICache afc("Invalidator");
    JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
    JitSpew(JitSpew_IonInvalidate, "   invalidation thunk created at %p", (void*) code->raw());

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "Invalidator");
#endif

    return code;
}
462
// Generate the arguments rectifier: called when a function is invoked with
// fewer actual arguments than formals. It pads the missing formals with
// |undefined|, re-pushes the actuals (and |newTarget| when constructing),
// builds a rectifier frame, and calls the target's JIT code. If
// |returnAddrOut| is non-null it receives the address of the instruction
// following the call.
JitCode*
JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
{
    MacroAssembler masm(cx);
    masm.pushReturnAddress();

    // ArgumentsRectifierReg contains the |nargs| pushed onto the current frame.
    // Including |this|, there are (|nargs| + 1) arguments to copy.
    MOZ_ASSERT(ArgumentsRectifierReg == r8);

    // Copy number of actual arguments into r0.
    masm.ma_ldr(DTRAddr(sp, DtrOffImm(RectifierFrameLayout::offsetOfNumActualArgs())), r0);

    // Load the number of |undefined|s to push into r6.
    masm.ma_ldr(DTRAddr(sp, DtrOffImm(RectifierFrameLayout::offsetOfCalleeToken())), r1);
    masm.ma_and(Imm32(CalleeTokenMask), r1, r6);
    masm.ma_ldrh(EDtrAddr(r6, EDtrOffImm(JSFunction::offsetOfNargs())), r6);

    // r2 = formal count (r6) - actual count (r8): how many |undefined| values
    // the loop below must push.
    masm.ma_sub(r6, r8, r2);

    // Get the topmost argument.
    masm.ma_alu(sp, lsl(r8, 3), r3, OpAdd); // r3 <- sp + nargs * 8
    masm.ma_add(r3, Imm32(sizeof(RectifierFrameLayout)), r3);

    {
        Label notConstructing;

        masm.branchTest32(Assembler::Zero, r1, Imm32(CalleeToken_FunctionConstructing),
                          &notConstructing);

        // Add sizeof(Value) to overcome |this|: copy |newTarget| (the Value
        // just above the topmost argument) to the new stack top.
        masm.ma_dataTransferN(IsLoad,  64, true, r3, Imm32(8),  r4, Offset);
        masm.ma_dataTransferN(IsStore, 64, true, sp, Imm32(-8), r4, PreIndex);

        // Include the newly pushed newTarget value in the frame size
        // calculated below.
        masm.add32(Imm32(1), r6);

        masm.bind(&notConstructing);
    }

    // Push undefined.
    masm.moveValue(UndefinedValue(), r5, r4);
    {
        Label undefLoopTop;
        masm.bind(&undefLoopTop);
        // Store the (r5,r4) undefined Value pair, then decrement the count.
        masm.ma_dataTransferN(IsStore, 64, true, sp, Imm32(-8), r4, PreIndex);
        masm.ma_sub(r2, Imm32(1), r2, SetCC);

        masm.ma_b(&undefLoopTop, Assembler::NonZero);
    }

    // Push arguments, |nargs| + 1 times (to include |this|).
    {
        Label copyLoopTop;
        masm.bind(&copyLoopTop);
        masm.ma_dataTransferN(IsLoad,  64, true, r3, Imm32(-8), r4, PostIndex);
        masm.ma_dataTransferN(IsStore, 64, true, sp, Imm32(-8), r4, PreIndex);

        masm.ma_sub(r8, Imm32(1), r8, SetCC);
        masm.ma_b(&copyLoopTop, Assembler::NotSigned);
    }

    // translate the framesize from values into bytes
    masm.ma_add(r6, Imm32(1), r6);
    masm.ma_lsl(Imm32(3), r6, r6);

    // Construct sizeDescriptor.
    masm.makeFrameDescriptor(r6, JitFrame_Rectifier);

    // Construct JitFrameLayout.
    masm.ma_push(r0); // actual arguments.
    masm.ma_push(r1); // callee token
    masm.ma_push(r6); // frame descriptor.

    // Call the target function.
    // Note that this code assumes the function is JITted.
    masm.andPtr(Imm32(CalleeTokenMask), r1);
    masm.ma_ldr(DTRAddr(r1, DtrOffImm(JSFunction::offsetOfNativeOrScript())), r3);
    masm.loadBaselineOrIonRaw(r3, r3, nullptr);
    uint32_t returnOffset = masm.callJitNoProfiler(r3);

    // arg1
    //  ...
    // argN
    // num actual args
    // callee token
    // sizeDescriptor     <- sp now
    // return address

    // Remove the rectifier frame: pop the three header words, keeping the
    // frame descriptor in r4.
    masm.ma_dtr(IsLoad, sp, Imm32(12), r4, PostIndex);

    // arg1
    //  ...
    // argN               <- sp now; r4 <- frame descriptor
    // num actual args
    // callee token
    // sizeDescriptor
    // return address

    // Discard pushed arguments.
    masm.ma_alu(sp, lsr(r4, FRAMESIZE_SHIFT), sp, OpAdd);

    masm.ret();
    Linker linker(masm);
    AutoFlushICache afc("ArgumentsRectifier");
    JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);

    if (returnAddrOut)
        *returnAddrOut = (void*) (code->raw() + returnOffset);

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "ArgumentsRectifier");
#endif

    return code;
}
581
// Spill the entire machine state for a bailout: all GPRs, all physical double
// registers (with padding when fewer than the compile-time maximum exist),
// then |frameClass| and lr. On exit |spArg| holds sp, the base of the dumped
// structure, ready to be passed to the bailout C++ function.
static void
PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass, Register spArg)
{
    // the stack should look like:
    // [IonFrame]
    // bailoutFrame.registersnapshot
    // bailoutFrame.fpsnapshot
    // bailoutFrame.snapshotOffset
    // bailoutFrame.frameSize

    // STEP 1a: Save our register sets to the stack so Bailout() can read
    // everything.
    // sp % 8 == 0

    masm.startDataTransferM(IsStore, sp, DB, WriteBack);
    // We don't have to push everything, but this is likely easier.
    // Setting regs_.
    for (uint32_t i = 0; i < Registers::Total; i++)
        masm.transferReg(Register::FromCode(i));
    masm.finishDataTransfer();

    // Since our datastructures for stack inspection are compile-time fixed,
    // if there are only 16 double registers, then we need to reserve
    // space on the stack for the missing 16.
    if (FloatRegisters::ActualTotalPhys() != FloatRegisters::TotalPhys) {
        int missingRegs = FloatRegisters::TotalPhys - FloatRegisters::ActualTotalPhys();
        masm.ma_sub(Imm32(missingRegs * sizeof(double)), sp);
    }
    masm.startFloatTransferM(IsStore, sp, DB, WriteBack);
    for (uint32_t i = 0; i < FloatRegisters::ActualTotalPhys(); i++)
        masm.transferFloatReg(FloatRegister(i, FloatRegister::Double));
    masm.finishFloatTransfer();

    // STEP 1b: Push both the "return address" of the function call (the address
    //          of the instruction after the call that we used to get here) as
    //          well as the callee token onto the stack. The return address is
    //          currently in r14. We will proceed by loading the callee token
    //          into a sacrificial register <= r14, then pushing both onto the
    //          stack.

    // Now place the frameClass onto the stack, via a register.
    masm.ma_mov(Imm32(frameClass), r4);
    // And onto the stack. Since the stack is full, we need to put this one past
    // the end of the current stack. Sadly, the ABI says that we need to always
    // point to the lowest place that has been written. The OS is free to do
    // whatever it wants below sp.
    masm.startDataTransferM(IsStore, sp, DB, WriteBack);
    // Set frameClassId_.
    masm.transferReg(r4);
    // Set tableOffset_; higher registers are stored at higher locations on the
    // stack.
    masm.transferReg(lr);
    masm.finishDataTransfer();

    // Hand the base of the dumped state back to the caller.
    masm.ma_mov(sp, spArg);
}
638
// Emit the common bailout tail shared by the bailout tables and the generic
// bailout handler: dump the machine state via PushBailoutFrame, call
// Bailout() to build a BailoutInfo, pop the bailout frame (fixed-size or
// computed, depending on |frameClass|), and jump to the shared bailout-tail
// code with the BailoutInfo* in r2.
static void
GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass)
{
    PushBailoutFrame(masm, frameClass, r0);

    // SP % 8 == 4
    // STEP 1c: Call the bailout function, giving a pointer to the
    //          structure we just blitted onto the stack.
    const int sizeOfBailoutInfo = sizeof(void*)*2;
    masm.reserveStack(sizeOfBailoutInfo);
    masm.mov(sp, r1);
    masm.setupAlignedABICall();

    // Decrement sp by another 4, so we keep alignment. Not Anymore! Pushing
    // both the snapshotoffset as well as the: masm.as_sub(sp, sp, Imm8(4));

    // Set the old (4-byte aligned) value of the sp as the first argument.
    masm.passABIArg(r0);
    masm.passABIArg(r1);

    // Sp % 8 == 0
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout));
    // Load the BailoutInfo* that Bailout() stored in the reserved slot.
    masm.ma_ldr(Address(sp, 0), r2);
    masm.ma_add(sp, Imm32(sizeOfBailoutInfo), sp);
    // Common size of a bailout frame.
    uint32_t bailoutFrameSize = 0
                              + sizeof(void*) // frameClass
                              + sizeof(RegisterDump);

    if (frameClass == NO_FRAME_SIZE_CLASS_ID) {
        // The frame size is dynamic: load it from the stack (r4), then pop
        // the bailout frame plus that many bytes.
        // Make sure the bailout frame size fits into the offset for a load.
        masm.as_dtr(IsLoad, 32, Offset,
                    r4, DTRAddr(sp, DtrOffImm(4)));
        // Used to be: offsetof(BailoutStack, frameSize_)
        // This structure is no longer available to us :(
        // We add 12 to the bailoutFrameSize because:
        //  sizeof(uint32_t) for the tableOffset that was pushed onto the stack
        //  sizeof(uintptr_t) for the snapshotOffset;
        //  alignment to round the uintptr_t up to a multiple of 8 bytes.
        masm.ma_add(sp, Imm32(bailoutFrameSize+12), sp);
        masm.as_add(sp, sp, O2Reg(r4));
    } else {
        uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize();
        masm.ma_add(Imm32(// The frame that was added when we entered the most
                          // recent function.
                          frameSize
                          // The size of the "return address" that was dumped on
                          // the stack.
                          + sizeof(void*)
                          // Everything else that was pushed on the stack.
                          + bailoutFrameSize)
                    , sp);
    }

    // Jump to shared bailout tail. The BailoutInfo pointer has to be in r2.
    JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
    masm.branch(bailoutTail);
}
697
698 JitCode*
generateBailoutTable(JSContext * cx,uint32_t frameClass)699 JitRuntime::generateBailoutTable(JSContext* cx, uint32_t frameClass)
700 {
701 MacroAssembler masm(cx);
702
703 {
704 // Emit the table without any pools being inserted.
705 Label bailout;
706 AutoForbidPools afp(&masm, BAILOUT_TABLE_SIZE);
707 for (size_t i = 0; i < BAILOUT_TABLE_SIZE; i++)
708 masm.ma_bl(&bailout);
709 masm.bind(&bailout);
710 }
711
712 GenerateBailoutThunk(cx, masm, frameClass);
713
714 Linker linker(masm);
715 AutoFlushICache afc("BailoutTable");
716 JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
717
718 #ifdef JS_ION_PERF
719 writePerfSpewerJitCodeProfile(code, "BailoutTable");
720 #endif
721
722 return code;
723 }
724
725 JitCode*
generateBailoutHandler(JSContext * cx)726 JitRuntime::generateBailoutHandler(JSContext* cx)
727 {
728 MacroAssembler masm(cx);
729 GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
730
731 Linker linker(masm);
732 AutoFlushICache afc("BailoutHandler");
733 JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
734
735 #ifdef JS_ION_PERF
736 writePerfSpewerJitCodeProfile(code, "BailoutHandler");
737 #endif
738
739 return code;
740 }
741
742 JitCode*
generateVMWrapper(JSContext * cx,const VMFunction & f)743 JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
744 {
745 MOZ_ASSERT(functionWrappers_);
746 MOZ_ASSERT(functionWrappers_->initialized());
747 VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
748 if (p)
749 return p->value();
750
751 // Generate a separated code for the wrapper.
752 MacroAssembler masm(cx);
753 AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
754
755 // Wrapper register set is a superset of Volatile register set.
756 JS_STATIC_ASSERT((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0);
757
758 // The context is the first argument; r0 is the first argument register.
759 Register cxreg = r0;
760 regs.take(cxreg);
761
762 // Stack is:
763 // ... frame ...
764 // +8 [args] + argPadding
765 // +0 ExitFrame
766 //
767 // We're aligned to an exit frame, so link it up.
768 // If it isn't a tail call, then the return address needs to be saved
769 if (f.expectTailCall == NonTailCall)
770 masm.pushReturnAddress();
771
772 masm.enterExitFrame(&f);
773 masm.loadJSContext(cxreg);
774
775 // Save the base of the argument set stored on the stack.
776 Register argsBase = InvalidReg;
777 if (f.explicitArgs) {
778 argsBase = r5;
779 regs.take(argsBase);
780 masm.ma_add(sp, Imm32(ExitFrameLayout::SizeWithFooter()), argsBase);
781 }
782
783 // Reserve space for the outparameter.
784 Register outReg = InvalidReg;
785 switch (f.outParam) {
786 case Type_Value:
787 outReg = r4;
788 regs.take(outReg);
789 masm.reserveStack(sizeof(Value));
790 masm.ma_mov(sp, outReg);
791 break;
792
793 case Type_Handle:
794 outReg = r4;
795 regs.take(outReg);
796 masm.PushEmptyRooted(f.outParamRootType);
797 masm.ma_mov(sp, outReg);
798 break;
799
800 case Type_Int32:
801 case Type_Pointer:
802 case Type_Bool:
803 outReg = r4;
804 regs.take(outReg);
805 masm.reserveStack(sizeof(int32_t));
806 masm.ma_mov(sp, outReg);
807 break;
808
809 case Type_Double:
810 outReg = r4;
811 regs.take(outReg);
812 masm.reserveStack(sizeof(double));
813 masm.ma_mov(sp, outReg);
814 break;
815
816 default:
817 MOZ_ASSERT(f.outParam == Type_Void);
818 break;
819 }
820
821 masm.setupUnalignedABICall(regs.getAny());
822 masm.passABIArg(cxreg);
823
824 size_t argDisp = 0;
825
826 // Copy any arguments.
827 for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
828 MoveOperand from;
829 switch (f.argProperties(explicitArg)) {
830 case VMFunction::WordByValue:
831 masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
832 argDisp += sizeof(void*);
833 break;
834 case VMFunction::DoubleByValue:
835 // Values should be passed by reference, not by value, so we assert
836 // that the argument is a double-precision float.
837 MOZ_ASSERT(f.argPassedInFloatReg(explicitArg));
838 masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
839 argDisp += sizeof(double);
840 break;
841 case VMFunction::WordByRef:
842 masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS), MoveOp::GENERAL);
843 argDisp += sizeof(void*);
844 break;
845 case VMFunction::DoubleByRef:
846 masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS), MoveOp::GENERAL);
847 argDisp += 2 * sizeof(void*);
848 break;
849 }
850 }
851
852 // Copy the implicit outparam, if any.
853 if (outReg != InvalidReg)
854 masm.passABIArg(outReg);
855
856 masm.callWithABI(f.wrapped);
857
858 // Test for failure.
859 switch (f.failType()) {
860 case Type_Object:
861 masm.branchTestPtr(Assembler::Zero, r0, r0, masm.failureLabel());
862 break;
863 case Type_Bool:
864 masm.branchIfFalseBool(r0, masm.failureLabel());
865 break;
866 default:
867 MOZ_CRASH("unknown failure kind");
868 }
869
870 // Load the outparam and free any allocated stack.
871 switch (f.outParam) {
872 case Type_Handle:
873 masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
874 break;
875
876 case Type_Value:
877 masm.loadValue(Address(sp, 0), JSReturnOperand);
878 masm.freeStack(sizeof(Value));
879 break;
880
881 case Type_Int32:
882 case Type_Pointer:
883 masm.load32(Address(sp, 0), ReturnReg);
884 masm.freeStack(sizeof(int32_t));
885 break;
886
887 case Type_Bool:
888 masm.load8ZeroExtend(Address(sp, 0), ReturnReg);
889 masm.freeStack(sizeof(int32_t));
890 break;
891
892 case Type_Double:
893 if (cx->runtime()->jitSupportsFloatingPoint)
894 masm.loadDouble(Address(sp, 0), ReturnDoubleReg);
895 else
896 masm.assumeUnreachable("Unable to load into float reg, with no FP support.");
897 masm.freeStack(sizeof(double));
898 break;
899
900 default:
901 MOZ_ASSERT(f.outParam == Type_Void);
902 break;
903 }
904 masm.leaveExitFrame();
905 masm.retn(Imm32(sizeof(ExitFrameLayout) +
906 f.explicitStackSlots() * sizeof(void*) +
907 f.extraValuesToPop * sizeof(Value)));
908
909 Linker linker(masm);
910 AutoFlushICache afc("VMWrapper");
911 JitCode* wrapper = linker.newCode<NoGC>(cx, OTHER_CODE);
912 if (!wrapper)
913 return nullptr;
914
915 // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
916 // use relookupOrAdd instead of add.
917 if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
918 return nullptr;
919
920 #ifdef JS_ION_PERF
921 writePerfSpewerJitCodeProfile(wrapper, "VMWrapper");
922 #endif
923
924 return wrapper;
925 }
926
// Generate the write-barrier thunk for |type|: save the volatile register
// state, call the C++ marking function with (runtime, PreBarrierReg), then
// restore the registers and return.
JitCode*
JitRuntime::generatePreBarrier(JSContext* cx, MIRType type)
{
    MacroAssembler masm(cx);

    // Save all volatile GPRs, plus the volatile double registers when the
    // runtime supports floating point.
    LiveRegisterSet save;
    if (cx->runtime()->jitSupportsFloatingPoint) {
        save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
                                 FloatRegisterSet(FloatRegisters::VolatileDoubleMask));
    } else {
        save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
                                 FloatRegisterSet());
    }
    // lr is pushed too so the return address survives the ABI call below.
    save.add(lr);
    masm.PushRegsInMask(save);

    // The pointer to mark is passed to us in PreBarrierReg (r1).
    MOZ_ASSERT(PreBarrierReg == r1);
    masm.movePtr(ImmPtr(cx->runtime()), r0);

    masm.setupUnalignedABICall(r2);
    masm.passABIArg(r0);
    masm.passABIArg(r1);
    masm.callWithABI(IonMarkFunction(type));
    // Swap lr for pc in the restore mask: popping the slot that holds the
    // saved lr directly into pc returns to the caller in the same instruction.
    save.take(AnyRegister(lr));
    save.add(pc);
    masm.PopRegsInMask(save);

    Linker linker(masm);
    AutoFlushICache afc("PreBarrier");
    JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "PreBarrier");
#endif

    return code;
}
964
// Signature of the C++ debug-trap handler and its VMFunction wrapper entry,
// used by generateDebugTrapHandler below. Arguments are (cx, frame, pc,
// mustReturn) — presumably the pushed lr/frame pair; confirm against
// HandleDebugTrap's definition.
typedef bool (*HandleDebugTrapFn)(JSContext*, BaselineFrame*, uint8_t*, bool*);
static const VMFunction HandleDebugTrapInfo = FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap);
967
968 JitCode*
generateDebugTrapHandler(JSContext * cx)969 JitRuntime::generateDebugTrapHandler(JSContext* cx)
970 {
971 MacroAssembler masm;
972
973 Register scratch1 = r0;
974 Register scratch2 = r1;
975
976 // Load BaselineFrame pointer in scratch1.
977 masm.mov(r11, scratch1);
978 masm.subPtr(Imm32(BaselineFrame::Size()), scratch1);
979
980 // Enter a stub frame and call the HandleDebugTrap VM function. Ensure the
981 // stub frame has a nullptr ICStub pointer, since this pointer is marked
982 // during GC.
983 masm.movePtr(ImmPtr(nullptr), ICStubReg);
984 EmitBaselineEnterStubFrame(masm, scratch2);
985
986 JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
987 if (!code)
988 return nullptr;
989
990 masm.push(lr);
991 masm.push(scratch1);
992 EmitBaselineCallVM(code, masm);
993
994 EmitBaselineLeaveStubFrame(masm);
995
996 // If the stub returns |true|, we have to perform a forced return (return
997 // from the JS frame). If the stub returns |false|, just return from the
998 // trap stub so that execution continues at the current pc.
999 Label forcedReturn;
1000 masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);
1001 masm.mov(lr, pc);
1002
1003 masm.bind(&forcedReturn);
1004 masm.loadValue(Address(r11, BaselineFrame::reverseOffsetOfReturnValue()),
1005 JSReturnOperand);
1006 masm.mov(r11, sp);
1007 masm.pop(r11);
1008
1009 // Before returning, if profiling is turned on, make sure that lastProfilingFrame
1010 // is set to the correct caller frame.
1011 {
1012 Label skipProfilingInstrumentation;
1013 AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
1014 masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
1015 masm.profilerExitFrame();
1016 masm.bind(&skipProfilingInstrumentation);
1017 }
1018
1019 masm.ret();
1020
1021 Linker linker(masm);
1022 AutoFlushICache afc("DebugTrapHandler");
1023 JitCode* codeDbg = linker.newCode<NoGC>(cx, OTHER_CODE);
1024
1025 #ifdef JS_ION_PERF
1026 writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
1027 #endif
1028
1029 return codeDbg;
1030 }
1031
1032 JitCode*
generateExceptionTailStub(JSContext * cx,void * handler)1033 JitRuntime::generateExceptionTailStub(JSContext* cx, void* handler)
1034 {
1035 MacroAssembler masm;
1036
1037 masm.handleFailureWithHandlerTail(handler);
1038
1039 Linker linker(masm);
1040 AutoFlushICache afc("ExceptionTailStub");
1041 JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
1042
1043 #ifdef JS_ION_PERF
1044 writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
1045 #endif
1046
1047 return code;
1048 }
1049
1050 JitCode*
generateBailoutTailStub(JSContext * cx)1051 JitRuntime::generateBailoutTailStub(JSContext* cx)
1052 {
1053 MacroAssembler masm;
1054
1055 masm.generateBailoutTail(r1, r2);
1056
1057 Linker linker(masm);
1058 AutoFlushICache afc("BailoutTailStub");
1059 JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
1060
1061 #ifdef JS_ION_PERF
1062 writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
1063 #endif
1064
1065 return code;
1066 }
1067
JitCode*
JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
{
    MacroAssembler masm;

    // Scratch registers for the frame walk below. NOTE(review): r5-r8 appear
    // chosen to avoid the return-value/argument registers — confirm against
    // the ARM register assignments used by the profiler exit path.
    Register scratch1 = r5;
    Register scratch2 = r6;
    Register scratch3 = r7;
    Register scratch4 = r8;

    //
    // The code generated below expects that the current stack pointer points
    // to an Ion or Baseline frame, at the state it would be immediately
    // before a ret().  Thus, after this stub's business is done, it executes
    // a ret() and returns directly to the caller script, on behalf of the
    // callee script that jumped to this code.
    //
    // Thus the expected stack is:
    //
    //                                   StackPointer ----+
    //                                                    v
    // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr
    // MEM-HI                                       MEM-LOW
    //
    //
    // The generated jitcode is responsible for overwriting the
    // jitActivation->lastProfilingFrame field with a pointer to the previous
    // Ion or Baseline jit-frame that was pushed before this one.  It is also
    // responsible for overwriting jitActivation->lastProfilingCallSite with
    // the return address into that frame.  The frame could either be an
    // immediate "caller" frame, or it could be a frame in a previous
    // JitActivation (if the current frame was entered from C++, and the C++
    // was entered by some caller jit-frame further down the stack).
    //
    // So this jitcode is responsible for "walking up" the jit stack, finding
    // the previous Ion or Baseline JS frame, and storing its address and the
    // return address into the appropriate fields on the current jitActivation.
    //
    // There are a fixed number of different path types that can lead to the
    // current frame, which is either a baseline or ion frame:
    //
    // <Baseline-Or-Ion>
    // ^
    // |
    // ^--- Ion
    // |
    // ^--- Baseline Stub <---- Baseline
    // |
    // ^--- Argument Rectifier
    // |    ^
    // |    |
    // |    ^--- Ion
    // |    |
    // |    ^--- Baseline Stub <---- Baseline
    // |
    // ^--- Entry Frame (From C++)
    //
    // Load the current profiling JitActivation into actReg.
    Register actReg = scratch4;
    AbsoluteAddress activationAddr(GetJitContext()->runtime->addressOfProfilingActivation());
    masm.loadPtr(activationAddr, actReg);

    // The two activation fields this stub must update.
    Address lastProfilingFrame(actReg, JitActivation::offsetOfLastProfilingFrame());
    Address lastProfilingCallSite(actReg, JitActivation::offsetOfLastProfilingCallSite());

#ifdef DEBUG
    // Ensure that frame we are exiting is current lastProfilingFrame.
    // A null stored frame is also accepted (nothing recorded yet).
    {
        masm.loadPtr(lastProfilingFrame, scratch1);
        Label checkOk;
        masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk);
        masm.branchPtr(Assembler::Equal, StackPointer, scratch1, &checkOk);
        masm.assumeUnreachable(
            "Mismatch between stored lastProfilingFrame and current stack pointer.");
        masm.bind(&checkOk);
    }
#endif

    // Load the frame descriptor into |scratch1|, figure out what to do depending on its type.
    masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfDescriptor()), scratch1);

    // Going into the conditionals, we will have:
    //      FrameDescriptor.size in scratch1
    //      FrameDescriptor.type in scratch2
    masm.ma_and(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1, scratch2);
    masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);

    // Handling of each case is dependent on FrameDescriptor.type
    Label handle_IonJS;
    Label handle_BaselineStub;
    Label handle_Rectifier;
    Label handle_IonAccessorIC;
    Label handle_Entry;
    Label end;

    // BaselineJS frames share the IonJS handler: both just need the plain
    // descriptor-size walk.
    masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonJS), &handle_IonJS);
    masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineJS), &handle_IonJS);
    masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineStub), &handle_BaselineStub);
    masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Rectifier), &handle_Rectifier);
    masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonAccessorIC), &handle_IonAccessorIC);
    masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Entry), &handle_Entry);

    masm.assumeUnreachable("Invalid caller frame type when exiting from Ion frame.");

    //
    // JitFrame_IonJS
    //
    // Stack layout:
    //                  ...
    //                  Ion-Descriptor
    //     Prev-FP ---> Ion-ReturnAddr
    //                  ... previous frame data ... |- Descriptor.Size
    //                  ... arguments ...           |
    //                  ActualArgc          |
    //                  CalleeToken         |- JitFrameLayout::Size()
    //                  Descriptor          |
    //        FP -----> ReturnAddr          |
    //
    masm.bind(&handle_IonJS);
    {
        // |scratch1| contains Descriptor.size

        // returning directly to an IonJS frame.  Store return addr to frame
        // in lastProfilingCallSite.
        masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfReturnAddress()), scratch2);
        masm.storePtr(scratch2, lastProfilingCallSite);

        // Store return frame in lastProfilingFrame.
        // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
        masm.ma_add(StackPointer, scratch1, scratch2);
        masm.ma_add(scratch2, Imm32(JitFrameLayout::Size()), scratch2);
        masm.storePtr(scratch2, lastProfilingFrame);
        masm.ret();
    }

    //
    // JitFrame_BaselineStub
    //
    // Look past the stub and store the frame pointer to
    // the baselineJS frame prior to it.
    //
    // Stack layout:
    //              ...
    //              BL-Descriptor
    // Prev-FP ---> BL-ReturnAddr
    //      +-----> BL-PrevFramePointer
    //      |       ... BL-FrameData ...
    //      |       BLStub-Descriptor
    //      |       BLStub-ReturnAddr
    //      |       BLStub-StubPointer          |
    //      +------ BLStub-SavedFramePointer    |- Descriptor.Size
    //              ... arguments ...           |
    //              ActualArgc          |
    //              CalleeToken         |- JitFrameLayout::Size()
    //              Descriptor          |
    //    FP -----> ReturnAddr          |
    //
    // We take advantage of the fact that the stub frame saves the frame
    // pointer pointing to the baseline frame, so a bunch of calculation can
    // be avoided.
    //
    masm.bind(&handle_BaselineStub);
    {
        masm.ma_add(StackPointer, scratch1, scratch3);
        Address stubFrameReturnAddr(scratch3,
                                    JitFrameLayout::Size() +
                                    BaselineStubFrameLayout::offsetOfReturnAddress());
        masm.loadPtr(stubFrameReturnAddr, scratch2);
        masm.storePtr(scratch2, lastProfilingCallSite);

        Address stubFrameSavedFramePtr(scratch3,
                                       JitFrameLayout::Size() - (2 * sizeof(void*)));
        masm.loadPtr(stubFrameSavedFramePtr, scratch2);
        masm.addPtr(Imm32(sizeof(void*)), scratch2); // Skip past BL-PrevFramePtr
        masm.storePtr(scratch2, lastProfilingFrame);
        masm.ret();
    }


    //
    // JitFrame_Rectifier
    //
    // The rectifier frame can be preceded by either an IonJS or a
    // BaselineStub frame.
    //
    // Stack layout if caller of rectifier was Ion:
    //
    //              Ion-Descriptor
    //              Ion-ReturnAddr
    //              ... ion frame data ... |- Rect-Descriptor.Size
    //              < COMMON LAYOUT >
    //
    // Stack layout if caller of rectifier was Baseline:
    //
    //              BL-Descriptor
    // Prev-FP ---> BL-ReturnAddr
    //      +-----> BL-SavedFramePointer
    //      |       ... baseline frame data ...
    //      |       BLStub-Descriptor
    //      |       BLStub-ReturnAddr
    //      |       BLStub-StubPointer          |
    //      +------ BLStub-SavedFramePointer    |- Rect-Descriptor.Size
    //              ... args to rectifier ...   |
    //              < COMMON LAYOUT >
    //
    // Common stack layout:
    //
    //              ActualArgc          |
    //              CalleeToken         |- IonRectitiferFrameLayout::Size()
    //              Rect-Descriptor     |
    //              Rect-ReturnAddr     |
    //              ... rectifier data & args ... |- Descriptor.Size
    //              ActualArgc      |
    //              CalleeToken     |- JitFrameLayout::Size()
    //              Descriptor      |
    //    FP -----> ReturnAddr      |
    //
    masm.bind(&handle_Rectifier);
    {
        // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
        masm.ma_add(StackPointer, scratch1, scratch2);
        masm.add32(Imm32(JitFrameLayout::Size()), scratch2);
        masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()), scratch3);
        masm.ma_lsr(Imm32(FRAMESIZE_SHIFT), scratch3, scratch1);
        masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3);

        // Now |scratch1| contains Rect-Descriptor.Size
        // and |scratch2| points to Rectifier frame
        // and |scratch3| contains Rect-Descriptor.Type

        // Check for either Ion or BaselineStub frame.
        Label handle_Rectifier_BaselineStub;
        masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS),
                      &handle_Rectifier_BaselineStub);

        // Handle Rectifier <- IonJS
        // scratch3 := RectFrame[ReturnAddr]
        masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()), scratch3);
        masm.storePtr(scratch3, lastProfilingCallSite);

        // scratch3 := RectFrame + Rect-Descriptor.Size + RectifierFrameLayout::Size()
        masm.ma_add(scratch2, scratch1, scratch3);
        masm.add32(Imm32(RectifierFrameLayout::Size()), scratch3);
        masm.storePtr(scratch3, lastProfilingFrame);
        masm.ret();

        // Handle Rectifier <- BaselineStub <- BaselineJS
        masm.bind(&handle_Rectifier_BaselineStub);
#ifdef DEBUG
        // Any frame type other than IonJS preceding the rectifier must be a
        // BaselineStub frame; assert that in debug builds.
        {
            Label checkOk;
            masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
            masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
            masm.bind(&checkOk);
        }
#endif
        masm.ma_add(scratch2, scratch1, scratch3);
        Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() +
                                              BaselineStubFrameLayout::offsetOfReturnAddress());
        masm.loadPtr(stubFrameReturnAddr, scratch2);
        masm.storePtr(scratch2, lastProfilingCallSite);

        Address stubFrameSavedFramePtr(scratch3,
                                       RectifierFrameLayout::Size() - (2 * sizeof(void*)));
        masm.loadPtr(stubFrameSavedFramePtr, scratch2);
        masm.addPtr(Imm32(sizeof(void*)), scratch2);
        masm.storePtr(scratch2, lastProfilingFrame);
        masm.ret();
    }

    // JitFrame_IonAccessorIC
    //
    // The caller is always an IonJS frame.
    //
    //              Ion-Descriptor
    //              Ion-ReturnAddr
    //              ... ion frame data ... |- AccFrame-Descriptor.Size
    //              StubCode             |
    //              AccFrame-Descriptor  |- IonAccessorICFrameLayout::Size()
    //              AccFrame-ReturnAddr  |
    //              ... accessor frame data & args ... |- Descriptor.Size
    //              ActualArgc      |
    //              CalleeToken     |- JitFrameLayout::Size()
    //              Descriptor      |
    //    FP -----> ReturnAddr      |
    masm.bind(&handle_IonAccessorIC);
    {
        // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
        masm.ma_add(StackPointer, scratch1, scratch2);
        masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2);

        // scratch3 := AccFrame-Descriptor.Size
        masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfDescriptor()), scratch3);
#ifdef DEBUG
        // Assert previous frame is an IonJS frame.
        masm.movePtr(scratch3, scratch1);
        masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1);
        {
            Label checkOk;
            masm.branch32(Assembler::Equal, scratch1, Imm32(JitFrame_IonJS), &checkOk);
            masm.assumeUnreachable("IonAccessorIC frame must be preceded by IonJS frame");
            masm.bind(&checkOk);
        }
#endif
        masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3);

        // lastProfilingCallSite := AccFrame-ReturnAddr
        masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfReturnAddress()), scratch1);
        masm.storePtr(scratch1, lastProfilingCallSite);

        // lastProfilingFrame := AccessorFrame + AccFrame-Descriptor.Size +
        //                       IonAccessorICFrameLayout::Size()
        masm.ma_add(scratch2, scratch3, scratch1);
        masm.addPtr(Imm32(IonAccessorICFrameLayout::Size()), scratch1);
        masm.storePtr(scratch1, lastProfilingFrame);
        masm.ret();
    }

    //
    // JitFrame_Entry
    //
    // If at an entry frame, store null into both fields.
    //
    masm.bind(&handle_Entry);
    {
        masm.movePtr(ImmPtr(nullptr), scratch1);
        masm.storePtr(scratch1, lastProfilingCallSite);
        masm.storePtr(scratch1, lastProfilingFrame);
        masm.ret();
    }

    Linker linker(masm);
    AutoFlushICache afc("ProfilerExitFrameTailStub");
    // NOTE(review): |code| is not null-checked before the perf-spewer call
    // below, unlike the wrapper path earlier in this file; newCode can
    // return nullptr on OOM — confirm writePerfSpewerJitCodeProfile
    // tolerates null.
    JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);

#ifdef JS_ION_PERF
    // NOTE(review): label "ProfilerExitFrameStub" differs from the
    // AutoFlushICache tag "ProfilerExitFrameTailStub" — verify which name
    // the perf tooling expects before unifying.
    writePerfSpewerJitCodeProfile(code, "ProfilerExitFrameStub");
#endif

    return code;
}
1408