/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm64/MacroAssembler-arm64.h"

#include "jit/arm64/MoveEmitter-arm64.h"
#include "jit/arm64/SharedICRegisters-arm64.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/MacroAssembler.h"

#include "jit/MacroAssembler-inl.h"

namespace js {
namespace jit {

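// Convert a double to uint8 by rounding to nearest (ties to even, which is
// what FCVTNS does), then clamping the result to [0, 255]. A rough C sketch
// of the intended semantics (an illustration, not the emitted code):
//
//   int32_t r = convert_round_to_nearest_even(input);  // Fcvtns
//   if (r > 0xff) r = 0xff;                            // Cmp/Csel vs. 0xff
//   if (r < 0)    r = 0;                               // Cmp/Csel vs. wzr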
void
MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
{
    ARMRegister dest(output, 32);
    Fcvtns(dest, ARMFPRegister(input, 64));

    {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();

        Mov(scratch32, Operand(0xff));
        Cmp(dest, scratch32);
        Csel(dest, dest, scratch32, LessThan);
    }

    Cmp(dest, Operand(0));
    Csel(dest, dest, wzr, GreaterThan);
}

void
MacroAssembler::alignFrameForICArguments(MacroAssembler::AfterICSaveLive& aic)
{
    // Exists for MIPS compatibility.
}

void
MacroAssembler::restoreFrameAlignmentForICArguments(MacroAssembler::AfterICSaveLive& aic)
{
    // Exists for MIPS compatibility.
}

js::jit::MacroAssembler&
MacroAssemblerCompat::asMasm()
{
    return *static_cast<js::jit::MacroAssembler*>(this);
}

const js::jit::MacroAssembler&
MacroAssemblerCompat::asMasm() const
{
    return *static_cast<const js::jit::MacroAssembler*>(this);
}

vixl::MacroAssembler&
MacroAssemblerCompat::asVIXL()
{
    return *static_cast<vixl::MacroAssembler*>(this);
}

const vixl::MacroAssembler&
MacroAssemblerCompat::asVIXL() const
{
    return *static_cast<const vixl::MacroAssembler*>(this);
}

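// The movePatchablePtr() variants below emit a pc-relative LDR that reads a
// 64-bit immediate out of a constant pool, returning the load's offset so
// the pointer can be patched later. A sketch of the eventual code and pool
// layout (offsets here are placeholders; finishPool() computes them):
//
//   ldr  x<dest>, #<imm19>    ; pc-relative literal load, patched later
//   ...
//   ; constant pool
//   .quad <ptr>               ; 8 bytes = the 2 pool entries of 4 bytes each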
BufferOffset
MacroAssemblerCompat::movePatchablePtr(ImmPtr ptr, Register dest)
{
    const size_t numInst = 1; // Inserting one load instruction.
    const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes.
    uint8_t* literalAddr = (uint8_t*)(&ptr.value); // TODO: Should be const.

    // Scratch space for generating the load instruction.
    //
    // allocEntry() will use InsertIndexIntoTag() to store a temporary
    // index to the corresponding PoolEntry in the instruction itself.
    //
    // That index will be fixed up later when finishPool()
    // walks over all marked loads and calls PatchConstantPoolLoad().
    uint32_t instructionScratch = 0;

    // Emit the instruction mask in the scratch space.
    // The offset doesn't matter: it will be fixed up later.
    vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0);

    // Add the entry to the pool, fix up the LDR imm19 offset,
    // and add the completed instruction to the buffer.
    return allocEntry(numInst, numPoolEntries, (uint8_t*)&instructionScratch,
                      literalAddr);
}

BufferOffset
MacroAssemblerCompat::movePatchablePtr(ImmWord ptr, Register dest)
{
    const size_t numInst = 1; // Inserting one load instruction.
    const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes.
    uint8_t* literalAddr = (uint8_t*)(&ptr.value);

    // Scratch space for generating the load instruction.
    //
    // allocEntry() will use InsertIndexIntoTag() to store a temporary
    // index to the corresponding PoolEntry in the instruction itself.
    //
    // That index will be fixed up later when finishPool()
    // walks over all marked loads and calls PatchConstantPoolLoad().
    uint32_t instructionScratch = 0;

    // Emit the instruction mask in the scratch space.
    // The offset doesn't matter: it will be fixed up later.
    vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0);

    // Add the entry to the pool, fix up the LDR imm19 offset,
    // and add the completed instruction to the buffer.
    return allocEntry(numInst, numPoolEntries, (uint8_t*)&instructionScratch,
                      literalAddr);
}

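// Load a private (non-GC-thing) pointer stored in a Value. Judging from the
// shift below, the boxed form holds the pointer right-shifted by one bit, so
// shifting left by one recovers the raw pointer.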
void
MacroAssemblerCompat::loadPrivate(const Address& src, Register dest)
{
    loadPtr(src, dest);
    asMasm().lshiftPtr(Imm32(1), dest);
}

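// Exception-handling tail: reserve a ResumeFromException on the stack, let
// the C++ |handler| fill it in, then dispatch on the resulting |kind|. Note
// the pseudo stack pointer discipline throughout: the JIT tracks its stack
// in x28, and syncStackPtr() copies x28 into the real sp at every point
// where hardware or a callee will observe sp directly.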
void
MacroAssemblerCompat::handleFailureWithHandlerTail(void* handler)
{
    // Reserve space for exception information.
    int64_t size = (sizeof(ResumeFromException) + 7) & ~7;
    Sub(GetStackPointer64(), GetStackPointer64(), Operand(size));
    if (!GetStackPointer64().Is(sp))
        Mov(sp, GetStackPointer64());

    Mov(x0, GetStackPointer64());

    // Call the handler.
    asMasm().setupUnalignedABICall(r1);
    asMasm().passABIArg(r0);
    asMasm().callWithABI(handler);

    Label entryFrame;
    Label catch_;
    Label finally;
    Label return_;
    Label bailout;

    MOZ_ASSERT(GetStackPointer64().Is(x28)); // Lets the code below be a little cleaner.

    loadPtr(Address(r28, offsetof(ResumeFromException, kind)), r0);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);

    breakpoint(); // Invalid kind.

    // No exception handler. Load the error value, load the new stack pointer,
    // and return from the entry frame.
    bind(&entryFrame);
    moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
    loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28);
    retn(Imm32(1 * sizeof(void*))); // Pop from stack and return.

    // If we found a catch handler, this must be a baseline frame. Restore state
    // and jump to the catch block.
    bind(&catch_);
    loadPtr(Address(r28, offsetof(ResumeFromException, target)), r0);
    loadPtr(Address(r28, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
    loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28);
    syncStackPtr();
    Br(x0);

    // If we found a finally block, this must be a baseline frame.
    // Push two values expected by JSOP_RETSUB: BooleanValue(true)
    // and the exception.
    bind(&finally);
    ARMRegister exception = x1;
    Ldr(exception, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, exception)));
    Ldr(x0, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, target)));
    Ldr(ARMRegister(BaselineFrameReg, 64),
        MemOperand(GetStackPointer64(), offsetof(ResumeFromException, framePointer)));
    Ldr(GetStackPointer64(),
        MemOperand(GetStackPointer64(), offsetof(ResumeFromException, stackPointer)));
    syncStackPtr();
    pushValue(BooleanValue(true));
    push(exception);
    Br(x0);

    // Only used in debug mode. Return BaselineFrame->returnValue() to the caller.
    bind(&return_);
    loadPtr(Address(r28, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
    loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28);
    loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
              JSReturnOperand);
    movePtr(BaselineFrameReg, r28);
    vixl::MacroAssembler::Pop(ARMRegister(BaselineFrameReg, 64), vixl::lr);
    syncStackPtr();
    vixl::MacroAssembler::Ret(vixl::lr);

    // If we are bailing out to baseline to handle an exception,
    // jump to the bailout tail stub.
    bind(&bailout);
    Ldr(x2, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, bailoutInfo)));
    Ldr(x1, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, target)));
    Mov(x0, BAILOUT_RETURN_OK);
    Br(x1);
}

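// Branch on whether |ptr| lies inside the nursery. The two-sided range check
// start <= ptr < start + size is folded into a single unsigned comparison:
// temp = ptr - start, then compare temp (unsigned) against the nursery size.
// Pointers below |start| wrap around and therefore compare AboveOrEqual.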
void
MacroAssemblerCompat::branchPtrInNurseryRange(Condition cond, Register ptr, Register temp,
                                              Label* label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    MOZ_ASSERT(ptr != temp);
    MOZ_ASSERT(ptr != ScratchReg && ptr != ScratchReg2); // Both may be used internally.
    MOZ_ASSERT(temp != ScratchReg && temp != ScratchReg2);

    const Nursery& nursery = GetJitContext()->runtime->gcNursery();
    movePtr(ImmWord(-ptrdiff_t(nursery.start())), temp);
    addPtr(ptr, temp);
    branchPtr(cond == Assembler::Equal ? Assembler::Below : Assembler::AboveOrEqual,
              temp, ImmWord(nursery.nurserySize()), label);
}

void
MacroAssemblerCompat::branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp,
                                                 Label* label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    MOZ_ASSERT(temp != ScratchReg && temp != ScratchReg2); // Both may be used internally.

    const Nursery& nursery = GetJitContext()->runtime->gcNursery();

    // Avoid creating a bogus ObjectValue below.
    if (!nursery.exists())
        return;

    // 'Value' representing the start of the nursery tagged as a JSObject.
    Value start = ObjectValue(*reinterpret_cast<JSObject*>(nursery.start()));

    movePtr(ImmWord(-ptrdiff_t(start.asRawBits())), temp);
    addPtr(value.valueReg(), temp);
    branchPtr(cond == Assembler::Equal ? Assembler::Below : Assembler::AboveOrEqual,
              temp, ImmWord(nursery.nurserySize()), label);
}

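// Emit a brk instruction. Each emitted breakpoint gets a distinct immediate,
// presumably so a crash report can identify which breakpoint was hit.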
void
MacroAssemblerCompat::breakpoint()
{
    static int code = 0xA77;
    Brk((code++) & 0xffff);
}

template<typename T>
void
MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
                                                     Register oldval, Register newval,
                                                     Register temp, AnyRegister output)
{
    switch (arrayType) {
      case Scalar::Int8:
        compareExchange8SignExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Uint8:
        compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Int16:
        compareExchange16SignExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Uint16:
        compareExchange16ZeroExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Int32:
        compareExchange32(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays. See bug 1077305.
        MOZ_ASSERT(output.isFloat());
        compareExchange32(mem, oldval, newval, temp);
        convertUInt32ToDouble(temp, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}

template void
MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
                                                     Register oldval, Register newval, Register temp,
                                                     AnyRegister output);
template void
MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                                     Register oldval, Register newval, Register temp,
                                                     AnyRegister output);

template<typename T>
void
MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
                                                    Register value, Register temp, AnyRegister output)
{
    switch (arrayType) {
      case Scalar::Int8:
        atomicExchange8SignExtend(mem, value, output.gpr());
        break;
      case Scalar::Uint8:
        atomicExchange8ZeroExtend(mem, value, output.gpr());
        break;
      case Scalar::Int16:
        atomicExchange16SignExtend(mem, value, output.gpr());
        break;
      case Scalar::Uint16:
        atomicExchange16ZeroExtend(mem, value, output.gpr());
        break;
      case Scalar::Int32:
        atomicExchange32(mem, value, output.gpr());
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays. See bug 1077305.
        MOZ_ASSERT(output.isFloat());
        atomicExchange32(mem, value, temp);
        convertUInt32ToDouble(temp, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}

template void
MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
                                                    Register value, Register temp, AnyRegister output);
template void
MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                                    Register value, Register temp, AnyRegister output);

//{{{ check_macroassembler_style
// ===============================================================
// Stack manipulation functions.

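// Push every register in |set|, batching up to four registers per vixl
// Push() call so adjacent registers can be stored as stp pairs. The set is
// walked backward here and forward in PopRegsInMaskIgnore() below, so both
// sides agree on each register's offset from the stack pointer.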
void
MacroAssembler::PushRegsInMask(LiveRegisterSet set)
{
    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ) {
        vixl::CPURegister src[4] = { vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg };

        for (size_t i = 0; i < 4 && iter.more(); i++) {
            src[i] = ARMRegister(*iter, 64);
            ++iter;
            adjustFrame(8);
        }
        vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]);
    }

    for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ) {
        vixl::CPURegister src[4] = { vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg };

        for (size_t i = 0; i < 4 && iter.more(); i++) {
            src[i] = ARMFPRegister(*iter, 64);
            ++iter;
            adjustFrame(8);
        }
        vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]);
    }
}

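// Reload every register in |set| except those in |ignore|, then free the
// stack space. Registers are considered two at a time so neighboring
// survivors can share an ldp; ignored registers still advance the offset so
// the layout stays in step with what PushRegsInMask() stored.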
void
MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
{
    // The offset of the data from the stack pointer.
    uint32_t offset = 0;

    for (FloatRegisterIterator iter(set.fpus().reduceSetForPush()); iter.more(); ) {
        vixl::CPURegister dest[2] = { vixl::NoCPUReg, vixl::NoCPUReg };
        uint32_t nextOffset = offset;

        for (size_t i = 0; i < 2 && iter.more(); i++) {
            if (!ignore.has(*iter))
                dest[i] = ARMFPRegister(*iter, 64);
            ++iter;
            nextOffset += sizeof(double);
        }

        if (!dest[0].IsNone() && !dest[1].IsNone())
            Ldp(dest[0], dest[1], MemOperand(GetStackPointer64(), offset));
        else if (!dest[0].IsNone())
            Ldr(dest[0], MemOperand(GetStackPointer64(), offset));
        else if (!dest[1].IsNone())
            Ldr(dest[1], MemOperand(GetStackPointer64(), offset + sizeof(double)));

        offset = nextOffset;
    }

    MOZ_ASSERT(offset == set.fpus().getPushSizeInBytes());

    for (GeneralRegisterIterator iter(set.gprs()); iter.more(); ) {
        vixl::CPURegister dest[2] = { vixl::NoCPUReg, vixl::NoCPUReg };
        uint32_t nextOffset = offset;

        for (size_t i = 0; i < 2 && iter.more(); i++) {
            if (!ignore.has(*iter))
                dest[i] = ARMRegister(*iter, 64);
            ++iter;
            nextOffset += sizeof(uint64_t);
        }

        if (!dest[0].IsNone() && !dest[1].IsNone())
            Ldp(dest[0], dest[1], MemOperand(GetStackPointer64(), offset));
        else if (!dest[0].IsNone())
            Ldr(dest[0], MemOperand(GetStackPointer64(), offset));
        else if (!dest[1].IsNone())
            Ldr(dest[1], MemOperand(GetStackPointer64(), offset + sizeof(uint64_t)));

        offset = nextOffset;
    }

    size_t bytesPushed = set.gprs().size() * sizeof(uint64_t) + set.fpus().getPushSizeInBytes();
    MOZ_ASSERT(offset == bytesPushed);
    freeStack(bytesPushed);
}

void
MacroAssembler::Push(Register reg)
{
    push(reg);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(Register reg1, Register reg2, Register reg3, Register reg4)
{
    push(reg1, reg2, reg3, reg4);
    adjustFrame(4 * sizeof(intptr_t));
}

void
MacroAssembler::Push(const Imm32 imm)
{
    push(imm);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(const ImmWord imm)
{
    push(imm);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(const ImmPtr imm)
{
    push(imm);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(const ImmGCPtr ptr)
{
    push(ptr);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(FloatRegister f)
{
    push(f);
    adjustFrame(sizeof(double));
}

void
MacroAssembler::Pop(Register reg)
{
    pop(reg);
    adjustFrame(-1 * int64_t(sizeof(int64_t)));
}

void
MacroAssembler::Pop(const ValueOperand& val)
{
    pop(val);
    adjustFrame(-1 * int64_t(sizeof(int64_t)));
}

void
MacroAssembler::reserveStack(uint32_t amount)
{
    // TODO: This bumps |sp| every time we reserve using a second register.
    // It would save some instructions if we had a fixed frame size.
    vixl::MacroAssembler::Claim(Operand(amount));
    adjustFrame(amount);
}

// ===============================================================
// Simple call functions.

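// Every call emitted below begins with syncStackPtr(): callees (and the
// hardware itself) observe the real sp, not the pseudo stack pointer the
// JIT tracks in x28, so the two must agree at each call boundary.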
CodeOffset
MacroAssembler::call(Register reg)
{
    syncStackPtr();
    Blr(ARMRegister(reg, 64));
    return CodeOffset(currentOffset());
}

CodeOffset
MacroAssembler::call(Label* label)
{
    syncStackPtr();
    Bl(label);
    return CodeOffset(currentOffset());
}

void
MacroAssembler::call(ImmWord imm)
{
    call(ImmPtr((void*)imm.value));
}

void
MacroAssembler::call(ImmPtr imm)
{
    syncStackPtr();
    movePtr(imm, ip0);
    Blr(vixl::ip0);
}

void
MacroAssembler::call(wasm::SymbolicAddress imm)
{
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    syncStackPtr();
    movePtr(imm, scratch);
    call(scratch);
}

void
MacroAssembler::call(JitCode* c)
{
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    syncStackPtr();
    BufferOffset off = immPool64(scratch64, uint64_t(c->raw()));
    addPendingJump(off, ImmPtr(c->raw()), Relocation::JITCODE);
    blr(scratch64);
}

CodeOffset
MacroAssembler::callWithPatch()
{
    MOZ_CRASH("NYI");
    return CodeOffset();
}
void
MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset)
{
    MOZ_CRASH("NYI");
}

void
MacroAssembler::pushReturnAddress()
{
    push(lr);
}

// ===============================================================
// ABI function calls.

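// Set up a call whose incoming frame alignment is unknown: save lr, remember
// the current pseudo-sp in |scratch|, align the stack down to
// ABIStackAlignment, and stash the old pseudo-sp at the new top of stack.
// The resulting layout, reversed by callWithABIPost(), is roughly:
//
//   [ saved lr       ]   <- pushed first
//   [ alignment pad  ]   (zero or more bytes)
//   [ old pseudo-sp  ]   <- pseudo-sp points here after alignment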
void
MacroAssembler::setupUnalignedABICall(Register scratch)
{
    setupABICall();
    dynamicAlignment_ = true;

    int64_t alignment = ~(int64_t(ABIStackAlignment) - 1);
    ARMRegister scratch64(scratch, 64);

    // Always save LR -- Baseline ICs assume that LR isn't modified.
    push(lr);

    // Unhandled for sp -- needs slightly different logic.
    MOZ_ASSERT(!GetStackPointer64().Is(sp));

    // Remember the stack address on entry.
    Mov(scratch64, GetStackPointer64());

    // Make alignment, including the effective push of the previous sp.
    Sub(GetStackPointer64(), GetStackPointer64(), Operand(8));
    And(GetStackPointer64(), GetStackPointer64(), Operand(alignment));

    // If the PseudoStackPointer is used, sp must be <= psp before a write is valid.
    syncStackPtr();

    // Store previous sp to the top of the stack, aligned.
    Str(scratch64, MemOperand(GetStackPointer64(), 0));
}

void
MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
{
    MOZ_ASSERT(inCall_);
    uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();

    // ARM64 /really/ wants the stack to always be aligned. Since we're
    // already tracking it, getting it aligned for an ABI call is pretty easy.
    MOZ_ASSERT(dynamicAlignment_);
    stackForCall += ComputeByteAlignment(stackForCall, StackAlignment);
    *stackAdjust = stackForCall;
    reserveStack(*stackAdjust);
    {
        moveResolver_.resolve();
        MoveEmitter emitter(*this);
        emitter.emit(moveResolver_);
        emitter.finish();
    }

    // Call boundaries communicate stack via sp.
    syncStackPtr();
}

void
MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
    // Call boundaries communicate stack via sp.
    if (!GetStackPointer64().Is(sp))
        Mov(GetStackPointer64(), sp);

    freeStack(stackAdjust);

    // Restore the stack pointer from entry.
    if (dynamicAlignment_)
        Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), 0));

    // Restore LR.
    pop(lr);

    // TODO: This one shouldn't be necessary -- check that callers
    // aren't enforcing the ABI themselves!
    syncStackPtr();

    // If the ABI's return regs are where ION is expecting them, then
    // no other work needs to be done.

#ifdef DEBUG
    MOZ_ASSERT(inCall_);
    inCall_ = false;
#endif
}

void
MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
{
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    movePtr(fun, scratch);

    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(scratch);
    callWithABIPost(stackAdjust, result);
}

void
MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
{
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    loadPtr(fun, scratch);

    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(scratch);
    callWithABIPost(stackAdjust, result);
}

// ===============================================================
// Jit Frames.

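// Materialize a plausible return address without making a call: Adr takes
// the address of |fakeCallsite|, which is bound right after the push, and
// that address is pushed. The enterNoPool(3) guard keeps a constant pool
// from being emitted between the Adr and the label, which would otherwise
// leave the pushed "return address" pointing into pool data.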
uint32_t
MacroAssembler::pushFakeReturnAddress(Register scratch)
{
    enterNoPool(3);
    Label fakeCallsite;

    Adr(ARMRegister(scratch, 64), &fakeCallsite);
    Push(scratch);
    bind(&fakeCallsite);
    uint32_t pseudoReturnOffset = currentOffset();

    leaveNoPool();
    return pseudoReturnOffset;
}

//}}} check_macroassembler_style

} // namespace jit
} // namespace js