/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm64/MacroAssembler-arm64.h"

#include "jit/arm64/MoveEmitter-arm64.h"
#include "jit/arm64/SharedICRegisters-arm64.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/MacroAssembler.h"

#include "jit/MacroAssembler-inl.h"

namespace js {
namespace jit {

void
MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
{
    ARMRegister dest(output, 32);
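    // Round to the nearest integer (FCVTNS rounds ties to even), then clamp
    // the result into the uint8 range [0, 255] with the conditional selects below.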
    Fcvtns(dest, ARMFPRegister(input, 64));

    {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();

        Mov(scratch32, Operand(0xff));
        Cmp(dest, scratch32);
        Csel(dest, dest, scratch32, LessThan);
    }

    Cmp(dest, Operand(0));
    Csel(dest, dest, wzr, GreaterThan);
}

void
MacroAssembler::alignFrameForICArguments(MacroAssembler::AfterICSaveLive& aic)
{
    // Exists for MIPS compatibility.
}

void
MacroAssembler::restoreFrameAlignmentForICArguments(MacroAssembler::AfterICSaveLive& aic)
{
    // Exists for MIPS compatibility.
}

js::jit::MacroAssembler&
MacroAssemblerCompat::asMasm()
{
    return *static_cast<js::jit::MacroAssembler*>(this);
}

const js::jit::MacroAssembler&
MacroAssemblerCompat::asMasm() const
{
    return *static_cast<const js::jit::MacroAssembler*>(this);
}

vixl::MacroAssembler&
MacroAssemblerCompat::asVIXL()
{
    return *static_cast<vixl::MacroAssembler*>(this);
}

const vixl::MacroAssembler&
MacroAssemblerCompat::asVIXL() const
{
    return *static_cast<const vixl::MacroAssembler*>(this);
}

BufferOffset
MacroAssemblerCompat::movePatchablePtr(ImmPtr ptr, Register dest)
{
    const size_t numInst = 1; // Inserting one load instruction.
    const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes.
    uint8_t* literalAddr = (uint8_t*)(&ptr.value); // TODO: Should be const.

    // Scratch space for generating the load instruction.
    //
    // allocEntry() will use InsertIndexIntoTag() to store a temporary
    // index to the corresponding PoolEntry in the instruction itself.
    //
    // That index will be fixed up later when finishPool()
    // walks over all marked loads and calls PatchConstantPoolLoad().
    uint32_t instructionScratch = 0;

    // Emit the instruction mask in the scratch space.
    // The offset doesn't matter: it will be fixed up later.
    vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0);

    // Add the entry to the pool, fix up the LDR imm19 offset,
    // and add the completed instruction to the buffer.
    return allocEntry(numInst, numPoolEntries, (uint8_t*)&instructionScratch,
                      literalAddr);
}

BufferOffset
MacroAssemblerCompat::movePatchablePtr(ImmWord ptr, Register dest)
{
    const size_t numInst = 1; // Inserting one load instruction.
    const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes.
    uint8_t* literalAddr = (uint8_t*)(&ptr.value);

    // Scratch space for generating the load instruction.
    //
    // allocEntry() will use InsertIndexIntoTag() to store a temporary
    // index to the corresponding PoolEntry in the instruction itself.
    //
    // That index will be fixed up later when finishPool()
    // walks over all marked loads and calls PatchConstantPoolLoad().
    uint32_t instructionScratch = 0;

    // Emit the instruction mask in the scratch space.
    // The offset doesn't matter: it will be fixed up later.
    vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0);

    // Add the entry to the pool, fix up the LDR imm19 offset,
    // and add the completed instruction to the buffer.
    return allocEntry(numInst, numPoolEntries, (uint8_t*)&instructionScratch,
                      literalAddr);
}

void
MacroAssemblerCompat::loadPrivate(const Address& src, Register dest)
{
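    // Private values are stored as the pointer shifted right by one bit, so
    // undo the shift after loading to recover the original pointer.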
    loadPtr(src, dest);
    asMasm().lshiftPtr(Imm32(1), dest);
}

void
MacroAssemblerCompat::handleFailureWithHandlerTail(void* handler)
{
    // Reserve space for exception information.
    int64_t size = (sizeof(ResumeFromException) + 7) & ~7;
    Sub(GetStackPointer64(), GetStackPointer64(), Operand(size));
    if (!GetStackPointer64().Is(sp))
        Mov(sp, GetStackPointer64());

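    // Pass a pointer to the reserved ResumeFromException memory as the sole
    // argument to the handler.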
    Mov(x0, GetStackPointer64());

    // Call the handler.
    asMasm().setupUnalignedABICall(r1);
    asMasm().passABIArg(r0);
    asMasm().callWithABI(handler);

    Label entryFrame;
    Label catch_;
    Label finally;
    Label return_;
    Label bailout;

    MOZ_ASSERT(GetStackPointer64().Is(x28)); // Lets the code below be a little cleaner.

    loadPtr(Address(r28, offsetof(ResumeFromException, kind)), r0);
    asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
                      &entryFrame);
    asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
    asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
    asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FORCED_RETURN),
                      &return_);
    asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);

    breakpoint(); // Invalid kind.

    // No exception handler. Load the error value, load the new stack pointer,
    // and return from the entry frame.
    bind(&entryFrame);
    moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
    loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28);
    retn(Imm32(1 * sizeof(void*))); // Pop from stack and return.

    // If we found a catch handler, this must be a baseline frame. Restore state
    // and jump to the catch block.
    bind(&catch_);
    loadPtr(Address(r28, offsetof(ResumeFromException, target)), r0);
    loadPtr(Address(r28, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
    loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28);
    syncStackPtr();
    Br(x0);

    // If we found a finally block, this must be a baseline frame.
    // Push two values expected by JSOP_RETSUB: BooleanValue(true)
    // and the exception.
    bind(&finally);
    ARMRegister exception = x1;
    Ldr(exception, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, exception)));
    Ldr(x0, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, target)));
    Ldr(ARMRegister(BaselineFrameReg, 64),
        MemOperand(GetStackPointer64(), offsetof(ResumeFromException, framePointer)));
    Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), offsetof(ResumeFromException, stackPointer)));
    syncStackPtr();
    pushValue(BooleanValue(true));
    push(exception);
    Br(x0);

    // Only used in debug mode. Return BaselineFrame->returnValue() to the caller.
    bind(&return_);
    loadPtr(Address(r28, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
    loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28);
    loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
              JSReturnOperand);
    movePtr(BaselineFrameReg, r28);
    vixl::MacroAssembler::Pop(ARMRegister(BaselineFrameReg, 64), vixl::lr);
    syncStackPtr();
    vixl::MacroAssembler::Ret(vixl::lr);

    // If we are bailing out to baseline to handle an exception,
    // jump to the bailout tail stub.
    bind(&bailout);
    Ldr(x2, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, bailoutInfo)));
    Ldr(x1, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, target)));
    Mov(x0, BAILOUT_RETURN_OK);
    Br(x1);
}

void
MacroAssemblerCompat::breakpoint()
{
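    // Emit a BRK with a distinct (incrementing) immediate so that individual
    // breakpoints can be told apart when debugging.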
    static int code = 0xA77;
    Brk((code++) & 0xffff);
}

template<typename T>
void
MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
                                                     Register oldval, Register newval,
                                                     Register temp, AnyRegister output)
{
    switch (arrayType) {
      case Scalar::Int8:
        compareExchange8SignExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Uint8:
        compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Int16:
        compareExchange16SignExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Uint16:
        compareExchange16ZeroExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Int32:
        compareExchange32(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays.  See bug 1077305.
        MOZ_ASSERT(output.isFloat());
        compareExchange32(mem, oldval, newval, temp);
        convertUInt32ToDouble(temp, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}

template void
MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
                                                     Register oldval, Register newval, Register temp,
                                                     AnyRegister output);
template void
MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                                     Register oldval, Register newval, Register temp,
                                                     AnyRegister output);

template<typename T>
void
MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
                                                    Register value, Register temp, AnyRegister output)
{
    switch (arrayType) {
      case Scalar::Int8:
        atomicExchange8SignExtend(mem, value, output.gpr());
        break;
      case Scalar::Uint8:
        atomicExchange8ZeroExtend(mem, value, output.gpr());
        break;
      case Scalar::Int16:
        atomicExchange16SignExtend(mem, value, output.gpr());
        break;
      case Scalar::Uint16:
        atomicExchange16ZeroExtend(mem, value, output.gpr());
        break;
      case Scalar::Int32:
        atomicExchange32(mem, value, output.gpr());
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays.  See bug 1077305.
        MOZ_ASSERT(output.isFloat());
        atomicExchange32(mem, value, temp);
        convertUInt32ToDouble(temp, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}

template void
MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
                                                    Register value, Register temp, AnyRegister output);
template void
MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                                    Register value, Register temp, AnyRegister output);

void
MacroAssembler::reserveStack(uint32_t amount)
{
    // TODO: This bumps |sp| every time we reserve using a second register.
    // It would save some instructions if we had a fixed frame size.
    vixl::MacroAssembler::Claim(Operand(amount));
    adjustFrame(amount);
}

//{{{ check_macroassembler_style
// ===============================================================
// MacroAssembler high-level usage.

void
MacroAssembler::flush()
{
    Assembler::flush();
}

// ===============================================================
// Stack manipulation functions.

void
MacroAssembler::PushRegsInMask(LiveRegisterSet set)
{
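    // Push registers in batches of up to four; unused slots stay NoCPUReg,
    // which vixl's Push simply ignores.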
    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ) {
        vixl::CPURegister src[4] = { vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg };

        for (size_t i = 0; i < 4 && iter.more(); i++) {
            src[i] = ARMRegister(*iter, 64);
            ++iter;
            adjustFrame(8);
        }
        vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]);
    }

    for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ) {
        vixl::CPURegister src[4] = { vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg };

        for (size_t i = 0; i < 4 && iter.more(); i++) {
            src[i] = ARMFPRegister(*iter, 64);
            ++iter;
            adjustFrame(8);
        }
        vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]);
    }
}

void
MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
{
    // The offset of the data from the stack pointer.
    uint32_t offset = 0;

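    // Floating-point registers were pushed last, so read them back first.
    // Registers in the ignore set keep their stack slots but are not loaded.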
    for (FloatRegisterIterator iter(set.fpus().reduceSetForPush()); iter.more(); ) {
        vixl::CPURegister dest[2] = { vixl::NoCPUReg, vixl::NoCPUReg };
        uint32_t nextOffset = offset;

        for (size_t i = 0; i < 2 && iter.more(); i++) {
            if (!ignore.has(*iter))
                dest[i] = ARMFPRegister(*iter, 64);
            ++iter;
            nextOffset += sizeof(double);
        }

        if (!dest[0].IsNone() && !dest[1].IsNone())
            Ldp(dest[0], dest[1], MemOperand(GetStackPointer64(), offset));
        else if (!dest[0].IsNone())
            Ldr(dest[0], MemOperand(GetStackPointer64(), offset));
        else if (!dest[1].IsNone())
            Ldr(dest[1], MemOperand(GetStackPointer64(), offset + sizeof(double)));

        offset = nextOffset;
    }

    MOZ_ASSERT(offset == set.fpus().getPushSizeInBytes());

    for (GeneralRegisterIterator iter(set.gprs()); iter.more(); ) {
        vixl::CPURegister dest[2] = { vixl::NoCPUReg, vixl::NoCPUReg };
        uint32_t nextOffset = offset;

        for (size_t i = 0; i < 2 && iter.more(); i++) {
            if (!ignore.has(*iter))
                dest[i] = ARMRegister(*iter, 64);
            ++iter;
            nextOffset += sizeof(uint64_t);
        }

        if (!dest[0].IsNone() && !dest[1].IsNone())
            Ldp(dest[0], dest[1], MemOperand(GetStackPointer64(), offset));
        else if (!dest[0].IsNone())
            Ldr(dest[0], MemOperand(GetStackPointer64(), offset));
        else if (!dest[1].IsNone())
            Ldr(dest[1], MemOperand(GetStackPointer64(), offset + sizeof(uint64_t)));

        offset = nextOffset;
    }

    size_t bytesPushed = set.gprs().size() * sizeof(uint64_t) + set.fpus().getPushSizeInBytes();
    MOZ_ASSERT(offset == bytesPushed);
    freeStack(bytesPushed);
}

void
MacroAssembler::Push(Register reg)
{
    push(reg);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(Register reg1, Register reg2, Register reg3, Register reg4)
{
    push(reg1, reg2, reg3, reg4);
    adjustFrame(4 * sizeof(intptr_t));
}

void
MacroAssembler::Push(const Imm32 imm)
{
    push(imm);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(const ImmWord imm)
{
    push(imm);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(const ImmPtr imm)
{
    push(imm);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(const ImmGCPtr ptr)
{
    push(ptr);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(FloatRegister f)
{
    push(f);
    adjustFrame(sizeof(double));
}

void
MacroAssembler::Pop(Register reg)
{
    pop(reg);
    adjustFrame(-1 * int64_t(sizeof(int64_t)));
}

void
MacroAssembler::Pop(FloatRegister f)
{
    MOZ_CRASH("NYI: Pop(FloatRegister)");
}

void
MacroAssembler::Pop(const ValueOperand& val)
{
    pop(val);
    adjustFrame(-1 * int64_t(sizeof(int64_t)));
}

// ===============================================================
// Simple call functions.

CodeOffset
MacroAssembler::call(Register reg)
{
    syncStackPtr();
    Blr(ARMRegister(reg, 64));
    return CodeOffset(currentOffset());
}

CodeOffset
MacroAssembler::call(Label* label)
{
    syncStackPtr();
    Bl(label);
    return CodeOffset(currentOffset());
}

void
MacroAssembler::call(ImmWord imm)
{
    call(ImmPtr((void*)imm.value));
}

void
MacroAssembler::call(ImmPtr imm)
{
    syncStackPtr();
    movePtr(imm, ip0);
    Blr(vixl::ip0);
}

void
MacroAssembler::call(wasm::SymbolicAddress imm)
{
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    syncStackPtr();
    movePtr(imm, scratch);
    call(scratch);
}

void
MacroAssembler::call(JitCode* c)
{
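    // Load the target address from the constant pool and record a JITCODE
    // relocation so the GC can trace (and, if needed, patch) the embedded
    // code pointer.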
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    syncStackPtr();
    BufferOffset off = immPool64(scratch64, uint64_t(c->raw()));
    addPendingJump(off, ImmPtr(c->raw()), Relocation::JITCODE);
    blr(scratch64);
}

CodeOffset
MacroAssembler::callWithPatch()
{
    MOZ_CRASH("NYI");
    return CodeOffset();
}
void
MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset)
{
    MOZ_CRASH("NYI");
}

CodeOffset
MacroAssembler::farJumpWithPatch()
{
    MOZ_CRASH("NYI");
}

void
MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset)
{
    MOZ_CRASH("NYI");
}

void
MacroAssembler::repatchFarJump(uint8_t* code, uint32_t farJumpOffset, uint32_t targetOffset)
{
    MOZ_CRASH("NYI");
}

CodeOffset
MacroAssembler::nopPatchableToNearJump()
{
    MOZ_CRASH("NYI");
}

void
MacroAssembler::patchNopToNearJump(uint8_t* jump, uint8_t* target)
{
    MOZ_CRASH("NYI");
}

void
MacroAssembler::patchNearJumpToNop(uint8_t* jump)
{
    MOZ_CRASH("NYI");
}

void
MacroAssembler::pushReturnAddress()
{
    push(lr);
}

void
MacroAssembler::popReturnAddress()
{
    pop(lr);
}

// ===============================================================
// ABI function calls.

void
MacroAssembler::setupUnalignedABICall(Register scratch)
{
    setupABICall();
    dynamicAlignment_ = true;

    int64_t alignment = ~(int64_t(ABIStackAlignment) - 1);
    ARMRegister scratch64(scratch, 64);

    // Always save LR -- Baseline ICs assume that LR isn't modified.
    push(lr);

    // Unhandled for sp -- needs slightly different logic.
    MOZ_ASSERT(!GetStackPointer64().Is(sp));

    // Remember the stack address on entry.
    Mov(scratch64, GetStackPointer64());

    // Make alignment, including the effective push of the previous sp.
    Sub(GetStackPointer64(), GetStackPointer64(), Operand(8));
    And(GetStackPointer64(), GetStackPointer64(), Operand(alignment));

    // If the PseudoStackPointer is used, sp must be <= psp before a write is valid.
    syncStackPtr();

    // Store previous sp to the top of the stack, aligned.
    Str(scratch64, MemOperand(GetStackPointer64(), 0));
}

void
MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
{
    MOZ_ASSERT(inCall_);
    uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();

    // ARM64 /really/ wants the stack to always be aligned. Since we're already
    // tracking it, getting it aligned for an ABI call is pretty easy.
    MOZ_ASSERT(dynamicAlignment_);
    stackForCall += ComputeByteAlignment(stackForCall, StackAlignment);
    *stackAdjust = stackForCall;
    reserveStack(*stackAdjust);
    {
        moveResolver_.resolve();
        MoveEmitter emitter(*this);
        emitter.emit(moveResolver_);
        emitter.finish();
    }

    // Call boundaries communicate stack via sp.
    syncStackPtr();
}

void
MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
    // Call boundaries communicate stack via sp.
    if (!GetStackPointer64().Is(sp))
        Mov(GetStackPointer64(), sp);

    freeStack(stackAdjust);

    // Restore the stack pointer from entry.
    if (dynamicAlignment_)
        Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), 0));

    // Restore LR.
    pop(lr);

    // TODO: This one shouldn't be necessary -- check that callers
    // aren't enforcing the ABI themselves!
    syncStackPtr();

    // If the ABI's return regs are where ION is expecting them, then
    // no other work needs to be done.

#ifdef DEBUG
    MOZ_ASSERT(inCall_);
    inCall_ = false;
#endif
}

void
MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
{
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    movePtr(fun, scratch);

    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(scratch);
    callWithABIPost(stackAdjust, result);
}

void
MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
{
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    loadPtr(fun, scratch);

    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(scratch);
    callWithABIPost(stackAdjust, result);
}

// ===============================================================
// Jit Frames.

uint32_t
MacroAssembler::pushFakeReturnAddress(Register scratch)
{
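    // Forbid constant pools over the next few instructions so nothing can
    // come between the ADR and the label whose address it computes.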
    enterNoPool(3);
    Label fakeCallsite;

    Adr(ARMRegister(scratch, 64), &fakeCallsite);
    Push(scratch);
    bind(&fakeCallsite);
    uint32_t pseudoReturnOffset = currentOffset();

    leaveNoPool();
    return pseudoReturnOffset;
}

// ===============================================================
// Branch functions

void
MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
                                        Label* label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    MOZ_ASSERT(ptr != temp);
    MOZ_ASSERT(ptr != ScratchReg && ptr != ScratchReg2); // Both may be used internally.
    MOZ_ASSERT(temp != ScratchReg && temp != ScratchReg2);

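    // OR-ing in ChunkMask yields the address of the chunk's last byte; the
    // chunk's location field is then read at a fixed offset from that byte.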
    movePtr(ptr, temp);
    orPtr(Imm32(gc::ChunkMask), temp);
    branch32(cond, Address(temp, gc::ChunkLocationOffsetFromLastByte),
             Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
}

void
MacroAssembler::branchValueIsNurseryObject(Condition cond, const Address& address, Register temp,
                                           Label* label)
{
    branchValueIsNurseryObjectImpl(cond, address, temp, label);
}

void
MacroAssembler::branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp,
                                           Label* label)
{
    branchValueIsNurseryObjectImpl(cond, value, temp, label);
}

template <typename T>
void
MacroAssembler::branchValueIsNurseryObjectImpl(Condition cond, const T& value, Register temp,
                                               Label* label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    MOZ_ASSERT(temp != ScratchReg && temp != ScratchReg2); // Both may be used internally.

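    // Non-object values can never be nursery objects: for Equal fall through
    // to |done|, for NotEqual branch straight to the label.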
    Label done;
    branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ? &done : label);

    extractObject(value, temp);
    orPtr(Imm32(gc::ChunkMask), temp);
    branch32(cond, Address(temp, gc::ChunkLocationOffsetFromLastByte),
             Imm32(int32_t(gc::ChunkLocation::Nursery)), label);

    bind(&done);
}

void
MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
                                const Value& rhs, Label* label)
{
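    // Materialize the constant Value into a scratch register and compare the
    // full 64-bit boxed representations.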
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != lhs.valueReg());
    moveValue(rhs, ValueOperand(scratch64.asUnsized()));
    Cmp(ARMRegister(lhs.valueReg(), 64), scratch64);
    B(label, cond);
}

// ========================================================================
// Memory access primitives.
template <typename T>
void
MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                                  const T& dest, MIRType slotType)
{
    if (valueType == MIRType::Double) {
        storeDouble(value.reg().typedReg().fpu(), dest);
        return;
    }

    // For known integers and booleans, we can just store the unboxed value if
    // the slot has the same type.
    if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) && slotType == valueType) {
        if (value.constant()) {
            Value val = value.value();
            if (valueType == MIRType::Int32)
                store32(Imm32(val.toInt32()), dest);
            else
                store32(Imm32(val.toBoolean() ? 1 : 0), dest);
        } else {
            store32(value.reg().typedReg().gpr(), dest);
        }
        return;
    }

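    // Otherwise store a fully boxed Value.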
    if (value.constant())
        storeValue(value.value(), dest);
    else
        storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest);
}

template void
MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                                  const Address& dest, MIRType slotType);
template void
MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                                  const BaseIndex& dest, MIRType slotType);

void
MacroAssembler::comment(const char* msg)
{
    Assembler::comment(msg);
}

//}}} check_macroassembler_style

} // namespace jit
} // namespace js