1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2  * vim: set ts=8 sts=4 et sw=4 tw=99:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #ifndef jit_arm64_MacroAssembler_arm64_h
8 #define jit_arm64_MacroAssembler_arm64_h
9 
10 #include "jit/arm64/Assembler-arm64.h"
11 #include "jit/arm64/vixl/Debugger-vixl.h"
12 #include "jit/arm64/vixl/MacroAssembler-vixl.h"
13 
14 #include "jit/AtomicOp.h"
15 #include "jit/JitFrames.h"
16 #include "jit/MoveResolver.h"
17 
18 namespace js {
19 namespace jit {
20 
21 // Import VIXL operands directly into the jit namespace for shared code.
22 using vixl::Operand;
23 using vixl::MemOperand;
24 
25 struct ImmShiftedTag : public ImmWord
26 {
ImmShiftedTagImmShiftedTag27     ImmShiftedTag(JSValueShiftedTag shtag)
28       : ImmWord((uintptr_t)shtag)
29     { }
30 
ImmShiftedTagImmShiftedTag31     ImmShiftedTag(JSValueType type)
32       : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type))))
33     { }
34 };
35 
// A raw, unshifted JSValue tag wrapped as a 32-bit immediate.
struct ImmTag : public Imm32
{
    ImmTag(JSValueTag tag)
      : Imm32(tag)
    { }
};
42 
43 class MacroAssemblerCompat : public vixl::MacroAssembler
44 {
45   public:
46     typedef vixl::Condition Condition;
47 
48   private:
49     // Perform a downcast. Should be removed by Bug 996602.
50     js::jit::MacroAssembler& asMasm();
51     const js::jit::MacroAssembler& asMasm() const;
52 
53   public:
54     // Restrict to only VIXL-internal functions.
55     vixl::MacroAssembler& asVIXL();
56     const MacroAssembler& asVIXL() const;
57 
58   protected:
59     bool enoughMemory_;
60     uint32_t framePushed_;
61 
MacroAssemblerCompat()62     MacroAssemblerCompat()
63       : vixl::MacroAssembler(),
64         enoughMemory_(true),
65         framePushed_(0)
66     { }
67 
68   protected:
69     MoveResolver moveResolver_;
70 
71   public:
oom()72     bool oom() const {
73         return Assembler::oom() || !enoughMemory_;
74     }
toMemOperand(Address & a)75     static MemOperand toMemOperand(Address& a) {
76         return MemOperand(ARMRegister(a.base, 64), a.offset);
77     }
    // Emit a load/store of |rt| addressed by base + (index << scale) + offset.
    // Uses the hardware scaled-register addressing mode when possible;
    // otherwise materializes base + scaled index into a scratch register.
    void doBaseIndex(const vixl::CPURegister& rt, const BaseIndex& addr, vixl::LoadStoreOp op) {
        const ARMRegister base = ARMRegister(addr.base, 64);
        const ARMRegister index = ARMRegister(addr.index, 64);
        const unsigned scale = addr.scale;

        // Fast path: [base, index, LSL #scale] is encodable only with no
        // offset and a shift of 0 or the transfer size.
        if (!addr.offset && (!scale || scale == static_cast<unsigned>(CalcLSDataSize(op)))) {
            LoadStoreMacro(rt, MemOperand(base, index, vixl::LSL, scale), op);
            return;
        }

        vixl::UseScratchRegisterScope temps(this);
        ARMRegister scratch64 = temps.AcquireX();
        // The scratch must not alias any register taking part in the access.
        MOZ_ASSERT(!scratch64.Is(rt));
        MOZ_ASSERT(!scratch64.Is(base));
        MOZ_ASSERT(!scratch64.Is(index));

        Add(scratch64, base, Operand(index, vixl::LSL, scale));
        LoadStoreMacro(rt, MemOperand(scratch64, addr.offset), op);
    }
Push(ARMRegister reg)97     void Push(ARMRegister reg) {
98         push(reg);
99         adjustFrame(reg.size() / 8);
100     }
Push(Register reg)101     void Push(Register reg) {
102         vixl::MacroAssembler::Push(ARMRegister(reg, 64));
103         adjustFrame(8);
104     }
Push(Imm32 imm)105     void Push(Imm32 imm) {
106         push(imm);
107         adjustFrame(8);
108     }
Push(FloatRegister f)109     void Push(FloatRegister f) {
110         push(ARMFPRegister(f, 64));
111         adjustFrame(8);
112     }
Push(ImmPtr imm)113     void Push(ImmPtr imm) {
114         push(imm);
115         adjustFrame(sizeof(void*));
116     }
push(FloatRegister f)117     void push(FloatRegister f) {
118         vixl::MacroAssembler::Push(ARMFPRegister(f, 64));
119     }
push(ARMFPRegister f)120     void push(ARMFPRegister f) {
121         vixl::MacroAssembler::Push(f);
122     }
push(Imm32 imm)123     void push(Imm32 imm) {
124         if (imm.value == 0) {
125             vixl::MacroAssembler::Push(vixl::xzr);
126         } else {
127             vixl::UseScratchRegisterScope temps(this);
128             const ARMRegister scratch64 = temps.AcquireX();
129             move32(imm, scratch64.asUnsized());
130             vixl::MacroAssembler::Push(scratch64);
131         }
132     }
push(ImmWord imm)133     void push(ImmWord imm) {
134         if (imm.value == 0) {
135             vixl::MacroAssembler::Push(vixl::xzr);
136         } else {
137             vixl::UseScratchRegisterScope temps(this);
138             const ARMRegister scratch64 = temps.AcquireX();
139             Mov(scratch64, imm.value);
140             vixl::MacroAssembler::Push(scratch64);
141         }
142     }
push(ImmPtr imm)143     void push(ImmPtr imm) {
144         if (imm.value == nullptr) {
145             vixl::MacroAssembler::Push(vixl::xzr);
146         } else {
147             vixl::UseScratchRegisterScope temps(this);
148             const ARMRegister scratch64 = temps.AcquireX();
149             movePtr(imm, scratch64.asUnsized());
150             vixl::MacroAssembler::Push(scratch64);
151         }
152     }
push(ImmGCPtr imm)153     void push(ImmGCPtr imm) {
154         if (imm.value == nullptr) {
155             vixl::MacroAssembler::Push(vixl::xzr);
156         } else {
157             vixl::UseScratchRegisterScope temps(this);
158             const ARMRegister scratch64 = temps.AcquireX();
159             movePtr(imm, scratch64.asUnsized());
160             vixl::MacroAssembler::Push(scratch64);
161         }
162     }
push(ARMRegister reg)163     void push(ARMRegister reg) {
164         vixl::MacroAssembler::Push(reg);
165     }
push(Address a)166     void push(Address a) {
167         vixl::UseScratchRegisterScope temps(this);
168         const ARMRegister scratch64 = temps.AcquireX();
169         MOZ_ASSERT(a.base != scratch64.asUnsized());
170         loadPtr(a, scratch64.asUnsized());
171         vixl::MacroAssembler::Push(scratch64);
172     }
173 
174     // Push registers.
push(Register reg)175     void push(Register reg) {
176         vixl::MacroAssembler::Push(ARMRegister(reg, 64));
177     }
push(Register r0,Register r1)178     void push(Register r0, Register r1) {
179         vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64));
180     }
push(Register r0,Register r1,Register r2)181     void push(Register r0, Register r1, Register r2) {
182         vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64), ARMRegister(r2, 64));
183     }
push(Register r0,Register r1,Register r2,Register r3)184     void push(Register r0, Register r1, Register r2, Register r3) {
185         vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64),
186                                    ARMRegister(r2, 64), ARMRegister(r3, 64));
187     }
push(ARMFPRegister r0,ARMFPRegister r1,ARMFPRegister r2,ARMFPRegister r3)188     void push(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2, ARMFPRegister r3) {
189         vixl::MacroAssembler::Push(r0, r1, r2, r3);
190     }
191 
192     // Pop registers.
pop(Register reg)193     void pop(Register reg) {
194         vixl::MacroAssembler::Pop(ARMRegister(reg, 64));
195     }
pop(Register r0,Register r1)196     void pop(Register r0, Register r1) {
197         vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64));
198     }
pop(Register r0,Register r1,Register r2)199     void pop(Register r0, Register r1, Register r2) {
200         vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64), ARMRegister(r2, 64));
201     }
pop(Register r0,Register r1,Register r2,Register r3)202     void pop(Register r0, Register r1, Register r2, Register r3) {
203         vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64),
204                                   ARMRegister(r2, 64), ARMRegister(r3, 64));
205     }
pop(ARMFPRegister r0,ARMFPRegister r1,ARMFPRegister r2,ARMFPRegister r3)206     void pop(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2, ARMFPRegister r3) {
207         vixl::MacroAssembler::Pop(r0, r1, r2, r3);
208     }
209 
pop(const ValueOperand & v)210     void pop(const ValueOperand& v) {
211         pop(v.valueReg());
212     }
pop(const FloatRegister & f)213     void pop(const FloatRegister& f) {
214         vixl::MacroAssembler::Pop(ARMRegister(f.code(), 64));
215     }
216 
implicitPop(uint32_t args)217     void implicitPop(uint32_t args) {
218         MOZ_ASSERT(args % sizeof(intptr_t) == 0);
219         adjustFrame(-args);
220     }
Pop(ARMRegister r)221     void Pop(ARMRegister r) {
222         vixl::MacroAssembler::Pop(r);
223         adjustFrame(- r.size() / 8);
224     }
225     // FIXME: This is the same on every arch.
226     // FIXME: If we can share framePushed_, we can share this.
227     // FIXME: Or just make it at the highest level.
PushWithPatch(ImmWord word)228     CodeOffset PushWithPatch(ImmWord word) {
229         framePushed_ += sizeof(word.value);
230         return pushWithPatch(word);
231     }
PushWithPatch(ImmPtr ptr)232     CodeOffset PushWithPatch(ImmPtr ptr) {
233         return PushWithPatch(ImmWord(uintptr_t(ptr.value)));
234     }
235 
    // Current depth, in bytes, of the tracked stack frame.
    uint32_t framePushed() const {
        return framePushed_;
    }
    // Adjust the tracked frame depth by |diff| bytes (positive grows it).
    void adjustFrame(int32_t diff) {
        setFramePushed(framePushed_ + diff);
    }

    void setFramePushed(uint32_t framePushed) {
        framePushed_ = framePushed;
    }

    // Release |amount| bytes of stack by dropping the stack pointer.
    void freeStack(Register amount) {
        vixl::MacroAssembler::Drop(Operand(ARMRegister(amount, 64)));
    }
250 
251     // Update sp with the value of the current active stack pointer, if necessary.
syncStackPtr()252     void syncStackPtr() {
253         if (!GetStackPointer64().Is(vixl::sp))
254             Mov(vixl::sp, GetStackPointer64());
255     }
initStackPtr()256     void initStackPtr() {
257         if (!GetStackPointer64().Is(vixl::sp))
258             Mov(GetStackPointer64(), vixl::sp);
259     }
    // Store the boxed Value held in |val| to |dest|.
    void storeValue(ValueOperand val, const Address& dest) {
        storePtr(val.valueReg(), dest);
    }

    // Box |reg| with |type|'s tag in a scratch register, then store the
    // resulting Value to |dest|.
    template <typename T>
    void storeValue(JSValueType type, Register reg, const T& dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != reg);
        tagValue(type, reg, ValueOperand(scratch));
        storeValue(ValueOperand(scratch), dest);
    }
    // Materialize the constant |val| in a scratch register and store it.
    template <typename T>
    void storeValue(const Value& val, const T& dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        moveValue(val, ValueOperand(scratch));
        storeValue(ValueOperand(scratch), dest);
    }
    void storeValue(ValueOperand val, BaseIndex dest) {
        storePtr(val.valueReg(), dest);
    }
282 
    // Store |value| to |dest|, using an unboxed store when the slot's type
    // makes the tag redundant, otherwise a full boxed-Value store.
    template <typename T>
    void storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest, MIRType slotType) {
        if (valueType == MIRType_Double) {
            storeDouble(value.reg().typedReg().fpu(), dest);
            return;
        }

        // For known integers and booleans, we can just store the unboxed value if
        // the slot has the same type.
        if ((valueType == MIRType_Int32 || valueType == MIRType_Boolean) && slotType == valueType) {
            if (value.constant()) {
                Value val = value.value();
                if (valueType == MIRType_Int32)
                    store32(Imm32(val.toInt32()), dest);
                else
                    store32(Imm32(val.toBoolean() ? 1 : 0), dest);
            } else {
                store32(value.reg().typedReg().gpr(), dest);
            }
            return;
        }

        // Fallback: box and store the full Value.
        if (value.constant())
            storeValue(value.value(), dest);
        else
            storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest);

    }
    // Load a 64-bit boxed Value from |src| into an integer register.
    void loadValue(Address src, Register val) {
        Ldr(ARMRegister(val, 64), MemOperand(src));
    }
    void loadValue(Address src, ValueOperand val) {
        Ldr(ARMRegister(val.valueReg(), 64), MemOperand(src));
    }
    void loadValue(const BaseIndex& src, ValueOperand val) {
        doBaseIndex(ARMRegister(val.valueReg(), 64), src, vixl::LDR_x);
    }
    // Box |payload| by OR-ing in |type|'s shifted tag. Orr only produces a
    // correct box when the payload has no bits set in the tag position.
    void tagValue(JSValueType type, Register payload, ValueOperand dest) {
        // This could be cleverer, but the first attempt had bugs.
        Orr(ARMRegister(dest.valueReg(), 64), ARMRegister(payload, 64), Operand(ImmShiftedTag(type).value));
    }
    void pushValue(ValueOperand val) {
        vixl::MacroAssembler::Push(ARMRegister(val.valueReg(), 64));
    }
    void popValue(ValueOperand val) {
        vixl::MacroAssembler::Pop(ARMRegister(val.valueReg(), 64));
    }
    // Push the constant |val|. GC things are emitted as patchable loads and
    // recorded as data relocations so the GC can update the embedded pointer.
    void pushValue(const Value& val) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        jsval_layout jv = JSVAL_TO_IMPL(val);
        if (val.isMarkable()) {
            BufferOffset load = movePatchablePtr(ImmPtr((void*)jv.asBits), scratch);
            writeDataRelocation(val, load);
            push(scratch);
        } else {
            moveValue(val, scratch);
            push(scratch);
        }
    }
    // Box |reg| with |type|'s tag, then push the resulting Value.
    void pushValue(JSValueType type, Register reg) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != reg);
        tagValue(type, reg, ValueOperand(scratch));
        push(scratch);
    }
    // Push the Value stored at |addr|.
    void pushValue(const Address& addr) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != addr.base);
        loadValue(addr, scratch);
        push(scratch);
    }
357     template <typename T>
storeUnboxedPayload(ValueOperand value,T address,size_t nbytes)358     void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) {
359         switch (nbytes) {
360           case 8: {
361             vixl::UseScratchRegisterScope temps(this);
362             const Register scratch = temps.AcquireX().asUnsized();
363             unboxNonDouble(value, scratch);
364             storePtr(scratch, address);
365             return;
366           }
367           case 4:
368             storePtr(value.valueReg(), address);
369             return;
370           case 1:
371             store8(value.valueReg(), address);
372             return;
373           default: MOZ_CRASH("Bad payload width");
374         }
375     }
    // Move the constant |val| into |dest|. GC things are emitted as patchable
    // loads and recorded as data relocations so the GC can update the
    // embedded pointer.
    void moveValue(const Value& val, Register dest) {
        if (val.isMarkable()) {
            BufferOffset load = movePatchablePtr(ImmPtr((void*)val.asRawBits()), dest);
            writeDataRelocation(val, load);
        } else {
            movePtr(ImmWord(val.asRawBits()), dest);
        }
    }
    void moveValue(const Value& src, const ValueOperand& dest) {
        moveValue(src, dest.valueReg());
    }
    void moveValue(const ValueOperand& src, const ValueOperand& dest) {
        // No code is emitted when source and destination coincide.
        if (src.valueReg() != dest.valueReg())
            movePtr(src.valueReg(), dest.valueReg());
    }
391 
pushWithPatch(ImmWord imm)392     CodeOffset pushWithPatch(ImmWord imm) {
393         vixl::UseScratchRegisterScope temps(this);
394         const Register scratch = temps.AcquireX().asUnsized();
395         CodeOffset label = movWithPatch(imm, scratch);
396         push(scratch);
397         return label;
398     }
399 
movWithPatch(ImmWord imm,Register dest)400     CodeOffset movWithPatch(ImmWord imm, Register dest) {
401         BufferOffset off = immPool64(ARMRegister(dest, 64), imm.value);
402         return CodeOffset(off.getOffset());
403     }
movWithPatch(ImmPtr imm,Register dest)404     CodeOffset movWithPatch(ImmPtr imm, Register dest) {
405         BufferOffset off = immPool64(ARMRegister(dest, 64), uint64_t(imm.value));
406         return CodeOffset(off.getOffset());
407     }
408 
    // Box |src|'s payload with |type|'s shifted tag into |dest|. As with
    // tagValue, the payload's tag bits must be clear for Orr to be correct.
    void boxValue(JSValueType type, Register src, Register dest) {
        Orr(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(ImmShiftedTag(type).value));
    }
    // Extract the tag bits (above JSVAL_TAG_SHIFT) of |src| into |dest|.
    void splitTag(Register src, Register dest) {
        ubfx(ARMRegister(dest, 64), ARMRegister(src, 64), JSVAL_TAG_SHIFT, (64 - JSVAL_TAG_SHIFT));
    }
    // The extract* helpers below return the register holding the result,
    // which is always |scratch| on this platform.
    Register extractTag(const Address& address, Register scratch) {
        loadPtr(address, scratch);
        splitTag(scratch, scratch);
        return scratch;
    }
    Register extractTag(const ValueOperand& value, Register scratch) {
        splitTag(value.valueReg(), scratch);
        return scratch;
    }
    Register extractObject(const Address& address, Register scratch) {
        loadPtr(address, scratch);
        unboxObject(scratch, scratch);
        return scratch;
    }
    Register extractObject(const ValueOperand& value, Register scratch) {
        unboxObject(value, scratch);
        return scratch;
    }
    Register extractInt32(const ValueOperand& value, Register scratch) {
        unboxInt32(value, scratch);
        return scratch;
    }
    Register extractBoolean(const ValueOperand& value, Register scratch) {
        unboxBoolean(value, scratch);
        return scratch;
    }
441 
    // If source is a double, load into dest.
    // If source is int32, convert to double and store in dest.
    // Else, branch to failure.
    void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure) {
        Label isDouble, done;

        // TODO: splitTagForTest really should not leak a scratch register.
        Register tag = splitTagForTest(source);
        {
            // Keep the tag register live across the two branch tests by
            // excluding it from the scratch pool.
            vixl::UseScratchRegisterScope temps(this);
            temps.Exclude(ARMRegister(tag, 64));

            branchTestDouble(Assembler::Equal, tag, &isDouble);
            branchTestInt32(Assembler::NotEqual, tag, failure);
        }

        // Int32 path: the payload converts directly.
        convertInt32ToDouble(source.valueReg(), dest);
        jump(&done);

        bind(&isDouble);
        unboxDouble(source, dest);

        bind(&done);
    }
466 
emitSet(Condition cond,Register dest)467     void emitSet(Condition cond, Register dest) {
468         Cset(ARMRegister(dest, 64), cond);
469     }
470 
471     template <typename T1, typename T2>
cmpPtrSet(Condition cond,T1 lhs,T2 rhs,Register dest)472     void cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
473         cmpPtr(lhs, rhs);
474         emitSet(cond, dest);
475     }
476 
477     template <typename T1, typename T2>
cmp32Set(Condition cond,T1 lhs,T2 rhs,Register dest)478     void cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
479         cmp32(lhs, rhs);
480         emitSet(cond, dest);
481     }
482 
testNullSet(Condition cond,const ValueOperand & value,Register dest)483     void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
484         cond = testNull(cond, value);
485         emitSet(cond, dest);
486     }
testObjectSet(Condition cond,const ValueOperand & value,Register dest)487     void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
488         cond = testObject(cond, value);
489         emitSet(cond, dest);
490     }
testUndefinedSet(Condition cond,const ValueOperand & value,Register dest)491     void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest) {
492         cond = testUndefined(cond, value);
493         emitSet(cond, dest);
494     }
495 
    // Zero-extend the low byte of |source| into |dest| (booleans are 0 or 1).
    void convertBoolToInt32(Register source, Register dest) {
        Uxtb(ARMRegister(dest, 64), ARMRegister(source, 64));
    }

    void convertInt32ToDouble(Register src, FloatRegister dest) {
        Scvtf(ARMFPRegister(dest, 64), ARMRegister(src, 32)); // Uses FPCR rounding mode.
    }
    void convertInt32ToDouble(const Address& src, FloatRegister dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != src.base);
        load32(src, scratch);
        convertInt32ToDouble(scratch, dest);
    }
    void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != src.base);
        MOZ_ASSERT(scratch != src.index);
        load32(src, scratch);
        convertInt32ToDouble(scratch, dest);
    }

    void convertInt32ToFloat32(Register src, FloatRegister dest) {
        Scvtf(ARMFPRegister(dest, 32), ARMRegister(src, 32)); // Uses FPCR rounding mode.
    }
    void convertInt32ToFloat32(const Address& src, FloatRegister dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != src.base);
        load32(src, scratch);
        convertInt32ToFloat32(scratch, dest);
    }

    // Unsigned variants use Ucvtf so values >= 2^31 convert correctly.
    void convertUInt32ToDouble(Register src, FloatRegister dest) {
        Ucvtf(ARMFPRegister(dest, 64), ARMRegister(src, 32)); // Uses FPCR rounding mode.
    }
    void convertUInt32ToDouble(const Address& src, FloatRegister dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != src.base);
        load32(src, scratch);
        convertUInt32ToDouble(scratch, dest);
    }

    void convertUInt32ToFloat32(Register src, FloatRegister dest) {
        Ucvtf(ARMFPRegister(dest, 32), ARMRegister(src, 32)); // Uses FPCR rounding mode.
    }
    void convertUInt32ToFloat32(const Address& src, FloatRegister dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != src.base);
        load32(src, scratch);
        convertUInt32ToFloat32(scratch, dest);
    }

    void convertFloat32ToDouble(FloatRegister src, FloatRegister dest) {
        Fcvt(ARMFPRegister(dest, 64), ARMFPRegister(src, 32));
    }
    void convertDoubleToFloat32(FloatRegister src, FloatRegister dest) {
        Fcvt(ARMFPRegister(dest, 32), ARMFPRegister(src, 64));
    }
558 
    // Truncate |src| to an int32 in |dest|, branching to |fail| if the
    // conversion saturated (value out of int64 range or NaN).
    void branchTruncateDouble(FloatRegister src, Register dest, Label* fail) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();

        // An out of range integer will be saturated to the destination size.
        ARMFPRegister src64(src, 64);
        ARMRegister dest64(dest, 64);

        MOZ_ASSERT(!scratch64.Is(dest64));

        //breakpoint();
        Fcvtzs(dest64, src64);
        // dest64 + INT64_MAX is within 2 of UINT64_MAX exactly when dest64 is
        // INT64_MIN or INT64_MAX, i.e. when FCVTZS saturated.
        Add(scratch64, dest64, Operand(0x7fffffffffffffff));
        Cmn(scratch64, 3);
        B(fail, Assembler::Above);
        // Keep only the low 32 bits of the truncated result.
        And(dest64, dest64, Operand(0xffffffff));
    }
    // Convert |src| to int32 in |dest|, branching to |fail| if the value is
    // not exactly representable. With |negativeZeroCheck|, -0.0 also fails.
    void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
                              bool negativeZeroCheck = true)
    {
        vixl::UseScratchRegisterScope temps(this);
        const ARMFPRegister scratch64 = temps.AcquireD();

        ARMFPRegister fsrc(src, 64);
        ARMRegister dest32(dest, 32);
        ARMRegister dest64(dest, 64);

        MOZ_ASSERT(!scratch64.Is(fsrc));

        Fcvtzs(dest32, fsrc); // Convert, rounding toward zero.
        Scvtf(scratch64, dest32); // Convert back, using FPCR rounding mode.
        Fcmp(scratch64, fsrc);
        B(fail, Assembler::NotEqual);

        if (negativeZeroCheck) {
            Label nonzero;
            Cbnz(dest32, &nonzero);
            // A zero result could have come from -0.0: the sign bit survives
            // in the raw double bits, so any nonzero bit pattern here fails.
            Fmov(dest64, fsrc);
            Cbnz(dest64, fail);
            bind(&nonzero);
        }
    }
    // Convert the float |src| to int32 in |dest|, branching to |fail| if the
    // value is not exactly representable. With |negativeZeroCheck|, -0.0 also
    // fails.
    void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
                               bool negativeZeroCheck = true)
    {
        vixl::UseScratchRegisterScope temps(this);
        const ARMFPRegister scratch32 = temps.AcquireS();

        ARMFPRegister fsrc(src, 32);
        ARMRegister dest32(dest, 32);
        ARMRegister dest64(dest, 64);

        MOZ_ASSERT(!scratch32.Is(fsrc));

        Fcvtzs(dest64, fsrc); // Convert, rounding toward zero.
        // Round-trip through the low 32 bits only; values outside int32 range
        // change under the truncation and are caught by the compare below.
        Scvtf(scratch32, dest32); // Convert back, using FPCR rounding mode.
        Fcmp(scratch32, fsrc);
        B(fail, Assembler::NotEqual);

        if (negativeZeroCheck) {
            Label nonzero;
            Cbnz(dest32, &nonzero);
            // A zero result could have come from -0.0f: the sign bit survives
            // in the raw float bits, so any nonzero bit pattern here fails.
            Fmov(dest32, fsrc);
            Cbnz(dest32, fail);
            bind(&nonzero);
        }
        And(dest64, dest64, Operand(0xffffffff));
    }

    // Truncate the float |src| to an int32 in |dest|, branching to |fail| if
    // the conversion saturated (value out of int64 range or NaN).
    void branchTruncateFloat32(FloatRegister src, Register dest, Label* fail) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();

        ARMFPRegister src32(src, 32);
        ARMRegister dest64(dest, 64);

        MOZ_ASSERT(!scratch64.Is(dest64));

        Fcvtzs(dest64, src32);
        // Saturation check: dest64 + INT64_MAX is within 2 of UINT64_MAX
        // exactly when dest64 is INT64_MIN or INT64_MAX.
        Add(scratch64, dest64, Operand(0x7fffffffffffffff));
        Cmn(scratch64, 3);
        B(fail, Assembler::Above);
        And(dest64, dest64, Operand(0xffffffff));
    }
    // floor(input) into |output|, bailing out on NaN, -0.0, or results that
    // do not fit in an int32.
    void floor(FloatRegister input, Register output, Label* bail) {
        Label handleZero;
        //Label handleNeg;
        Label fin;
        ARMFPRegister iDbl(input, 64);
        ARMRegister o64(output, 64);
        ARMRegister o32(output, 32);
        Fcmp(iDbl, 0.0);
        B(Assembler::Equal, &handleZero);
        //B(Assembler::Signed, &handleNeg);
        // NaN is always a bail condition, just bail directly.
        B(Assembler::Overflow, bail);
        Fcvtms(o64, iDbl); // Convert, rounding toward minus infinity.
        // Bail unless the 64-bit result round-trips through int32.
        Cmp(o64, Operand(o64, vixl::SXTW));
        B(NotEqual, bail);
        // Clear the upper 32 bits of the output register.
        Mov(o32, o32);
        B(&fin);

        bind(&handleZero);
        // Move the top word of the double into the output reg, if it is non-zero,
        // then the original value was -0.0.
        Fmov(o64, iDbl);
        Cbnz(o64, bail);
        bind(&fin);
    }

    // Single-precision variant of floor() above.
    void floorf(FloatRegister input, Register output, Label* bail) {
        Label handleZero;
        //Label handleNeg;
        Label fin;
        ARMFPRegister iFlt(input, 32);
        ARMRegister o64(output, 64);
        ARMRegister o32(output, 32);
        Fcmp(iFlt, 0.0);
        B(Assembler::Equal, &handleZero);
        //B(Assembler::Signed, &handleNeg);
        // NaN is always a bail condition, just bail directly.
        B(Assembler::Overflow, bail);
        Fcvtms(o64, iFlt); // Convert, rounding toward minus infinity.
        // Bail unless the 64-bit result round-trips through int32.
        Cmp(o64, Operand(o64, vixl::SXTW));
        B(NotEqual, bail);
        // Clear the upper 32 bits of the output register.
        Mov(o32, o32);
        B(&fin);

        bind(&handleZero);
        // Move the float's raw bits into the output reg; if they are non-zero,
        // the original value was -0.0.
        Fmov(o32, iFlt);
        Cbnz(o32, bail);
        bind(&fin);
    }
694 
    // ceil(input) into |output|, bailing out on NaN, results that do not fit
    // in an int32, and results of -0.0 (negative inputs that round to zero).
    void ceil(FloatRegister input, Register output, Label* bail) {
        Label handleZero;
        Label fin;
        ARMFPRegister iDbl(input, 64);
        ARMRegister o64(output, 64);
        ARMRegister o32(output, 32);
        Fcmp(iDbl, 0.0);
        // NaN is always a bail condition, just bail directly.
        B(Assembler::Overflow, bail);
        Fcvtps(o64, iDbl); // Convert, rounding toward plus infinity.
        // Bail unless the 64-bit result round-trips through int32.
        Cmp(o64, Operand(o64, vixl::SXTW));
        B(NotEqual, bail);
        // A zero result needs a sign check on the original input.
        Cbz(o64, &handleZero);
        // Clear the upper 32 bits of the output register.
        Mov(o32, o32);
        B(&fin);

        bind(&handleZero);
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch = temps.AcquireX();
        // If the input's raw bits are non-zero, the result would be -0.0.
        Fmov(scratch, iDbl);
        Cbnz(scratch, bail);
        bind(&fin);
    }

    // Single-precision variant of ceil() above.
    void ceilf(FloatRegister input, Register output, Label* bail) {
        Label handleZero;
        Label fin;
        ARMFPRegister iFlt(input, 32);
        ARMRegister o64(output, 64);
        ARMRegister o32(output, 32);
        Fcmp(iFlt, 0.0);

        // NaN is always a bail condition, just bail directly.
        B(Assembler::Overflow, bail);
        Fcvtps(o64, iFlt); // Convert, rounding toward plus infinity.
        // Bail unless the 64-bit result round-trips through int32.
        Cmp(o64, Operand(o64, vixl::SXTW));
        B(NotEqual, bail);
        // A zero result needs a sign check on the original input.
        Cbz(o64, &handleZero);
        // Clear the upper 32 bits of the output register.
        Mov(o32, o32);
        B(&fin);

        bind(&handleZero);
        // Move the float's raw bits into the output reg; if they are non-zero,
        // the result would have been -0.0.
        Fmov(o32, iFlt);
        Cbnz(o32, bail);
        bind(&fin);
    }
742 
jump(Label * label)743     void jump(Label* label) {
744         B(label);
745     }
jump(JitCode * code)746     void jump(JitCode* code) {
747         branch(code);
748     }
jump(RepatchLabel * label)749     void jump(RepatchLabel* label) {
750         MOZ_CRASH("jump (repatchlabel)");
751     }
jump(Register reg)752     void jump(Register reg) {
753         Br(ARMRegister(reg, 64));
754     }
jump(const Address & addr)755     void jump(const Address& addr) {
756         loadPtr(addr, ip0);
757         Br(vixl::ip0);
758     }
759 
align(int alignment)760     void align(int alignment) {
761         armbuffer_.align(alignment);
762     }
763 
haltingAlign(int alignment)764     void haltingAlign(int alignment) {
765         // TODO: Implement a proper halting align.
766         // ARM doesn't have one either.
767         armbuffer_.align(alignment);
768     }
769 
movePtr(Register src,Register dest)770     void movePtr(Register src, Register dest) {
771         Mov(ARMRegister(dest, 64), ARMRegister(src, 64));
772     }
movePtr(ImmWord imm,Register dest)773     void movePtr(ImmWord imm, Register dest) {
774         Mov(ARMRegister(dest, 64), int64_t(imm.value));
775     }
movePtr(ImmPtr imm,Register dest)776     void movePtr(ImmPtr imm, Register dest) {
777         Mov(ARMRegister(dest, 64), int64_t(imm.value));
778     }
movePtr(wasm::SymbolicAddress imm,Register dest)779     void movePtr(wasm::SymbolicAddress imm, Register dest) {
780         BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
781         append(AsmJSAbsoluteLink(CodeOffset(off.getOffset()), imm));
782     }
movePtr(ImmGCPtr imm,Register dest)783     void movePtr(ImmGCPtr imm, Register dest) {
784         BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
785         writeDataRelocation(imm, load);
786     }
move64(Register64 src,Register64 dest)787     void move64(Register64 src, Register64 dest) {
788         movePtr(src.reg, dest.reg);
789     }
790 
mov(ImmWord imm,Register dest)791     void mov(ImmWord imm, Register dest) {
792         movePtr(imm, dest);
793     }
mov(ImmPtr imm,Register dest)794     void mov(ImmPtr imm, Register dest) {
795         movePtr(imm, dest);
796     }
mov(wasm::SymbolicAddress imm,Register dest)797     void mov(wasm::SymbolicAddress imm, Register dest) {
798         movePtr(imm, dest);
799     }
mov(Register src,Register dest)800     void mov(Register src, Register dest) {
801         movePtr(src, dest);
802     }
803 
move32(Imm32 imm,Register dest)804     void move32(Imm32 imm, Register dest) {
805         Mov(ARMRegister(dest, 32), (int64_t)imm.value);
806     }
move32(Register src,Register dest)807     void move32(Register src, Register dest) {
808         Mov(ARMRegister(dest, 32), ARMRegister(src, 32));
809     }
810 
    // Move a pointer using a literal pool, so that the pointer
    // may be easily patched or traced.
    // Returns the BufferOffset of the load instruction emitted.
    BufferOffset movePatchablePtr(ImmWord ptr, Register dest);
    BufferOffset movePatchablePtr(ImmPtr ptr, Register dest);

    // Negate a 32-bit register in place. Uses NEGS, so the condition flags
    // are updated by the result.
    void neg32(Register reg) {
        Negs(ARMRegister(reg, 32), Operand(ARMRegister(reg, 32)));
    }
820 
loadPtr(wasm::SymbolicAddress address,Register dest)821     void loadPtr(wasm::SymbolicAddress address, Register dest) {
822         vixl::UseScratchRegisterScope temps(this);
823         const ARMRegister scratch = temps.AcquireX();
824         movePtr(address, scratch.asUnsized());
825         Ldr(ARMRegister(dest, 64), MemOperand(scratch));
826     }
loadPtr(AbsoluteAddress address,Register dest)827     void loadPtr(AbsoluteAddress address, Register dest) {
828         vixl::UseScratchRegisterScope temps(this);
829         const ARMRegister scratch = temps.AcquireX();
830         movePtr(ImmWord((uintptr_t)address.addr), scratch.asUnsized());
831         Ldr(ARMRegister(dest, 64), MemOperand(scratch));
832     }
loadPtr(const Address & address,Register dest)833     void loadPtr(const Address& address, Register dest) {
834         Ldr(ARMRegister(dest, 64), MemOperand(address));
835     }
loadPtr(const BaseIndex & src,Register dest)836     void loadPtr(const BaseIndex& src, Register dest) {
837         Register base = src.base;
838         uint32_t scale = Imm32::ShiftOf(src.scale).value;
839         ARMRegister dest64(dest, 64);
840         ARMRegister index64(src.index, 64);
841 
842         if (src.offset) {
843             vixl::UseScratchRegisterScope temps(this);
844             const ARMRegister scratch = temps.AcquireX();
845             MOZ_ASSERT(!scratch.Is(ARMRegister(base, 64)));
846             MOZ_ASSERT(!scratch.Is(dest64));
847             MOZ_ASSERT(!scratch.Is(index64));
848 
849             Add(scratch, ARMRegister(base, 64), Operand(int64_t(src.offset)));
850             Ldr(dest64, MemOperand(scratch, index64, vixl::LSL, scale));
851             return;
852         }
853 
854         Ldr(dest64, MemOperand(ARMRegister(base, 64), index64, vixl::LSL, scale));
855     }
856     void loadPrivate(const Address& src, Register dest);
857 
store8(Register src,const Address & address)858     void store8(Register src, const Address& address) {
859         Strb(ARMRegister(src, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
860     }
store8(Imm32 imm,const Address & address)861     void store8(Imm32 imm, const Address& address) {
862         vixl::UseScratchRegisterScope temps(this);
863         const ARMRegister scratch32 = temps.AcquireW();
864         MOZ_ASSERT(scratch32.asUnsized() != address.base);
865         move32(imm, scratch32.asUnsized());
866         Strb(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset));
867     }
store8(Register src,const BaseIndex & address)868     void store8(Register src, const BaseIndex& address) {
869         doBaseIndex(ARMRegister(src, 32), address, vixl::STRB_w);
870     }
store8(Imm32 imm,const BaseIndex & address)871     void store8(Imm32 imm, const BaseIndex& address) {
872         vixl::UseScratchRegisterScope temps(this);
873         const ARMRegister scratch32 = temps.AcquireW();
874         MOZ_ASSERT(scratch32.asUnsized() != address.base);
875         MOZ_ASSERT(scratch32.asUnsized() != address.index);
876         Mov(scratch32, Operand(imm.value));
877         doBaseIndex(scratch32, address, vixl::STRB_w);
878     }
879 
store16(Register src,const Address & address)880     void store16(Register src, const Address& address) {
881         Strh(ARMRegister(src, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
882     }
store16(Imm32 imm,const Address & address)883     void store16(Imm32 imm, const Address& address) {
884         vixl::UseScratchRegisterScope temps(this);
885         const ARMRegister scratch32 = temps.AcquireW();
886         MOZ_ASSERT(scratch32.asUnsized() != address.base);
887         move32(imm, scratch32.asUnsized());
888         Strh(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset));
889     }
store16(Register src,const BaseIndex & address)890     void store16(Register src, const BaseIndex& address) {
891         doBaseIndex(ARMRegister(src, 32), address, vixl::STRH_w);
892     }
store16(Imm32 imm,const BaseIndex & address)893     void store16(Imm32 imm, const BaseIndex& address) {
894         vixl::UseScratchRegisterScope temps(this);
895         const ARMRegister scratch32 = temps.AcquireW();
896         MOZ_ASSERT(scratch32.asUnsized() != address.base);
897         MOZ_ASSERT(scratch32.asUnsized() != address.index);
898         Mov(scratch32, Operand(imm.value));
899         doBaseIndex(scratch32, address, vixl::STRH_w);
900     }
901 
storePtr(ImmWord imm,const Address & address)902     void storePtr(ImmWord imm, const Address& address) {
903         vixl::UseScratchRegisterScope temps(this);
904         const Register scratch = temps.AcquireX().asUnsized();
905         MOZ_ASSERT(scratch != address.base);
906         movePtr(imm, scratch);
907         storePtr(scratch, address);
908     }
storePtr(ImmPtr imm,const Address & address)909     void storePtr(ImmPtr imm, const Address& address) {
910         vixl::UseScratchRegisterScope temps(this);
911         const ARMRegister scratch64 = temps.AcquireX();
912         MOZ_ASSERT(scratch64.asUnsized() != address.base);
913         Mov(scratch64, uint64_t(imm.value));
914         Str(scratch64, MemOperand(ARMRegister(address.base, 64), address.offset));
915     }
storePtr(ImmGCPtr imm,const Address & address)916     void storePtr(ImmGCPtr imm, const Address& address) {
917         vixl::UseScratchRegisterScope temps(this);
918         const Register scratch = temps.AcquireX().asUnsized();
919         MOZ_ASSERT(scratch != address.base);
920         movePtr(imm, scratch);
921         storePtr(scratch, address);
922     }
storePtr(Register src,const Address & address)923     void storePtr(Register src, const Address& address) {
924         Str(ARMRegister(src, 64), MemOperand(ARMRegister(address.base, 64), address.offset));
925     }
926 
storePtr(ImmWord imm,const BaseIndex & address)927     void storePtr(ImmWord imm, const BaseIndex& address) {
928         vixl::UseScratchRegisterScope temps(this);
929         const ARMRegister scratch64 = temps.AcquireX();
930         MOZ_ASSERT(scratch64.asUnsized() != address.base);
931         MOZ_ASSERT(scratch64.asUnsized() != address.index);
932         Mov(scratch64, Operand(imm.value));
933         doBaseIndex(scratch64, address, vixl::STR_x);
934     }
storePtr(ImmGCPtr imm,const BaseIndex & address)935     void storePtr(ImmGCPtr imm, const BaseIndex& address) {
936         vixl::UseScratchRegisterScope temps(this);
937         const Register scratch = temps.AcquireX().asUnsized();
938         MOZ_ASSERT(scratch != address.base);
939         MOZ_ASSERT(scratch != address.index);
940         movePtr(imm, scratch);
941         doBaseIndex(ARMRegister(scratch, 64), address, vixl::STR_x);
942     }
storePtr(Register src,const BaseIndex & address)943     void storePtr(Register src, const BaseIndex& address) {
944         doBaseIndex(ARMRegister(src, 64), address, vixl::STR_x);
945     }
946 
storePtr(Register src,AbsoluteAddress address)947     void storePtr(Register src, AbsoluteAddress address) {
948         vixl::UseScratchRegisterScope temps(this);
949         const ARMRegister scratch64 = temps.AcquireX();
950         Mov(scratch64, uint64_t(address.addr));
951         Str(ARMRegister(src, 64), MemOperand(scratch64));
952     }
953 
store32(Register src,AbsoluteAddress address)954     void store32(Register src, AbsoluteAddress address) {
955         vixl::UseScratchRegisterScope temps(this);
956         const ARMRegister scratch64 = temps.AcquireX();
957         Mov(scratch64, uint64_t(address.addr));
958         Str(ARMRegister(src, 32), MemOperand(scratch64));
959     }
store32(Imm32 imm,const Address & address)960     void store32(Imm32 imm, const Address& address) {
961         vixl::UseScratchRegisterScope temps(this);
962         const ARMRegister scratch32 = temps.AcquireW();
963         MOZ_ASSERT(scratch32.asUnsized() != address.base);
964         Mov(scratch32, uint64_t(imm.value));
965         Str(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset));
966     }
store32(Register r,const Address & address)967     void store32(Register r, const Address& address) {
968         Str(ARMRegister(r, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
969     }
store32(Imm32 imm,const BaseIndex & address)970     void store32(Imm32 imm, const BaseIndex& address) {
971         vixl::UseScratchRegisterScope temps(this);
972         const ARMRegister scratch32 = temps.AcquireW();
973         MOZ_ASSERT(scratch32.asUnsized() != address.base);
974         MOZ_ASSERT(scratch32.asUnsized() != address.index);
975         Mov(scratch32, imm.value);
976         doBaseIndex(scratch32, address, vixl::STR_w);
977     }
store32(Register r,const BaseIndex & address)978     void store32(Register r, const BaseIndex& address) {
979         doBaseIndex(ARMRegister(r, 32), address, vixl::STR_w);
980     }
981 
store32_NoSecondScratch(Imm32 imm,const Address & address)982     void store32_NoSecondScratch(Imm32 imm, const Address& address) {
983         vixl::UseScratchRegisterScope temps(this);
984         temps.Exclude(ARMRegister(ScratchReg2, 32)); // Disallow ScratchReg2.
985         const ARMRegister scratch32 = temps.AcquireW();
986 
987         MOZ_ASSERT(scratch32.asUnsized() != address.base);
988         Mov(scratch32, uint64_t(imm.value));
989         Str(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset));
990     }
991 
store64(Register64 src,Address address)992     void store64(Register64 src, Address address) {
993         storePtr(src.reg, address);
994     }
995 
    // SIMD. None of these are implemented on ARM64 yet; every entry point
    // crashes with NYI.
    void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
    void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
    void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
    void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
    void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
    void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
    void loadAlignedInt32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadAlignedInt32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void storeAlignedInt32x4(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
    void storeAlignedInt32x4(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
    void loadUnalignedInt32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadUnalignedInt32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void storeUnalignedInt32x4(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); }
    void storeUnalignedInt32x4(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); }

    void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void storeFloat32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
    void storeFloat32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
    void loadAlignedFloat32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadAlignedFloat32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void storeAlignedFloat32x4(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
    void storeAlignedFloat32x4(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
    void loadUnalignedFloat32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadUnalignedFloat32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void storeUnalignedFloat32x4(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); }
    void storeUnalignedFloat32x4(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
1030 
    // StackPointer manipulation.
    // Helpers that write to the stack pointer call syncStackPtr() afterwards.
    // NOTE(review): addToStackPtr modifies the stack pointer but does not
    // call syncStackPtr(), unlike subFromStackPtr/moveToStackPtr/loadStackPtr
    // below — confirm whether callers are expected to sync explicitly.
    template <typename T>
    void addToStackPtr(T t) { addPtr(t, getStackPointer()); }
    template <typename T>
    void addStackPtrTo(T t) { addPtr(getStackPointer(), t); }

    template <typename T>
    void subFromStackPtr(T t) { subPtr(t, getStackPointer()); syncStackPtr(); }
    template <typename T>
    void subStackPtrFrom(T t) { subPtr(getStackPointer(), t); }

    template <typename T> void andToStackPtr(T t);
    template <typename T> void andStackPtrTo(T t);

    template <typename T>
    void moveToStackPtr(T t) { movePtr(t, getStackPointer()); syncStackPtr(); }
    template <typename T>
    void moveStackPtrTo(T t) { movePtr(getStackPointer(), t); }

    template <typename T>
    void loadStackPtr(T t) { loadPtr(t, getStackPointer()); syncStackPtr(); }
    template <typename T>
    void storeStackPtr(T t) { storePtr(getStackPointer(), t); }

    // StackPointer testing functions.
    template <typename T>
    void branchTestStackPtr(Condition cond, T t, Label* label) {
        branchTestPtr(cond, getStackPointer(), t, label);
    }
    template <typename T>
    void branchStackPtr(Condition cond, T rhs, Label* label) {
        branchPtr(cond, getStackPointer(), rhs, label);
    }
    // Like branchStackPtr, but with the stack pointer on the right-hand side.
    template <typename T>
    void branchStackPtrRhs(Condition cond, T lhs, Label* label) {
        branchPtr(cond, lhs, getStackPointer(), label);
    }
1068 
testPtr(Register lhs,Register rhs)1069     void testPtr(Register lhs, Register rhs) {
1070         Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64)));
1071     }
test32(Register lhs,Register rhs)1072     void test32(Register lhs, Register rhs) {
1073         Tst(ARMRegister(lhs, 32), Operand(ARMRegister(rhs, 32)));
1074     }
test32(const Address & addr,Imm32 imm)1075     void test32(const Address& addr, Imm32 imm) {
1076         vixl::UseScratchRegisterScope temps(this);
1077         const ARMRegister scratch32 = temps.AcquireW();
1078         MOZ_ASSERT(scratch32.asUnsized() != addr.base);
1079         load32(addr, scratch32.asUnsized());
1080         Tst(scratch32, Operand(imm.value));
1081     }
test32(Register lhs,Imm32 rhs)1082     void test32(Register lhs, Imm32 rhs) {
1083         Tst(ARMRegister(lhs, 32), Operand(rhs.value));
1084     }
cmp32(Register lhs,Imm32 rhs)1085     void cmp32(Register lhs, Imm32 rhs) {
1086         Cmp(ARMRegister(lhs, 32), Operand(rhs.value));
1087     }
cmp32(Register a,Register b)1088     void cmp32(Register a, Register b) {
1089         Cmp(ARMRegister(a, 32), Operand(ARMRegister(b, 32)));
1090     }
cmp32(const Operand & lhs,Imm32 rhs)1091     void cmp32(const Operand& lhs, Imm32 rhs) {
1092         vixl::UseScratchRegisterScope temps(this);
1093         const ARMRegister scratch32 = temps.AcquireW();
1094         Mov(scratch32, lhs);
1095         Cmp(scratch32, Operand(rhs.value));
1096     }
cmp32(const Operand & lhs,Register rhs)1097     void cmp32(const Operand& lhs, Register rhs) {
1098         vixl::UseScratchRegisterScope temps(this);
1099         const ARMRegister scratch32 = temps.AcquireW();
1100         Mov(scratch32, lhs);
1101         Cmp(scratch32, Operand(ARMRegister(rhs, 32)));
1102     }
1103 
cmpPtr(Register lhs,Imm32 rhs)1104     void cmpPtr(Register lhs, Imm32 rhs) {
1105         Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
1106     }
cmpPtr(Register lhs,ImmWord rhs)1107     void cmpPtr(Register lhs, ImmWord rhs) {
1108         Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
1109     }
cmpPtr(Register lhs,ImmPtr rhs)1110     void cmpPtr(Register lhs, ImmPtr rhs) {
1111         Cmp(ARMRegister(lhs, 64), Operand(uint64_t(rhs.value)));
1112     }
cmpPtr(Register lhs,Register rhs)1113     void cmpPtr(Register lhs, Register rhs) {
1114         Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
1115     }
cmpPtr(Register lhs,ImmGCPtr rhs)1116     void cmpPtr(Register lhs, ImmGCPtr rhs) {
1117         vixl::UseScratchRegisterScope temps(this);
1118         const Register scratch = temps.AcquireX().asUnsized();
1119         MOZ_ASSERT(scratch != lhs);
1120         movePtr(rhs, scratch);
1121         cmpPtr(lhs, scratch);
1122     }
1123 
cmpPtr(const Address & lhs,Register rhs)1124     void cmpPtr(const Address& lhs, Register rhs) {
1125         vixl::UseScratchRegisterScope temps(this);
1126         const ARMRegister scratch64 = temps.AcquireX();
1127         MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
1128         MOZ_ASSERT(scratch64.asUnsized() != rhs);
1129         Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset));
1130         Cmp(scratch64, Operand(ARMRegister(rhs, 64)));
1131     }
cmpPtr(const Address & lhs,ImmWord rhs)1132     void cmpPtr(const Address& lhs, ImmWord rhs) {
1133         vixl::UseScratchRegisterScope temps(this);
1134         const ARMRegister scratch64 = temps.AcquireX();
1135         MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
1136         Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset));
1137         Cmp(scratch64, Operand(rhs.value));
1138     }
cmpPtr(const Address & lhs,ImmPtr rhs)1139     void cmpPtr(const Address& lhs, ImmPtr rhs) {
1140         vixl::UseScratchRegisterScope temps(this);
1141         const ARMRegister scratch64 = temps.AcquireX();
1142         MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
1143         Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset));
1144         Cmp(scratch64, Operand(uint64_t(rhs.value)));
1145     }
cmpPtr(const Address & lhs,ImmGCPtr rhs)1146     void cmpPtr(const Address& lhs, ImmGCPtr rhs) {
1147         vixl::UseScratchRegisterScope temps(this);
1148         const Register scratch = temps.AcquireX().asUnsized();
1149         MOZ_ASSERT(scratch != lhs.base);
1150         loadPtr(lhs, scratch);
1151         cmpPtr(scratch, rhs);
1152     }
1153 
loadDouble(const Address & src,FloatRegister dest)1154     void loadDouble(const Address& src, FloatRegister dest) {
1155         Ldr(ARMFPRegister(dest, 64), MemOperand(src));
1156     }
loadDouble(const BaseIndex & src,FloatRegister dest)1157     void loadDouble(const BaseIndex& src, FloatRegister dest) {
1158         ARMRegister base(src.base, 64);
1159         ARMRegister index(src.index, 64);
1160 
1161         if (src.offset == 0) {
1162             Ldr(ARMFPRegister(dest, 64), MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
1163             return;
1164         }
1165 
1166         vixl::UseScratchRegisterScope temps(this);
1167         const ARMRegister scratch64 = temps.AcquireX();
1168         MOZ_ASSERT(scratch64.asUnsized() != src.base);
1169         MOZ_ASSERT(scratch64.asUnsized() != src.index);
1170 
1171         Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
1172         Ldr(ARMFPRegister(dest, 64), MemOperand(scratch64, src.offset));
1173     }
loadFloatAsDouble(const Address & addr,FloatRegister dest)1174     void loadFloatAsDouble(const Address& addr, FloatRegister dest) {
1175         Ldr(ARMFPRegister(dest, 32), MemOperand(ARMRegister(addr.base,64), addr.offset));
1176         fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32));
1177     }
loadFloatAsDouble(const BaseIndex & src,FloatRegister dest)1178     void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest) {
1179         ARMRegister base(src.base, 64);
1180         ARMRegister index(src.index, 64);
1181         if (src.offset == 0) {
1182             Ldr(ARMFPRegister(dest, 32), MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
1183         } else {
1184             vixl::UseScratchRegisterScope temps(this);
1185             const ARMRegister scratch64 = temps.AcquireX();
1186             MOZ_ASSERT(scratch64.asUnsized() != src.base);
1187             MOZ_ASSERT(scratch64.asUnsized() != src.index);
1188 
1189             Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
1190             Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset));
1191         }
1192         fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32));
1193     }
1194 
loadFloat32(const Address & addr,FloatRegister dest)1195     void loadFloat32(const Address& addr, FloatRegister dest) {
1196         Ldr(ARMFPRegister(dest, 32), MemOperand(ARMRegister(addr.base,64), addr.offset));
1197     }
loadFloat32(const BaseIndex & src,FloatRegister dest)1198     void loadFloat32(const BaseIndex& src, FloatRegister dest) {
1199         ARMRegister base(src.base, 64);
1200         ARMRegister index(src.index, 64);
1201         if (src.offset == 0) {
1202             Ldr(ARMFPRegister(dest, 32), MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
1203         } else {
1204             vixl::UseScratchRegisterScope temps(this);
1205             const ARMRegister scratch64 = temps.AcquireX();
1206             MOZ_ASSERT(scratch64.asUnsized() != src.base);
1207             MOZ_ASSERT(scratch64.asUnsized() != src.index);
1208 
1209             Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
1210             Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset));
1211         }
1212     }
1213 
storeDouble(FloatRegister src,const Address & dest)1214     void storeDouble(FloatRegister src, const Address& dest) {
1215         Str(ARMFPRegister(src, 64), MemOperand(ARMRegister(dest.base, 64), dest.offset));
1216     }
storeDouble(FloatRegister src,const BaseIndex & dest)1217     void storeDouble(FloatRegister src, const BaseIndex& dest) {
1218         doBaseIndex(ARMFPRegister(src, 64), dest, vixl::STR_d);
1219     }
1220 
storeFloat32(FloatRegister src,Address addr)1221     void storeFloat32(FloatRegister src, Address addr) {
1222         Str(ARMFPRegister(src, 32), MemOperand(ARMRegister(addr.base, 64), addr.offset));
1223     }
storeFloat32(FloatRegister src,BaseIndex addr)1224     void storeFloat32(FloatRegister src, BaseIndex addr) {
1225         doBaseIndex(ARMFPRegister(src, 32), addr, vixl::STR_s);
1226     }
1227 
moveDouble(FloatRegister src,FloatRegister dest)1228     void moveDouble(FloatRegister src, FloatRegister dest) {
1229         fmov(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
1230     }
zeroDouble(FloatRegister reg)1231     void zeroDouble(FloatRegister reg) {
1232         fmov(ARMFPRegister(reg, 64), vixl::xzr);
1233     }
zeroFloat32(FloatRegister reg)1234     void zeroFloat32(FloatRegister reg) {
1235         fmov(ARMFPRegister(reg, 32), vixl::wzr);
1236     }
negateDouble(FloatRegister reg)1237     void negateDouble(FloatRegister reg) {
1238         fneg(ARMFPRegister(reg, 64), ARMFPRegister(reg, 64));
1239     }
negateFloat(FloatRegister reg)1240     void negateFloat(FloatRegister reg) {
1241         fneg(ARMFPRegister(reg, 32), ARMFPRegister(reg, 32));
1242     }
addDouble(FloatRegister src,FloatRegister dest)1243     void addDouble(FloatRegister src, FloatRegister dest) {
1244         fadd(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
1245     }
subDouble(FloatRegister src,FloatRegister dest)1246     void subDouble(FloatRegister src, FloatRegister dest) {
1247         fsub(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
1248     }
mulDouble(FloatRegister src,FloatRegister dest)1249     void mulDouble(FloatRegister src, FloatRegister dest) {
1250         fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
1251     }
divDouble(FloatRegister src,FloatRegister dest)1252     void divDouble(FloatRegister src, FloatRegister dest) {
1253         fdiv(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
1254     }
1255 
moveFloat32(FloatRegister src,FloatRegister dest)1256     void moveFloat32(FloatRegister src, FloatRegister dest) {
1257         fmov(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
1258     }
moveFloatAsDouble(Register src,FloatRegister dest)1259     void moveFloatAsDouble(Register src, FloatRegister dest) {
1260         MOZ_CRASH("moveFloatAsDouble");
1261     }
1262 
splitTag(const ValueOperand & operand,Register dest)1263     void splitTag(const ValueOperand& operand, Register dest) {
1264         splitTag(operand.valueReg(), dest);
1265     }
splitTag(const Address & operand,Register dest)1266     void splitTag(const Address& operand, Register dest) {
1267         loadPtr(operand, dest);
1268         splitTag(dest, dest);
1269     }
splitTag(const BaseIndex & operand,Register dest)1270     void splitTag(const BaseIndex& operand, Register dest) {
1271         loadPtr(operand, dest);
1272         splitTag(dest, dest);
1273     }
1274 
1275     // Extracts the tag of a value and places it in ScratchReg.
splitTagForTest(const ValueOperand & value)1276     Register splitTagForTest(const ValueOperand& value) {
1277         vixl::UseScratchRegisterScope temps(this);
1278         const ARMRegister scratch64 = temps.AcquireX();
1279         MOZ_ASSERT(scratch64.asUnsized() != value.valueReg());
1280         Lsr(scratch64, ARMRegister(value.valueReg(), 64), JSVAL_TAG_SHIFT);
1281         return scratch64.asUnsized(); // FIXME: Surely we can make a better interface.
1282     }
cmpTag(const ValueOperand & operand,ImmTag tag)1283     void cmpTag(const ValueOperand& operand, ImmTag tag) {
1284         MOZ_CRASH("cmpTag");
1285     }
1286 
    // 32-bit load; the upper 32 bits of |dest| are zeroed by the W-register write.
    void load32(const Address& address, Register dest) {
        Ldr(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void load32(const BaseIndex& src, Register dest) {
        doBaseIndex(ARMRegister(dest, 32), src, vixl::LDR_w);
    }
    // Absolute-address form: materialize the address in a scratch register first.
    void load32(AbsoluteAddress address, Register dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        movePtr(ImmWord((uintptr_t)address.addr), scratch64.asUnsized());
        ldr(ARMRegister(dest, 32), MemOperand(scratch64));
    }
    // 64-bit load: on ARM64 a Register64 is a single 64-bit register.
    void load64(const Address& address, Register64 dest) {
        loadPtr(address, dest.reg);
    }
1302 
    // Sub-word loads. The *SignExtend forms sign-extend into the 32-bit
    // destination; the *ZeroExtend forms zero-extend. BaseIndex forms route
    // through doBaseIndex with the matching vixl load opcode.
    void load8SignExtend(const Address& address, Register dest) {
        Ldrsb(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void load8SignExtend(const BaseIndex& src, Register dest) {
        doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSB_w);
    }

    void load8ZeroExtend(const Address& address, Register dest) {
        Ldrb(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void load8ZeroExtend(const BaseIndex& src, Register dest) {
        doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRB_w);
    }

    void load16SignExtend(const Address& address, Register dest) {
        Ldrsh(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void load16SignExtend(const BaseIndex& src, Register dest) {
        doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSH_w);
    }

    void load16ZeroExtend(const Address& address, Register dest) {
        Ldrh(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void load16ZeroExtend(const BaseIndex& src, Register dest) {
        doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRH_w);
    }
1330 
    // 32-bit addition; add32 leaves flags alone, adds32 sets NZCV.
    void add32(Register src, Register dest) {
        Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32)));
    }
    void add32(Imm32 imm, Register dest) {
        Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
    }
    // Read-modify-write of a memory word through a scratch register.
    void add32(Imm32 imm, const Address& dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        MOZ_ASSERT(scratch32.asUnsized() != dest.base);

        Ldr(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset));
        Add(scratch32, scratch32, Operand(imm.value));
        Str(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset));
    }

    // Flag-setting variants (Adds).
    void adds32(Register src, Register dest) {
        Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32)));
    }
    void adds32(Imm32 imm, Register dest) {
        Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
    }
    void adds32(Imm32 imm, const Address& dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        MOZ_ASSERT(scratch32.asUnsized() != dest.base);

        Ldr(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset));
        Adds(scratch32, scratch32, Operand(imm.value));
        Str(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset));
    }
    // 64-bit add of a 32-bit immediate.
    void add64(Imm32 imm, Register64 dest) {
        Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
    }
1365 
    // Flag-setting 32-bit subtraction: dest -= imm/src, updating NZCV.
    void subs32(Imm32 imm, Register dest) {
        Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
    }
    void subs32(Register src, Register dest) {
        Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32)));
    }
1372 
    // Pointer-width (64-bit) addition variants.
    void addPtr(Register src, Register dest) {
        Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64)));
    }
    // Three-operand form: dest = src1 + src2.
    void addPtr(Register src1, Register src2, Register dest) {
        Add(ARMRegister(dest, 64), ARMRegister(src1, 64), Operand(ARMRegister(src2, 64)));
    }

    void addPtr(Imm32 imm, Register dest) {
        Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
    }
    void addPtr(Imm32 imm, Register src, Register dest) {
        Add(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(imm.value));
    }

    // Read-modify-write of a pointer-sized memory slot.
    void addPtr(Imm32 imm, const Address& dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != dest.base);

        Ldr(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset));
        Add(scratch64, scratch64, Operand(imm.value));
        Str(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset));
    }
    void addPtr(ImmWord imm, Register dest) {
        Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
    }
    void addPtr(ImmPtr imm, Register dest) {
        Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(uint64_t(imm.value)));
    }
    // dest += *src.
    void addPtr(const Address& src, Register dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != src.base);

        Ldr(scratch64, MemOperand(ARMRegister(src.base, 64), src.offset));
        Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
    }
    // Pointer-width (64-bit) subtraction variants.
    void subPtr(Imm32 imm, Register dest) {
        Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
    }
    void subPtr(Register src, Register dest) {
        Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64)));
    }
    // dest -= *addr.
    void subPtr(const Address& addr, Register dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != addr.base);

        Ldr(scratch64, MemOperand(ARMRegister(addr.base, 64), addr.offset));
        Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
    }
    // *dest -= src (read-modify-write through a scratch register).
    void subPtr(Register src, const Address& dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != dest.base);

        Ldr(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset));
        Sub(scratch64, scratch64, Operand(ARMRegister(src, 64)));
        Str(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset));
    }
    // 32-bit multiply with optional overflow and zero branches.
    // Smull computes the full 64-bit product of the 32-bit inputs; overflow
    // occurred iff that product differs from its own low 32 bits sign-extended.
    void mul32(Register src1, Register src2, Register dest, Label* onOver, Label* onZero) {
        Smull(ARMRegister(dest, 64), ARMRegister(src1, 32), ARMRegister(src2, 32));
        if (onOver) {
            Cmp(ARMRegister(dest, 64), Operand(ARMRegister(dest, 32), vixl::SXTW));
            B(onOver, NotEqual);
        }
        if (onZero)
            Cbz(ARMRegister(dest, 32), onZero);

        // Clear upper 32 bits.
        Mov(ARMRegister(dest, 32), ARMRegister(dest, 32));
    }
1445 
    // Pop the return address into lr and return through the ABI epilogue.
    void ret() {
        pop(lr);
        abiret();
    }

    // Return and pop n bytes of arguments from the stack.
    void retn(Imm32 n) {
        // ip0 <- [sp]; sp += n; ret ip0
        Ldr(vixl::ip0, MemOperand(GetStackPointer64(), ptrdiff_t(n.value), vixl::PostIndex));
        syncStackPtr(); // SP is always used to transmit the stack between calls.
        Ret(vixl::ip0);
    }
1457 
    // Conditional jump (x86-style alias of branch()).
    void j(Condition cond, Label* dest) {
        B(dest, cond);
    }

    void branch(Condition cond, Label* label) {
        B(label, cond);
    }
    // Unconditional jump to another JitCode target; recorded as a pending
    // jump so the relocation can be resolved at copy time.
    void branch(JitCode* target) {
        syncStackPtr();
        addPendingJump(nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
        b(-1); // The jump target will be patched by executableCopy().
    }
1470 
    // 32-bit compare-and-branch family. Memory/absolute forms load into a
    // scratch register first and then defer to the register forms.
    void branch32(Condition cond, const Operand& lhs, Register rhs, Label* label) {
        // Since lhs is an operand it must be the rhs of Cmp, so compare
        // backwards and invert the condition accordingly.
        Cmp(ARMRegister(rhs, 32), lhs);
        B(label, Assembler::InvertCmpCondition(cond));
    }
    void branch32(Condition cond, const Operand& lhs, Imm32 rhs, Label* label) {
        ARMRegister l = lhs.reg();
        Cmp(l, Operand(rhs.value));
        B(label, cond);
    }
    void branch32(Condition cond, Register lhs, Register rhs, Label* label) {
        cmp32(lhs, rhs);
        B(label, cond);
    }
    void branch32(Condition cond, Register lhs, Imm32 imm, Label* label) {
        cmp32(lhs, imm);
        B(label, cond);
    }
    void branch32(Condition cond, const Address& lhs, Register rhs, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs.base);
        MOZ_ASSERT(scratch != rhs);
        load32(lhs, scratch);
        branch32(cond, scratch, rhs, label);
    }
    void branch32(Condition cond, const Address& lhs, Imm32 imm, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs.base);
        load32(lhs, scratch);
        branch32(cond, scratch, imm, label);
    }
    // Absolute/symbolic forms: materialize the address, then branch through
    // the Address overloads with offset 0.
    void branch32(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        movePtr(ImmPtr(lhs.addr), scratch);
        branch32(cond, Address(scratch, 0), rhs, label);
    }
    void branch32(Condition cond, AbsoluteAddress lhs, Imm32 rhs, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        movePtr(ImmPtr(lhs.addr), scratch);
        branch32(cond, Address(scratch, 0), rhs, label);
    }
    void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        movePtr(lhs, scratch);
        branch32(cond, Address(scratch, 0), rhs, label);
    }
    void branch32(Condition cond, BaseIndex lhs, Imm32 rhs, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
        MOZ_ASSERT(scratch32.asUnsized() != lhs.index);
        doBaseIndex(scratch32, lhs, vixl::LDR_w);
        branch32(cond, scratch32.asUnsized(), rhs, label);
    }
1530 
branchTest32(Condition cond,Register lhs,Register rhs,Label * label)1531     void branchTest32(Condition cond, Register lhs, Register rhs, Label* label) {
1532         MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
1533         // x86 prefers |test foo, foo| to |cmp foo, #0|.
1534         // Convert the former to the latter for ARM.
1535         if (lhs == rhs && (cond == Zero || cond == NonZero))
1536             cmp32(lhs, Imm32(0));
1537         else
1538             test32(lhs, rhs);
1539         B(label, cond);
1540     }
    // Immediate and memory forms of branchTest32; memory forms load into a
    // scratch register and defer to the register/immediate form.
    void branchTest32(Condition cond, Register lhs, Imm32 imm, Label* label) {
        MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
        test32(lhs, imm);
        B(label, cond);
    }
    void branchTest32(Condition cond, const Address& address, Imm32 imm, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != address.base);
        load32(address, scratch);
        branchTest32(cond, scratch, imm, label);
    }
    void branchTest32(Condition cond, AbsoluteAddress address, Imm32 imm, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        loadPtr(address, scratch);
        branchTest32(cond, scratch, imm, label);
    }
    // Emit a patchable jump: a pool-loaded pointer (initially the RepatchLabel
    // address) plus a branch instruction whose target is patched later.
    // Returns the offsets needed to repatch (load offset + pool entry index).
    CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond = Always,
                                 Label* documentation = nullptr)
    {
        ARMBuffer::PoolEntry pe;
        BufferOffset load_bo;
        BufferOffset branch_bo;

        // Does not overwrite condition codes from the caller.
        {
            vixl::UseScratchRegisterScope temps(this);
            const ARMRegister scratch64 = temps.AcquireX();
            load_bo = immPool64(scratch64, (uint64_t)label, &pe);
        }

        MOZ_ASSERT(!label->bound());
        if (cond != Always) {
            // Invert the condition to skip over the patchable branch when the
            // branch should not be taken.
            Label notTaken;
            B(&notTaken, Assembler::InvertCondition(cond));
            branch_bo = b(-1);
            bind(&notTaken);
        } else {
            // The nop keeps the layout uniform with the conditional case.
            nop();
            branch_bo = b(-1);
        }
        label->use(branch_bo.getOffset());
        return CodeOffsetJump(load_bo.getOffset(), pe.index());
    }
    // Loop back-edges use the same patchable-jump shape.
    CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr) {
        return jumpWithPatch(label, Always, documentation);
    }
    // Compare then emit a patchable jump taken when |cond| holds.
    template <typename T>
    CodeOffsetJump branchPtrWithPatch(Condition cond, Register reg, T ptr, RepatchLabel* label) {
        cmpPtr(reg, ptr);
        return jumpWithPatch(label, cond);
    }
    // Memory form: load through a scratch register before comparing.
    template <typename T>
    CodeOffsetJump branchPtrWithPatch(Condition cond, Address addr, T ptr, RepatchLabel* label) {
        // The scratch register is unused after the condition codes are set.
        {
            vixl::UseScratchRegisterScope temps(this);
            const Register scratch = temps.AcquireX().asUnsized();
            MOZ_ASSERT(scratch != addr.base);
            loadPtr(addr, scratch);
            cmpPtr(scratch, ptr);
        }
        return jumpWithPatch(label, cond);
    }
1606 
    // Pointer-width compare-and-branch family. Memory and immediate forms
    // stage operands in scratch registers, then defer to the register forms.
    void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != rhs);
        loadPtr(lhs, scratch);
        branchPtr(cond, scratch, rhs, label);
    }
    void branchPtr(Condition cond, Address lhs, ImmWord ptr, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs.base);
        loadPtr(lhs, scratch);
        branchPtr(cond, scratch, ptr, label);
    }
    void branchPtr(Condition cond, Address lhs, ImmPtr ptr, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs.base);
        loadPtr(lhs, scratch);
        branchPtr(cond, scratch, ptr, label);
    }
    void branchPtr(Condition cond, Address lhs, Register ptr, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs.base);
        MOZ_ASSERT(scratch != ptr);
        loadPtr(lhs, scratch);
        branchPtr(cond, scratch, ptr, label);
    }
    void branchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) {
        cmpPtr(lhs, imm);
        B(label, cond);
    }
    void branchPtr(Condition cond, Register lhs, ImmWord ptr, Label* label) {
        cmpPtr(lhs, ptr);
        B(label, cond);
    }
    void branchPtr(Condition cond, Register lhs, ImmPtr rhs, Label* label) {
        cmpPtr(lhs, rhs);
        B(label, cond);
    }
    // GC pointers must be materialized via movePtr so they are traceable.
    void branchPtr(Condition cond, Register lhs, ImmGCPtr ptr, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs);
        movePtr(ptr, scratch);
        branchPtr(cond, lhs, scratch, label);
    }
    // Needs two scratch registers: one for the GC pointer immediate and one
    // for the value loaded from memory.
    void branchPtr(Condition cond, Address lhs, ImmGCPtr ptr, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch1_64 = temps.AcquireX();
        const ARMRegister scratch2_64 = temps.AcquireX();
        MOZ_ASSERT(scratch1_64.asUnsized() != lhs.base);
        MOZ_ASSERT(scratch2_64.asUnsized() != lhs.base);

        movePtr(ptr, scratch1_64.asUnsized());
        loadPtr(lhs, scratch2_64.asUnsized());
        cmp(scratch2_64, scratch1_64);
        B(cond, label);

    }
    void branchPtr(Condition cond, Register lhs, Register rhs, Label* label) {
        Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
        B(label, cond);
    }
    void branchPtr(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != rhs);
        loadPtr(lhs, scratch);
        branchPtr(cond, scratch, rhs, label);
    }
    void branchPtr(Condition cond, AbsoluteAddress lhs, ImmWord ptr, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        loadPtr(lhs, scratch);
        branchPtr(cond, scratch, ptr, label);
    }
1685 
    // 64-bit compare-and-branch; only NotEqual is supported by callers today,
    // enforced by assertion. On ARM64 this is just a pointer-width compare.
    void branch64(Condition cond, const Address& lhs, Imm64 val, Label* label) {
        MOZ_ASSERT(cond == Assembler::NotEqual,
                   "other condition codes not supported");

        branchPtr(cond, lhs, ImmWord(val.value), label);
    }

    void branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
                  Label* label)
    {
        MOZ_ASSERT(cond == Assembler::NotEqual,
                   "other condition codes not supported");
        MOZ_ASSERT(lhs.base != scratch);
        MOZ_ASSERT(rhs.base != scratch);

        loadPtr(rhs, scratch);
        branchPtr(cond, lhs, scratch, label);
    }
1704 
    // Pointer-width bit-test-and-branch (Tst sets flags, B branches on them).
    void branchTestPtr(Condition cond, Register lhs, Register rhs, Label* label) {
        Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64)));
        B(label, cond);
    }
    void branchTestPtr(Condition cond, Register lhs, Imm32 imm, Label* label) {
        Tst(ARMRegister(lhs, 64), Operand(imm.value));
        B(label, cond);
    }
    void branchTestPtr(Condition cond, const Address& lhs, Imm32 imm, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs.base);
        loadPtr(lhs, scratch);
        branchTestPtr(cond, scratch, imm, label);
    }
    // Private pointers need no unboxing on ARM64, so these are plain
    // pointer compares.
    void branchPrivatePtr(Condition cond, const Address& lhs, ImmPtr ptr, Label* label) {
        branchPtr(cond, lhs, ptr, label);
    }

    void branchPrivatePtr(Condition cond, const Address& lhs, Register ptr, Label* label) {
        branchPtr(cond, lhs, ptr, label);
    }

    void branchPrivatePtr(Condition cond, Register lhs, ImmWord ptr, Label* label) {
        branchPtr(cond, lhs, ptr, label);
    }

    // Decrement lhs by imm (flag-setting) and branch on the result.
    void decBranchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) {
        Subs(ARMRegister(lhs, 64), ARMRegister(lhs, 64), Operand(imm.value));
        B(cond, label);
    }
1736 
    // Type tests on an already-extracted tag register: each computes a
    // condition via the matching test*() helper and branches on it.
    void branchTestUndefined(Condition cond, Register tag, Label* label) {
        Condition c = testUndefined(cond, tag);
        B(label, c);
    }
    void branchTestInt32(Condition cond, Register tag, Label* label) {
        Condition c = testInt32(cond, tag);
        B(label, c);
    }
    void branchTestDouble(Condition cond, Register tag, Label* label) {
        Condition c = testDouble(cond, tag);
        B(label, c);
    }
    void branchTestBoolean(Condition cond, Register tag, Label* label) {
        Condition c = testBoolean(cond, tag);
        B(label, c);
    }
    void branchTestNull(Condition cond, Register tag, Label* label) {
        Condition c = testNull(cond, tag);
        B(label, c);
    }
    void branchTestString(Condition cond, Register tag, Label* label) {
        Condition c = testString(cond, tag);
        B(label, c);
    }
    void branchTestSymbol(Condition cond, Register tag, Label* label) {
        Condition c = testSymbol(cond, tag);
        B(label, c);
    }
    void branchTestObject(Condition cond, Register tag, Label* label) {
        Condition c = testObject(cond, tag);
        B(label, c);
    }
    void branchTestNumber(Condition cond, Register tag, Label* label) {
        Condition c = testNumber(cond, tag);
        B(label, c);
    }
1773 
    // Type tests on a Value stored at an Address.
    void branchTestUndefined(Condition cond, const Address& address, Label* label) {
        Condition c = testUndefined(cond, address);
        B(label, c);
    }
    void branchTestInt32(Condition cond, const Address& address, Label* label) {
        Condition c = testInt32(cond, address);
        B(label, c);
    }
    void branchTestDouble(Condition cond, const Address& address, Label* label) {
        Condition c = testDouble(cond, address);
        B(label, c);
    }
branchTestBoolean(Condition cond,const Address & address,Label * label)1786     void branchTestBoolean(Condition cond, const Address& address, Label* label) {
1787         Condition c = testDouble(cond, address);
1788         B(label, c);
1789     }
    // Remaining Address-based type tests (Null through Number).
    void branchTestNull(Condition cond, const Address& address, Label* label) {
        Condition c = testNull(cond, address);
        B(label, c);
    }
    void branchTestString(Condition cond, const Address& address, Label* label) {
        Condition c = testString(cond, address);
        B(label, c);
    }
    void branchTestSymbol(Condition cond, const Address& address, Label* label) {
        Condition c = testSymbol(cond, address);
        B(label, c);
    }
    void branchTestObject(Condition cond, const Address& address, Label* label) {
        Condition c = testObject(cond, address);
        B(label, c);
    }
    void branchTestNumber(Condition cond, const Address& address, Label* label) {
        Condition c = testNumber(cond, address);
        B(label, c);
    }
1810 
    // Perform a type-test on a full Value loaded into a register.
    // Clobbers the ScratchReg (the test*() helpers extract the tag into it).
    void branchTestUndefined(Condition cond, const ValueOperand& src, Label* label) {
        Condition c = testUndefined(cond, src);
        B(label, c);
    }
    void branchTestInt32(Condition cond, const ValueOperand& src, Label* label) {
        Condition c = testInt32(cond, src);
        B(label, c);
    }
    void branchTestBoolean(Condition cond, const ValueOperand& src, Label* label) {
        Condition c = testBoolean(cond, src);
        B(label, c);
    }
    void branchTestDouble(Condition cond, const ValueOperand& src, Label* label) {
        Condition c = testDouble(cond, src);
        B(label, c);
    }
    void branchTestNull(Condition cond, const ValueOperand& src, Label* label) {
        Condition c = testNull(cond, src);
        B(label, c);
    }
    void branchTestString(Condition cond, const ValueOperand& src, Label* label) {
        Condition c = testString(cond, src);
        B(label, c);
    }
    void branchTestSymbol(Condition cond, const ValueOperand& src, Label* label) {
        Condition c = testSymbol(cond, src);
        B(label, c);
    }
    void branchTestObject(Condition cond, const ValueOperand& src, Label* label) {
        Condition c = testObject(cond, src);
        B(label, c);
    }
    void branchTestNumber(Condition cond, const ValueOperand& src, Label* label) {
        Condition c = testNumber(cond, src);
        B(label, c);
    }
1849 
    // Perform a type-test on a Value addressed by BaseIndex.
    // Clobbers the ScratchReg.
    void branchTestUndefined(Condition cond, const BaseIndex& address, Label* label) {
        Condition c = testUndefined(cond, address);
        B(label, c);
    }
    void branchTestInt32(Condition cond, const BaseIndex& address, Label* label) {
        Condition c = testInt32(cond, address);
        B(label, c);
    }
    void branchTestBoolean(Condition cond, const BaseIndex& address, Label* label) {
        Condition c = testBoolean(cond, address);
        B(label, c);
    }
    void branchTestDouble(Condition cond, const BaseIndex& address, Label* label) {
        Condition c = testDouble(cond, address);
        B(label, c);
    }
    void branchTestNull(Condition cond, const BaseIndex& address, Label* label) {
        Condition c = testNull(cond, address);
        B(label, c);
    }
    void branchTestString(Condition cond, const BaseIndex& address, Label* label) {
        Condition c = testString(cond, address);
        B(label, c);
    }
    void branchTestSymbol(Condition cond, const BaseIndex& address, Label* label) {
        Condition c = testSymbol(cond, address);
        B(label, c);
    }
    void branchTestObject(Condition cond, const BaseIndex& address, Label* label) {
        Condition c = testObject(cond, address);
        B(label, c);
    }
1884     template <typename T>
branchTestGCThing(Condition cond,const T & src,Label * label)1885     void branchTestGCThing(Condition cond, const T& src, Label* label) {
1886         Condition c = testGCThing(cond, src);
1887         B(label, c);
1888     }
1889     template <typename T>
branchTestPrimitive(Condition cond,const T & t,Label * label)1890     void branchTestPrimitive(Condition cond, const T& t, Label* label) {
1891         Condition c = testPrimitive(cond, t);
1892         B(label, c);
1893     }
1894     template <typename T>
branchTestMagic(Condition cond,const T & t,Label * label)1895     void branchTestMagic(Condition cond, const T& t, Label* label) {
1896         Condition c = testMagic(cond, t);
1897         B(label, c);
1898     }
    // Branch if |val| holds (cond == Equal) or does not hold (cond == NotEqual)
    // the specific magic value |why|.
    void branchTestMagicValue(Condition cond, const ValueOperand& val, JSWhyMagic why, Label* label) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        branchTestValue(cond, val, MagicValue(why), label);
    }
    // Compare the full 64-bit Value in |value| against the constant |v|.
    // Clobbers a scratch register to materialize |v|.
    void branchTestValue(Condition cond, const ValueOperand& value, const Value& v, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != value.valueReg());
        moveValue(v, ValueOperand(scratch64.asUnsized()));
        Cmp(ARMRegister(value.valueReg(), 64), scratch64);
        B(label, cond);
    }
    // Compare the Value stored at |valaddr| against the Value in |value|.
    // Clobbers a scratch register to hold the loaded Value.
    void branchTestValue(Condition cond, const Address& valaddr, const ValueOperand& value,
                         Label* label)
    {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != valaddr.base);
        MOZ_ASSERT(scratch64.asUnsized() != value.valueReg());
        loadValue(valaddr, scratch64.asUnsized());
        Cmp(ARMRegister(value.valueReg(), 64), Operand(scratch64));
        B(label, cond);
    }
    // 64-bit register-pair test. |temp| is unused on ARM64 (a Register64 is a
    // single register here); the parameter exists for cross-platform parity.
    void branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp, Label* label) {
        branchTestPtr(cond, lhs.reg, rhs.reg, label);
    }
1925 
    // Compare two doubles and set the NZCV flags. |cond| is unused here; it is
    // accepted for cross-platform interface parity.
    void compareDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) {
        Fcmp(ARMFPRegister(lhs, 64), ARMFPRegister(rhs, 64));
    }
    // Branch on a double comparison. An unordered result (a NaN operand) is
    // tested via the Overflow condition; DoubleNotEqual and
    // DoubleEqualOrUnordered need explicit multi-branch sequences because no
    // single ARM condition code expresses them.
    void branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs, Label* label) {
        compareDouble(cond, lhs, rhs);
        switch (cond) {
          case DoubleNotEqual: {
            Label unordered;
            // not equal *and* ordered
            branch(Overflow, &unordered);
            branch(NotEqual, label);
            bind(&unordered);
            break;
          }
          case DoubleEqualOrUnordered:
            // Take the branch on either an unordered result or equality.
            branch(Overflow, label);
            branch(Equal, label);
            break;
          default:
            branch(Condition(cond), label);
        }
    }
1948 
    // Single-precision variant of compareDouble; |cond| is likewise unused.
    void compareFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) {
        Fcmp(ARMFPRegister(lhs, 32), ARMFPRegister(rhs, 32));
    }
    // Single-precision variant of branchDouble; see that function for the
    // rationale behind the DoubleNotEqual / DoubleEqualOrUnordered sequences.
    void branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs, Label* label) {
        compareFloat(cond, lhs, rhs);
        switch (cond) {
          case DoubleNotEqual: {
            Label unordered;
            // not equal *and* ordered
            branch(Overflow, &unordered);
            branch(NotEqual, label);
            bind(&unordered);
            break;
          }
          case DoubleEqualOrUnordered:
            // Take the branch on either an unordered result or equality.
            branch(Overflow, label);
            branch(Equal, label);
            break;
          default:
            branch(Condition(cond), label);
        }
    }
1971 
    // Negative-zero tests are not implemented on ARM64; reaching these is a
    // bug in the caller.
    void branchNegativeZero(FloatRegister reg, Register scratch, Label* label) {
        MOZ_CRASH("branchNegativeZero");
    }
    void branchNegativeZeroFloat32(FloatRegister reg, Register scratch, Label* label) {
        MOZ_CRASH("branchNegativeZeroFloat32");
    }
1978 
    // Box a double: the double's raw bit pattern is the boxed Value, so a
    // plain FP->GP register move suffices.
    void boxDouble(FloatRegister src, const ValueOperand& dest) {
        Fmov(ARMRegister(dest.valueReg(), 64), ARMFPRegister(src, 64));
    }
    // Box a non-double payload |src| with the tag corresponding to |type|.
    void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
        boxValue(type, src, dest.valueReg());
    }
1985 
    // Note that the |dest| register here may be ScratchReg, so we shouldn't use it.
    void unboxInt32(const ValueOperand& src, Register dest) {
        // The int32 payload occupies the low 32 bits of the Value; a 32-bit
        // move discards the tag.
        move32(src.valueReg(), dest);
    }
    void unboxInt32(const Address& src, Register dest) {
        // A 32-bit load of the Value reads only the payload half.
        load32(src, dest);
    }
    void unboxDouble(const Address& src, FloatRegister dest) {
        loadDouble(src, dest);
    }
    void unboxDouble(const ValueOperand& src, FloatRegister dest) {
        // A boxed double is the double's bit pattern; move the bits across.
        Fmov(ARMFPRegister(dest, 64), ARMRegister(src.valueReg(), 64));
    }
1999 
    // Not implemented on ARM64.
    void unboxArgObjMagic(const ValueOperand& src, Register dest) {
        MOZ_CRASH("unboxArgObjMagic");
    }
    void unboxArgObjMagic(const Address& src, Register dest) {
        MOZ_CRASH("unboxArgObjMagic");
    }

    void unboxBoolean(const ValueOperand& src, Register dest) {
        // The boolean payload occupies the low 32 bits of the Value.
        move32(src.valueReg(), dest);
    }
    void unboxBoolean(const Address& src, Register dest) {
        load32(src, dest);
    }
2013 
    void unboxMagic(const ValueOperand& src, Register dest) {
        // The magic payload occupies the low 32 bits of the Value.
        move32(src.valueReg(), dest);
    }
    // Unbox any non-double value into dest. Prefer unboxInt32 or unboxBoolean
    // instead if the source type is known.
    void unboxNonDouble(const ValueOperand& src, Register dest) {
        unboxNonDouble(src.valueReg(), dest);
    }
    void unboxNonDouble(Address src, Register dest) {
        loadPtr(src, dest);
        unboxNonDouble(dest, dest);
    }

    void unboxNonDouble(Register src, Register dest) {
        // Strip the tag: keep only the low JSVAL_TAG_SHIFT payload bits.
        And(ARMRegister(dest, 64), ARMRegister(src, 64), Operand((1ULL << JSVAL_TAG_SHIFT) - 1ULL));
    }
2030 
    void unboxPrivate(const ValueOperand& src, Register dest) {
        // Extract JSVAL_TAG_SHIFT-1 payload bits starting at bit 1.
        // NOTE(review): this assumes private pointers are boxed shifted right
        // by one — confirm against the Value private-pointer encoding.
        ubfx(ARMRegister(dest, 64), ARMRegister(src.valueReg(), 64), 1, JSVAL_TAG_SHIFT - 1);
    }

    void notBoolean(const ValueOperand& val) {
        // Flip the boolean payload's low bit in place; the tag bits are
        // unaffected since only bit 0 changes.
        ARMRegister r(val.valueReg(), 64);
        eor(r, r, Operand(1));
    }
    // Unbox an object Value: objects are pointer payloads, so stripping the
    // tag (unboxNonDouble) recovers the JSObject*.
    void unboxObject(const ValueOperand& src, Register dest) {
        unboxNonDouble(src.valueReg(), dest);
    }
    void unboxObject(Register src, Register dest) {
        unboxNonDouble(src, dest);
    }
    void unboxObject(const Address& src, Register dest) {
        loadPtr(src, dest);
        unboxNonDouble(dest, dest);
    }
    void unboxObject(const BaseIndex& src, Register dest) {
        // 64-bit load of the Value, then strip the tag in place.
        doBaseIndex(ARMRegister(dest, 64), src, vixl::LDR_x);
        unboxNonDouble(dest, dest);
    }
2053 
unboxValue(const ValueOperand & src,AnyRegister dest)2054     void unboxValue(const ValueOperand& src, AnyRegister dest) {
2055         if (dest.isFloat()) {
2056             Label notInt32, end;
2057             branchTestInt32(Assembler::NotEqual, src, &notInt32);
2058             convertInt32ToDouble(src.valueReg(), dest.fpu());
2059             jump(&end);
2060             bind(&notInt32);
2061             unboxDouble(src, dest.fpu());
2062             bind(&end);
2063         } else {
2064             unboxNonDouble(src, dest.gpr());
2065         }
2066 
2067     }
    // Strings and symbols are pointer payloads: unbox by stripping the tag.
    void unboxString(const ValueOperand& operand, Register dest) {
        unboxNonDouble(operand, dest);
    }
    void unboxString(const Address& src, Register dest) {
        unboxNonDouble(src, dest);
    }
    void unboxSymbol(const ValueOperand& operand, Register dest) {
        unboxNonDouble(operand, dest);
    }
    void unboxSymbol(const Address& src, Register dest) {
        unboxNonDouble(src, dest);
    }
    // These functions use only the low 32 bits of the full value register,
    // which hold the int32/boolean payload; the tag bits are ignored by the
    // 32-bit conversion.
    void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) {
        convertInt32ToDouble(operand.valueReg(), dest);
    }
    void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) {
        convertInt32ToDouble(operand.valueReg(), dest);
    }

    void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
        convertInt32ToFloat32(operand.valueReg(), dest);
    }
    void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
        convertInt32ToFloat32(operand.valueReg(), dest);
    }
2094 
    // Materialize a floating-point immediate into an FP register.
    void loadConstantDouble(double d, FloatRegister dest) {
        Fmov(ARMFPRegister(dest, 64), d);
    }
    void loadConstantFloat32(float f, FloatRegister dest) {
        Fmov(ARMFPRegister(dest, 32), f);
    }
2101 
    // Register-based tests.
    //
    // Each test takes a register holding an already-extracted tag (see
    // splitTag), emits a compare, and returns the condition code to branch
    // on. |cond| must be Equal or NotEqual; the range-based tests below
    // return a translated unsigned condition rather than |cond| itself.
    Condition testUndefined(Condition cond, Register tag) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        cmp32(tag, ImmTag(JSVAL_TAG_UNDEFINED));
        return cond;
    }
    Condition testInt32(Condition cond, Register tag) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        cmp32(tag, ImmTag(JSVAL_TAG_INT32));
        return cond;
    }
    Condition testBoolean(Condition cond, Register tag) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        cmp32(tag, ImmTag(JSVAL_TAG_BOOLEAN));
        return cond;
    }
    Condition testNull(Condition cond, Register tag) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        cmp32(tag, ImmTag(JSVAL_TAG_NULL));
        return cond;
    }
    Condition testString(Condition cond, Register tag) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        cmp32(tag, ImmTag(JSVAL_TAG_STRING));
        return cond;
    }
    Condition testSymbol(Condition cond, Register tag) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        cmp32(tag, ImmTag(JSVAL_TAG_SYMBOL));
        return cond;
    }
    Condition testObject(Condition cond, Register tag) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        cmp32(tag, ImmTag(JSVAL_TAG_OBJECT));
        return cond;
    }
    Condition testDouble(Condition cond, Register tag) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        // All tags numerically at or below JSVAL_TAG_MAX_DOUBLE encode
        // doubles, so this is an unsigned range check, not an equality check.
        cmp32(tag, Imm32(JSVAL_TAG_MAX_DOUBLE));
        return (cond == Equal) ? BelowOrEqual : Above;
    }
    Condition testNumber(Condition cond, Register tag) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        // Numbers (double or int32) occupy the tag range up to and including
        // JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET.
        cmp32(tag, Imm32(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET));
        return (cond == Equal) ? BelowOrEqual : Above;
    }
    Condition testGCThing(Condition cond, Register tag) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        // GC things occupy the tag range starting at
        // JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET.
        cmp32(tag, Imm32(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
        return (cond == Equal) ? AboveOrEqual : Below;
    }
    Condition testMagic(Condition cond, Register tag) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        cmp32(tag, ImmTag(JSVAL_TAG_MAGIC));
        return cond;
    }
    Condition testPrimitive(Condition cond, Register tag) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        // Primitives occupy the tag range strictly below
        // JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET.
        cmp32(tag, Imm32(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET));
        return (cond == Equal) ? Below : AboveOrEqual;
    }
    Condition testError(Condition cond, Register tag) {
        // Errors are encoded as magic values.
        return testMagic(cond, tag);
    }
2166 
    // ValueOperand-based tests.
    //
    // Each test extracts the tag from the full Value register into a scratch
    // register (splitTag) and then dispatches to the register-based test
    // above. All of these may clobber a scratch register.
    Condition testInt32(Condition cond, const ValueOperand& value) {
        // The incoming ValueOperand may use scratch registers.
        vixl::UseScratchRegisterScope temps(this);

        if (value.valueReg() == ScratchReg2) {
            // The Value already lives in the second scratch register, so the
            // tag cannot be split out into it; test the tag bits in place.
            MOZ_ASSERT(temps.IsAvailable(ScratchReg64));
            MOZ_ASSERT(!temps.IsAvailable(ScratchReg2_64));
            temps.Exclude(ScratchReg64);

            if (cond != Equal && cond != NotEqual)
                MOZ_CRASH("NYI: non-equality comparisons");

            // In the event that the tag is not encodable in a single cmp / teq instruction,
            // perform the xor that teq would use, this will leave the tag bits being
            // zero, or non-zero, which can be tested with either and or shift.
            unsigned int n, imm_r, imm_s;
            uint64_t immediate = uint64_t(ImmTag(JSVAL_TAG_INT32).value) << JSVAL_TAG_SHIFT;
            if (IsImmLogical(immediate, 64, &n, &imm_s, &imm_r)) {
                Eor(ScratchReg64, ScratchReg2_64, Operand(immediate));
            } else {
                Mov(ScratchReg64, immediate);
                Eor(ScratchReg64, ScratchReg2_64, ScratchReg64);
            }
            // After the xor, any set bit in the tag portion means the tag
            // differs from JSVAL_TAG_INT32.
            Tst(ScratchReg64, Operand(-1ll << JSVAL_TAG_SHIFT));
            return cond;
        }

        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != value.valueReg());

        splitTag(value, scratch);
        return testInt32(cond, scratch);
    }
    Condition testBoolean(Condition cond, const ValueOperand& value) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(value.valueReg() != scratch);
        splitTag(value, scratch);
        return testBoolean(cond, scratch);
    }
    Condition testDouble(Condition cond, const ValueOperand& value) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(value.valueReg() != scratch);
        splitTag(value, scratch);
        return testDouble(cond, scratch);
    }
    Condition testNull(Condition cond, const ValueOperand& value) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(value.valueReg() != scratch);
        splitTag(value, scratch);
        return testNull(cond, scratch);
    }
    Condition testUndefined(Condition cond, const ValueOperand& value) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(value.valueReg() != scratch);
        splitTag(value, scratch);
        return testUndefined(cond, scratch);
    }
    Condition testString(Condition cond, const ValueOperand& value) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(value.valueReg() != scratch);
        splitTag(value, scratch);
        return testString(cond, scratch);
    }
    Condition testSymbol(Condition cond, const ValueOperand& value) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(value.valueReg() != scratch);
        splitTag(value, scratch);
        return testSymbol(cond, scratch);
    }
    Condition testObject(Condition cond, const ValueOperand& value) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(value.valueReg() != scratch);
        splitTag(value, scratch);
        return testObject(cond, scratch);
    }
    Condition testNumber(Condition cond, const ValueOperand& value) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(value.valueReg() != scratch);
        splitTag(value, scratch);
        return testNumber(cond, scratch);
    }
    Condition testPrimitive(Condition cond, const ValueOperand& value) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(value.valueReg() != scratch);
        splitTag(value, scratch);
        return testPrimitive(cond, scratch);
    }
    Condition testMagic(Condition cond, const ValueOperand& value) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(value.valueReg() != scratch);
        splitTag(value, scratch);
        return testMagic(cond, scratch);
    }
    Condition testError(Condition cond, const ValueOperand& value) {
        // Errors are encoded as magic values.
        return testMagic(cond, value);
    }
2274 
    // Address-based tests.
    //
    // Each test loads the tag of the Value stored at |address| into a scratch
    // register (splitTag) and dispatches to the register-based test.
    // Clobbers a scratch register, which must not alias |address.base|.
    Condition testGCThing(Condition cond, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(address.base != scratch);
        splitTag(address, scratch);
        return testGCThing(cond, scratch);
    }
    Condition testMagic(Condition cond, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(address.base != scratch);
        splitTag(address, scratch);
        return testMagic(cond, scratch);
    }
    Condition testInt32(Condition cond, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(address.base != scratch);
        splitTag(address, scratch);
        return testInt32(cond, scratch);
    }
    Condition testDouble(Condition cond, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(address.base != scratch);
        splitTag(address, scratch);
        return testDouble(cond, scratch);
    }
    Condition testBoolean(Condition cond, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(address.base != scratch);
        splitTag(address, scratch);
        return testBoolean(cond, scratch);
    }
    Condition testNull(Condition cond, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(address.base != scratch);
        splitTag(address, scratch);
        return testNull(cond, scratch);
    }
    Condition testUndefined(Condition cond, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(address.base != scratch);
        splitTag(address, scratch);
        return testUndefined(cond, scratch);
    }
    Condition testString(Condition cond, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(address.base != scratch);
        splitTag(address, scratch);
        return testString(cond, scratch);
    }
    Condition testSymbol(Condition cond, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(address.base != scratch);
        splitTag(address, scratch);
        return testSymbol(cond, scratch);
    }
    Condition testObject(Condition cond, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(address.base != scratch);
        splitTag(address, scratch);
        return testObject(cond, scratch);
    }
    Condition testNumber(Condition cond, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(address.base != scratch);
        splitTag(address, scratch);
        return testNumber(cond, scratch);
    }
2353 
    // BaseIndex-based tests.
    //
    // Like the Address-based tests, but for base+index addressing; the
    // scratch register must not alias either the base or the index register.
    Condition testUndefined(Condition cond, const BaseIndex& src) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(src.base != scratch);
        MOZ_ASSERT(src.index != scratch);
        splitTag(src, scratch);
        return testUndefined(cond, scratch);
    }
    Condition testNull(Condition cond, const BaseIndex& src) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(src.base != scratch);
        MOZ_ASSERT(src.index != scratch);
        splitTag(src, scratch);
        return testNull(cond, scratch);
    }
    Condition testBoolean(Condition cond, const BaseIndex& src) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(src.base != scratch);
        MOZ_ASSERT(src.index != scratch);
        splitTag(src, scratch);
        return testBoolean(cond, scratch);
    }
    Condition testString(Condition cond, const BaseIndex& src) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(src.base != scratch);
        MOZ_ASSERT(src.index != scratch);
        splitTag(src, scratch);
        return testString(cond, scratch);
    }
    Condition testSymbol(Condition cond, const BaseIndex& src) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(src.base != scratch);
        MOZ_ASSERT(src.index != scratch);
        splitTag(src, scratch);
        return testSymbol(cond, scratch);
    }
    Condition testInt32(Condition cond, const BaseIndex& src) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(src.base != scratch);
        MOZ_ASSERT(src.index != scratch);
        splitTag(src, scratch);
        return testInt32(cond, scratch);
    }
    Condition testObject(Condition cond, const BaseIndex& src) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(src.base != scratch);
        MOZ_ASSERT(src.index != scratch);
        splitTag(src, scratch);
        return testObject(cond, scratch);
    }
    Condition testDouble(Condition cond, const BaseIndex& src) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(src.base != scratch);
        MOZ_ASSERT(src.index != scratch);
        splitTag(src, scratch);
        return testDouble(cond, scratch);
    }
    Condition testMagic(Condition cond, const BaseIndex& src) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(src.base != scratch);
        MOZ_ASSERT(src.index != scratch);
        splitTag(src, scratch);
        return testMagic(cond, scratch);
    }
    Condition testGCThing(Condition cond, const BaseIndex& src) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(src.base != scratch);
        MOZ_ASSERT(src.index != scratch);
        splitTag(src, scratch);
        return testGCThing(cond, scratch);
    }
2435 
testInt32Truthy(bool truthy,const ValueOperand & operand)2436     Condition testInt32Truthy(bool truthy, const ValueOperand& operand) {
2437         ARMRegister payload32(operand.valueReg(), 32);
2438         Tst(payload32, payload32);
2439         return truthy ? NonZero : Zero;
2440     }
branchTestInt32Truthy(bool truthy,const ValueOperand & operand,Label * label)2441     void branchTestInt32Truthy(bool truthy, const ValueOperand& operand, Label* label) {
2442         Condition c = testInt32Truthy(truthy, operand);
2443         B(label, c);
2444     }
2445 
    // Branch on the truthiness of a double: falsy iff zero or NaN. An
    // unordered comparison (NaN operand) sets the V flag, which is tested via
    // the Overflow condition.
    void branchTestDoubleTruthy(bool truthy, FloatRegister reg, Label* label) {
        Fcmp(ARMFPRegister(reg, 64), 0.0);
        if (!truthy) {
            // falsy values are zero, and NaN.
            branch(Zero, label);
            branch(Overflow, label);
        } else {
            // truthy values are non-zero and not nan.
            // Branch around the target when either falsy condition holds.
            Label onFalse;
            branch(Zero, &onFalse);
            branch(Overflow, &onFalse);
            B(label);
            bind(&onFalse);
        }
    }
2462 
testBooleanTruthy(bool truthy,const ValueOperand & operand)2463     Condition testBooleanTruthy(bool truthy, const ValueOperand& operand) {
2464         ARMRegister payload32(operand.valueReg(), 32);
2465         Tst(payload32, payload32);
2466         return truthy ? NonZero : Zero;
2467     }
branchTestBooleanTruthy(bool truthy,const ValueOperand & operand,Label * label)2468     void branchTestBooleanTruthy(bool truthy, const ValueOperand& operand, Label* label) {
2469         Condition c = testBooleanTruthy(truthy, operand);
2470         B(label, c);
2471     }
testStringTruthy(bool truthy,const ValueOperand & value)2472     Condition testStringTruthy(bool truthy, const ValueOperand& value) {
2473         vixl::UseScratchRegisterScope temps(this);
2474         const Register scratch = temps.AcquireX().asUnsized();
2475         const ARMRegister scratch32(scratch, 32);
2476         const ARMRegister scratch64(scratch, 64);
2477 
2478         MOZ_ASSERT(value.valueReg() != scratch);
2479 
2480         unboxString(value, scratch);
2481         Ldr(scratch32, MemOperand(scratch64, JSString::offsetOfLength()));
2482         Cmp(scratch32, Operand(0));
2483         return truthy ? Condition::NonZero : Condition::Zero;
2484     }
branchTestStringTruthy(bool truthy,const ValueOperand & value,Label * label)2485     void branchTestStringTruthy(bool truthy, const ValueOperand& value, Label* label) {
2486         Condition c = testStringTruthy(truthy, value);
2487         B(label, c);
2488     }
    // Convert a boxed Value in |src|, known to be either an int32 or a double,
    // into the double register |dest|: doubles are bit-moved as-is, int32s are
    // converted to double.
    void int32OrDouble(Register src, ARMFPRegister dest) {
        Label isInt32;
        Label join;
        testInt32(Equal, ValueOperand(src));
        B(&isInt32, Equal);
        // It is a double: move the bits as-is.
        Fmov(dest, ARMRegister(src, 64));
        B(&join);
        bind(&isInt32);
        // It is an int32: do a conversion while moving.
        Scvtf(dest, ARMRegister(src, 32));
        bind(&join);
    }
    // Load and unbox the Value at |address| into |dest| according to |type|.
    // Float destinations accept either an int32 or a double encoding.
    void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
        if (dest.isFloat()) {
            // Load the full boxed Value, then unbox int32-or-double into the
            // FP register.
            vixl::UseScratchRegisterScope temps(this);
            const ARMRegister scratch64 = temps.AcquireX();
            MOZ_ASSERT(scratch64.asUnsized() != address.base);
            Ldr(scratch64, toMemOperand(address));
            int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
        } else if (type == MIRType_Int32 || type == MIRType_Boolean) {
            // The low 32 bits hold the payload for int32/boolean.
            load32(address, dest.gpr());
        } else {
            loadPtr(address, dest.gpr());
            unboxNonDouble(dest.gpr(), dest.gpr());
        }
    }

loadUnboxedValue(BaseIndex address,MIRType type,AnyRegister dest)2517     void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
2518         if (dest.isFloat()) {
2519             vixl::UseScratchRegisterScope temps(this);
2520             const ARMRegister scratch64 = temps.AcquireX();
2521             MOZ_ASSERT(scratch64.asUnsized() != address.base);
2522             MOZ_ASSERT(scratch64.asUnsized() != address.index);
2523             doBaseIndex(scratch64, address, vixl::LDR_x);
2524             int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
2525         }  else if (type == MIRType_Int32 || type == MIRType_Boolean) {
2526             load32(address, dest.gpr());
2527         } else {
2528             loadPtr(address, dest.gpr());
2529             unboxNonDouble(dest.gpr(), dest.gpr());
2530         }
2531     }
2532 
    // Not implemented on ARM64: crash if reached.
    void loadInstructionPointerAfterCall(Register dest) {
        MOZ_CRASH("loadInstructionPointerAfterCall");
    }

2537     // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
toggledJump(Label * label)2538     CodeOffset toggledJump(Label* label) {
2539         BufferOffset offset = b(label, Always);
2540         CodeOffset ret(offset.getOffset());
2541         return ret;
2542     }
2543 
2544     // load: offset to the load instruction obtained by movePatchablePtr().
writeDataRelocation(ImmGCPtr ptr,BufferOffset load)2545     void writeDataRelocation(ImmGCPtr ptr, BufferOffset load) {
2546         if (ptr.value)
2547             dataRelocations_.writeUnsigned(load.getOffset());
2548     }
    // Record a data relocation for a boxed Value loaded at |load|, if the
    // Value holds a GC thing. Loads of nursery cells are flagged so the
    // buffer can be handled specially by the GC.
    void writeDataRelocation(const Value& val, BufferOffset load) {
        if (val.isMarkable()) {
            gc::Cell* cell = reinterpret_cast<gc::Cell*>(val.toGCThing());
            if (cell && gc::IsInsideNursery(cell))
                embedsNurseryPointers_ = true;
            dataRelocations_.writeUnsigned(load.getOffset());
        }
    }

    // Record the offset of a pre-barrier site for later patching.
    void writePrebarrierOffset(CodeOffset label) {
        preBarriers_.writeUnsigned(label.offset());
    }

computeEffectiveAddress(const Address & address,Register dest)2562     void computeEffectiveAddress(const Address& address, Register dest) {
2563         Add(ARMRegister(dest, 64), ARMRegister(address.base, 64), Operand(address.offset));
2564     }
computeEffectiveAddress(const BaseIndex & address,Register dest)2565     void computeEffectiveAddress(const BaseIndex& address, Register dest) {
2566         ARMRegister dest64(dest, 64);
2567         ARMRegister base64(address.base, 64);
2568         ARMRegister index64(address.index, 64);
2569 
2570         Add(dest64, base64, Operand(index64, vixl::LSL, address.scale));
2571         if (address.offset)
2572             Add(dest64, dest64, Operand(address.offset));
2573     }
2574 
2575   public:
labelForPatch()2576     CodeOffset labelForPatch() {
2577         return CodeOffset(nextOffset().getOffset());
2578     }
2579 
2580     void handleFailureWithHandlerTail(void* handler);
2581 
    // FIXME: See CodeGeneratorX64 calls to noteAsmJSGlobalAccess.
    // Not implemented on ARM64: crash if reached.
    void patchAsmJSGlobalAccess(CodeOffset patchAt, uint8_t* code,
                                uint8_t* globalData, unsigned globalDataOffset)
    {
        MOZ_CRASH("patchAsmJSGlobalAccess");
    }

memIntToValue(const Address & src,const Address & dest)2589     void memIntToValue(const Address& src, const Address& dest) {
2590         vixl::UseScratchRegisterScope temps(this);
2591         const Register scratch = temps.AcquireX().asUnsized();
2592         MOZ_ASSERT(scratch != src.base);
2593         MOZ_ASSERT(scratch != dest.base);
2594         load32(src, scratch);
2595         storeValue(JSVAL_TYPE_INT32, scratch, dest);
2596     }
2597 
2598     void branchPtrInNurseryRange(Condition cond, Register ptr, Register temp, Label* label);
2599     void branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp, Label* label);
2600 
    // Not implemented on ARM64: crash if reached.
    void appendCallSite(const wasm::CallSiteDesc& desc) {
        MOZ_CRASH("appendCallSite");
    }

    // Not implemented on ARM64: crash if reached.
    void callExit(wasm::SymbolicAddress imm, uint32_t stackArgBytes) {
        MOZ_CRASH("callExit");
    }

    // Record |framePtr| as the last profiling frame of the current
    // JitActivation and clear the last profiling call site. |scratch| is
    // clobbered with the activation pointer.
    void profilerEnterFrame(Register framePtr, Register scratch) {
        AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
        loadPtr(activation, scratch);
        storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
        storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
    }
    // Tail-jump to the profiler exit-frame stub.
    void profilerExitFrame() {
        branch(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
    }
ToPayload(Address value)2618     Address ToPayload(Address value) {
2619         return value;
2620     }
ToType(Address value)2621     Address ToType(Address value) {
2622         return value;
2623     }
2624 
2625   private:
    // Private atomic-op backends. None of these are implemented on ARM64 yet;
    // all of the public wrappers below funnel into these and will crash.
    template <typename T>
    void compareExchange(int nbytes, bool signExtend, const T& address, Register oldval,
                         Register newval, Register output)
    {
        MOZ_CRASH("compareExchange");
    }

    template <typename T>
    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
                       const T& address, Register temp, Register output)
    {
        MOZ_CRASH("atomicFetchOp");
    }

    template <typename T>
    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
                       const T& address, Register temp, Register output)
    {
        MOZ_CRASH("atomicFetchOp");
    }

    template <typename T>
    void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const T& mem) {
        MOZ_CRASH("atomicEffectOp");
    }

    template <typename T>
    void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const T& mem) {
        MOZ_CRASH("atomicEffectOp");
    }

2657   public:
    // T in {Address,BaseIndex}
    // S in {Imm32,Register}

    // Width/extension-specific compare-exchange wrappers; all delegate to the
    // private compareExchange() backend.
    template <typename T>
    void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register output)
    {
        compareExchange(1, true, mem, oldval, newval, output);
    }
    template <typename T>
    void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register output)
    {
        compareExchange(1, false, mem, oldval, newval, output);
    }
    template <typename T>
    void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register output)
    {
        compareExchange(2, true, mem, oldval, newval, output);
    }
    template <typename T>
    void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register output)
    {
        compareExchange(2, false, mem, oldval, newval, output);
    }
    template <typename T>
    void compareExchange32(const T& mem, Register oldval, Register newval, Register output)  {
        compareExchange(4, false, mem, oldval, newval, output);
    }
2685     template <typename T>
atomicExchange32(const T & mem,Register value,Register output)2686     void atomicExchange32(const T& mem, Register value, Register output) {
2687         MOZ_CRASH("atomicExchang32");
2688     }
2689 
2690     template <typename T>
atomicExchange8ZeroExtend(const T & mem,Register value,Register output)2691     void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) {
2692         MOZ_CRASH("atomicExchange8ZeroExtend");
2693     }
2694     template <typename T>
atomicExchange8SignExtend(const T & mem,Register value,Register output)2695     void atomicExchange8SignExtend(const T& mem, Register value, Register output) {
2696         MOZ_CRASH("atomicExchange8SignExtend");
2697     }
2698 
2699     template <typename T, typename S>
atomicFetchAdd8SignExtend(const S & value,const T & mem,Register temp,Register output)2700     void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register temp, Register output) {
2701         atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, temp, output);
2702     }
2703     template <typename T, typename S>
atomicFetchAdd8ZeroExtend(const S & value,const T & mem,Register temp,Register output)2704     void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
2705         atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, temp, output);
2706     }
2707     template <typename T, typename S>
atomicFetchAdd16SignExtend(const S & value,const T & mem,Register temp,Register output)2708     void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register temp, Register output) {
2709         atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, temp, output);
2710     }
2711     template <typename T, typename S>
atomicFetchAdd16ZeroExtend(const S & value,const T & mem,Register temp,Register output)2712     void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
2713         atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, temp, output);
2714     }
2715     template <typename T, typename S>
atomicFetchAdd32(const S & value,const T & mem,Register temp,Register output)2716     void atomicFetchAdd32(const S& value, const T& mem, Register temp, Register output) {
2717         atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, temp, output);
2718     }
2719 
2720     template <typename T, typename S>
atomicAdd8(const S & value,const T & mem)2721     void atomicAdd8(const S& value, const T& mem) {
2722         atomicEffectOp(1, AtomicFetchAddOp, value, mem);
2723     }
2724     template <typename T, typename S>
atomicAdd16(const S & value,const T & mem)2725     void atomicAdd16(const S& value, const T& mem) {
2726         atomicEffectOp(2, AtomicFetchAddOp, value, mem);
2727     }
2728     template <typename T, typename S>
atomicAdd32(const S & value,const T & mem)2729     void atomicAdd32(const S& value, const T& mem) {
2730         atomicEffectOp(4, AtomicFetchAddOp, value, mem);
2731     }
2732 
2733     template <typename T>
atomicExchange16ZeroExtend(const T & mem,Register value,Register output)2734     void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) {
2735         MOZ_CRASH("atomicExchange16ZeroExtend");
2736     }
2737     template <typename T>
atomicExchange16SignExtend(const T & mem,Register value,Register output)2738     void atomicExchange16SignExtend(const T& mem, Register value, Register output) {
2739         MOZ_CRASH("atomicExchange16SignExtend");
2740     }
2741 
2742     template <typename T, typename S>
atomicFetchSub8SignExtend(const S & value,const T & mem,Register temp,Register output)2743     void atomicFetchSub8SignExtend(const S& value, const T& mem, Register temp, Register output) {
2744         atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, temp, output);
2745     }
2746     template <typename T, typename S>
atomicFetchSub8ZeroExtend(const S & value,const T & mem,Register temp,Register output)2747     void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
2748         atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, temp, output);
2749     }
2750     template <typename T, typename S>
atomicFetchSub16SignExtend(const S & value,const T & mem,Register temp,Register output)2751     void atomicFetchSub16SignExtend(const S& value, const T& mem, Register temp, Register output) {
2752         atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, temp, output);
2753     }
2754     template <typename T, typename S>
atomicFetchSub16ZeroExtend(const S & value,const T & mem,Register temp,Register output)2755     void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
2756         atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, temp, output);
2757     }
2758     template <typename T, typename S>
atomicFetchSub32(const S & value,const T & mem,Register temp,Register output)2759     void atomicFetchSub32(const S& value, const T& mem, Register temp, Register output) {
2760         atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, temp, output);
2761     }
2762 
2763     template <typename T, typename S>
atomicSub8(const S & value,const T & mem)2764     void atomicSub8(const S& value, const T& mem) {
2765         atomicEffectOp(1, AtomicFetchSubOp, value, mem);
2766     }
2767     template <typename T, typename S>
atomicSub16(const S & value,const T & mem)2768     void atomicSub16(const S& value, const T& mem) {
2769         atomicEffectOp(2, AtomicFetchSubOp, value, mem);
2770     }
2771     template <typename T, typename S>
atomicSub32(const S & value,const T & mem)2772     void atomicSub32(const S& value, const T& mem) {
2773         atomicEffectOp(4, AtomicFetchSubOp, value, mem);
2774     }
2775 
2776     template <typename T, typename S>
atomicFetchAnd8SignExtend(const S & value,const T & mem,Register temp,Register output)2777     void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register temp, Register output) {
2778         atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, temp, output);
2779     }
2780     template <typename T, typename S>
atomicFetchAnd8ZeroExtend(const S & value,const T & mem,Register temp,Register output)2781     void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
2782         atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, temp, output);
2783     }
2784     template <typename T, typename S>
atomicFetchAnd16SignExtend(const S & value,const T & mem,Register temp,Register output)2785     void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register temp, Register output) {
2786         atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, temp, output);
2787     }
2788     template <typename T, typename S>
atomicFetchAnd16ZeroExtend(const S & value,const T & mem,Register temp,Register output)2789     void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
2790         atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, temp, output);
2791     }
2792     template <typename T, typename S>
atomicFetchAnd32(const S & value,const T & mem,Register temp,Register output)2793     void atomicFetchAnd32(const S& value, const T& mem, Register temp, Register output) {
2794         atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, temp, output);
2795     }
2796 
2797     template <typename T, typename S>
atomicAnd8(const S & value,const T & mem)2798     void atomicAnd8(const S& value, const T& mem) {
2799         atomicEffectOp(1, AtomicFetchAndOp, value, mem);
2800     }
2801     template <typename T, typename S>
atomicAnd16(const S & value,const T & mem)2802     void atomicAnd16(const S& value, const T& mem) {
2803         atomicEffectOp(2, AtomicFetchAndOp, value, mem);
2804     }
2805     template <typename T, typename S>
atomicAnd32(const S & value,const T & mem)2806     void atomicAnd32(const S& value, const T& mem) {
2807         atomicEffectOp(4, AtomicFetchAndOp, value, mem);
2808     }
2809 
2810     template <typename T, typename S>
atomicFetchOr8SignExtend(const S & value,const T & mem,Register temp,Register output)2811     void atomicFetchOr8SignExtend(const S& value, const T& mem, Register temp, Register output) {
2812         atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, temp, output);
2813     }
2814     template <typename T, typename S>
atomicFetchOr8ZeroExtend(const S & value,const T & mem,Register temp,Register output)2815     void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
2816         atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, temp, output);
2817     }
2818     template <typename T, typename S>
atomicFetchOr16SignExtend(const S & value,const T & mem,Register temp,Register output)2819     void atomicFetchOr16SignExtend(const S& value, const T& mem, Register temp, Register output) {
2820         atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, temp, output);
2821     }
2822     template <typename T, typename S>
atomicFetchOr16ZeroExtend(const S & value,const T & mem,Register temp,Register output)2823     void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
2824         atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, temp, output);
2825     }
2826     template <typename T, typename S>
atomicFetchOr32(const S & value,const T & mem,Register temp,Register output)2827     void atomicFetchOr32(const S& value, const T& mem, Register temp, Register output) {
2828         atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, temp, output);
2829     }
2830 
2831     template <typename T, typename S>
atomicOr8(const S & value,const T & mem)2832     void atomicOr8(const S& value, const T& mem) {
2833         atomicEffectOp(1, AtomicFetchOrOp, value, mem);
2834     }
2835     template <typename T, typename S>
atomicOr16(const S & value,const T & mem)2836     void atomicOr16(const S& value, const T& mem) {
2837         atomicEffectOp(2, AtomicFetchOrOp, value, mem);
2838     }
2839     template <typename T, typename S>
atomicOr32(const S & value,const T & mem)2840     void atomicOr32(const S& value, const T& mem) {
2841         atomicEffectOp(4, AtomicFetchOrOp, value, mem);
2842     }
2843 
2844     template <typename T, typename S>
atomicFetchXor8SignExtend(const S & value,const T & mem,Register temp,Register output)2845     void atomicFetchXor8SignExtend(const S& value, const T& mem, Register temp, Register output) {
2846         atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, temp, output);
2847     }
2848     template <typename T, typename S>
atomicFetchXor8ZeroExtend(const S & value,const T & mem,Register temp,Register output)2849     void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
2850         atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, temp, output);
2851     }
2852     template <typename T, typename S>
atomicFetchXor16SignExtend(const S & value,const T & mem,Register temp,Register output)2853     void atomicFetchXor16SignExtend(const S& value, const T& mem, Register temp, Register output) {
2854         atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, temp, output);
2855     }
2856     template <typename T, typename S>
atomicFetchXor16ZeroExtend(const S & value,const T & mem,Register temp,Register output)2857     void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
2858         atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, temp, output);
2859     }
2860     template <typename T, typename S>
atomicFetchXor32(const S & value,const T & mem,Register temp,Register output)2861     void atomicFetchXor32(const S& value, const T& mem, Register temp, Register output) {
2862         atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, temp, output);
2863     }
2864 
2865     template <typename T, typename S>
atomicXor8(const S & value,const T & mem)2866     void atomicXor8(const S& value, const T& mem) {
2867         atomicEffectOp(1, AtomicFetchXorOp, value, mem);
2868     }
2869     template <typename T, typename S>
atomicXor16(const S & value,const T & mem)2870     void atomicXor16(const S& value, const T& mem) {
2871         atomicEffectOp(2, AtomicFetchXorOp, value, mem);
2872     }
2873     template <typename T, typename S>
atomicXor32(const S & value,const T & mem)2874     void atomicXor32(const S& value, const T& mem) {
2875         atomicEffectOp(4, AtomicFetchXorOp, value, mem);
2876     }
2877 
2878     template<typename T>
2879     void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
2880                                         Register temp, AnyRegister output);
2881 
2882     template<typename T>
2883     void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
2884                                        Register temp, AnyRegister output);
2885 
    // Emit a BLR or NOP instruction. ToggleCall can be used to patch
    // this instruction.
    // The emitted sequence is: sync of the stack pointer, a pool load of the
    // target into ScratchReg2_64, then either BLR (enabled) or NOP (disabled).
    CodeOffset toggledCall(JitCode* target, bool enabled) {
        // The returned offset must be to the first instruction generated,
        // for the debugger to match offset with Baseline's pcMappingEntries_.
        BufferOffset offset = nextOffset();

        syncStackPtr();

        BufferOffset loadOffset;
        {
            vixl::UseScratchRegisterScope temps(this);

            // The register used for the load is hardcoded, so that ToggleCall
            // can patch in the branch instruction easily. This could be changed,
            // but then ToggleCall must read the target register from the load.
            MOZ_ASSERT(temps.IsAvailable(ScratchReg2_64));
            temps.Exclude(ScratchReg2_64);

            loadOffset = immPool64(ScratchReg2_64, uint64_t(target->raw()));

            if (enabled)
                blr(ScratchReg2_64);
            else
                nop();
        }

        // Register the load site for JitCode relocation.
        addPendingJump(loadOffset, ImmPtr(target->raw()), Relocation::JITCODE);
        CodeOffset ret(offset.getOffset());
        return ret;
    }

ToggledCallSize(uint8_t * code)2918     static size_t ToggledCallSize(uint8_t* code) {
2919         static const uint32_t syncStackInstruction = 0x9100039f; // mov sp, r28
2920 
2921         // start it off as an 8 byte sequence
2922         int ret = 8;
2923         Instruction* cur = (Instruction*)code;
2924         uint32_t* curw = (uint32_t*)code;
2925 
2926         if (*curw == syncStackInstruction) {
2927             ret += 4;
2928             cur += 4;
2929         }
2930 
2931         if (cur->IsUncondB())
2932             ret += cur->ImmPCRawOffset() << vixl::kInstructionSizeLog2;
2933 
2934         return ret;
2935     }
2936 
checkARMRegAlignment(const ARMRegister & reg)2937     void checkARMRegAlignment(const ARMRegister& reg) {
2938 #ifdef DEBUG
2939         vixl::UseScratchRegisterScope temps(this);
2940         const ARMRegister scratch64 = temps.AcquireX();
2941         MOZ_ASSERT(scratch64.asUnsized() != reg.asUnsized());
2942         Label aligned;
2943         Mov(scratch64, reg);
2944         Tst(scratch64, Operand(StackAlignment - 1));
2945         B(Zero, &aligned);
2946         breakpoint();
2947         bind(&aligned);
2948         Mov(scratch64, vixl::xzr); // Clear the scratch register for sanity.
2949 #endif
2950     }
2951 
    // Debug-only: verify that the (pseudo) stack pointer is aligned.
    void checkStackAlignment() {
#ifdef DEBUG
        checkARMRegAlignment(GetStackPointer64());

        // If another register is being used to track pushes, check sp explicitly.
        if (!GetStackPointer64().Is(vixl::sp))
            checkARMRegAlignment(vixl::sp);
#endif
    }

    // Return to the caller per the ABI, after synchronizing the real sp with
    // the pseudo stack pointer.
    void abiret() {
        syncStackPtr(); // SP is always used to transmit the stack between calls.
        vixl::MacroAssembler::Ret(vixl::lr);
    }

mulBy3(Register src,Register dest)2967     void mulBy3(Register src, Register dest) {
2968         ARMRegister xdest(dest, 64);
2969         ARMRegister xsrc(src, 64);
2970         Add(xdest, xsrc, Operand(xsrc, vixl::LSL, 1));
2971     }
2972 
mul64(Imm64 imm,const Register64 & dest)2973     void mul64(Imm64 imm, const Register64& dest) {
2974         vixl::UseScratchRegisterScope temps(this);
2975         const ARMRegister scratch64 = temps.AcquireX();
2976         MOZ_ASSERT(dest.reg != scratch64.asUnsized());
2977         mov(ImmWord(imm.value), scratch64.asUnsized());
2978         Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), scratch64);
2979     }
2980 
    // Convert an unsigned 64-bit integer to double. |temp| is unused on
    // ARM64: Ucvtf converts directly.
    void convertUInt64ToDouble(Register64 src, Register temp, FloatRegister dest) {
        Ucvtf(ARMFPRegister(dest, 64), ARMRegister(src.reg, 64));
    }
    // dest *= *imm, where |imm| points at a double constant. Uses one integer
    // and one FP scratch register; the integer scratch must be acquired before
    // the FP one so the pointer is live while the double is loaded.
    void mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(temp != scratch);
        movePtr(imm, scratch);
        const ARMFPRegister scratchDouble = temps.AcquireD();
        Ldr(scratchDouble, MemOperand(Address(scratch, 0)));
        fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), scratchDouble);
    }

    // dest += src, setting flags, then branch on |cond|.
    template <typename T>
    void branchAdd32(Condition cond, T src, Register dest, Label* label) {
        adds32(src, dest);
        branch(cond, label);
    }

    // dest -= src, setting flags, then branch on |cond|.
    template <typename T>
    void branchSub32(Condition cond, T src, Register dest, Label* label) {
        subs32(src, dest);
        branch(cond, label);
    }
    // Not implemented on ARM64: crash if reached.
    void clampCheck(Register r, Label* handleNotAnInt) {
        MOZ_CRASH("clampCheck");
    }

    // Not implemented on ARM64: crash if reached.
    void stackCheck(ImmWord limitAddr, Label* label) {
        MOZ_CRASH("stackCheck");
    }
    // Clamp a signed int32 in |reg| to the range [0, 255] in place. A single
    // compare against the value's own low byte feeds both conditional selects:
    // a negative input selects 0, an input above 255 selects 0xff.
    void clampIntToUint8(Register reg) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        const ARMRegister reg32(reg, 32);
        MOZ_ASSERT(!scratch32.Is(reg32));

        // Compare reg against its zero-extended low byte; the flags are reused
        // by both Csel instructions below.
        Cmp(reg32, Operand(reg32, vixl::UXTB));
        Csel(reg32, reg32, vixl::wzr, Assembler::GreaterThanOrEqual);
        Mov(scratch32, Operand(0xff));
        Csel(reg32, reg32, scratch32, Assembler::LessThanOrEqual);
    }

incrementInt32Value(const Address & addr)3024     void incrementInt32Value(const Address& addr) {
3025         vixl::UseScratchRegisterScope temps(this);
3026         const ARMRegister scratch32 = temps.AcquireW();
3027         MOZ_ASSERT(scratch32.asUnsized() != addr.base);
3028 
3029         load32(addr, scratch32.asUnsized());
3030         Add(scratch32, scratch32, Operand(1));
3031         store32(scratch32.asUnsized(), addr);
3032     }
    // Increment, in place, the 64-bit counter at the absolute address |dest|.
    // NOTE(review): relies on the scratch scope handing out two distinct X
    // registers from consecutive AcquireX() calls — confirm two are available
    // at every call site.
    void inc64(AbsoluteAddress dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratchAddr64 = temps.AcquireX();
        const ARMRegister scratch64 = temps.AcquireX();

        Mov(scratchAddr64, uint64_t(dest.addr));
        Ldr(scratch64, MemOperand(scratchAddr64, 0));
        Add(scratch64, scratch64, Operand(1));
        Str(scratch64, MemOperand(scratchAddr64, 0));
    }

    // Heap bounds check: branch to |onFail| when ptrReg >= heap length
    // (unsigned). If |zeroMe| is provided, it is conditionally canonicalized
    // on the failure path before the branch: an integer register is zeroed,
    // a float/double register is set to NaN. The in-bounds (Below) path
    // leaves |zeroMe| untouched.
    void BoundsCheck(Register ptrReg, Label* onFail, vixl::CPURegister zeroMe = vixl::NoReg) {
        // Both operands are registers, so this Cmp emits a single compare
        // instruction (no immediate expansion). Its flags are consumed by the
        // conditional selects below and the final branch; none of the
        // intervening instructions (Csel, Fmov, Fcsel) alter the flags.
        Cmp(ARMRegister(ptrReg, 32), ARMRegister(HeapLenReg, 32));
        if (!zeroMe.IsNone()) {
            if (zeroMe.IsRegister()) {
                // Integer register: keep it when in bounds, else zero.
                Csel(ARMRegister(zeroMe),
                     ARMRegister(zeroMe),
                     Operand(zeroMe.Is32Bits() ? vixl::wzr : vixl::xzr),
                     Assembler::Below);
            } else if (zeroMe.Is32Bits()) {
                // 32-bit FP register: keep it when in bounds, else NaN.
                vixl::UseScratchRegisterScope temps(this);
                const ARMFPRegister scratchFloat = temps.AcquireS();
                Fmov(scratchFloat, JS::GenericNaN());
                Fcsel(ARMFPRegister(zeroMe), ARMFPRegister(zeroMe), scratchFloat, Assembler::Below);
            } else {
                // 64-bit FP register: keep it when in bounds, else NaN.
                vixl::UseScratchRegisterScope temps(this);
                const ARMFPRegister scratchDouble = temps.AcquireD();
                Fmov(scratchDouble, JS::GenericNaN());
                Fcsel(ARMFPRegister(zeroMe), ARMFPRegister(zeroMe), scratchDouble, Assembler::Below);
            }
        }
        B(onFail, Assembler::AboveOrEqual);
    }
    // Emit a breakpoint instruction; defined out of line.
    void breakpoint();
3068 
    // Emits a simulator directive (an svc instruction the ARM64 simulator
    // intercepts) to save the current sp on an internal stack.
    // No-op in non-simulator builds.
    void simulatorMarkSP() {
#ifdef JS_SIMULATOR_ARM64
        svc(vixl::kMarkStackPointer);
#endif
    }
3075 
    // Emits a simulator directive (an svc instruction the ARM64 simulator
    // intercepts) to pop from its internal stack and assert that the value
    // is equal to the current sp. Pairs with simulatorMarkSP().
    // No-op in non-simulator builds.
    void simulatorCheckSP() {
#ifdef JS_SIMULATOR_ARM64
        svc(vixl::kCheckStackPointer);
#endif
    }
3083 
loadAsmJSActivation(Register dest)3084     void loadAsmJSActivation(Register dest) {
3085         loadPtr(Address(GlobalReg, wasm::ActivationGlobalDataOffset - AsmJSGlobalRegBias), dest);
3086     }
loadAsmJSHeapRegisterFromGlobalData()3087     void loadAsmJSHeapRegisterFromGlobalData() {
3088         loadPtr(Address(GlobalReg, wasm::HeapGlobalDataOffset - AsmJSGlobalRegBias), HeapReg);
3089         loadPtr(Address(GlobalReg, wasm::HeapGlobalDataOffset - AsmJSGlobalRegBias + 8), HeapLenReg);
3090     }
3091 
    // Overwrites the payload bits (the low JSVAL_TAG_SHIFT bits) of a dest
    // register containing a Value, leaving the tag bits intact.
    void movePayload(Register src, Register dest) {
        // Bfxil cannot be used with the zero register as a source, so a zero
        // payload is written by masking the payload bits out of dest instead.
        if (src == rzr)
            And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(~int64_t(JSVAL_PAYLOAD_MASK)));
        else
            Bfxil(ARMRegister(dest, 64), ARMRegister(src, 64), 0, JSVAL_TAG_SHIFT);
    }
3100 
    // Returns the current offset into the instruction buffer, i.e. where the
    // next instruction will be emitted.
    // FIXME: Should be in Assembler?
    // FIXME: Should be const?
    uint32_t currentOffset() const {
        return nextOffset().getOffset();
    }
3106 
3107   protected:
buildOOLFakeExitFrame(void * fakeReturnAddr)3108     bool buildOOLFakeExitFrame(void* fakeReturnAddr) {
3109         uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
3110         Push(Imm32(descriptor));
3111         Push(ImmPtr(fakeReturnAddr));
3112         return true;
3113     }
3114 };
3115 
3116 typedef MacroAssemblerCompat MacroAssemblerSpecific;
3117 
3118 } // namespace jit
3119 } // namespace js
3120 
3121 #endif // jit_arm64_MacroAssembler_arm64_h
3122