1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #ifndef jit_arm64_MacroAssembler_arm64_h
8 #define jit_arm64_MacroAssembler_arm64_h
9
10 #include "jit/arm64/Assembler-arm64.h"
11 #include "jit/arm64/vixl/Debugger-vixl.h"
12 #include "jit/arm64/vixl/MacroAssembler-vixl.h"
13 #include "jit/AtomicOp.h"
14 #include "jit/MoveResolver.h"
15 #include "vm/BigIntType.h" // JS::BigInt
16 #include "wasm/WasmTypes.h"
17
18 #ifdef _M_ARM64
19 # ifdef move32
20 # undef move32
21 # endif
22 # ifdef move64
23 # undef move64
24 # endif
25 #endif
26
27 namespace js {
28 namespace jit {
29
30 // Import VIXL operands directly into the jit namespace for shared code.
31 using vixl::MemOperand;
32 using vixl::Operand;
33
34 struct ImmShiftedTag : public ImmWord {
ImmShiftedTagImmShiftedTag35 explicit ImmShiftedTag(JSValueShiftedTag shtag) : ImmWord((uintptr_t)shtag) {}
36
ImmShiftedTagImmShiftedTag37 explicit ImmShiftedTag(JSValueType type)
38 : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type)))) {
39 }
40 };
41
// An Imm32 wrapping an unshifted JSValue type tag.
struct ImmTag : public Imm32 {
  explicit ImmTag(JSValueTag tag) : Imm32(tag) {}
};
45
46 class ScratchTagScope;
47
48 class MacroAssemblerCompat : public vixl::MacroAssembler {
49 public:
50 typedef vixl::Condition Condition;
51
52 private:
53 // Perform a downcast. Should be removed by Bug 996602.
54 js::jit::MacroAssembler& asMasm();
55 const js::jit::MacroAssembler& asMasm() const;
56
57 public:
58 // Restrict to only VIXL-internal functions.
59 vixl::MacroAssembler& asVIXL();
60 const MacroAssembler& asVIXL() const;
61
62 protected:
63 bool enoughMemory_;
64 uint32_t framePushed_;
65
  // Start with an empty frame; enoughMemory_ stays true until an
  // allocation failure is recorded.
  MacroAssemblerCompat()
      : vixl::MacroAssembler(), enoughMemory_(true), framePushed_(0) {}
68
69 protected:
70 MoveResolver moveResolver_;
71
72 public:
oom()73 bool oom() const { return Assembler::oom() || !enoughMemory_; }
toARMRegister(RegisterOrSP r,size_t size)74 static ARMRegister toARMRegister(RegisterOrSP r, size_t size) {
75 if (IsHiddenSP(r)) {
76 MOZ_ASSERT(size == 64);
77 return sp;
78 }
79 return ARMRegister(AsRegister(r), size);
80 }
  // Build a base+offset MemOperand from a jit::Address.
  static MemOperand toMemOperand(const Address& a) {
    return MemOperand(toARMRegister(a.base, 64), a.offset);
  }
  // Emit a load/store of |rt| addressed by a BaseIndex. When the offset is
  // zero and the scale is either 0 or matches the operand size, the
  // [base, index, LSL #scale] addressing mode encodes directly; otherwise
  // the effective base is materialized into a scratch register first.
  void doBaseIndex(const vixl::CPURegister& rt, const BaseIndex& addr,
                   vixl::LoadStoreOp op) {
    const ARMRegister base = toARMRegister(addr.base, 64);
    const ARMRegister index = ARMRegister(addr.index, 64);
    const unsigned scale = addr.scale;

    // Fast path: scaled-register addressing mode.
    if (!addr.offset &&
        (!scale || scale == static_cast<unsigned>(CalcLSDataSize(op)))) {
      LoadStoreMacro(rt, MemOperand(base, index, vixl::LSL, scale), op);
      return;
    }

    vixl::UseScratchRegisterScope temps(this);
    ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(!scratch64.Is(rt));
    MOZ_ASSERT(!scratch64.Is(base));
    MOZ_ASSERT(!scratch64.Is(index));

    // scratch = base + (index << scale), then address as [scratch, #offset].
    Add(scratch64, base, Operand(index, vixl::LSL, scale));
    LoadStoreMacro(rt, MemOperand(scratch64, addr.offset), op);
  }
  // Capital-P Push: like push() below, but also records the stack-depth
  // change in framePushed_.
  void Push(ARMRegister reg) {
    push(reg);
    adjustFrame(reg.size() / 8);
  }
  void Push(Register reg) {
    vixl::MacroAssembler::Push(ARMRegister(reg, 64));
    adjustFrame(8);
  }
  void Push(Imm32 imm) {
    push(imm);
    adjustFrame(8);
  }
  // Only the low 64 bits of the FP register are pushed.
  void Push(FloatRegister f) {
    push(ARMFPRegister(f, 64));
    adjustFrame(8);
  }
  void Push(ImmPtr imm) {
    push(imm);
    adjustFrame(sizeof(void*));
  }
  // Lowercase push: emits the store but does not track framePushed_.
  void push(FloatRegister f) {
    vixl::MacroAssembler::Push(ARMFPRegister(f, 64));
  }
  void push(ARMFPRegister f) { vixl::MacroAssembler::Push(f); }
  // Immediate pushes: zero is pushed directly from xzr; any other value is
  // first materialized into a scratch register.
  void push(Imm32 imm) {
    if (imm.value == 0) {
      vixl::MacroAssembler::Push(vixl::xzr);
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      move32(imm, scratch64.asUnsized());
      vixl::MacroAssembler::Push(scratch64);
    }
  }
  void push(ImmWord imm) {
    if (imm.value == 0) {
      vixl::MacroAssembler::Push(vixl::xzr);
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      Mov(scratch64, imm.value);
      vixl::MacroAssembler::Push(scratch64);
    }
  }
  void push(ImmPtr imm) {
    if (imm.value == nullptr) {
      vixl::MacroAssembler::Push(vixl::xzr);
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      movePtr(imm, scratch64.asUnsized());
      vixl::MacroAssembler::Push(scratch64);
    }
  }
  // GC pointers go through movePtr(ImmGCPtr, ...), which records a data
  // relocation for the GC.
  void push(ImmGCPtr imm) {
    if (imm.value == nullptr) {
      vixl::MacroAssembler::Push(vixl::xzr);
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      movePtr(imm, scratch64.asUnsized());
      vixl::MacroAssembler::Push(scratch64);
    }
  }
  void push(ARMRegister reg) { vixl::MacroAssembler::Push(reg); }
  // Push the pointer-sized value stored at |a|.
  void push(Address a) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(a.base != scratch64.asUnsized());
    loadPtr(a, scratch64.asUnsized());
    vixl::MacroAssembler::Push(scratch64);
  }
177
178 // Push registers.
push(Register reg)179 void push(Register reg) { vixl::MacroAssembler::Push(ARMRegister(reg, 64)); }
push(RegisterOrSP reg)180 void push(RegisterOrSP reg) {
181 if (IsHiddenSP(reg)) {
182 vixl::MacroAssembler::Push(sp);
183 }
184 vixl::MacroAssembler::Push(toARMRegister(reg, 64));
185 }
  // Push several registers with a single VIXL Push call.
  void push(Register r0, Register r1) {
    vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64));
  }
  void push(Register r0, Register r1, Register r2) {
    vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64),
                               ARMRegister(r2, 64));
  }
  void push(Register r0, Register r1, Register r2, Register r3) {
    vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64),
                               ARMRegister(r2, 64), ARMRegister(r3, 64));
  }
  void push(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2,
            ARMFPRegister r3) {
    vixl::MacroAssembler::Push(r0, r1, r2, r3);
  }
201
  // Pop registers. Like lowercase push, these do not adjust framePushed_;
  // use Pop() for frame tracking.
  void pop(Register reg) { vixl::MacroAssembler::Pop(ARMRegister(reg, 64)); }
  void pop(Register r0, Register r1) {
    vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64));
  }
  void pop(Register r0, Register r1, Register r2) {
    vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64),
                              ARMRegister(r2, 64));
  }
  void pop(Register r0, Register r1, Register r2, Register r3) {
    vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64),
                              ARMRegister(r2, 64), ARMRegister(r3, 64));
  }
  void pop(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2,
           ARMFPRegister r3) {
    vixl::MacroAssembler::Pop(r0, r1, r2, r3);
  }

  // A boxed Value occupies a single 64-bit register on arm64.
  void pop(const ValueOperand& v) { pop(v.valueReg()); }
  // Only the low 64 bits of the FP register are restored.
  void pop(const FloatRegister& f) {
    vixl::MacroAssembler::Pop(ARMFPRegister(f, 64));
  }
224
  // Record that |args| bytes were popped by some other mechanism (no code
  // is emitted; only framePushed_ is adjusted).
  void implicitPop(uint32_t args) {
    MOZ_ASSERT(args % sizeof(intptr_t) == 0);
    adjustFrame(0 - args);
  }
  // Pop with frame-depth tracking.
  void Pop(ARMRegister r) {
    vixl::MacroAssembler::Pop(r);
    adjustFrame(0 - r.size() / 8);
  }
  // FIXME: This is the same on every arch.
  // FIXME: If we can share framePushed_, we can share this.
  // FIXME: Or just make it at the highest level.
  // Push a patchable immediate word, tracking framePushed_. Returns the
  // offset of the patchable load so the value can be patched later.
  CodeOffset PushWithPatch(ImmWord word) {
    framePushed_ += sizeof(word.value);
    return pushWithPatch(word);
  }
  CodeOffset PushWithPatch(ImmPtr ptr) {
    return PushWithPatch(ImmWord(uintptr_t(ptr.value)));
  }
243
  // Bytes currently pushed relative to the frame base, as tracked by the
  // capital-letter Push/Pop helpers.
  uint32_t framePushed() const { return framePushed_; }
  void adjustFrame(int32_t diff) { setFramePushed(framePushed_ + diff); }

  void setFramePushed(uint32_t framePushed) { framePushed_ = framePushed; }

  // Release a dynamic (register-valued) number of stack bytes.
  void freeStack(Register amount) {
    vixl::MacroAssembler::Drop(Operand(ARMRegister(amount, 64)));
  }
252
  // Update sp with the value of the current active stack pointer, if necessary.
  void syncStackPtr() {
    if (!GetStackPointer64().Is(vixl::sp)) {
      Mov(vixl::sp, GetStackPointer64());
    }
  }
  // Initialize the pseudo stack pointer (PSP) from the real sp. No-op when
  // sp itself is the active stack pointer.
  void initPseudoStackPtr() {
    if (!GetStackPointer64().Is(vixl::sp)) {
      Mov(GetStackPointer64(), vixl::sp);
    }
  }
  // In debug builds only, cause a trap if PSP is active and PSP != SP
  void assertStackPtrsSynced(uint32_t id) {
#ifdef DEBUG
    // The add and sub instructions below will only take a 12-bit immediate.
    MOZ_ASSERT(id <= 0xFFF);
    if (!GetStackPointer64().Is(vixl::sp)) {
      Label ok;
      // Add a marker, so we can figure out who requested the check when
      // inspecting the generated code. The add/sub pair cancels out, so it
      // leaves the stack pointer unchanged. Note, a more concise way to encode
      // the marker would be to use it as an immediate for the `brk`
      // instruction as generated by `Unreachable()`, and removing the add/sub.
      Add(GetStackPointer64(), GetStackPointer64(), Operand(id));
      Sub(GetStackPointer64(), GetStackPointer64(), Operand(id));
      Cmp(vixl::sp, GetStackPointer64());
      B(Equal, &ok);
      Unreachable();
      bind(&ok);
    }
#endif
  }
  // In debug builds only, add a marker that doesn't change the machine's
  // state. Note these markers are x16-based, as opposed to the x28-based
  // ones made by `assertStackPtrsSynced`.
  void addMarker(uint32_t id) {
#ifdef DEBUG
    // Only 12 bits of immediate are allowed.
    MOZ_ASSERT(id <= 0xFFF);
    // The add/sub pair cancels out, leaving |id| visible in the generated
    // code without modifying x16.
    ARMRegister x16 = ARMRegister(r16, 64);
    Add(x16, x16, Operand(id));
    Sub(x16, x16, Operand(id));
#endif
  }
296
  // Store a boxed Value; on arm64 a Value is a single 64-bit word.
  void storeValue(ValueOperand val, const Address& dest) {
    storePtr(val.valueReg(), dest);
  }

  // Box |reg| with |type| into a scratch register, then store it.
  template <typename T>
  void storeValue(JSValueType type, Register reg, const T& dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != reg);
    tagValue(type, reg, ValueOperand(scratch));
    storeValue(ValueOperand(scratch), dest);
  }
  // Materialize the constant |val| into a scratch register, then store it.
  template <typename T>
  void storeValue(const Value& val, const T& dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    moveValue(val, ValueOperand(scratch));
    storeValue(ValueOperand(scratch), dest);
  }
  void storeValue(ValueOperand val, BaseIndex dest) {
    storePtr(val.valueReg(), dest);
  }
  // Copy a Value from one address to another through |temp|.
  void storeValue(const Address& src, const Address& dest, Register temp) {
    loadPtr(src, temp);
    storePtr(temp, dest);
  }
323
  // Load a 64-bit boxed Value into a register.
  void loadValue(Address src, Register val) {
    Ldr(ARMRegister(val, 64), MemOperand(src));
  }
  void loadValue(Address src, ValueOperand val) {
    Ldr(ARMRegister(val.valueReg(), 64), MemOperand(src));
  }
  void loadValue(const BaseIndex& src, ValueOperand val) {
    doBaseIndex(ARMRegister(val.valueReg(), 64), src, vixl::LDR_x);
  }
  // No special handling needed here: delegates to the ordinary load.
  void loadUnalignedValue(const Address& src, ValueOperand dest) {
    loadValue(src, dest);
  }
  // Box |payload| by OR-ing in the shifted type tag; relies on the
  // payload's tag bits being clear, since Orr merges rather than replaces.
  void tagValue(JSValueType type, Register payload, ValueOperand dest) {
    // This could be cleverer, but the first attempt had bugs.
    Orr(ARMRegister(dest.valueReg(), 64), ARMRegister(payload, 64),
        Operand(ImmShiftedTag(type).value));
  }
  void pushValue(ValueOperand val) {
    vixl::MacroAssembler::Push(ARMRegister(val.valueReg(), 64));
  }
  void popValue(ValueOperand val) {
    vixl::MacroAssembler::Pop(ARMRegister(val.valueReg(), 64));
    // SP may be < PSP now (that's OK).
    // eg testcase: tests/backup-point-bug1315634.js
  }
  // Push a constant Value. GC things go through a patchable move plus a
  // data relocation so the GC can trace and update the pointer.
  void pushValue(const Value& val) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    if (val.isGCThing()) {
      BufferOffset load =
          movePatchablePtr(ImmPtr(val.bitsAsPunboxPointer()), scratch);
      writeDataRelocation(val, load);
      push(scratch);
    } else {
      moveValue(val, scratch);
      push(scratch);
    }
  }
  // Box |reg| with |type| into a scratch register and push it.
  void pushValue(JSValueType type, Register reg) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != reg);
    tagValue(type, reg, ValueOperand(scratch));
    push(scratch);
  }
  // Load a Value from memory and push it.
  void pushValue(const Address& addr) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != addr.base);
    loadValue(addr, scratch);
    push(scratch);
  }
  // Store only the payload of |value| as |nbytes| bytes. 8-byte stores
  // unbox first; narrower stores write the low bits of the boxed word
  // directly (the tag lives in the high bits, so they are payload-only).
  template <typename T>
  void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes,
                           JSValueType type) {
    switch (nbytes) {
      case 8: {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        // Objects may also be null here, so use the OrNull variant.
        if (type == JSVAL_TYPE_OBJECT) {
          unboxObjectOrNull(value, scratch);
        } else {
          unboxNonDouble(value, scratch, type);
        }
        storePtr(scratch, address);
        return;
      }
      case 4:
        store32(value.valueReg(), address);
        return;
      case 1:
        store8(value.valueReg(), address);
        return;
      default:
        MOZ_CRASH("Bad payload width");
    }
  }
  // Materialize the constant |val| into |dest|. GC things use a patchable
  // move plus a data relocation so the GC can update the pointer.
  void moveValue(const Value& val, Register dest) {
    if (val.isGCThing()) {
      BufferOffset load =
          movePatchablePtr(ImmPtr(val.bitsAsPunboxPointer()), dest);
      writeDataRelocation(val, load);
    } else {
      movePtr(ImmWord(val.asRawBits()), dest);
    }
  }
  void moveValue(const Value& src, const ValueOperand& dest) {
    moveValue(src, dest.valueReg());
  }
413
  // Push a patchable 64-bit immediate; returns the offset of the
  // patchable pool load.
  CodeOffset pushWithPatch(ImmWord imm) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    CodeOffset label = movWithPatch(imm, scratch);
    push(scratch);
    return label;
  }

  // Load a 64-bit immediate from the constant pool so the value can be
  // patched later; returns the offset of the pool load instruction.
  CodeOffset movWithPatch(ImmWord imm, Register dest) {
    BufferOffset off = immPool64(ARMRegister(dest, 64), imm.value);
    return CodeOffset(off.getOffset());
  }
  CodeOffset movWithPatch(ImmPtr imm, Register dest) {
    BufferOffset off = immPool64(ARMRegister(dest, 64), uint64_t(imm.value));
    return CodeOffset(off.getOffset());
  }
430
431 void boxValue(JSValueType type, Register src, Register dest);
432
  // Extract the tag bits [63:JSVAL_TAG_SHIFT] of a boxed value into |dest|
  // as a sign-extended field (sbfx).
  void splitSignExtTag(Register src, Register dest) {
    sbfx(ARMRegister(dest, 64), ARMRegister(src, 64), JSVAL_TAG_SHIFT,
         (64 - JSVAL_TAG_SHIFT));
  }
  // Extract the tag of a Value into |scratch| and return that register.
  [[nodiscard]] Register extractTag(const Address& address, Register scratch) {
    loadPtr(address, scratch);
    splitSignExtTag(scratch, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractTag(const ValueOperand& value,
                                    Register scratch) {
    splitSignExtTag(value.valueReg(), scratch);
    return scratch;
  }
  // extract*: unbox the payload of |value| into |scratch| and return it.
  [[nodiscard]] Register extractObject(const Address& address,
                                       Register scratch) {
    loadPtr(address, scratch);
    unboxObject(scratch, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractObject(const ValueOperand& value,
                                       Register scratch) {
    unboxObject(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractSymbol(const ValueOperand& value,
                                       Register scratch) {
    unboxSymbol(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractInt32(const ValueOperand& value,
                                      Register scratch) {
    unboxInt32(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractBoolean(const ValueOperand& value,
                                        Register scratch) {
    unboxBoolean(value, scratch);
    return scratch;
  }
473
474 inline void ensureDouble(const ValueOperand& source, FloatRegister dest,
475 Label* failure);
476
  // Materialize a condition flag as 0/1 in |dest| (Cset).
  void emitSet(Condition cond, Register dest) {
    Cset(ARMRegister(dest, 64), cond);
  }

  // test*Set: emit the type test for |value|, then set |dest| to the 0/1
  // result of the resulting condition.
  void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
    cond = testNull(cond, value);
    emitSet(cond, dest);
  }
  void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
    cond = testObject(cond, value);
    emitSet(cond, dest);
  }
  void testUndefinedSet(Condition cond, const ValueOperand& value,
                        Register dest) {
    cond = testUndefined(cond, value);
    emitSet(cond, dest);
  }
494
  // Booleans live in the low byte; zero-extend it to get 0 or 1.
  void convertBoolToInt32(Register source, Register dest) {
    Uxtb(ARMRegister(dest, 64), ARMRegister(source, 64));
  }
498
  // Int <-> float conversions. Scvtf/Ucvtf use the rounding mode currently
  // set in FPCR. The memory-operand variants load through a scratch
  // register first.
  void convertInt32ToDouble(Register src, FloatRegister dest) {
    Scvtf(ARMFPRegister(dest, 64),
          ARMRegister(src, 32));  // Uses FPCR rounding mode.
  }
  void convertInt32ToDouble(const Address& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    load32(src, scratch);
    convertInt32ToDouble(scratch, dest);
  }
  void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    MOZ_ASSERT(scratch != src.index);
    load32(src, scratch);
    convertInt32ToDouble(scratch, dest);
  }

  void convertInt32ToFloat32(Register src, FloatRegister dest) {
    Scvtf(ARMFPRegister(dest, 32),
          ARMRegister(src, 32));  // Uses FPCR rounding mode.
  }
  void convertInt32ToFloat32(const Address& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    load32(src, scratch);
    convertInt32ToFloat32(scratch, dest);
  }

  // Unsigned variants use Ucvtf instead of Scvtf.
  void convertUInt32ToDouble(Register src, FloatRegister dest) {
    Ucvtf(ARMFPRegister(dest, 64),
          ARMRegister(src, 32));  // Uses FPCR rounding mode.
  }
  void convertUInt32ToDouble(const Address& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    load32(src, scratch);
    convertUInt32ToDouble(scratch, dest);
  }

  void convertUInt32ToFloat32(Register src, FloatRegister dest) {
    Ucvtf(ARMFPRegister(dest, 32),
          ARMRegister(src, 32));  // Uses FPCR rounding mode.
  }
  void convertUInt32ToFloat32(const Address& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    load32(src, scratch);
    convertUInt32ToFloat32(scratch, dest);
  }

  void convertFloat32ToDouble(FloatRegister src, FloatRegister dest) {
    Fcvt(ARMFPRegister(dest, 64), ARMFPRegister(src, 32));
  }
  void convertDoubleToFloat32(FloatRegister src, FloatRegister dest) {
    Fcvt(ARMFPRegister(dest, 32), ARMFPRegister(src, 64));
  }
561
562 using vixl::MacroAssembler::B;
563
  // Convert a double to int32 in |dest|, branching to |fail| if the
  // conversion is lossy. When |negativeZeroCheck| is set, -0.0 also fails;
  // otherwise -0.0 converts to 0.
  void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
                            bool negativeZeroCheck = true) {
    ARMFPRegister fsrc64(src, 64);
    ARMRegister dest32(dest, 32);

    // ARMv8.3 chips support the FJCVTZS instruction, which handles
    // exactly this logic.
    if (CPUHas(vixl::CPUFeatures::kFP, vixl::CPUFeatures::kJSCVT)) {
      // Convert double to integer, rounding toward zero.
      // The Z-flag is set iff the conversion is exact. -0 unsets the Z-flag.
      Fjcvtzs(dest32, fsrc64);

      if (negativeZeroCheck) {
        B(fail, Assembler::NonZero);
      } else {
        Label done;
        B(&done, Assembler::Zero);  // If conversion was exact, go to end.

        // The conversion was inexact, but the caller intends to allow -0.
        vixl::UseScratchRegisterScope temps(this);
        const ARMFPRegister scratch64 = temps.AcquireD();
        MOZ_ASSERT(!scratch64.Is(fsrc64));

        // Compare fsrc64 to 0.
        // If fsrc64 == 0 and FJCVTZS conversion was inexact, then fsrc64 is -0.
        Fmov(scratch64, xzr);
        Fcmp(scratch64, fsrc64);
        B(fail, Assembler::NotEqual);  // Pass through -0; fail otherwise.

        bind(&done);
      }
    } else {
      // Older processors use a significantly slower path.
      ARMRegister dest64(dest, 64);

      vixl::UseScratchRegisterScope temps(this);
      const ARMFPRegister scratch64 = temps.AcquireD();
      MOZ_ASSERT(!scratch64.Is(fsrc64));

      // Round-trip the conversion and compare against the original to
      // detect any loss of precision.
      Fcvtzs(dest32, fsrc64);    // Convert, rounding toward zero.
      Scvtf(scratch64, dest32);  // Convert back, using FPCR rounding mode.
      Fcmp(scratch64, fsrc64);
      B(fail, Assembler::NotEqual);

      if (negativeZeroCheck) {
        // A zero result may have come from -0.0: inspect the input's raw
        // bits, which are non-zero for -0.0.
        Label nonzero;
        Cbnz(dest32, &nonzero);
        Fmov(dest64, fsrc64);
        Cbnz(dest64, fail);
        bind(&nonzero);
      }
    }
  }
  // Convert a float32 to int32 in |dest|, branching to |fail| if the
  // conversion is lossy (and on -0.0 when |negativeZeroCheck| is set).
  void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
                             bool negativeZeroCheck = true) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMFPRegister scratch32 = temps.AcquireS();

    ARMFPRegister fsrc(src, 32);
    ARMRegister dest32(dest, 32);
    ARMRegister dest64(dest, 64);

    MOZ_ASSERT(!scratch32.Is(fsrc));

    // Round-trip the conversion (via the low 32 bits) and compare against
    // the original to detect any loss of precision.
    Fcvtzs(dest64, fsrc);      // Convert, rounding toward zero.
    Scvtf(scratch32, dest32);  // Convert back, using FPCR rounding mode.
    Fcmp(scratch32, fsrc);
    B(fail, Assembler::NotEqual);

    if (negativeZeroCheck) {
      // A zero result may have come from -0.0: inspect the input's raw
      // bits, which are non-zero for -0.0.
      Label nonzero;
      Cbnz(dest32, &nonzero);
      Fmov(dest32, fsrc);
      Cbnz(dest32, fail);
      bind(&nonzero);
    }
    // Clear the upper 32 bits of the destination.
    And(dest64, dest64, Operand(0xffffffff));
  }
642
  // Convert a double to a 64-bit (pointer-sized) integer in |dest|,
  // branching to |fail| if the conversion is lossy (and on -0.0 when
  // |negativeZeroCheck| is set).
  void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
                          bool negativeZeroCheck = true) {
    ARMFPRegister fsrc64(src, 64);
    ARMRegister dest64(dest, 64);

    vixl::UseScratchRegisterScope temps(this);
    const ARMFPRegister scratch64 = temps.AcquireD();
    MOZ_ASSERT(!scratch64.Is(fsrc64));

    // Note: we can't use the FJCVTZS instruction here because that only works
    // for 32-bit values.

    // Round-trip the conversion and compare against the original to detect
    // any loss of precision.
    Fcvtzs(dest64, fsrc64);    // Convert, rounding toward zero.
    Scvtf(scratch64, dest64);  // Convert back, using FPCR rounding mode.
    Fcmp(scratch64, fsrc64);
    B(fail, Assembler::NotEqual);

    if (negativeZeroCheck) {
      // A zero result may have come from -0.0: inspect the input's raw
      // bits, which are non-zero for -0.0.
      Label nonzero;
      Cbnz(dest64, &nonzero);
      Fmov(dest64, fsrc64);
      Cbnz(dest64, fail);
      bind(&nonzero);
    }
  }
668
  // Floor a double to int32 in |output|; bail on NaN, -0.0, or a result
  // that does not fit in int32.
  void floor(FloatRegister input, Register output, Label* bail) {
    Label handleZero;
    // Label handleNeg;
    Label fin;
    ARMFPRegister iDbl(input, 64);
    ARMRegister o64(output, 64);
    ARMRegister o32(output, 32);
    Fcmp(iDbl, 0.0);
    B(Assembler::Equal, &handleZero);
    // B(Assembler::Signed, &handleNeg);
    // NaN is always a bail condition, just bail directly.
    B(Assembler::Overflow, bail);
    Fcvtms(o64, iDbl);  // Convert, rounding toward minus infinity.
    // Bail if the result does not fit in int32 (sign-extended low half
    // differs from the full 64-bit result).
    Cmp(o64, Operand(o64, vixl::SXTW));
    B(NotEqual, bail);
    Mov(o32, o32);  // Clear the upper 32 bits.
    B(&fin);

    bind(&handleZero);
    // Move the raw bits of the double into the output reg; if they are
    // non-zero, then the original value was -0.0.
    Fmov(o64, iDbl);
    Cbnz(o64, bail);
    bind(&fin);
  }
694
  // Floor a float32 to int32 in |output|; bail on NaN, -0.0, or a result
  // that does not fit in int32.
  void floorf(FloatRegister input, Register output, Label* bail) {
    Label handleZero;
    // Label handleNeg;
    Label fin;
    ARMFPRegister iFlt(input, 32);
    ARMRegister o64(output, 64);
    ARMRegister o32(output, 32);
    Fcmp(iFlt, 0.0);
    B(Assembler::Equal, &handleZero);
    // B(Assembler::Signed, &handleNeg);
    // NaN is always a bail condition, just bail directly.
    B(Assembler::Overflow, bail);
    Fcvtms(o64, iFlt);  // Convert, rounding toward minus infinity.
    // Bail if the result does not fit in int32.
    Cmp(o64, Operand(o64, vixl::SXTW));
    B(NotEqual, bail);
    Mov(o32, o32);  // Clear the upper 32 bits.
    B(&fin);

    bind(&handleZero);
    // Move the raw bits of the float into the output reg; if they are
    // non-zero, then the original value was -0.0.
    Fmov(o32, iFlt);
    Cbnz(o32, bail);
    bind(&fin);
  }
720
  // Ceil a double to int32 in |output|; bail on NaN, a -0.0 result, or a
  // result that does not fit in int32.
  void ceil(FloatRegister input, Register output, Label* bail) {
    Label handleZero;
    Label fin;
    ARMFPRegister iDbl(input, 64);
    ARMRegister o64(output, 64);
    ARMRegister o32(output, 32);
    Fcmp(iDbl, 0.0);
    // NaN is always a bail condition, just bail directly.
    B(Assembler::Overflow, bail);
    Fcvtps(o64, iDbl);  // Convert, rounding toward plus infinity.
    // Bail if the result does not fit in int32.
    Cmp(o64, Operand(o64, vixl::SXTW));
    B(NotEqual, bail);
    Cbz(o64, &handleZero);
    Mov(o32, o32);  // Clear the upper 32 bits.
    B(&fin);

    bind(&handleZero);
    // A zero result came from an input in (-1, 0]; bail if the raw input
    // bits are non-zero (i.e. the result would be -0.0).
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Fmov(scratch, iDbl);
    Cbnz(scratch, bail);
    bind(&fin);
  }
743
  // Ceil a float32 to int32 in |output|; bail on NaN, a -0.0 result, or a
  // result that does not fit in int32.
  void ceilf(FloatRegister input, Register output, Label* bail) {
    Label handleZero;
    Label fin;
    ARMFPRegister iFlt(input, 32);
    ARMRegister o64(output, 64);
    ARMRegister o32(output, 32);
    Fcmp(iFlt, 0.0);

    // NaN is always a bail condition, just bail directly.
    B(Assembler::Overflow, bail);
    Fcvtps(o64, iFlt);  // Convert, rounding toward plus infinity.
    // Bail if the result does not fit in int32.
    Cmp(o64, Operand(o64, vixl::SXTW));
    B(NotEqual, bail);
    Cbz(o64, &handleZero);
    Mov(o32, o32);  // Clear the upper 32 bits.
    B(&fin);

    bind(&handleZero);
    // Move the raw bits of the float into the output reg; if they are
    // non-zero, then the original value was -0.0.
    Fmov(o32, iFlt);
    Cbnz(o32, bail);
    bind(&fin);
  }
768
  void jump(Label* label) { B(label); }
  void jump(JitCode* code) { branch(code); }
  // Jump to an absolute address; the branch is patched by executableCopy().
  void jump(ImmPtr ptr) {
    // It is unclear why this sync is necessary:
    // * PSP and SP have been observed to be different in testcase
    //   tests/asm.js/testBug1046688.js.
    // * Removing the sync causes no failures in all of jit-tests.
    //
    // Also see branch(JitCode*) below. This version of jump() is called only
    // from jump(TrampolinePtr) which is called on various very slow paths,
    // probably only in JS.
    syncStackPtr();
    BufferOffset loc =
        b(-1,
          LabelDoc());  // The jump target will be patched by executableCopy().
    addPendingJump(loc, ptr, RelocationKind::HARDCODED);
  }
  void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
  void jump(Register reg) { Br(ARMRegister(reg, 64)); }
  // Indirect jump through a pointer in memory, loaded into ip0.
  void jump(const Address& addr) {
    vixl::UseScratchRegisterScope temps(this);
    MOZ_ASSERT(temps.IsAvailable(ScratchReg64));  // ip0
    temps.Exclude(ScratchReg64);
    MOZ_ASSERT(addr.base != ScratchReg64.asUnsized());
    loadPtr(addr, ScratchReg64.asUnsized());
    br(ScratchReg64);
  }
796
  // Align the assembler buffer to |alignment| bytes.
  void align(int alignment) { armbuffer_.align(alignment); }

  // Align, filling the padding with HLT instructions so that execution
  // falling into the gap traps.
  void haltingAlign(int alignment) {
    armbuffer_.align(alignment, vixl::HLT | ImmException(0xBAAD));
  }
  void nopAlign(int alignment) { armbuffer_.align(alignment); }
803
  void movePtr(Register src, Register dest) {
    Mov(ARMRegister(dest, 64), ARMRegister(src, 64));
  }
  void movePtr(ImmWord imm, Register dest) {
    Mov(ARMRegister(dest, 64), int64_t(imm.value));
  }
  void movePtr(ImmPtr imm, Register dest) {
    Mov(ARMRegister(dest, 64), int64_t(imm.value));
  }
  // Wasm symbolic addresses are resolved later: emit a patchable move with
  // a placeholder value and record the access for patching.
  void movePtr(wasm::SymbolicAddress imm, Register dest) {
    BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
    append(wasm::SymbolicAccess(CodeOffset(off.getOffset()), imm));
  }
  // GC pointers need a data relocation so the GC can update them.
  void movePtr(ImmGCPtr imm, Register dest) {
    BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
    writeDataRelocation(imm, load);
  }

  void mov(ImmWord imm, Register dest) { movePtr(imm, dest); }
  void mov(ImmPtr imm, Register dest) { movePtr(imm, dest); }
  void mov(wasm::SymbolicAddress imm, Register dest) { movePtr(imm, dest); }
  void mov(Register src, Register dest) { movePtr(src, dest); }
  void mov(CodeLabel* label, Register dest);

  // 32-bit moves; writing a W register clears the upper 32 bits.
  void move32(Imm32 imm, Register dest) {
    Mov(ARMRegister(dest, 32), (int64_t)imm.value);
  }
  void move32(Register src, Register dest) {
    Mov(ARMRegister(dest, 32), ARMRegister(src, 32));
  }
834
  // Move a pointer using a literal pool, so that the pointer
  // may be easily patched or traced.
  // Returns the BufferOffset of the load instruction emitted.
  // (Defined out of line.)
  BufferOffset movePatchablePtr(ImmWord ptr, Register dest);
  BufferOffset movePatchablePtr(ImmPtr ptr, Register dest);
840
  // Load a pointer stored at a wasm symbolic address: materialize the
  // (patchable) address into a scratch register, then dereference it.
  void loadPtr(wasm::SymbolicAddress address, Register dest) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    movePtr(address, scratch.asUnsized());
    Ldr(ARMRegister(dest, 64), MemOperand(scratch));
  }
  // Load a pointer from an absolute address via a scratch register.
  void loadPtr(AbsoluteAddress address, Register dest) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    movePtr(ImmWord((uintptr_t)address.addr), scratch.asUnsized());
    Ldr(ARMRegister(dest, 64), MemOperand(scratch));
  }
  // Load a 64-bit pointer from base + constant offset.
  void loadPtr(const Address& address, Register dest) {
    Ldr(ARMRegister(dest, 64), MemOperand(address));
  }
loadPtr(const BaseIndex & src,Register dest)856 void loadPtr(const BaseIndex& src, Register dest) {
857 ARMRegister base = toARMRegister(src.base, 64);
858 uint32_t scale = Imm32::ShiftOf(src.scale).value;
859 ARMRegister dest64(dest, 64);
860 ARMRegister index64(src.index, 64);
861
862 if (src.offset) {
863 vixl::UseScratchRegisterScope temps(this);
864 const ARMRegister scratch = temps.AcquireX();
865 MOZ_ASSERT(!scratch.Is(base));
866 MOZ_ASSERT(!scratch.Is(dest64));
867 MOZ_ASSERT(!scratch.Is(index64));
868
869 Add(scratch, base, Operand(int64_t(src.offset)));
870 Ldr(dest64, MemOperand(scratch, index64, vixl::LSL, scale));
871 return;
872 }
873
874 Ldr(dest64, MemOperand(base, index64, vixl::LSL, scale));
875 }
876 void loadPrivate(const Address& src, Register dest);
877
  // Store the low byte of a register.
  void store8(Register src, const Address& address) {
    Strb(ARMRegister(src, 32), toMemOperand(address));
  }
  // Store an immediate byte by materializing it in a scratch register first.
  void store8(Imm32 imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    move32(imm, scratch32.asUnsized());
    Strb(scratch32, toMemOperand(address));
  }
  void store8(Register src, const BaseIndex& address) {
    doBaseIndex(ARMRegister(src, 32), address, vixl::STRB_w);
  }
  // BaseIndex form: the scratch register must not alias base or index.
  void store8(Imm32 imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    MOZ_ASSERT(scratch32.asUnsized() != address.index);
    Mov(scratch32, Operand(imm.value));
    doBaseIndex(scratch32, address, vixl::STRB_w);
  }
899
  // Store the low 16 bits of a register.
  void store16(Register src, const Address& address) {
    Strh(ARMRegister(src, 32), toMemOperand(address));
  }
  // Store an immediate halfword via a scratch register.
  void store16(Imm32 imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    move32(imm, scratch32.asUnsized());
    Strh(scratch32, toMemOperand(address));
  }
  void store16(Register src, const BaseIndex& address) {
    doBaseIndex(ARMRegister(src, 32), address, vixl::STRH_w);
  }
  void store16(Imm32 imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    MOZ_ASSERT(scratch32.asUnsized() != address.index);
    Mov(scratch32, Operand(imm.value));
    doBaseIndex(scratch32, address, vixl::STRH_w);
  }
  // ARM64 handles unaligned halfword stores in hardware, so the aligned
  // path is reused as-is.
  template <typename S, typename T>
  void store16Unaligned(const S& src, const T& dest) {
    store16(src, dest);
  }
925
  // Store an immediate pointer-sized word: materialize in scratch, then
  // store through the register path.
  void storePtr(ImmWord imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != address.base);
    movePtr(imm, scratch);
    storePtr(scratch, address);
  }
  // ImmPtr variant inlines the move+store rather than delegating.
  void storePtr(ImmPtr imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != address.base);
    Mov(scratch64, uint64_t(imm.value));
    Str(scratch64, toMemOperand(address));
  }
  // GC pointer store: movePtr(ImmGCPtr, ...) records the data relocation.
  void storePtr(ImmGCPtr imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != address.base);
    movePtr(imm, scratch);
    storePtr(scratch, address);
  }
  // Store a 64-bit register to base + constant offset.
  void storePtr(Register src, const Address& address) {
    Str(ARMRegister(src, 64), toMemOperand(address));
  }
950
  // BaseIndex stores: the scratch register must not alias base or index,
  // since doBaseIndex may use all three to form the address.
  void storePtr(ImmWord imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != address.base);
    MOZ_ASSERT(scratch64.asUnsized() != address.index);
    Mov(scratch64, Operand(imm.value));
    doBaseIndex(scratch64, address, vixl::STR_x);
  }
  // GC pointer store; movePtr records the data relocation for tracing.
  void storePtr(ImmGCPtr imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != address.base);
    MOZ_ASSERT(scratch != address.index);
    movePtr(imm, scratch);
    doBaseIndex(ARMRegister(scratch, 64), address, vixl::STR_x);
  }
  void storePtr(Register src, const BaseIndex& address) {
    doBaseIndex(ARMRegister(src, 64), address, vixl::STR_x);
  }
970
  // Store a 64-bit register to an absolute address via a scratch register.
  void storePtr(Register src, AbsoluteAddress address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    Mov(scratch64, uint64_t(address.addr));
    Str(ARMRegister(src, 64), MemOperand(scratch64));
  }

  // Store the low 32 bits of a register to an absolute address.
  void store32(Register src, AbsoluteAddress address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    Mov(scratch64, uint64_t(address.addr));
    Str(ARMRegister(src, 32), MemOperand(scratch64));
  }
  // Store a 32-bit immediate via a scratch register.
  void store32(Imm32 imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    Mov(scratch32, uint64_t(imm.value));
    Str(scratch32, toMemOperand(address));
  }
  void store32(Register r, const Address& address) {
    Str(ARMRegister(r, 32), toMemOperand(address));
  }
  // BaseIndex form: scratch must not alias base or index.
  void store32(Imm32 imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    MOZ_ASSERT(scratch32.asUnsized() != address.index);
    Mov(scratch32, imm.value);
    doBaseIndex(scratch32, address, vixl::STR_w);
  }
  void store32(Register r, const BaseIndex& address) {
    doBaseIndex(ARMRegister(r, 32), address, vixl::STR_w);
  }
1005
  // Like store32(Imm32, Address), but guarantees the second scratch
  // register (ScratchReg2) is left untouched, for callers that have it live.
  void store32_NoSecondScratch(Imm32 imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    temps.Exclude(ARMRegister(ScratchReg2, 32));  // Disallow ScratchReg2.
    const ARMRegister scratch32 = temps.AcquireW();

    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    Mov(scratch32, uint64_t(imm.value));
    Str(scratch32, toMemOperand(address));
  }

  // ARM64 handles unaligned 32-bit stores in hardware.
  template <typename S, typename T>
  void store32Unaligned(const S& src, const T& dest) {
    store32(src, dest);
  }
1020
  // On ARM64 a Register64 is a single 64-bit register, so 64-bit stores
  // are just pointer stores.
  void store64(Register64 src, Address address) { storePtr(src.reg, address); }

  void store64(Register64 src, const BaseIndex& address) {
    storePtr(src.reg, address);
  }

  void store64(Imm64 imm, const BaseIndex& address) {
    storePtr(ImmWord(imm.value), address);
  }

  void store64(Imm64 imm, const Address& address) {
    storePtr(ImmWord(imm.value), address);
  }

  // ARM64 handles unaligned 64-bit stores in hardware.
  template <typename S, typename T>
  void store64Unaligned(const S& src, const T& dest) {
    store64(src, dest);
  }
1039
  // StackPointer manipulation.
  // (All defined out of line; they operate on the masm's current pseudo
  // stack pointer — see GetStackPointer64()/syncStackPtr() usage elsewhere
  // in this class.)
  inline void addToStackPtr(Register src);
  inline void addToStackPtr(Imm32 imm);
  inline void addToStackPtr(const Address& src);
  inline void addStackPtrTo(Register dest);

  inline void subFromStackPtr(Register src);
  inline void subFromStackPtr(Imm32 imm);
  inline void subStackPtrFrom(Register dest);

  inline void andToStackPtr(Imm32 t);

  inline void moveToStackPtr(Register src);
  inline void moveStackPtrTo(Register dest);

  inline void loadStackPtr(const Address& src);
  inline void storeStackPtr(const Address& dest);

  // StackPointer testing functions.
  inline void branchTestStackPtr(Condition cond, Imm32 rhs, Label* label);
  inline void branchStackPtr(Condition cond, Register rhs, Label* label);
  inline void branchStackPtrRhs(Condition cond, Address lhs, Label* label);
  inline void branchStackPtrRhs(Condition cond, AbsoluteAddress lhs,
                                Label* label);
1064
  // Flag-setting AND (TST) of two 64-bit registers; result is discarded.
  void testPtr(Register lhs, Register rhs) {
    Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64)));
  }
  void test32(Register lhs, Register rhs) {
    Tst(ARMRegister(lhs, 32), Operand(ARMRegister(rhs, 32)));
  }
  // TST of a loaded 32-bit value against an immediate mask.
  void test32(const Address& addr, Imm32 imm) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != addr.base);
    load32(addr, scratch32.asUnsized());
    Tst(scratch32, Operand(imm.value));
  }
  void test32(Register lhs, Imm32 rhs) {
    Tst(ARMRegister(lhs, 32), Operand(rhs.value));
  }
  // 32-bit compares: set flags via CMP.
  void cmp32(Register lhs, Imm32 rhs) {
    Cmp(ARMRegister(lhs, 32), Operand(rhs.value));
  }
  void cmp32(Register a, Register b) {
    Cmp(ARMRegister(a, 32), Operand(ARMRegister(b, 32)));
  }
  // Compare a loaded 32-bit value against an immediate.
  void cmp32(const Address& lhs, Imm32 rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
    Ldr(scratch32, toMemOperand(lhs));
    Cmp(scratch32, Operand(rhs.value));
  }
  // Compare a loaded 32-bit value against a register; scratch must not
  // alias either operand.
  void cmp32(const Address& lhs, Register rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
    MOZ_ASSERT(scratch32.asUnsized() != rhs);
    Ldr(scratch32, toMemOperand(lhs));
    Cmp(scratch32, Operand(ARMRegister(rhs, 32)));
  }
  // Operand-lhs forms: evaluate the operand into a scratch first, since
  // CMP requires a register on the left.
  void cmp32(const vixl::Operand& lhs, Imm32 rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    Mov(scratch32, lhs);
    Cmp(scratch32, Operand(rhs.value));
  }
  void cmp32(const vixl::Operand& lhs, Register rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    Mov(scratch32, lhs);
    Cmp(scratch32, Operand(ARMRegister(rhs, 32)));
  }
1114
  // Compare-negative: sets flags for lhs + rhs (CMN).
  void cmn32(Register lhs, Imm32 rhs) {
    Cmn(ARMRegister(lhs, 32), Operand(rhs.value));
  }

  // 64-bit compares against immediates and registers.
  void cmpPtr(Register lhs, Imm32 rhs) {
    Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
  }
  void cmpPtr(Register lhs, ImmWord rhs) {
    Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
  }
  void cmpPtr(Register lhs, ImmPtr rhs) {
    Cmp(ARMRegister(lhs, 64), Operand(uint64_t(rhs.value)));
  }
  void cmpPtr(Register lhs, Register rhs) {
    Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
  }
  // GC pointer compare: materialize the pointer (with relocation, via
  // movePtr) into a scratch register first.
  void cmpPtr(Register lhs, ImmGCPtr rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != lhs);
    movePtr(rhs, scratch);
    cmpPtr(lhs, scratch);
  }
1138
  // Compare a loaded 64-bit value against a register.
  void cmpPtr(const Address& lhs, Register rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
    MOZ_ASSERT(scratch64.asUnsized() != rhs);
    Ldr(scratch64, toMemOperand(lhs));
    Cmp(scratch64, Operand(ARMRegister(rhs, 64)));
  }
  // Compare a loaded 64-bit value against an immediate word.
  void cmpPtr(const Address& lhs, ImmWord rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
    Ldr(scratch64, toMemOperand(lhs));
    Cmp(scratch64, Operand(rhs.value));
  }
  void cmpPtr(const Address& lhs, ImmPtr rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
    Ldr(scratch64, toMemOperand(lhs));
    Cmp(scratch64, Operand(uint64_t(rhs.value)));
  }
  // Load the lhs, then reuse the register/ImmGCPtr compare (which handles
  // the GC-pointer relocation).
  void cmpPtr(const Address& lhs, ImmGCPtr rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != lhs.base);
    loadPtr(lhs, scratch);
    cmpPtr(scratch, rhs);
  }
1168
  // Load a double from base + constant offset.
  void loadDouble(const Address& src, FloatRegister dest) {
    Ldr(ARMFPRegister(dest, 64), MemOperand(src));
  }
loadDouble(const BaseIndex & src,FloatRegister dest)1172 void loadDouble(const BaseIndex& src, FloatRegister dest) {
1173 ARMRegister base = toARMRegister(src.base, 64);
1174 ARMRegister index(src.index, 64);
1175
1176 if (src.offset == 0) {
1177 Ldr(ARMFPRegister(dest, 64),
1178 MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
1179 return;
1180 }
1181
1182 vixl::UseScratchRegisterScope temps(this);
1183 const ARMRegister scratch64 = temps.AcquireX();
1184 MOZ_ASSERT(scratch64.asUnsized() != src.base);
1185 MOZ_ASSERT(scratch64.asUnsized() != src.index);
1186
1187 Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
1188 Ldr(ARMFPRegister(dest, 64), MemOperand(scratch64, src.offset));
1189 }
  // Load a float32 and widen it to a double in the same FP register.
  void loadFloatAsDouble(const Address& addr, FloatRegister dest) {
    Ldr(ARMFPRegister(dest, 32), toMemOperand(addr));
    fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32));
  }
loadFloatAsDouble(const BaseIndex & src,FloatRegister dest)1194 void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest) {
1195 ARMRegister base = toARMRegister(src.base, 64);
1196 ARMRegister index(src.index, 64);
1197 if (src.offset == 0) {
1198 Ldr(ARMFPRegister(dest, 32),
1199 MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
1200 } else {
1201 vixl::UseScratchRegisterScope temps(this);
1202 const ARMRegister scratch64 = temps.AcquireX();
1203 MOZ_ASSERT(scratch64.asUnsized() != src.base);
1204 MOZ_ASSERT(scratch64.asUnsized() != src.index);
1205
1206 Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
1207 Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset));
1208 }
1209 fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32));
1210 }
1211
  // Load a float32 from base + constant offset.
  void loadFloat32(const Address& addr, FloatRegister dest) {
    Ldr(ARMFPRegister(dest, 32), toMemOperand(addr));
  }
loadFloat32(const BaseIndex & src,FloatRegister dest)1215 void loadFloat32(const BaseIndex& src, FloatRegister dest) {
1216 ARMRegister base = toARMRegister(src.base, 64);
1217 ARMRegister index(src.index, 64);
1218 if (src.offset == 0) {
1219 Ldr(ARMFPRegister(dest, 32),
1220 MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
1221 } else {
1222 vixl::UseScratchRegisterScope temps(this);
1223 const ARMRegister scratch64 = temps.AcquireX();
1224 MOZ_ASSERT(scratch64.asUnsized() != src.base);
1225 MOZ_ASSERT(scratch64.asUnsized() != src.index);
1226
1227 Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
1228 Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset));
1229 }
1230 }
1231
  // FP register moves and zeroing.
  void moveDouble(FloatRegister src, FloatRegister dest) {
    fmov(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
  }
  // Zero by moving from the integer zero register.
  void zeroDouble(FloatRegister reg) {
    fmov(ARMFPRegister(reg, 64), vixl::xzr);
  }
  void zeroFloat32(FloatRegister reg) {
    fmov(ARMFPRegister(reg, 32), vixl::wzr);
  }

  void moveFloat32(FloatRegister src, FloatRegister dest) {
    fmov(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
  }
  // Not implemented on this platform.
  void moveFloatAsDouble(Register src, FloatRegister dest) {
    MOZ_CRASH("moveFloatAsDouble");
  }

  // Full 128-bit vector register move.
  void moveSimd128(FloatRegister src, FloatRegister dest) {
    fmov(ARMFPRegister(dest, 128), ARMFPRegister(src, 128));
  }
1252
  // Extract the sign-extended tag of a Value into dest; these overloads all
  // funnel into splitSignExtTag(Register, Register), defined elsewhere.
  void splitSignExtTag(const ValueOperand& operand, Register dest) {
    splitSignExtTag(operand.valueReg(), dest);
  }
  void splitSignExtTag(const Address& operand, Register dest) {
    loadPtr(operand, dest);
    splitSignExtTag(dest, dest);
  }
  void splitSignExtTag(const BaseIndex& operand, Register dest) {
    loadPtr(operand, dest);
    splitSignExtTag(dest, dest);
  }

  // Extracts the tag of a value and places it in tag
  inline void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
  // Not implemented on this platform.
  void cmpTag(const ValueOperand& operand, ImmTag tag) { MOZ_CRASH("cmpTag"); }
1268
load32(const Address & address,Register dest)1269 void load32(const Address& address, Register dest) {
1270 Ldr(ARMRegister(dest, 32), toMemOperand(address));
1271 }
load32(const BaseIndex & src,Register dest)1272 void load32(const BaseIndex& src, Register dest) {
1273 doBaseIndex(ARMRegister(dest, 32), src, vixl::LDR_w);
1274 }
load32(AbsoluteAddress address,Register dest)1275 void load32(AbsoluteAddress address, Register dest) {
1276 vixl::UseScratchRegisterScope temps(this);
1277 const ARMRegister scratch64 = temps.AcquireX();
1278 movePtr(ImmWord((uintptr_t)address.addr), scratch64.asUnsized());
1279 ldr(ARMRegister(dest, 32), MemOperand(scratch64));
1280 }
1281 template <typename S>
load32Unaligned(const S & src,Register dest)1282 void load32Unaligned(const S& src, Register dest) {
1283 load32(src, dest);
1284 }
  // On ARM64 a Register64 is one 64-bit register, so 64-bit loads are just
  // pointer loads.
  void load64(const Address& address, Register64 dest) {
    loadPtr(address, dest.reg);
  }
  void load64(const BaseIndex& address, Register64 dest) {
    loadPtr(address, dest.reg);
  }
  // ARM64 handles unaligned 64-bit loads in hardware.
  template <typename S>
  void load64Unaligned(const S& src, Register64 dest) {
    load64(src, dest);
  }
1295
  // Sub-word loads with sign/zero extension into a 32-bit register.
  void load8SignExtend(const Address& address, Register dest) {
    Ldrsb(ARMRegister(dest, 32), toMemOperand(address));
  }
  void load8SignExtend(const BaseIndex& src, Register dest) {
    doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSB_w);
  }

  void load8ZeroExtend(const Address& address, Register dest) {
    Ldrb(ARMRegister(dest, 32), toMemOperand(address));
  }
  void load8ZeroExtend(const BaseIndex& src, Register dest) {
    doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRB_w);
  }

  void load16SignExtend(const Address& address, Register dest) {
    Ldrsh(ARMRegister(dest, 32), toMemOperand(address));
  }
  void load16SignExtend(const BaseIndex& src, Register dest) {
    doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSH_w);
  }
  // ARM64 handles unaligned halfword loads in hardware.
  template <typename S>
  void load16UnalignedSignExtend(const S& src, Register dest) {
    load16SignExtend(src, dest);
  }

  void load16ZeroExtend(const Address& address, Register dest) {
    Ldrh(ARMRegister(dest, 32), toMemOperand(address));
  }
  void load16ZeroExtend(const BaseIndex& src, Register dest) {
    doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRH_w);
  }
  template <typename S>
  void load16UnalignedZeroExtend(const S& src, Register dest) {
    load16ZeroExtend(src, dest);
  }
1331
  // Flag-setting adds (ADDS): dest += src/imm, updating NZCV.
  void adds32(Register src, Register dest) {
    Adds(ARMRegister(dest, 32), ARMRegister(dest, 32),
         Operand(ARMRegister(src, 32)));
  }
  void adds32(Imm32 imm, Register dest) {
    Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
  }
  // Read-modify-write on memory: load, flag-setting add, store back.
  void adds32(Imm32 imm, const Address& dest) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != dest.base);

    Ldr(scratch32, toMemOperand(dest));
    Adds(scratch32, scratch32, Operand(imm.value));
    Str(scratch32, toMemOperand(dest));
  }
  void adds64(Imm32 imm, Register dest) {
    Adds(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
  }
  void adds64(Register src, Register dest) {
    Adds(ARMRegister(dest, 64), ARMRegister(dest, 64),
         Operand(ARMRegister(src, 64)));
  }
1355
  // Flag-setting subtracts (SUBS): dest -= src/imm, updating NZCV.
  void subs32(Imm32 imm, Register dest) {
    Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
  }
  void subs32(Register src, Register dest) {
    Subs(ARMRegister(dest, 32), ARMRegister(dest, 32),
         Operand(ARMRegister(src, 32)));
  }
  void subs64(Imm32 imm, Register dest) {
    Subs(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
  }
  void subs64(Register src, Register dest) {
    Subs(ARMRegister(dest, 64), ARMRegister(dest, 64),
         Operand(ARMRegister(src, 64)));
  }
1370
  // Pop the return address into lr and return via abiret() (presumably a
  // branch through lr — defined elsewhere).
  void ret() {
    pop(lr);
    abiret();
  }

  // Return and discard n bytes of stack arguments: load the return address
  // from the top of the stack with a post-increment of n, then branch to it.
  void retn(Imm32 n) {
    vixl::UseScratchRegisterScope temps(this);
    MOZ_ASSERT(temps.IsAvailable(ScratchReg64));  // ip0
    temps.Exclude(ScratchReg64);
    // ip0 <- [sp]; sp += n; ret ip0
    Ldr(ScratchReg64,
        MemOperand(GetStackPointer64(), ptrdiff_t(n.value), vixl::PostIndex));
    syncStackPtr();  // SP is always used to transmit the stack between calls.
    Ret(ScratchReg64);
  }
1386
  // Conditional branch to a label.
  void j(Condition cond, Label* dest) { B(dest, cond); }

  void branch(Condition cond, Label* label) { B(label, cond); }
  // Unconditional jump into another JitCode blob; the branch target is left
  // unresolved here and patched by executableCopy().
  void branch(JitCode* target) {
    // It is unclear why this sync is necessary:
    // * PSP and SP have been observed to be different in testcase
    //   tests/async/debugger-reject-after-fulfill.js
    // * Removing the sync causes no failures in all of jit-tests.
    //
    // Also see jump() above. This is used only to implement jump(JitCode*)
    // and only for JS, it appears.
    syncStackPtr();
    BufferOffset loc =
        b(-1,
          LabelDoc());  // The jump target will be patched by executableCopy().
    addPendingJump(loc, ImmPtr(target->raw()), RelocationKind::JITCODE);
  }
1404
  // Emit the flag-setting FP compare. Note |cond| is unused here: only FCMP
  // is emitted, and the caller presumably branches on cond afterwards —
  // confirm against call sites.
  void compareDouble(DoubleCondition cond, FloatRegister lhs,
                     FloatRegister rhs) {
    Fcmp(ARMFPRegister(lhs, 64), ARMFPRegister(rhs, 64));
  }

  void compareFloat(DoubleCondition cond, FloatRegister lhs,
                    FloatRegister rhs) {
    Fcmp(ARMFPRegister(lhs, 32), ARMFPRegister(rhs, 32));
  }

  // SIMD comparisons and shifts, defined out of line.
  void compareSimd128Int(Assembler::Condition cond, ARMFPRegister dest,
                         ARMFPRegister lhs, ARMFPRegister rhs);
  void compareSimd128Float(Assembler::Condition cond, ARMFPRegister dest,
                           ARMFPRegister lhs, ARMFPRegister rhs);
  void rightShiftInt8x16(FloatRegister lhs, Register rhs, FloatRegister dest,
                         bool isUnsigned);
  void rightShiftInt16x8(FloatRegister lhs, Register rhs, FloatRegister dest,
                         bool isUnsigned);
  void rightShiftInt32x4(FloatRegister lhs, Register rhs, FloatRegister dest,
                         bool isUnsigned);
  void rightShiftInt64x2(FloatRegister lhs, Register rhs, FloatRegister dest,
                         bool isUnsigned);
1427
  // Not implemented on this platform.
  void branchNegativeZero(FloatRegister reg, Register scratch, Label* label) {
    MOZ_CRASH("branchNegativeZero");
  }
  // Not implemented on this platform.
  void branchNegativeZeroFloat32(FloatRegister reg, Register scratch,
                                 Label* label) {
    MOZ_CRASH("branchNegativeZeroFloat32");
  }
1435
  // Box a double: bit-copy the FP register into the value register
  // (doubles are stored unboxed in the nunboxed value encoding). The third
  // FloatRegister parameter is unused on this platform.
  void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister) {
    Fmov(ARMRegister(dest.valueReg(), 64), ARMFPRegister(src, 64));
  }
  // Box a non-double payload with the given type tag (see boxValue).
  void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
    boxValue(type, src, dest.valueReg());
  }
1442
  // Note that the |dest| register here may be ScratchReg, so we shouldn't use
  // it.
  // Unbox an int32: the payload occupies the low 32 bits, so a 32-bit move
  // both extracts it and clears the tag bits.
  void unboxInt32(const ValueOperand& src, Register dest) {
    move32(src.valueReg(), dest);
  }
  void unboxInt32(const Address& src, Register dest) { load32(src, dest); }
  void unboxInt32(const BaseIndex& src, Register dest) { load32(src, dest); }

  // Unbox a double from memory: doubles are stored unboxed, so a plain load
  // suffices.
  template <typename T>
  void unboxDouble(const T& src, FloatRegister dest) {
    loadDouble(src, dest);
  }
  // From a value register: bit-copy into the FP register.
  void unboxDouble(const ValueOperand& src, FloatRegister dest) {
    Fmov(ARMFPRegister(dest, 64), ARMRegister(src.valueReg(), 64));
  }
1458
  // Not implemented on this platform.
  void unboxArgObjMagic(const ValueOperand& src, Register dest) {
    MOZ_CRASH("unboxArgObjMagic");
  }
  void unboxArgObjMagic(const Address& src, Register dest) {
    MOZ_CRASH("unboxArgObjMagic");
  }
1465
  // Booleans and magic payloads live in the low 32 bits; a 32-bit move
  // extracts the payload and clears the tag.
  void unboxBoolean(const ValueOperand& src, Register dest) {
    move32(src.valueReg(), dest);
  }
  void unboxBoolean(const Address& src, Register dest) { load32(src, dest); }
  void unboxBoolean(const BaseIndex& src, Register dest) { load32(src, dest); }

  void unboxMagic(const ValueOperand& src, Register dest) {
    move32(src.valueReg(), dest);
  }
  // Forward to the register-based unboxer below.
  void unboxNonDouble(const ValueOperand& src, Register dest,
                      JSValueType type) {
    unboxNonDouble(src.valueReg(), dest, type);
  }
1479
  // Unbox a non-double value loaded from memory.
  template <typename T>
  void unboxNonDouble(T src, Register dest, JSValueType type) {
    MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
    // Int32/boolean payloads are in the low 32 bits; a 32-bit load both
    // fetches the payload and drops the tag.
    if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
      load32(src, dest);
      return;
    }
    loadPtr(src, dest);
    unboxNonDouble(dest, dest, type);
  }

  // Unbox a non-double value held in a register.
  void unboxNonDouble(Register src, Register dest, JSValueType type) {
    MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
    if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
      move32(src, dest);
      return;
    }
    // For pointer-typed payloads, XOR-ing with the type's shifted tag
    // cancels the tag bits, leaving the payload.
    Eor(ARMRegister(dest, 64), ARMRegister(src, 64),
        Operand(JSVAL_TYPE_TO_SHIFTED_TAG(type)));
  }
1500
  // Flip a boolean value in place by toggling payload bit 0.
  void notBoolean(const ValueOperand& val) {
    ARMRegister r(val.valueReg(), 64);
    eor(r, r, Operand(1));
  }
  // Unbox an object pointer (tag removed via unboxNonDouble's XOR).
  void unboxObject(const ValueOperand& src, Register dest) {
    unboxNonDouble(src.valueReg(), dest, JSVAL_TYPE_OBJECT);
  }
  void unboxObject(Register src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
  }
  void unboxObject(const Address& src, Register dest) {
    loadPtr(src, dest);
    unboxNonDouble(dest, dest, JSVAL_TYPE_OBJECT);
  }
  void unboxObject(const BaseIndex& src, Register dest) {
    doBaseIndex(ARMRegister(dest, 64), src, vixl::LDR_x);
    unboxNonDouble(dest, dest, JSVAL_TYPE_OBJECT);
  }
1519
  // Unbox an object-or-null value: unbox as object, then clear the
  // ObjectOrNull bit so null yields a null pointer.
  template <typename T>
  void unboxObjectOrNull(const T& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
    And(ARMRegister(dest, 64), ARMRegister(dest, 64),
        Operand(~JS::detail::ValueObjectOrNullBit));
  }

  // See comment in MacroAssembler-x64.h.
  // Extract a GC-thing pointer by masking off the tag bits; works for any
  // GC-thing type, for use by write barriers.
  void unboxGCThingForGCBarrier(const Address& src, Register dest) {
    loadPtr(src, dest);
    And(ARMRegister(dest, 64), ARMRegister(dest, 64),
        Operand(JS::detail::ValueGCThingPayloadMask));
  }
  void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
    And(ARMRegister(dest, 64), ARMRegister(src.valueReg(), 64),
        Operand(JS::detail::ValueGCThingPayloadMask));
  }
1537
  // Unbox |src| into |dest| according to |type| (definition lives in the
  // corresponding -inl.h file — TODO confirm location).
  inline void unboxValue(const ValueOperand& src, AnyRegister dest,
                         JSValueType type);
1540
  // Typed unbox helpers for string, symbol and BigInt Values; each defers to
  // unboxNonDouble with the matching JSValueType.
  void unboxString(const ValueOperand& operand, Register dest) {
    unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
  }
  void unboxString(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
  }
  void unboxSymbol(const ValueOperand& operand, Register dest) {
    unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
  }
  void unboxSymbol(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
  }
  void unboxBigInt(const ValueOperand& operand, Register dest) {
    unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
  }
  void unboxBigInt(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
  }
1559 // These two functions use the low 32-bits of the full value register.
  void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) {
    // Boolean payloads are 0/1 in the low 32 bits, so an int32 conversion
    // produces the correct double.
    convertInt32ToDouble(operand.valueReg(), dest);
  }
  void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) {
    convertInt32ToDouble(operand.valueReg(), dest);
  }

  // As above, but producing a float32. Only the low 32 bits of the value
  // register are consumed.
  void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
    convertInt32ToFloat32(operand.valueReg(), dest);
  }
  void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
    convertInt32ToFloat32(operand.valueReg(), dest);
  }
1573
  // Materialize a floating-point constant into |dest| via the VIXL Fmov
  // macro.
  void loadConstantDouble(double d, FloatRegister dest) {
    Fmov(ARMFPRegister(dest, 64), d);
  }
  void loadConstantFloat32(float f, FloatRegister dest) {
    Fmov(ARMFPRegister(dest, 32), f);
  }
1580
  // Set condition flags by comparing the sign-extended tag in |tag| against
  // |ref|. Used by the test* predicates below.
  void cmpTag(Register tag, ImmTag ref) {
    // As opposed to other architectures, splitTag is replaced by
    // splitSignExtTag, which extracts the tag with a sign extension. The
    // reason being that cmp32 with a tag value would be too large to fit as
    // a 12-bit immediate value, and would require the VIXL macro assembler
    // to add an extra instruction and require an extra scratch register to
    // load the Tag value.
    //
    // Instead, we compare with the negative value of the sign-extended tag
    // with the CMN instruction. The sign-extended tag is expected to be a
    // negative value. Therefore the negative of the sign-extended tag is
    // expected to be near 0 and fit in 12 bits.
    //
    // Ignoring the sign extension, the logic is the following:
    //
    //   CMP32(Reg, Tag) = Reg - Tag
    //                   = Reg + (-Tag)
    //                   = CMN32(Reg, -Tag)
    //
    // Note: testGCThing, testPrimitive and testNumber which are checking for
    // inequalities should use unsigned comparisons (as done by default) in
    // order to keep the same relation order after the sign extension, i.e.
    // using Above or Below which are based on the carry flag.
    uint32_t hiShift = JSVAL_TAG_SHIFT - 32;
    int32_t seTag = int32_t(ref.value);
    seTag = (seTag << hiShift) >> hiShift;
    MOZ_ASSERT(seTag < 0);
    int32_t negTag = -seTag;
    // Check that negTag is encodable as a 12-bit immediate value.
    MOZ_ASSERT((negTag & ~0xFFF) == 0);
    cmn32(tag, Imm32(negTag));
  }
1612
1613 // Register-based tests.
  // Each predicate compares the sign-extended tag in |tag| (see cmpTag) and
  // returns the condition the caller should branch on. Exact-tag tests pass
  // |cond| through; range tests (double, number, GCThing, primitive) map
  // Equal/NotEqual onto unsigned range conditions.
  Condition testUndefined(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_UNDEFINED));
    return cond;
  }
  Condition testInt32(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_INT32));
    return cond;
  }
  Condition testBoolean(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_BOOLEAN));
    return cond;
  }
  Condition testNull(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_NULL));
    return cond;
  }
  Condition testString(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_STRING));
    return cond;
  }
  Condition testSymbol(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_SYMBOL));
    return cond;
  }
  Condition testBigInt(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_BIGINT));
    return cond;
  }
  Condition testObject(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_OBJECT));
    return cond;
  }
  Condition testDouble(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE));
    // Requires unsigned comparison due to cmpTag internals.
    return (cond == Equal) ? BelowOrEqual : Above;
  }
  Condition testNumber(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JS::detail::ValueUpperInclNumberTag));
    // Requires unsigned comparison due to cmpTag internals.
    return (cond == Equal) ? BelowOrEqual : Above;
  }
  Condition testGCThing(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag));
    // Requires unsigned comparison due to cmpTag internals.
    return (cond == Equal) ? AboveOrEqual : Below;
  }
  Condition testMagic(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_MAGIC));
    return cond;
  }
  Condition testPrimitive(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag));
    // Requires unsigned comparison due to cmpTag internals.
    return (cond == Equal) ? Below : AboveOrEqual;
  }
  // An error Value is encoded as a magic Value.
  Condition testError(Condition cond, Register tag) {
    return testMagic(cond, tag);
  }
1686
1687 // ValueOperand-based tests.
testInt32(Condition cond,const ValueOperand & value)1688 Condition testInt32(Condition cond, const ValueOperand& value) {
1689 // The incoming ValueOperand may use scratch registers.
1690 vixl::UseScratchRegisterScope temps(this);
1691 const Register scratch = temps.AcquireX().asUnsized();
1692 MOZ_ASSERT(scratch != value.valueReg());
1693
1694 splitSignExtTag(value, scratch);
1695 return testInt32(cond, scratch);
1696 }
testBoolean(Condition cond,const ValueOperand & value)1697 Condition testBoolean(Condition cond, const ValueOperand& value) {
1698 vixl::UseScratchRegisterScope temps(this);
1699 const Register scratch = temps.AcquireX().asUnsized();
1700 MOZ_ASSERT(value.valueReg() != scratch);
1701 splitSignExtTag(value, scratch);
1702 return testBoolean(cond, scratch);
1703 }
testDouble(Condition cond,const ValueOperand & value)1704 Condition testDouble(Condition cond, const ValueOperand& value) {
1705 vixl::UseScratchRegisterScope temps(this);
1706 const Register scratch = temps.AcquireX().asUnsized();
1707 MOZ_ASSERT(value.valueReg() != scratch);
1708 splitSignExtTag(value, scratch);
1709 return testDouble(cond, scratch);
1710 }
testNull(Condition cond,const ValueOperand & value)1711 Condition testNull(Condition cond, const ValueOperand& value) {
1712 vixl::UseScratchRegisterScope temps(this);
1713 const Register scratch = temps.AcquireX().asUnsized();
1714 MOZ_ASSERT(value.valueReg() != scratch);
1715 splitSignExtTag(value, scratch);
1716 return testNull(cond, scratch);
1717 }
testUndefined(Condition cond,const ValueOperand & value)1718 Condition testUndefined(Condition cond, const ValueOperand& value) {
1719 vixl::UseScratchRegisterScope temps(this);
1720 const Register scratch = temps.AcquireX().asUnsized();
1721 MOZ_ASSERT(value.valueReg() != scratch);
1722 splitSignExtTag(value, scratch);
1723 return testUndefined(cond, scratch);
1724 }
testString(Condition cond,const ValueOperand & value)1725 Condition testString(Condition cond, const ValueOperand& value) {
1726 vixl::UseScratchRegisterScope temps(this);
1727 const Register scratch = temps.AcquireX().asUnsized();
1728 MOZ_ASSERT(value.valueReg() != scratch);
1729 splitSignExtTag(value, scratch);
1730 return testString(cond, scratch);
1731 }
testSymbol(Condition cond,const ValueOperand & value)1732 Condition testSymbol(Condition cond, const ValueOperand& value) {
1733 vixl::UseScratchRegisterScope temps(this);
1734 const Register scratch = temps.AcquireX().asUnsized();
1735 MOZ_ASSERT(value.valueReg() != scratch);
1736 splitSignExtTag(value, scratch);
1737 return testSymbol(cond, scratch);
1738 }
testBigInt(Condition cond,const ValueOperand & value)1739 Condition testBigInt(Condition cond, const ValueOperand& value) {
1740 vixl::UseScratchRegisterScope temps(this);
1741 const Register scratch = temps.AcquireX().asUnsized();
1742 MOZ_ASSERT(value.valueReg() != scratch);
1743 splitSignExtTag(value, scratch);
1744 return testBigInt(cond, scratch);
1745 }
testObject(Condition cond,const ValueOperand & value)1746 Condition testObject(Condition cond, const ValueOperand& value) {
1747 vixl::UseScratchRegisterScope temps(this);
1748 const Register scratch = temps.AcquireX().asUnsized();
1749 MOZ_ASSERT(value.valueReg() != scratch);
1750 splitSignExtTag(value, scratch);
1751 return testObject(cond, scratch);
1752 }
testNumber(Condition cond,const ValueOperand & value)1753 Condition testNumber(Condition cond, const ValueOperand& value) {
1754 vixl::UseScratchRegisterScope temps(this);
1755 const Register scratch = temps.AcquireX().asUnsized();
1756 MOZ_ASSERT(value.valueReg() != scratch);
1757 splitSignExtTag(value, scratch);
1758 return testNumber(cond, scratch);
1759 }
testPrimitive(Condition cond,const ValueOperand & value)1760 Condition testPrimitive(Condition cond, const ValueOperand& value) {
1761 vixl::UseScratchRegisterScope temps(this);
1762 const Register scratch = temps.AcquireX().asUnsized();
1763 MOZ_ASSERT(value.valueReg() != scratch);
1764 splitSignExtTag(value, scratch);
1765 return testPrimitive(cond, scratch);
1766 }
testMagic(Condition cond,const ValueOperand & value)1767 Condition testMagic(Condition cond, const ValueOperand& value) {
1768 vixl::UseScratchRegisterScope temps(this);
1769 const Register scratch = temps.AcquireX().asUnsized();
1770 MOZ_ASSERT(value.valueReg() != scratch);
1771 splitSignExtTag(value, scratch);
1772 return testMagic(cond, scratch);
1773 }
testGCThing(Condition cond,const ValueOperand & value)1774 Condition testGCThing(Condition cond, const ValueOperand& value) {
1775 vixl::UseScratchRegisterScope temps(this);
1776 const Register scratch = temps.AcquireX().asUnsized();
1777 MOZ_ASSERT(value.valueReg() != scratch);
1778 splitSignExtTag(value, scratch);
1779 return testGCThing(cond, scratch);
1780 }
testError(Condition cond,const ValueOperand & value)1781 Condition testError(Condition cond, const ValueOperand& value) {
1782 return testMagic(cond, value);
1783 }
1784
1785 // Address-based tests.
  // Load the sign-extended tag of the Value stored at |address| into a
  // scratch register, then defer to the register-based test.
  Condition testGCThing(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testGCThing(cond, scratch);
  }
  Condition testMagic(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testMagic(cond, scratch);
  }
  Condition testInt32(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testInt32(cond, scratch);
  }
  Condition testDouble(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testDouble(cond, scratch);
  }
  Condition testBoolean(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testBoolean(cond, scratch);
  }
  Condition testNull(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testNull(cond, scratch);
  }
  Condition testUndefined(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testUndefined(cond, scratch);
  }
  Condition testString(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testString(cond, scratch);
  }
  Condition testSymbol(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testSymbol(cond, scratch);
  }
  Condition testBigInt(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testBigInt(cond, scratch);
  }
  Condition testObject(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testObject(cond, scratch);
  }
  Condition testNumber(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testNumber(cond, scratch);
  }
1870
1871 // BaseIndex-based tests.
  // BaseIndex variants: load the sign-extended tag via splitSignExtTag into
  // a scratch register (which must alias neither base nor index), then defer
  // to the register-based test.
  Condition testUndefined(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testUndefined(cond, scratch);
  }
  Condition testNull(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testNull(cond, scratch);
  }
  Condition testBoolean(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testBoolean(cond, scratch);
  }
  Condition testString(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testString(cond, scratch);
  }
  Condition testSymbol(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testSymbol(cond, scratch);
  }
  Condition testBigInt(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testBigInt(cond, scratch);
  }
  // A BigInt is falsy iff its digit length is zero (i.e. its value is 0).
  Condition testBigIntTruthy(bool truthy, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();

    MOZ_ASSERT(value.valueReg() != scratch);

    unboxBigInt(value, scratch);
    load32(Address(scratch, BigInt::offsetOfDigitLength()), scratch);
    cmp32(scratch, Imm32(0));
    return truthy ? Condition::NonZero : Condition::Zero;
  }
  Condition testInt32(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testInt32(cond, scratch);
  }
  Condition testObject(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testObject(cond, scratch);
  }
  Condition testDouble(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testDouble(cond, scratch);
  }
  Condition testMagic(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testMagic(cond, scratch);
  }
  Condition testGCThing(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testGCThing(cond, scratch);
  }
1971
testInt32Truthy(bool truthy,const ValueOperand & operand)1972 Condition testInt32Truthy(bool truthy, const ValueOperand& operand) {
1973 ARMRegister payload32(operand.valueReg(), 32);
1974 Tst(payload32, payload32);
1975 return truthy ? NonZero : Zero;
1976 }
1977
testBooleanTruthy(bool truthy,const ValueOperand & operand)1978 Condition testBooleanTruthy(bool truthy, const ValueOperand& operand) {
1979 ARMRegister payload32(operand.valueReg(), 32);
1980 Tst(payload32, payload32);
1981 return truthy ? NonZero : Zero;
1982 }
  // A string Value is truthy iff its length is non-zero. Unboxes the string
  // pointer into a scratch register and compares the length field against 0.
  Condition testStringTruthy(bool truthy, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    const ARMRegister scratch32(scratch, 32);
    const ARMRegister scratch64(scratch, 64);

    MOZ_ASSERT(value.valueReg() != scratch);

    unboxString(value, scratch);
    Ldr(scratch32, MemOperand(scratch64, JSString::offsetOfLength()));
    Cmp(scratch32, Operand(0));
    return truthy ? Condition::NonZero : Condition::Zero;
  }
  // Convert the boxed Value in |src| (known to be int32 or double) into a
  // double in |dest|: a double's bits are moved unchanged, an int32 payload
  // is converted with scvtf.
  void int32OrDouble(Register src, ARMFPRegister dest) {
    Label isInt32;
    Label join;
    testInt32(Equal, ValueOperand(src));
    B(&isInt32, Equal);
    // is double, move the bits as is
    Fmov(dest, ARMRegister(src, 64));
    B(&join);
    bind(&isInt32);
    // is int32, do a conversion while moving
    Scvtf(dest, ARMRegister(src, 32));
    bind(&join);
  }
  // Load and unbox a Value from memory into |dest|. Float destinations go
  // through int32OrDouble (the slot may hold either representation); GPR
  // destinations unbox according to |type|.
  void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
    if (dest.isFloat()) {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      MOZ_ASSERT(scratch64.asUnsized() != address.base);
      Ldr(scratch64, toMemOperand(address));
      int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
    } else {
      unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
    }
  }

  // As above, for a base+index addressing mode.
  void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
    if (dest.isFloat()) {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      MOZ_ASSERT(scratch64.asUnsized() != address.base);
      MOZ_ASSERT(scratch64.asUnsized() != address.index);
      doBaseIndex(scratch64, address, vixl::LDR_x);
      int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
    } else {
      unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
    }
  }
2033
  // Not implemented on ARM64; crashes if reached.
  void loadInstructionPointerAfterCall(Register dest) {
    MOZ_CRASH("loadInstructionPointerAfterCall");
  }
2037
2038 // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
  CodeOffset toggledJump(Label* label) {
    // Emit an always-taken branch; the returned offset lets patching code
    // locate (and later toggle) this instruction.
    BufferOffset offset = b(label, Always);
    CodeOffset ret(offset.getOffset());
    return ret;
  }
2044
2045 // load: offset to the load instruction obtained by movePatchablePtr().
writeDataRelocation(ImmGCPtr ptr,BufferOffset load)2046 void writeDataRelocation(ImmGCPtr ptr, BufferOffset load) {
2047 // Raw GC pointer relocations and Value relocations both end up in
2048 // Assembler::TraceDataRelocations.
2049 if (ptr.value) {
2050 if (gc::IsInsideNursery(ptr.value)) {
2051 embedsNurseryPointers_ = true;
2052 }
2053 dataRelocations_.writeUnsigned(load.getOffset());
2054 }
2055 }
writeDataRelocation(const Value & val,BufferOffset load)2056 void writeDataRelocation(const Value& val, BufferOffset load) {
2057 // Raw GC pointer relocations and Value relocations both end up in
2058 // Assembler::TraceDataRelocations.
2059 if (val.isGCThing()) {
2060 gc::Cell* cell = val.toGCThing();
2061 if (cell && gc::IsInsideNursery(cell)) {
2062 embedsNurseryPointers_ = true;
2063 }
2064 dataRelocations_.writeUnsigned(load.getOffset());
2065 }
2066 }
2067
  // Compute base + offset (and, for BaseIndex, + (index << scale)) into
  // |dest| without touching memory.
  void computeEffectiveAddress(const Address& address, Register dest) {
    Add(ARMRegister(dest, 64), toARMRegister(address.base, 64),
        Operand(address.offset));
  }
  void computeEffectiveAddress(const Address& address, RegisterOrSP dest) {
    Add(toARMRegister(dest, 64), toARMRegister(address.base, 64),
        Operand(address.offset));
  }
  void computeEffectiveAddress(const BaseIndex& address, Register dest) {
    ARMRegister dest64(dest, 64);
    ARMRegister base64 = toARMRegister(address.base, 64);
    ARMRegister index64(address.index, 64);

    Add(dest64, base64, Operand(index64, vixl::LSL, address.scale));
    // Fold in the displacement only when non-zero to avoid a wasted ADD.
    if (address.offset) {
      Add(dest64, dest64, Operand(address.offset));
    }
  }
2086
2087 public:
  void handleFailureWithHandlerTail(Label* profilerExitTail);

  void profilerEnterFrame(Register framePtr, Register scratch);
  void profilerEnterFrame(RegisterOrSP framePtr, Register scratch);
  void profilerExitFrame();
  // A Value occupies a single 64-bit slot on this platform, so the payload
  // and type "halves" share the same address.
  Address ToPayload(Address value) { return value; }
  Address ToType(Address value) { return value; }
2095
  // Wasm memory access helpers; exactly one of |outany|/|out64| (resp.
  // |valany|/|val64|) is used depending on the access type.
  void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase,
                    Register ptr, AnyRegister outany, Register64 out64);
  void wasmLoadImpl(const wasm::MemoryAccessDesc& access, MemOperand srcAddr,
                    AnyRegister outany, Register64 out64);
  void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister valany,
                     Register64 val64, Register memoryBase, Register ptr);
  void wasmStoreImpl(const wasm::MemoryAccessDesc& access, MemOperand destAddr,
                     AnyRegister valany, Register64 val64);
  // The complete address is in `address`, and `access` is used for its type
  // attributes only; its `offset` is ignored.
  void wasmLoadAbsolute(const wasm::MemoryAccessDesc& access,
                        Register memoryBase, uint64_t address, AnyRegister out,
                        Register64 out64);
  void wasmStoreAbsolute(const wasm::MemoryAccessDesc& access,
                         AnyRegister value, Register64 value64,
                         Register memoryBase, uint64_t address);
2112
2113 // Emit a BLR or NOP instruction. ToggleCall can be used to patch
2114 // this instruction.
  CodeOffset toggledCall(JitCode* target, bool enabled) {
    // The returned offset must be to the first instruction generated,
    // for the debugger to match offset with Baseline's pcMappingEntries_.
    BufferOffset offset = nextOffset();

    // It is unclear why this sync is necessary:
    // * PSP and SP have been observed to be different in testcase
    //   tests/cacheir/bug1448136.js
    // * Removing the sync causes no failures in all of jit-tests.
    syncStackPtr();

    BufferOffset loadOffset;
    {
      vixl::UseScratchRegisterScope temps(this);

      // The register used for the load is hardcoded, so that ToggleCall
      // can patch in the branch instruction easily. This could be changed,
      // but then ToggleCall must read the target register from the load.
      MOZ_ASSERT(temps.IsAvailable(ScratchReg2_64));
      temps.Exclude(ScratchReg2_64);

      loadOffset = immPool64(ScratchReg2_64, uint64_t(target->raw()));

      // Emit the toggleable instruction: BLR when enabled, NOP otherwise.
      if (enabled) {
        blr(ScratchReg2_64);
      } else {
        nop();
      }
    }

    // Record the jump so the target pointer gets relocated with the code.
    addPendingJump(loadOffset, ImmPtr(target->raw()), RelocationKind::JITCODE);
    CodeOffset ret(offset.getOffset());
    return ret;
  }
2149
ToggledCallSize(uint8_t * code)2150 static size_t ToggledCallSize(uint8_t* code) {
2151 // The call site is a sequence of two or three instructions:
2152 //
2153 // syncStack (optional)
2154 // ldr/adr
2155 // nop/blr
2156 //
2157 // Flushed constant pools can appear before any of the instructions.
2158
2159 const Instruction* cur = (const Instruction*)code;
2160 cur = cur->skipPool();
2161 if (cur->IsStackPtrSync()) cur = cur->NextInstruction();
2162 cur = cur->skipPool();
2163 cur = cur->NextInstruction(); // LDR/ADR
2164 cur = cur->skipPool();
2165 cur = cur->NextInstruction(); // NOP/BLR
2166 return (uint8_t*)cur - code;
2167 }
2168
  // Debug-only check that |reg| is aligned to StackAlignment; hits a
  // breakpoint if not. No-op in release builds.
  void checkARMRegAlignment(const ARMRegister& reg) {
#ifdef DEBUG
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT_IF(!reg.IsSP(), scratch64.asUnsized() != reg.asUnsized());
    Label aligned;
    Mov(scratch64, reg);
    Tst(scratch64, Operand(StackAlignment - 1));
    B(Zero, &aligned);
    breakpoint();
    bind(&aligned);
    Mov(scratch64, vixl::xzr);  // Clear the scratch register for sanity.
#endif
  }
2183
  // Debug-only check that the (pseudo) stack pointer is aligned.
  void checkStackAlignment() {
#ifdef DEBUG
    checkARMRegAlignment(GetStackPointer64());

    // If another register is being used to track pushes, check sp explicitly.
    if (!GetStackPointer64().Is(vixl::sp)) {
      checkARMRegAlignment(vixl::sp);
    }
#endif
  }
2194
  // Return from an ABI call, syncing the pseudo stack pointer to sp first.
  void abiret() {
    syncStackPtr();  // SP is always used to transmit the stack between calls.
    vixl::MacroAssembler::Ret(vixl::lr);
  }
2199
  // Not implemented on ARM64; crashes if reached.
  void clampCheck(Register r, Label* handleNotAnInt) {
    MOZ_CRASH("clampCheck");
  }

  // Not implemented on ARM64; crashes if reached.
  void stackCheck(ImmWord limitAddr, Label* label) { MOZ_CRASH("stackCheck"); }
2205
incrementInt32Value(const Address & addr)2206 void incrementInt32Value(const Address& addr) {
2207 vixl::UseScratchRegisterScope temps(this);
2208 const ARMRegister scratch32 = temps.AcquireW();
2209 MOZ_ASSERT(scratch32.asUnsized() != addr.base);
2210
2211 load32(addr, scratch32.asUnsized());
2212 Add(scratch32, scratch32, Operand(1));
2213 store32(scratch32.asUnsized(), addr);
2214 }
2215
2216 void breakpoint();
2217
  // Emits a simulator directive to save the current sp on an internal stack.
  // Compiles to nothing outside the ARM64 simulator build.
  void simulatorMarkSP() {
#ifdef JS_SIMULATOR_ARM64
    svc(vixl::kMarkStackPointer);
#endif
  }
2224
  // Emits a simulator directive to pop from its internal stack
  // and assert that the value is equal to the current sp.
  // Compiles to nothing outside the ARM64 simulator build.
  void simulatorCheckSP() {
#ifdef JS_SIMULATOR_ARM64
    svc(vixl::kCheckStackPointer);
#endif
  }
2232
loadWasmGlobalPtr(uint32_t globalDataOffset,Register dest)2233 void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
2234 loadPtr(Address(WasmTlsReg,
2235 offsetof(wasm::TlsData, globalArea) + globalDataOffset),
2236 dest);
2237 }
loadWasmPinnedRegsFromTls()2238 void loadWasmPinnedRegsFromTls() {
2239 loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg);
2240 }
2241
2242 // Overwrites the payload bits of a dest register containing a Value.
movePayload(Register src,Register dest)2243 void movePayload(Register src, Register dest) {
2244 // Bfxil cannot be used with the zero register as a source.
2245 if (src == rzr) {
2246 And(ARMRegister(dest, 64), ARMRegister(dest, 64),
2247 Operand(JS::detail::ValueTagMask));
2248 } else {
2249 Bfxil(ARMRegister(dest, 64), ARMRegister(src, 64), 0, JSVAL_TAG_SHIFT);
2250 }
2251 }
2252
2253 protected:
2254 bool buildOOLFakeExitFrame(void* fakeReturnAddr);
2255 };
2256
2257 // See documentation for ScratchTagScope and ScratchTagScopeRelease in
2258 // MacroAssembler-x64.h.
2259
2260 class ScratchTagScope {
2261 vixl::UseScratchRegisterScope temps_;
2262 ARMRegister scratch64_;
2263 bool owned_;
2264 mozilla::DebugOnly<bool> released_;
2265
2266 public:
ScratchTagScope(MacroAssemblerCompat & masm,const ValueOperand &)2267 ScratchTagScope(MacroAssemblerCompat& masm, const ValueOperand&)
2268 : temps_(&masm), owned_(true), released_(false) {
2269 scratch64_ = temps_.AcquireX();
2270 }
2271
Register()2272 operator Register() {
2273 MOZ_ASSERT(!released_);
2274 return scratch64_.asUnsized();
2275 }
2276
release()2277 void release() {
2278 MOZ_ASSERT(!released_);
2279 released_ = true;
2280 if (owned_) {
2281 temps_.Release(scratch64_);
2282 owned_ = false;
2283 }
2284 }
2285
reacquire()2286 void reacquire() {
2287 MOZ_ASSERT(released_);
2288 released_ = false;
2289 }
2290 };
2291
// RAII helper that releases a ScratchTagScope for the duration of its own
// lifetime and reacquires it on destruction.
class ScratchTagScopeRelease {
  ScratchTagScope* ts_;

 public:
  explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
    ts_->release();
  }
  ~ScratchTagScopeRelease() { ts_->reacquire(); }
};
2301
splitTagForTest(const ValueOperand & value,ScratchTagScope & tag)2302 inline void MacroAssemblerCompat::splitTagForTest(const ValueOperand& value,
2303 ScratchTagScope& tag) {
2304 splitSignExtTag(value, tag);
2305 }
2306
2307 typedef MacroAssemblerCompat MacroAssemblerSpecific;
2308
2309 } // namespace jit
2310 } // namespace js
2311
2312 #endif // jit_arm64_MacroAssembler_arm64_h
2313