1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #ifndef jit_arm64_MacroAssembler_arm64_h
8 #define jit_arm64_MacroAssembler_arm64_h
9
10 #include "jit/arm64/Assembler-arm64.h"
11 #include "jit/arm64/vixl/Debugger-vixl.h"
12 #include "jit/arm64/vixl/MacroAssembler-vixl.h"
13 #include "jit/AtomicOp.h"
14 #include "jit/MoveResolver.h"
15 #include "vm/BigIntType.h" // JS::BigInt
16 #include "wasm/WasmBuiltins.h"
17 #include "wasm/WasmTlsData.h"
18
19 #ifdef _M_ARM64
20 # ifdef move32
21 # undef move32
22 # endif
23 # ifdef move64
24 # undef move64
25 # endif
26 #endif
27
28 namespace js {
29 namespace jit {
30
31 // Import VIXL operands directly into the jit namespace for shared code.
32 using vixl::MemOperand;
33 using vixl::Operand;
34
// A JSValue tag already shifted into its in-word (bit 47+) position, usable
// directly as a 64-bit immediate operand.
struct ImmShiftedTag : public ImmWord {
  explicit ImmShiftedTag(JSValueShiftedTag shtag) : ImmWord((uintptr_t)shtag) {}

  explicit ImmShiftedTag(JSValueType type)
      : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type)))) {
  }
};
42
// An unshifted JSValue tag, carried as a 32-bit immediate.
struct ImmTag : public Imm32 {
  explicit ImmTag(JSValueTag tag) : Imm32(tag) {}
};
46
47 class ScratchTagScope;
48
49 class MacroAssemblerCompat : public vixl::MacroAssembler {
50 public:
51 typedef vixl::Condition Condition;
52
53 private:
54 // Perform a downcast. Should be removed by Bug 996602.
55 js::jit::MacroAssembler& asMasm();
56 const js::jit::MacroAssembler& asMasm() const;
57
58 public:
59 // Restrict to only VIXL-internal functions.
60 vixl::MacroAssembler& asVIXL();
61 const MacroAssembler& asVIXL() const;
62
63 protected:
64 bool enoughMemory_;
65 uint32_t framePushed_;
66
  // Start with no OOM recorded and an empty (zero-byte) tracked frame.
  MacroAssemblerCompat()
      : vixl::MacroAssembler(), enoughMemory_(true), framePushed_(0) {}
69
70 protected:
71 MoveResolver moveResolver_;
72
73 public:
oom()74 bool oom() const { return Assembler::oom() || !enoughMemory_; }
toARMRegister(RegisterOrSP r,size_t size)75 static ARMRegister toARMRegister(RegisterOrSP r, size_t size) {
76 if (IsHiddenSP(r)) {
77 MOZ_ASSERT(size == 64);
78 return sp;
79 }
80 return ARMRegister(AsRegister(r), size);
81 }
  // Convert a jit::Address into a vixl MemOperand (base register + offset).
  static MemOperand toMemOperand(const Address& a) {
    return MemOperand(toARMRegister(a.base, 64), a.offset);
  }
  // Emit a load/store to a base+index<<scale(+offset) address. Uses the
  // scaled-register addressing mode directly when the encoding allows it;
  // otherwise computes the address into a scratch register first.
  void doBaseIndex(const vixl::CPURegister& rt, const BaseIndex& addr,
                   vixl::LoadStoreOp op) {
    const ARMRegister base = toARMRegister(addr.base, 64);
    const ARMRegister index = ARMRegister(addr.index, 64);
    const unsigned scale = addr.scale;

    // Fast path: no constant offset, and the scale is either zero or matches
    // the access size, so MemOperand can encode the shifted index itself.
    if (!addr.offset &&
        (!scale || scale == static_cast<unsigned>(CalcLSDataSize(op)))) {
      LoadStoreMacro(rt, MemOperand(base, index, vixl::LSL, scale), op);
      return;
    }

    vixl::UseScratchRegisterScope temps(this);
    ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(!scratch64.Is(rt));
    MOZ_ASSERT(!scratch64.Is(base));
    MOZ_ASSERT(!scratch64.Is(index));

    // Slow path: materialize base + (index << scale), then apply the offset.
    Add(scratch64, base, Operand(index, vixl::LSL, scale));
    LoadStoreMacro(rt, MemOperand(scratch64, addr.offset), op);
  }
  // Capitalized Push() records the frame-size change via adjustFrame();
  // lowercase push() emits the store without bookkeeping.
  void Push(ARMRegister reg) {
    push(reg);
    adjustFrame(reg.size() / 8);  // size() is in bits.
  }
  void Push(Register reg) {
    vixl::MacroAssembler::Push(ARMRegister(reg, 64));
    adjustFrame(8);
  }
  void Push(Imm32 imm) {
    push(imm);
    adjustFrame(8);  // Immediates occupy a full 64-bit slot.
  }
  void Push(FloatRegister f) {
    push(ARMFPRegister(f, 64));
    adjustFrame(8);
  }
  void Push(ImmPtr imm) {
    push(imm);
    adjustFrame(sizeof(void*));
  }
  void push(FloatRegister f) {
    vixl::MacroAssembler::Push(ARMFPRegister(f, 64));
  }
  void push(ARMFPRegister f) { vixl::MacroAssembler::Push(f); }
push(Imm32 imm)130 void push(Imm32 imm) {
131 if (imm.value == 0) {
132 vixl::MacroAssembler::Push(vixl::xzr);
133 } else {
134 vixl::UseScratchRegisterScope temps(this);
135 const ARMRegister scratch64 = temps.AcquireX();
136 move32(imm, scratch64.asUnsized());
137 vixl::MacroAssembler::Push(scratch64);
138 }
139 }
push(ImmWord imm)140 void push(ImmWord imm) {
141 if (imm.value == 0) {
142 vixl::MacroAssembler::Push(vixl::xzr);
143 } else {
144 vixl::UseScratchRegisterScope temps(this);
145 const ARMRegister scratch64 = temps.AcquireX();
146 Mov(scratch64, imm.value);
147 vixl::MacroAssembler::Push(scratch64);
148 }
149 }
  // Push a pointer immediate; null pushes xzr without a scratch register.
  void push(ImmPtr imm) {
    if (imm.value == nullptr) {
      vixl::MacroAssembler::Push(vixl::xzr);
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      movePtr(imm, scratch64.asUnsized());
      vixl::MacroAssembler::Push(scratch64);
    }
  }
  // Push a GC-thing pointer; movePtr(ImmGCPtr, ...) records the data
  // relocation so the GC can trace it.
  void push(ImmGCPtr imm) {
    if (imm.value == nullptr) {
      vixl::MacroAssembler::Push(vixl::xzr);
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      movePtr(imm, scratch64.asUnsized());
      vixl::MacroAssembler::Push(scratch64);
    }
  }
  void push(ARMRegister reg) { vixl::MacroAssembler::Push(reg); }
  // Push the 64-bit value loaded from memory at `a`.
  void push(Address a) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(a.base != scratch64.asUnsized());
    loadPtr(a, scratch64.asUnsized());
    vixl::MacroAssembler::Push(scratch64);
  }

  // Push registers.
  void push(Register reg) { vixl::MacroAssembler::Push(ARMRegister(reg, 64)); }
push(RegisterOrSP reg)181 void push(RegisterOrSP reg) {
182 if (IsHiddenSP(reg)) {
183 vixl::MacroAssembler::Push(sp);
184 }
185 vixl::MacroAssembler::Push(toARMRegister(reg, 64));
186 }
  // Multi-register pushes, forwarded to the vixl macro-assembler.
  void push(Register r0, Register r1) {
    vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64));
  }
  void push(Register r0, Register r1, Register r2) {
    vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64),
                               ARMRegister(r2, 64));
  }
  void push(Register r0, Register r1, Register r2, Register r3) {
    vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64),
                               ARMRegister(r2, 64), ARMRegister(r3, 64));
  }
  void push(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2,
            ARMFPRegister r3) {
    vixl::MacroAssembler::Push(r0, r1, r2, r3);
  }
202
  // Pop registers.
  void pop(Register reg) { vixl::MacroAssembler::Pop(ARMRegister(reg, 64)); }
  void pop(Register r0, Register r1) {
    vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64));
  }
  void pop(Register r0, Register r1, Register r2) {
    vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64),
                              ARMRegister(r2, 64));
  }
  void pop(Register r0, Register r1, Register r2, Register r3) {
    vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64),
                              ARMRegister(r2, 64), ARMRegister(r3, 64));
  }
  void pop(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2,
           ARMFPRegister r3) {
    vixl::MacroAssembler::Pop(r0, r1, r2, r3);
  }

  void pop(const ValueOperand& v) { pop(v.valueReg()); }
  void pop(const FloatRegister& f) {
    vixl::MacroAssembler::Pop(ARMFPRegister(f, 64));
  }

  // Account for `args` bytes already popped by a callee; emits no code.
  void implicitPop(uint32_t args) {
    MOZ_ASSERT(args % sizeof(intptr_t) == 0);
    adjustFrame(0 - args);
  }
  // Capitalized Pop() also records the frame-size change.
  void Pop(ARMRegister r) {
    vixl::MacroAssembler::Pop(r);
    adjustFrame(0 - r.size() / 8);  // size() is in bits.
  }
  // FIXME: This is the same on every arch.
  // FIXME: If we can share framePushed_, we can share this.
  // FIXME: Or just make it at the highest level.
  // Push a later-patchable word and account for it in the frame size;
  // returns the offset of the patchable load.
  CodeOffset PushWithPatch(ImmWord word) {
    framePushed_ += sizeof(word.value);
    return pushWithPatch(word);
  }
  CodeOffset PushWithPatch(ImmPtr ptr) {
    return PushWithPatch(ImmWord(uintptr_t(ptr.value)));
  }
244
  // Tracked frame size, in bytes.
  uint32_t framePushed() const { return framePushed_; }
  void adjustFrame(int32_t diff) { setFramePushed(framePushed_ + diff); }

  void setFramePushed(uint32_t framePushed) { framePushed_ = framePushed; }

  // Release `amount` (a register value, in bytes) of stack space.
  // NOTE(review): does not touch framePushed_ — presumably the caller
  // handles the bookkeeping; confirm against asMasm().freeStack().
  void freeStack(Register amount) {
    vixl::MacroAssembler::Drop(Operand(ARMRegister(amount, 64)));
  }
253
  // Update sp with the value of the current active stack pointer, if necessary.
  void syncStackPtr() {
    if (!GetStackPointer64().Is(vixl::sp)) {
      Mov(vixl::sp, GetStackPointer64());
    }
  }
  // Initialize the pseudo stack pointer (PSP) from the real sp, if a PSP is
  // in use.
  void initPseudoStackPtr() {
    if (!GetStackPointer64().Is(vixl::sp)) {
      Mov(GetStackPointer64(), vixl::sp);
    }
  }
  // In debug builds only, cause a trap if PSP is active and PSP != SP
  void assertStackPtrsSynced(uint32_t id) {
#ifdef DEBUG
    // The add and sub instructions below will only take a 12-bit immediate.
    MOZ_ASSERT(id <= 0xFFF);
    if (!GetStackPointer64().Is(vixl::sp)) {
      Label ok;
      // Add a marker, so we can figure out who requested the check when
      // inspecting the generated code. Note, a more concise way to encode
      // the marker would be to use it as an immediate for the `brk`
      // instruction as generated by `Unreachable()`, and removing the add/sub.
      Add(GetStackPointer64(), GetStackPointer64(), Operand(id));
      Sub(GetStackPointer64(), GetStackPointer64(), Operand(id));
      Cmp(vixl::sp, GetStackPointer64());
      B(Equal, &ok);
      Unreachable();
      bind(&ok);
    }
#endif
  }
  // In debug builds only, add a marker that doesn't change the machine's
  // state. Note these markers are x16-based, as opposed to the x28-based
  // ones made by `assertStackPtrsSynced`.
  void addMarker(uint32_t id) {
#ifdef DEBUG
    // Only 12 bits of immediate are allowed.
    MOZ_ASSERT(id <= 0xFFF);
    ARMRegister x16 = ARMRegister(r16, 64);
    // Net no-op: add then subtract the same id.
    Add(x16, x16, Operand(id));
    Sub(x16, x16, Operand(id));
#endif
  }
297
  // Values are 64-bit (punboxed), so storing a Value is a pointer store.
  void storeValue(ValueOperand val, const Address& dest) {
    storePtr(val.valueReg(), dest);
  }

  // Box `reg` with `type` into a scratch register, then store it.
  template <typename T>
  void storeValue(JSValueType type, Register reg, const T& dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != reg);
    tagValue(type, reg, ValueOperand(scratch));
    storeValue(ValueOperand(scratch), dest);
  }
  // Materialize a constant Value into a scratch register, then store it.
  template <typename T>
  void storeValue(const Value& val, const T& dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    moveValue(val, ValueOperand(scratch));
    storeValue(ValueOperand(scratch), dest);
  }
  void storeValue(ValueOperand val, BaseIndex dest) {
    storePtr(val.valueReg(), dest);
  }
  // Copy a Value from `src` to `dest` through `temp`.
  void storeValue(const Address& src, const Address& dest, Register temp) {
    loadPtr(src, temp);
    storePtr(temp, dest);
  }

  // Private values are raw pointers stored untagged.
  void storePrivateValue(Register src, const Address& dest) {
    storePtr(src, dest);
  }
  void storePrivateValue(ImmGCPtr imm, const Address& dest) {
    storePtr(imm, dest);
  }
331
  // Load a 64-bit (punboxed) Value into a plain register.
  void loadValue(Address src, Register val) {
    Ldr(ARMRegister(val, 64), MemOperand(src));
  }
  void loadValue(Address src, ValueOperand val) {
    Ldr(ARMRegister(val.valueReg(), 64), MemOperand(src));
  }
  void loadValue(const BaseIndex& src, ValueOperand val) {
    doBaseIndex(ARMRegister(val.valueReg(), 64), src, vixl::LDR_x);
  }
  // Plain loads suffice here; no special unaligned handling on this path.
  void loadUnalignedValue(const Address& src, ValueOperand dest) {
    loadValue(src, dest);
  }
  // Box `payload` with `type`'s shifted tag via OR. Assumes the payload's
  // tag bits are already zero — TODO confirm all callers guarantee this.
  void tagValue(JSValueType type, Register payload, ValueOperand dest) {
    // This could be cleverer, but the first attempt had bugs.
    Orr(ARMRegister(dest.valueReg(), 64), ARMRegister(payload, 64),
        Operand(ImmShiftedTag(type).value));
  }
  void pushValue(ValueOperand val) {
    vixl::MacroAssembler::Push(ARMRegister(val.valueReg(), 64));
  }
  void popValue(ValueOperand val) {
    vixl::MacroAssembler::Pop(ARMRegister(val.valueReg(), 64));
    // SP may be < PSP now (that's OK).
    // eg testcase: tests/backup-point-bug1315634.js
  }
  // Push a constant Value; GC things go through a patchable, traced load.
  void pushValue(const Value& val) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    if (val.isGCThing()) {
      BufferOffset load =
          movePatchablePtr(ImmPtr(val.bitsAsPunboxPointer()), scratch);
      writeDataRelocation(val, load);
      push(scratch);
    } else {
      moveValue(val, scratch);
      push(scratch);
    }
  }
  // Tag `reg` with `type` and push the boxed result.
  void pushValue(JSValueType type, Register reg) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != reg);
    tagValue(type, reg, ValueOperand(scratch));
    push(scratch);
  }
  // Push the Value stored in memory at `addr`.
  void pushValue(const Address& addr) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != addr.base);
    loadValue(addr, scratch);
    push(scratch);
  }
  // Store just the payload of `value`, truncated to `nbytes` (8, 4, or 1).
  // The 8-byte case strips the tag first; narrower cases rely on the store
  // truncating the register.
  template <typename T>
  void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes,
                           JSValueType type) {
    switch (nbytes) {
      case 8: {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        if (type == JSVAL_TYPE_OBJECT) {
          unboxObjectOrNull(value, scratch);
        } else {
          unboxNonDouble(value, scratch, type);
        }
        storePtr(scratch, address);
        return;
      }
      case 4:
        store32(value.valueReg(), address);
        return;
      case 1:
        store8(value.valueReg(), address);
        return;
      default:
        MOZ_CRASH("Bad payload width");
    }
  }
  // Materialize a constant Value into `dest`; GC things use a patchable,
  // relocation-recorded load so the GC can trace/update the pointer.
  void moveValue(const Value& val, Register dest) {
    if (val.isGCThing()) {
      BufferOffset load =
          movePatchablePtr(ImmPtr(val.bitsAsPunboxPointer()), dest);
      writeDataRelocation(val, load);
    } else {
      movePtr(ImmWord(val.asRawBits()), dest);
    }
  }
  void moveValue(const Value& src, const ValueOperand& dest) {
    moveValue(src, dest.valueReg());
  }

  // Push a patchable 64-bit immediate; returns the offset of the load.
  CodeOffset pushWithPatch(ImmWord imm) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    CodeOffset label = movWithPatch(imm, scratch);
    push(scratch);
    return label;
  }

  // Emit a literal-pool load whose value can be patched later.
  CodeOffset movWithPatch(ImmWord imm, Register dest) {
    BufferOffset off = immPool64(ARMRegister(dest, 64), imm.value);
    return CodeOffset(off.getOffset());
  }
  CodeOffset movWithPatch(ImmPtr imm, Register dest) {
    BufferOffset off = immPool64(ARMRegister(dest, 64), uint64_t(imm.value));
    return CodeOffset(off.getOffset());
  }
438
439 void boxValue(JSValueType type, Register src, Register dest);
440
  // Extract the tag bits (JSVAL_TAG_SHIFT..63) of `src` into `dest`,
  // sign-extended, via a single sbfx.
  void splitSignExtTag(Register src, Register dest) {
    sbfx(ARMRegister(dest, 64), ARMRegister(src, 64), JSVAL_TAG_SHIFT,
         (64 - JSVAL_TAG_SHIFT));
  }
  // The extract* helpers return the register that holds the result, which
  // is always `scratch` on this platform.
  [[nodiscard]] Register extractTag(const Address& address, Register scratch) {
    loadPtr(address, scratch);
    splitSignExtTag(scratch, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractTag(const ValueOperand& value,
                                    Register scratch) {
    splitSignExtTag(value.valueReg(), scratch);
    return scratch;
  }
  [[nodiscard]] Register extractObject(const Address& address,
                                       Register scratch) {
    loadPtr(address, scratch);
    unboxObject(scratch, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractObject(const ValueOperand& value,
                                       Register scratch) {
    unboxObject(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractSymbol(const ValueOperand& value,
                                       Register scratch) {
    unboxSymbol(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractInt32(const ValueOperand& value,
                                      Register scratch) {
    unboxInt32(value, scratch);
    return scratch;
  }
  [[nodiscard]] Register extractBoolean(const ValueOperand& value,
                                        Register scratch) {
    unboxBoolean(value, scratch);
    return scratch;
  }
481
482 inline void ensureDouble(const ValueOperand& source, FloatRegister dest,
483 Label* failure);
484
  // Set `dest` to 1 if `cond` holds, else 0 (conditional set).
  void emitSet(Condition cond, Register dest) {
    Cset(ARMRegister(dest, 64), cond);
  }

  // test*Set: compare a Value's tag, then materialize the result as 0/1.
  void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
    cond = testNull(cond, value);
    emitSet(cond, dest);
  }
  void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
    cond = testObject(cond, value);
    emitSet(cond, dest);
  }
  void testUndefinedSet(Condition cond, const ValueOperand& value,
                        Register dest) {
    cond = testUndefined(cond, value);
    emitSet(cond, dest);
  }

  // Booleans live in the low byte; zero-extend that byte into `dest`.
  void convertBoolToInt32(Register source, Register dest) {
    Uxtb(ARMRegister(dest, 64), ARMRegister(source, 64));
  }
506
  // Integer -> float conversions. The memory-operand overloads load the
  // source into a scratch register first. Scvtf is the signed convert;
  // Ucvtf is the unsigned convert.
  void convertInt32ToDouble(Register src, FloatRegister dest) {
    Scvtf(ARMFPRegister(dest, 64),
          ARMRegister(src, 32));  // Uses FPCR rounding mode.
  }
  void convertInt32ToDouble(const Address& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    load32(src, scratch);
    convertInt32ToDouble(scratch, dest);
  }
  void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    MOZ_ASSERT(scratch != src.index);
    load32(src, scratch);
    convertInt32ToDouble(scratch, dest);
  }

  void convertInt32ToFloat32(Register src, FloatRegister dest) {
    Scvtf(ARMFPRegister(dest, 32),
          ARMRegister(src, 32));  // Uses FPCR rounding mode.
  }
  void convertInt32ToFloat32(const Address& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    load32(src, scratch);
    convertInt32ToFloat32(scratch, dest);
  }

  void convertUInt32ToDouble(Register src, FloatRegister dest) {
    Ucvtf(ARMFPRegister(dest, 64),
          ARMRegister(src, 32));  // Uses FPCR rounding mode.
  }
  void convertUInt32ToDouble(const Address& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    load32(src, scratch);
    convertUInt32ToDouble(scratch, dest);
  }

  void convertUInt32ToFloat32(Register src, FloatRegister dest) {
    Ucvtf(ARMFPRegister(dest, 32),
          ARMRegister(src, 32));  // Uses FPCR rounding mode.
  }
  void convertUInt32ToFloat32(const Address& src, FloatRegister dest) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != src.base);
    load32(src, scratch);
    convertUInt32ToFloat32(scratch, dest);
  }

  // Float <-> double conversions.
  void convertFloat32ToDouble(FloatRegister src, FloatRegister dest) {
    Fcvt(ARMFPRegister(dest, 64), ARMFPRegister(src, 32));
  }
  void convertDoubleToFloat32(FloatRegister src, FloatRegister dest) {
    Fcvt(ARMFPRegister(dest, 32), ARMFPRegister(src, 64));
  }
569
570 using vixl::MacroAssembler::B;
571
  // Convert a double to int32, branching to `fail` if the conversion is
  // lossy. If `negativeZeroCheck`, -0 also fails; otherwise -0 yields 0.
  void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
                            bool negativeZeroCheck = true) {
    ARMFPRegister fsrc64(src, 64);
    ARMRegister dest32(dest, 32);

    // ARMv8.3 chips support the FJCVTZS instruction, which handles
    // exactly this logic.
    if (CPUHas(vixl::CPUFeatures::kFP, vixl::CPUFeatures::kJSCVT)) {
      // Convert double to integer, rounding toward zero.
      // The Z-flag is set iff the conversion is exact. -0 unsets the Z-flag.
      Fjcvtzs(dest32, fsrc64);

      if (negativeZeroCheck) {
        // Any inexact conversion (including -0) fails.
        B(fail, Assembler::NonZero);
      } else {
        Label done;
        B(&done, Assembler::Zero);  // If conversion was exact, go to end.

        // The conversion was inexact, but the caller intends to allow -0.
        vixl::UseScratchRegisterScope temps(this);
        const ARMFPRegister scratch64 = temps.AcquireD();
        MOZ_ASSERT(!scratch64.Is(fsrc64));

        // Compare fsrc64 to 0.
        // If fsrc64 == 0 and FJCVTZS conversion was inexact, then fsrc64 is -0.
        Fmov(scratch64, xzr);
        Fcmp(scratch64, fsrc64);
        B(fail, Assembler::NotEqual);  // Pass through -0; fail otherwise.

        bind(&done);
      }
    } else {
      // Older processors use a significantly slower path.
      ARMRegister dest64(dest, 64);

      vixl::UseScratchRegisterScope temps(this);
      const ARMFPRegister scratch64 = temps.AcquireD();
      MOZ_ASSERT(!scratch64.Is(fsrc64));

      // Round-trip: convert, convert back, and compare to detect loss.
      Fcvtzs(dest32, fsrc64);    // Convert, rounding toward zero.
      Scvtf(scratch64, dest32);  // Convert back, using FPCR rounding mode.
      Fcmp(scratch64, fsrc64);
      B(fail, Assembler::NotEqual);

      if (negativeZeroCheck) {
        // A zero result may have come from -0: inspect the input's raw bits.
        Label nonzero;
        Cbnz(dest32, &nonzero);
        Fmov(dest64, fsrc64);
        Cbnz(dest64, fail);
        bind(&nonzero);
      }
    }
  }
  // Convert a float32 to int32 via a 64-bit truncation, branching to `fail`
  // on lossy conversions (and on -0 when `negativeZeroCheck`).
  void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
                             bool negativeZeroCheck = true) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMFPRegister scratch32 = temps.AcquireS();

    ARMFPRegister fsrc(src, 32);
    ARMRegister dest32(dest, 32);
    ARMRegister dest64(dest, 64);

    MOZ_ASSERT(!scratch32.Is(fsrc));

    // Round-trip through the low 32 bits to detect out-of-range or inexact
    // conversions.
    Fcvtzs(dest64, fsrc);      // Convert, rounding toward zero.
    Scvtf(scratch32, dest32);  // Convert back, using FPCR rounding mode.
    Fcmp(scratch32, fsrc);
    B(fail, Assembler::NotEqual);

    if (negativeZeroCheck) {
      // A zero result may have come from -0: inspect the input's raw bits.
      Label nonzero;
      Cbnz(dest32, &nonzero);
      Fmov(dest32, fsrc);
      Cbnz(dest32, fail);
      bind(&nonzero);
    }
    // Clear the upper 32 bits left over from the 64-bit truncation.
    And(dest64, dest64, Operand(0xffffffff));
  }
650
  // Convert a double to a 64-bit integer, branching to `fail` on lossy
  // conversions (and on -0 when `negativeZeroCheck`).
  void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
                          bool negativeZeroCheck = true) {
    ARMFPRegister fsrc64(src, 64);
    ARMRegister dest64(dest, 64);

    vixl::UseScratchRegisterScope temps(this);
    const ARMFPRegister scratch64 = temps.AcquireD();
    MOZ_ASSERT(!scratch64.Is(fsrc64));

    // Note: we can't use the FJCVTZS instruction here because that only works
    // for 32-bit values.

    // Round-trip: convert, convert back, and compare to detect loss.
    Fcvtzs(dest64, fsrc64);    // Convert, rounding toward zero.
    Scvtf(scratch64, dest64);  // Convert back, using FPCR rounding mode.
    Fcmp(scratch64, fsrc64);
    B(fail, Assembler::NotEqual);

    if (negativeZeroCheck) {
      // A zero result may have come from -0: inspect the input's raw bits.
      Label nonzero;
      Cbnz(dest64, &nonzero);
      Fmov(dest64, fsrc64);
      Cbnz(dest64, fail);
      bind(&nonzero);
    }
  }
676
  // floor(input) as int32 in `output`; bail on NaN, int32 overflow, or -0.
  void floor(FloatRegister input, Register output, Label* bail) {
    Label handleZero;
    // Label handleNeg;
    Label fin;
    ARMFPRegister iDbl(input, 64);
    ARMRegister o64(output, 64);
    ARMRegister o32(output, 32);
    Fcmp(iDbl, 0.0);
    B(Assembler::Equal, &handleZero);
    // B(Assembler::Signed, &handleNeg);
    // NaN is always a bail condition, just bail directly.
    B(Assembler::Overflow, bail);
    Fcvtms(o64, iDbl);  // Convert, rounding toward minus infinity.
    // Bail if the result doesn't fit in int32.
    Cmp(o64, Operand(o64, vixl::SXTW));
    B(NotEqual, bail);
    Mov(o32, o32);  // Clear the upper 32 bits.
    B(&fin);

    bind(&handleZero);
    // Move the bits of the double into the output reg; if any are non-zero,
    // then the original value was -0.0.
    Fmov(o64, iDbl);
    Cbnz(o64, bail);
    bind(&fin);
  }
702
  // floorf(input) as int32 in `output`; bail on NaN, int32 overflow, or -0.
  void floorf(FloatRegister input, Register output, Label* bail) {
    Label handleZero;
    // Label handleNeg;
    Label fin;
    ARMFPRegister iFlt(input, 32);
    ARMRegister o64(output, 64);
    ARMRegister o32(output, 32);
    Fcmp(iFlt, 0.0);
    B(Assembler::Equal, &handleZero);
    // B(Assembler::Signed, &handleNeg);
    // NaN is always a bail condition, just bail directly.
    B(Assembler::Overflow, bail);
    Fcvtms(o64, iFlt);  // Convert, rounding toward minus infinity.
    // Bail if the result doesn't fit in int32.
    Cmp(o64, Operand(o64, vixl::SXTW));
    B(NotEqual, bail);
    Mov(o32, o32);  // Clear the upper 32 bits.
    B(&fin);

    bind(&handleZero);
    // Move the bits of the float into the output reg; if any are non-zero,
    // then the original value was -0.0.
    Fmov(o32, iFlt);
    Cbnz(o32, bail);
    bind(&fin);
  }
728
  // ceil(input) as int32 in `output`; bail on NaN, int32 overflow, or a
  // result of 0 produced from a negative input (i.e. -0).
  void ceil(FloatRegister input, Register output, Label* bail) {
    Label handleZero;
    Label fin;
    ARMFPRegister iDbl(input, 64);
    ARMRegister o64(output, 64);
    ARMRegister o32(output, 32);
    Fcmp(iDbl, 0.0);
    // NaN is always a bail condition, just bail directly.
    B(Assembler::Overflow, bail);
    Fcvtps(o64, iDbl);  // Convert, rounding toward plus infinity.
    // Bail if the result doesn't fit in int32.
    Cmp(o64, Operand(o64, vixl::SXTW));
    B(NotEqual, bail);
    Cbz(o64, &handleZero);
    Mov(o32, o32);  // Clear the upper 32 bits.
    B(&fin);

    bind(&handleZero);
    // A zero result came from an input in (-1, 0]: bail if the input's raw
    // bits are non-zero (a negative input would produce -0).
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Fmov(scratch, iDbl);
    Cbnz(scratch, bail);
    bind(&fin);
  }
751
  // ceilf(input) as int32 in `output`; bail on NaN, int32 overflow, or a
  // result of 0 produced from a negative input (i.e. -0).
  void ceilf(FloatRegister input, Register output, Label* bail) {
    Label handleZero;
    Label fin;
    ARMFPRegister iFlt(input, 32);
    ARMRegister o64(output, 64);
    ARMRegister o32(output, 32);
    Fcmp(iFlt, 0.0);

    // NaN is always a bail condition, just bail directly.
    B(Assembler::Overflow, bail);
    Fcvtps(o64, iFlt);  // Convert, rounding toward plus infinity.
    // Bail if the result doesn't fit in int32.
    Cmp(o64, Operand(o64, vixl::SXTW));
    B(NotEqual, bail);
    Cbz(o64, &handleZero);
    Mov(o32, o32);  // Clear the upper 32 bits.
    B(&fin);

    bind(&handleZero);
    // Move the bits of the float into the output reg; if any are non-zero,
    // then the original value was -0.0 (or in (-1, 0)).
    Fmov(o32, iFlt);
    Cbnz(o32, bail);
    bind(&fin);
  }
776
  void jump(Label* label) { B(label); }
  void jump(JitCode* code) { branch(code); }
  // Jump to a hard-coded pointer, patched in at executableCopy() time.
  void jump(ImmPtr ptr) {
    // It is unclear why this sync is necessary:
    // * PSP and SP have been observed to be different in testcase
    //   tests/asm.js/testBug1046688.js.
    // * Removing the sync causes no failures in all of jit-tests.
    //
    // Also see branch(JitCode*) below. This version of jump() is called only
    // from jump(TrampolinePtr) which is called on various very slow paths,
    // probably only in JS.
    syncStackPtr();
    BufferOffset loc =
        b(-1,
          LabelDoc());  // The jump target will be patched by executableCopy().
    addPendingJump(loc, ptr, RelocationKind::HARDCODED);
  }
  void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
  void jump(Register reg) { Br(ARMRegister(reg, 64)); }
  // Jump through the pointer stored at `addr`, using ip0 as scratch.
  void jump(const Address& addr) {
    vixl::UseScratchRegisterScope temps(this);
    MOZ_ASSERT(temps.IsAvailable(ScratchReg64));  // ip0
    temps.Exclude(ScratchReg64);
    MOZ_ASSERT(addr.base != ScratchReg64.asUnsized());
    loadPtr(addr, ScratchReg64.asUnsized());
    br(ScratchReg64);
  }
804
  void align(int alignment) { armbuffer_.align(alignment); }

  // Pad to `alignment` with halting (HLT) instructions.
  void haltingAlign(int alignment) {
    armbuffer_.align(alignment, vixl::HLT | ImmException(0xBAAD));
  }
  // NOTE(review): uses the buffer's default fill rather than explicit NOPs —
  // confirm armbuffer_.align() emits NOPs by default.
  void nopAlign(int alignment) { armbuffer_.align(alignment); }
811
  // Pointer-width moves.
  void movePtr(Register src, Register dest) {
    Mov(ARMRegister(dest, 64), ARMRegister(src, 64));
  }
  void movePtr(ImmWord imm, Register dest) {
    Mov(ARMRegister(dest, 64), int64_t(imm.value));
  }
  void movePtr(ImmPtr imm, Register dest) {
    Mov(ARMRegister(dest, 64), int64_t(imm.value));
  }
  // Load a wasm symbolic address via a patchable literal-pool entry; the
  // placeholder is recorded for later resolution.
  void movePtr(wasm::SymbolicAddress imm, Register dest) {
    BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
    append(wasm::SymbolicAccess(CodeOffset(off.getOffset()), imm));
  }
  // Load a GC pointer patchably and record it for tracing.
  void movePtr(ImmGCPtr imm, Register dest) {
    BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
    writeDataRelocation(imm, load);
  }

  // mov() aliases for the shared-code interface.
  void mov(ImmWord imm, Register dest) { movePtr(imm, dest); }
  void mov(ImmPtr imm, Register dest) { movePtr(imm, dest); }
  void mov(wasm::SymbolicAddress imm, Register dest) { movePtr(imm, dest); }
  void mov(Register src, Register dest) { movePtr(src, dest); }
834 void mov(CodeLabel* label, Register dest);
835
  // 32-bit moves; writing the W register clears the upper 32 bits.
  void move32(Imm32 imm, Register dest) {
    Mov(ARMRegister(dest, 32), (int64_t)imm.value);
  }
  void move32(Register src, Register dest) {
    Mov(ARMRegister(dest, 32), ARMRegister(src, 32));
  }
842
  // Move a pointer using a literal pool, so that the pointer
  // may be easily patched or traced.
  // Returns the BufferOffset of the load instruction emitted.
  // (Defined out of line.)
  BufferOffset movePatchablePtr(ImmWord ptr, Register dest);
  BufferOffset movePatchablePtr(ImmPtr ptr, Register dest);
848
  // Load the 64-bit value stored at a wasm symbolic address.
  void loadPtr(wasm::SymbolicAddress address, Register dest) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    movePtr(address, scratch.asUnsized());
    Ldr(ARMRegister(dest, 64), MemOperand(scratch));
  }
  // Load a pointer from a fixed absolute address.
  void loadPtr(AbsoluteAddress address, Register dest) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    movePtr(ImmWord((uintptr_t)address.addr), scratch.asUnsized());
    Ldr(ARMRegister(dest, 64), MemOperand(scratch));
  }
  // Load a pointer from base + offset.
  void loadPtr(const Address& address, Register dest) {
    Ldr(ARMRegister(dest, 64), MemOperand(address));
  }
  // Load a pointer from base + (index << scale) + offset. The A64 addressing
  // mode cannot express all three components at once, so a nonzero offset is
  // first folded into a scratch base register.
  void loadPtr(const BaseIndex& src, Register dest) {
    ARMRegister base = toARMRegister(src.base, 64);
    uint32_t scale = Imm32::ShiftOf(src.scale).value;
    ARMRegister dest64(dest, 64);
    ARMRegister index64(src.index, 64);

    if (src.offset) {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch = temps.AcquireX();
      MOZ_ASSERT(!scratch.Is(base));
      MOZ_ASSERT(!scratch.Is(dest64));
      MOZ_ASSERT(!scratch.Is(index64));

      Add(scratch, base, Operand(int64_t(src.offset)));
      Ldr(dest64, MemOperand(scratch, index64, vixl::LSL, scale));
      return;
    }

    Ldr(dest64, MemOperand(base, index64, vixl::LSL, scale));
  }
  // Load a private value (defined out of line).
  void loadPrivate(const Address& src, Register dest);
885
  // Store the low byte of |src|.
  void store8(Register src, const Address& address) {
    Strb(ARMRegister(src, 32), toMemOperand(address));
  }
  // Store an 8-bit immediate by materializing it in a scratch register first.
  void store8(Imm32 imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    move32(imm, scratch32.asUnsized());
    Strb(scratch32, toMemOperand(address));
  }
  // Byte store with base+index addressing.
  void store8(Register src, const BaseIndex& address) {
    doBaseIndex(ARMRegister(src, 32), address, vixl::STRB_w);
  }
  void store8(Imm32 imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    // The scratch must not alias either component of the address.
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    MOZ_ASSERT(scratch32.asUnsized() != address.index);
    Mov(scratch32, Operand(imm.value));
    doBaseIndex(scratch32, address, vixl::STRB_w);
  }
907
  // Store the low 16 bits of |src|.
  void store16(Register src, const Address& address) {
    Strh(ARMRegister(src, 32), toMemOperand(address));
  }
  // Store a 16-bit immediate via a scratch register.
  void store16(Imm32 imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    move32(imm, scratch32.asUnsized());
    Strh(scratch32, toMemOperand(address));
  }
  // Halfword store with base+index addressing.
  void store16(Register src, const BaseIndex& address) {
    doBaseIndex(ARMRegister(src, 32), address, vixl::STRH_w);
  }
  void store16(Imm32 imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    MOZ_ASSERT(scratch32.asUnsized() != address.index);
    Mov(scratch32, Operand(imm.value));
    doBaseIndex(scratch32, address, vixl::STRH_w);
  }
  // Plain A64 stores tolerate unaligned addresses on normal memory, so the
  // unaligned variant is an alias.
  template <typename S, typename T>
  void store16Unaligned(const S& src, const T& dest) {
    store16(src, dest);
  }
933
  // Store an immediate 64-bit word via a scratch register.
  void storePtr(ImmWord imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != address.base);
    movePtr(imm, scratch);
    storePtr(scratch, address);
  }
  // Store an immediate pointer via a scratch register.
  void storePtr(ImmPtr imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != address.base);
    Mov(scratch64, uint64_t(imm.value));
    Str(scratch64, toMemOperand(address));
  }
  // Store a GC pointer. movePtr() records the data relocation for tracing.
  void storePtr(ImmGCPtr imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != address.base);
    movePtr(imm, scratch);
    storePtr(scratch, address);
  }
  // Store a 64-bit register to base+offset.
  void storePtr(Register src, const Address& address) {
    Str(ARMRegister(src, 64), toMemOperand(address));
  }

  // Base+index forms; the scratch must not alias base or index.
  void storePtr(ImmWord imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != address.base);
    MOZ_ASSERT(scratch64.asUnsized() != address.index);
    Mov(scratch64, Operand(imm.value));
    doBaseIndex(scratch64, address, vixl::STR_x);
  }
  void storePtr(ImmGCPtr imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != address.base);
    MOZ_ASSERT(scratch != address.index);
    movePtr(imm, scratch);
    doBaseIndex(ARMRegister(scratch, 64), address, vixl::STR_x);
  }
  void storePtr(Register src, const BaseIndex& address) {
    doBaseIndex(ARMRegister(src, 64), address, vixl::STR_x);
  }

  // Store to a fixed absolute address via a scratch base register.
  void storePtr(Register src, AbsoluteAddress address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    Mov(scratch64, uint64_t(address.addr));
    Str(ARMRegister(src, 64), MemOperand(scratch64));
  }
985
  // Store the low 32 bits of |src| to a fixed absolute address.
  void store32(Register src, AbsoluteAddress address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    Mov(scratch64, uint64_t(address.addr));
    Str(ARMRegister(src, 32), MemOperand(scratch64));
  }
  // Store a 32-bit immediate via a scratch register.
  void store32(Imm32 imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    Mov(scratch32, uint64_t(imm.value));
    Str(scratch32, toMemOperand(address));
  }
  void store32(Register r, const Address& address) {
    Str(ARMRegister(r, 32), toMemOperand(address));
  }
  void store32(Imm32 imm, const BaseIndex& address) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    MOZ_ASSERT(scratch32.asUnsized() != address.index);
    Mov(scratch32, imm.value);
    doBaseIndex(scratch32, address, vixl::STR_w);
  }
  void store32(Register r, const BaseIndex& address) {
    doBaseIndex(ARMRegister(r, 32), address, vixl::STR_w);
  }

  // Like store32(Imm32, Address) but guaranteed not to use ScratchReg2, for
  // callers that are already holding it.
  void store32_NoSecondScratch(Imm32 imm, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    temps.Exclude(ARMRegister(ScratchReg2, 32));  // Disallow ScratchReg2.
    const ARMRegister scratch32 = temps.AcquireW();

    MOZ_ASSERT(scratch32.asUnsized() != address.base);
    Mov(scratch32, uint64_t(imm.value));
    Str(scratch32, toMemOperand(address));
  }

  // Plain A64 stores tolerate unaligned addresses on normal memory, so the
  // unaligned variant is an alias.
  template <typename S, typename T>
  void store32Unaligned(const S& src, const T& dest) {
    store32(src, dest);
  }
1028
store64(Register64 src,Address address)1029 void store64(Register64 src, Address address) { storePtr(src.reg, address); }
1030
store64(Register64 src,const BaseIndex & address)1031 void store64(Register64 src, const BaseIndex& address) {
1032 storePtr(src.reg, address);
1033 }
1034
store64(Imm64 imm,const BaseIndex & address)1035 void store64(Imm64 imm, const BaseIndex& address) {
1036 storePtr(ImmWord(imm.value), address);
1037 }
1038
store64(Imm64 imm,const Address & address)1039 void store64(Imm64 imm, const Address& address) {
1040 storePtr(ImmWord(imm.value), address);
1041 }
1042
1043 template <typename S, typename T>
store64Unaligned(const S & src,const T & dest)1044 void store64Unaligned(const S& src, const T& dest) {
1045 store64(src, dest);
1046 }
1047
  // StackPointer manipulation.
  //
  // NOTE(review): these route through the active stack pointer, which on this
  // port may be a pseudo stack pointer (PSP) rather than the architectural
  // SP — see the syncStackPtr() calls elsewhere in this file. All are defined
  // out of line.
  inline void addToStackPtr(Register src);
  inline void addToStackPtr(Imm32 imm);
  inline void addToStackPtr(const Address& src);
  inline void addStackPtrTo(Register dest);

  inline void subFromStackPtr(Register src);
  inline void subFromStackPtr(Imm32 imm);
  inline void subStackPtrFrom(Register dest);

  inline void andToStackPtr(Imm32 t);

  inline void moveToStackPtr(Register src);
  inline void moveStackPtrTo(Register dest);

  inline void loadStackPtr(const Address& src);
  inline void storeStackPtr(const Address& dest);

  // StackPointer testing functions.
  inline void branchTestStackPtr(Condition cond, Imm32 rhs, Label* label);
  inline void branchStackPtr(Condition cond, Register rhs, Label* label);
  inline void branchStackPtrRhs(Condition cond, Address lhs, Label* label);
  inline void branchStackPtrRhs(Condition cond, AbsoluteAddress lhs,
                                Label* label);
1072
  // Flag-setting tests and comparisons: each of these only updates the
  // condition flags; callers branch on the result.
  void testPtr(Register lhs, Register rhs) {
    Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64)));
  }
  void test32(Register lhs, Register rhs) {
    Tst(ARMRegister(lhs, 32), Operand(ARMRegister(rhs, 32)));
  }
  // Test a 32-bit memory word against an immediate mask.
  void test32(const Address& addr, Imm32 imm) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != addr.base);
    load32(addr, scratch32.asUnsized());
    Tst(scratch32, Operand(imm.value));
  }
  void test32(Register lhs, Imm32 rhs) {
    Tst(ARMRegister(lhs, 32), Operand(rhs.value));
  }
  void cmp32(Register lhs, Imm32 rhs) {
    Cmp(ARMRegister(lhs, 32), Operand(rhs.value));
  }
  void cmp32(Register a, Register b) {
    Cmp(ARMRegister(a, 32), Operand(ARMRegister(b, 32)));
  }
  // Compare a 32-bit memory word against an immediate.
  void cmp32(const Address& lhs, Imm32 rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
    Ldr(scratch32, toMemOperand(lhs));
    Cmp(scratch32, Operand(rhs.value));
  }
  void cmp32(const Address& lhs, Register rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
    MOZ_ASSERT(scratch32.asUnsized() != rhs);
    Ldr(scratch32, toMemOperand(lhs));
    Cmp(scratch32, Operand(ARMRegister(rhs, 32)));
  }
  // Compare an arbitrary vixl operand by first materializing it.
  void cmp32(const vixl::Operand& lhs, Imm32 rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    Mov(scratch32, lhs);
    Cmp(scratch32, Operand(rhs.value));
  }
  void cmp32(const vixl::Operand& lhs, Register rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    Mov(scratch32, lhs);
    Cmp(scratch32, Operand(ARMRegister(rhs, 32)));
  }

  // Compare-negated: sets flags as for lhs + rhs.
  void cmn32(Register lhs, Imm32 rhs) {
    Cmn(ARMRegister(lhs, 32), Operand(rhs.value));
  }
1126
  // 64-bit (pointer-width) comparisons; only the condition flags are set.
  void cmpPtr(Register lhs, Imm32 rhs) {
    Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
  }
  void cmpPtr(Register lhs, ImmWord rhs) {
    Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
  }
  void cmpPtr(Register lhs, ImmPtr rhs) {
    Cmp(ARMRegister(lhs, 64), Operand(uint64_t(rhs.value)));
  }
  void cmpPtr(Register lhs, Imm64 rhs) {
    Cmp(ARMRegister(lhs, 64), Operand(uint64_t(rhs.value)));
  }
  void cmpPtr(Register lhs, Register rhs) {
    Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
  }
  // GC pointers must be materialized via movePtr() so the data relocation is
  // recorded before comparing.
  void cmpPtr(Register lhs, ImmGCPtr rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != lhs);
    movePtr(rhs, scratch);
    cmpPtr(lhs, scratch);
  }

  // Memory-vs-X comparisons: load the left-hand side into a scratch first.
  void cmpPtr(const Address& lhs, Register rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
    MOZ_ASSERT(scratch64.asUnsized() != rhs);
    Ldr(scratch64, toMemOperand(lhs));
    Cmp(scratch64, Operand(ARMRegister(rhs, 64)));
  }
  void cmpPtr(const Address& lhs, ImmWord rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
    Ldr(scratch64, toMemOperand(lhs));
    Cmp(scratch64, Operand(rhs.value));
  }
  void cmpPtr(const Address& lhs, ImmPtr rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
    Ldr(scratch64, toMemOperand(lhs));
    Cmp(scratch64, Operand(uint64_t(rhs.value)));
  }
  void cmpPtr(const Address& lhs, ImmGCPtr rhs) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != lhs.base);
    loadPtr(lhs, scratch);
    cmpPtr(scratch, rhs);
  }
1179
  // Load a double from base+offset.
  void loadDouble(const Address& src, FloatRegister dest) {
    Ldr(ARMFPRegister(dest, 64), MemOperand(src));
  }
  // Load a double from base + (index << scale) + offset. A nonzero offset
  // cannot be combined with a scaled index in one addressing mode, so it is
  // folded into a scratch base first.
  void loadDouble(const BaseIndex& src, FloatRegister dest) {
    ARMRegister base = toARMRegister(src.base, 64);
    ARMRegister index(src.index, 64);

    if (src.offset == 0) {
      Ldr(ARMFPRegister(dest, 64),
          MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
      return;
    }

    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT(scratch64.asUnsized() != src.base);
    MOZ_ASSERT(scratch64.asUnsized() != src.index);

    Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
    Ldr(ARMFPRegister(dest, 64), MemOperand(scratch64, src.offset));
  }
  // Load a float32 and widen it to double in |dest|.
  void loadFloatAsDouble(const Address& addr, FloatRegister dest) {
    Ldr(ARMFPRegister(dest, 32), toMemOperand(addr));
    fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32));
  }
  void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest) {
    ARMRegister base = toARMRegister(src.base, 64);
    ARMRegister index(src.index, 64);
    if (src.offset == 0) {
      Ldr(ARMFPRegister(dest, 32),
          MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      MOZ_ASSERT(scratch64.asUnsized() != src.base);
      MOZ_ASSERT(scratch64.asUnsized() != src.index);

      Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
      Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset));
    }
    // Widen after the load regardless of the addressing path taken.
    fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32));
  }

  // Load a float32 without conversion.
  void loadFloat32(const Address& addr, FloatRegister dest) {
    Ldr(ARMFPRegister(dest, 32), toMemOperand(addr));
  }
  void loadFloat32(const BaseIndex& src, FloatRegister dest) {
    ARMRegister base = toARMRegister(src.base, 64);
    ARMRegister index(src.index, 64);
    if (src.offset == 0) {
      Ldr(ARMFPRegister(dest, 32),
          MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
    } else {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      MOZ_ASSERT(scratch64.asUnsized() != src.base);
      MOZ_ASSERT(scratch64.asUnsized() != src.index);

      Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
      Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset));
    }
  }
1242
moveDouble(FloatRegister src,FloatRegister dest)1243 void moveDouble(FloatRegister src, FloatRegister dest) {
1244 fmov(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
1245 }
zeroDouble(FloatRegister reg)1246 void zeroDouble(FloatRegister reg) {
1247 fmov(ARMFPRegister(reg, 64), vixl::xzr);
1248 }
zeroFloat32(FloatRegister reg)1249 void zeroFloat32(FloatRegister reg) {
1250 fmov(ARMFPRegister(reg, 32), vixl::wzr);
1251 }
1252
moveFloat32(FloatRegister src,FloatRegister dest)1253 void moveFloat32(FloatRegister src, FloatRegister dest) {
1254 fmov(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
1255 }
moveFloatAsDouble(Register src,FloatRegister dest)1256 void moveFloatAsDouble(Register src, FloatRegister dest) {
1257 MOZ_CRASH("moveFloatAsDouble");
1258 }
1259
moveSimd128(FloatRegister src,FloatRegister dest)1260 void moveSimd128(FloatRegister src, FloatRegister dest) {
1261 fmov(ARMFPRegister(dest, 128), ARMFPRegister(src, 128));
1262 }
1263
  // Extract the sign-extended tag of a Value into |dest| (forwards to the
  // register-register overload).
  void splitSignExtTag(const ValueOperand& operand, Register dest) {
    splitSignExtTag(operand.valueReg(), dest);
  }
  void splitSignExtTag(const Address& operand, Register dest) {
    loadPtr(operand, dest);
    splitSignExtTag(dest, dest);
  }
  void splitSignExtTag(const BaseIndex& operand, Register dest) {
    loadPtr(operand, dest);
    splitSignExtTag(dest, dest);
  }

  // Extracts the tag of a value and places it in tag
  inline void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
  // Not implemented on this platform.
  void cmpTag(const ValueOperand& operand, ImmTag tag) { MOZ_CRASH("cmpTag"); }
1279
  // 32-bit loads into the W view of |dest|.
  void load32(const Address& address, Register dest) {
    Ldr(ARMRegister(dest, 32), toMemOperand(address));
  }
  void load32(const BaseIndex& src, Register dest) {
    doBaseIndex(ARMRegister(dest, 32), src, vixl::LDR_w);
  }
  // Load from a fixed absolute address via a scratch base register.
  void load32(AbsoluteAddress address, Register dest) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    movePtr(ImmWord((uintptr_t)address.addr), scratch64.asUnsized());
    ldr(ARMRegister(dest, 32), MemOperand(scratch64));
  }
  // Plain A64 loads tolerate unaligned addresses on normal memory, so the
  // unaligned variants are aliases.
  template <typename S>
  void load32Unaligned(const S& src, Register dest) {
    load32(src, dest);
  }
  // 64-bit loads reuse loadPtr(): pointers are 64 bits on ARM64.
  void load64(const Address& address, Register64 dest) {
    loadPtr(address, dest.reg);
  }
  void load64(const BaseIndex& address, Register64 dest) {
    loadPtr(address, dest.reg);
  }
  template <typename S>
  void load64Unaligned(const S& src, Register64 dest) {
    load64(src, dest);
  }
1306
  // Narrow loads with sign- or zero-extension into a 32-bit destination.
  void load8SignExtend(const Address& address, Register dest) {
    Ldrsb(ARMRegister(dest, 32), toMemOperand(address));
  }
  void load8SignExtend(const BaseIndex& src, Register dest) {
    doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSB_w);
  }

  void load8ZeroExtend(const Address& address, Register dest) {
    Ldrb(ARMRegister(dest, 32), toMemOperand(address));
  }
  void load8ZeroExtend(const BaseIndex& src, Register dest) {
    doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRB_w);
  }

  void load16SignExtend(const Address& address, Register dest) {
    Ldrsh(ARMRegister(dest, 32), toMemOperand(address));
  }
  void load16SignExtend(const BaseIndex& src, Register dest) {
    doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSH_w);
  }
  // Plain A64 loads tolerate unaligned addresses on normal memory, so the
  // unaligned variants are aliases.
  template <typename S>
  void load16UnalignedSignExtend(const S& src, Register dest) {
    load16SignExtend(src, dest);
  }

  void load16ZeroExtend(const Address& address, Register dest) {
    Ldrh(ARMRegister(dest, 32), toMemOperand(address));
  }
  void load16ZeroExtend(const BaseIndex& src, Register dest) {
    doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRH_w);
  }
  template <typename S>
  void load16UnalignedZeroExtend(const S& src, Register dest) {
    load16ZeroExtend(src, dest);
  }
1342
  // Flag-setting arithmetic: the Adds/Subs forms update the condition flags
  // in addition to writing |dest|.
  void adds32(Register src, Register dest) {
    Adds(ARMRegister(dest, 32), ARMRegister(dest, 32),
         Operand(ARMRegister(src, 32)));
  }
  void adds32(Imm32 imm, Register dest) {
    Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
  }
  // Read-modify-write add on a 32-bit memory word; flags reflect the add.
  void adds32(Imm32 imm, const Address& dest) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != dest.base);

    Ldr(scratch32, toMemOperand(dest));
    Adds(scratch32, scratch32, Operand(imm.value));
    Str(scratch32, toMemOperand(dest));
  }
  void adds64(Imm32 imm, Register dest) {
    Adds(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
  }
  void adds64(ImmWord imm, Register dest) {
    Adds(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
  }
  void adds64(Register src, Register dest) {
    Adds(ARMRegister(dest, 64), ARMRegister(dest, 64),
         Operand(ARMRegister(src, 64)));
  }

  void subs32(Imm32 imm, Register dest) {
    Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
  }
  void subs32(Register src, Register dest) {
    Subs(ARMRegister(dest, 32), ARMRegister(dest, 32),
         Operand(ARMRegister(src, 32)));
  }
  void subs64(Imm32 imm, Register dest) {
    Subs(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
  }
  void subs64(Register src, Register dest) {
    Subs(ARMRegister(dest, 64), ARMRegister(dest, 64),
         Operand(ARMRegister(src, 64)));
  }
1384
  // Return to the caller: pop the saved return address into lr, then return.
  void ret() {
    pop(lr);
    abiret();
  }

  // Return and additionally pop |n| bytes of stack. Uses ip0 (ScratchReg64)
  // explicitly so the loaded return address survives until the Ret.
  void retn(Imm32 n) {
    vixl::UseScratchRegisterScope temps(this);
    MOZ_ASSERT(temps.IsAvailable(ScratchReg64));  // ip0
    temps.Exclude(ScratchReg64);
    // ip0 <- [sp]; sp += n; ret ip0
    Ldr(ScratchReg64,
        MemOperand(GetStackPointer64(), ptrdiff_t(n.value), vixl::PostIndex));
    syncStackPtr();  // SP is always used to transmit the stack between calls.
    Ret(ScratchReg64);
  }
1400
  // Conditional branch to a label.
  void j(Condition cond, Label* dest) { B(dest, cond); }

  void branch(Condition cond, Label* label) { B(label, cond); }
  // Unconditional jump into other JIT code; the target is patched in later.
  void branch(JitCode* target) {
    // It is unclear why this sync is necessary:
    // * PSP and SP have been observed to be different in testcase
    //   tests/async/debugger-reject-after-fulfill.js
    // * Removing the sync causes no failures in all of jit-tests.
    //
    // Also see jump() above. This is used only to implement jump(JitCode*)
    // and only for JS, it appears.
    syncStackPtr();
    // Emit a branch with a placeholder target (-1); executableCopy() patches
    // it using the pending-jump record added below.
    BufferOffset loc =
        b(-1,
          LabelDoc());  // The jump target will be patched by executableCopy().
    addPendingJump(loc, ImmPtr(target->raw()), RelocationKind::JITCODE);
  }
1418
  // Compare two doubles and set the condition flags. |cond| is deliberately
  // unused here: the caller consumes it when branching on the flags.
  void compareDouble(DoubleCondition cond, FloatRegister lhs,
                     FloatRegister rhs) {
    Fcmp(ARMFPRegister(lhs, 64), ARMFPRegister(rhs, 64));
  }

  // Single-precision counterpart of compareDouble; |cond| likewise unused.
  void compareFloat(DoubleCondition cond, FloatRegister lhs,
                    FloatRegister rhs) {
    Fcmp(ARMFPRegister(lhs, 32), ARMFPRegister(rhs, 32));
  }

  // SIMD helpers, defined out of line.
  void compareSimd128Int(Assembler::Condition cond, ARMFPRegister dest,
                         ARMFPRegister lhs, ARMFPRegister rhs);
  void compareSimd128Float(Assembler::Condition cond, ARMFPRegister dest,
                           ARMFPRegister lhs, ARMFPRegister rhs);
  void rightShiftInt8x16(FloatRegister lhs, Register rhs, FloatRegister dest,
                         bool isUnsigned);
  void rightShiftInt16x8(FloatRegister lhs, Register rhs, FloatRegister dest,
                         bool isUnsigned);
  void rightShiftInt32x4(FloatRegister lhs, Register rhs, FloatRegister dest,
                         bool isUnsigned);
  void rightShiftInt64x2(FloatRegister lhs, Register rhs, FloatRegister dest,
                         bool isUnsigned);
1441
  // Not implemented for ARM64; reaching these at runtime is a bug.
  void branchNegativeZero(FloatRegister reg, Register scratch, Label* label) {
    MOZ_CRASH("branchNegativeZero");
  }
  void branchNegativeZeroFloat32(FloatRegister reg, Register scratch,
                                 Label* label) {
    MOZ_CRASH("branchNegativeZeroFloat32");
  }
1449
  // Box a double: the raw bit pattern of the double is moved unchanged into
  // the value register (the third argument is unused on this platform).
  void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister) {
    Fmov(ARMRegister(dest.valueReg(), 64), ARMFPRegister(src, 64));
  }
  // Box a non-double payload by applying the tag for |type| (see boxValue).
  void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
    boxValue(type, src, dest.valueReg());
  }
1456
  // Note that the |dest| register here may be ScratchReg, so we shouldn't use
  // it.
  // Unbox an int32: the payload is the low 32 bits of the Value.
  void unboxInt32(const ValueOperand& src, Register dest) {
    move32(src.valueReg(), dest);
  }
  void unboxInt32(const Address& src, Register dest) { load32(src, dest); }
  void unboxInt32(const BaseIndex& src, Register dest) { load32(src, dest); }

  // Unbox a double from memory: the boxed bits are the double itself.
  template <typename T>
  void unboxDouble(const T& src, FloatRegister dest) {
    loadDouble(src, dest);
  }
  void unboxDouble(const ValueOperand& src, FloatRegister dest) {
    Fmov(ARMFPRegister(dest, 64), ARMRegister(src.valueReg(), 64));
  }
1472
  // Not implemented on this platform; reaching these is a bug.
  void unboxArgObjMagic(const ValueOperand& src, Register dest) {
    MOZ_CRASH("unboxArgObjMagic");
  }
  void unboxArgObjMagic(const Address& src, Register dest) {
    MOZ_CRASH("unboxArgObjMagic");
  }

  // Unbox a boolean: the payload is the low 32 bits of the Value.
  void unboxBoolean(const ValueOperand& src, Register dest) {
    move32(src.valueReg(), dest);
  }
  void unboxBoolean(const Address& src, Register dest) { load32(src, dest); }
  void unboxBoolean(const BaseIndex& src, Register dest) { load32(src, dest); }

  // Unbox a magic value's 32-bit payload.
  void unboxMagic(const ValueOperand& src, Register dest) {
    move32(src.valueReg(), dest);
  }
1488 }
  // Unbox a non-double payload of the given |type| from a ValueOperand.
  void unboxNonDouble(const ValueOperand& src, Register dest,
                      JSValueType type) {
    unboxNonDouble(src.valueReg(), dest, type);
  }

  // Memory form: 32-bit payloads (int32/boolean) can be read directly with a
  // 32-bit load, skipping the untagging step; everything else is loaded as a
  // full word and untagged in a register.
  template <typename T>
  void unboxNonDouble(T src, Register dest, JSValueType type) {
    MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
    if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
      load32(src, dest);
      return;
    }
    loadPtr(src, dest);
    unboxNonDouble(dest, dest, type);
  }

  // Register form: pointer-typed payloads are recovered by XOR-ing away the
  // shifted type tag for |type|.
  void unboxNonDouble(Register src, Register dest, JSValueType type) {
    MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
    if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
      move32(src, dest);
      return;
    }
    Eor(ARMRegister(dest, 64), ARMRegister(src, 64),
        Operand(JSVAL_TYPE_TO_SHIFTED_TAG(type)));
  }
1514
  // Invert a boxed boolean in place by flipping the payload's low bit.
  void notBoolean(const ValueOperand& val) {
    ARMRegister r(val.valueReg(), 64);
    eor(r, r, Operand(1));
  }
  // Unbox an object pointer (XORs away the object tag; see unboxNonDouble).
  void unboxObject(const ValueOperand& src, Register dest) {
    unboxNonDouble(src.valueReg(), dest, JSVAL_TYPE_OBJECT);
  }
  void unboxObject(Register src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
  }
  void unboxObject(const Address& src, Register dest) {
    loadPtr(src, dest);
    unboxNonDouble(dest, dest, JSVAL_TYPE_OBJECT);
  }
  void unboxObject(const BaseIndex& src, Register dest) {
    doBaseIndex(ARMRegister(dest, 64), src, vixl::LDR_x);
    unboxNonDouble(dest, dest, JSVAL_TYPE_OBJECT);
  }
1533
1534 template <typename T>
unboxObjectOrNull(const T & src,Register dest)1535 void unboxObjectOrNull(const T& src, Register dest) {
1536 unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
1537 And(ARMRegister(dest, 64), ARMRegister(dest, 64),
1538 Operand(~JS::detail::ValueObjectOrNullBit));
1539 }
1540
1541 // See comment in MacroAssembler-x64.h.
unboxGCThingForGCBarrier(const Address & src,Register dest)1542 void unboxGCThingForGCBarrier(const Address& src, Register dest) {
1543 loadPtr(src, dest);
1544 And(ARMRegister(dest, 64), ARMRegister(dest, 64),
1545 Operand(JS::detail::ValueGCThingPayloadMask));
1546 }
unboxGCThingForGCBarrier(const ValueOperand & src,Register dest)1547 void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
1548 And(ARMRegister(dest, 64), ARMRegister(src.valueReg(), 64),
1549 Operand(JS::detail::ValueGCThingPayloadMask));
1550 }
1551
1552 inline void unboxValue(const ValueOperand& src, AnyRegister dest,
1553 JSValueType type);
1554
  // Pointer-like payloads (string, symbol, bigint) all untag via
  // unboxNonDouble with the corresponding JSValueType.
  void unboxString(const ValueOperand& operand, Register dest) {
    unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
  }
  void unboxString(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
  }
  void unboxSymbol(const ValueOperand& operand, Register dest) {
    unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
  }
  void unboxSymbol(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
  }
  void unboxBigInt(const ValueOperand& operand, Register dest) {
    unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
  }
  void unboxBigInt(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
  }
  // These two functions use the low 32-bits of the full value register.
  void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) {
    convertInt32ToDouble(operand.valueReg(), dest);
  }
  void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) {
    convertInt32ToDouble(operand.valueReg(), dest);
  }

  void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
    convertInt32ToFloat32(operand.valueReg(), dest);
  }
  void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
    convertInt32ToFloat32(operand.valueReg(), dest);
  }

  // Materialize floating-point constants via VIXL's Fmov, which picks an
  // immediate form or a literal-pool load as appropriate.
  void loadConstantDouble(double d, FloatRegister dest) {
    Fmov(ARMFPRegister(dest, 64), d);
  }
  void loadConstantFloat32(float f, FloatRegister dest) {
    Fmov(ARMFPRegister(dest, 32), f);
  }
1594
  // Compare a sign-extended tag (produced by splitSignExtTag) against the
  // reference tag, setting the condition flags for the test* helpers.
  void cmpTag(Register tag, ImmTag ref) {
    // As opposed to other architectures, splitTag is replaced by
    // splitSignExtTag which extracts the tag with a sign extension. The
    // reason being that cmp32 with a tag value would be too large to fit as
    // a 12-bit immediate value, and would require the VIXL macro assembler
    // to add an extra instruction and require an extra scratch register to
    // load the Tag value.
    //
    // Instead, we compare with the negative value of the sign extended tag
    // with the CMN instruction. The sign extended tag is expected to be a
    // negative value. Therefore the negative of the sign extended tag is
    // expected to be near 0 and fit on 12 bits.
    //
    // Ignoring the sign extension, the logic is the following:
    //
    //   CMP32(Reg, Tag) = Reg - Tag
    //                   = Reg + (-Tag)
    //                   = CMN32(Reg, -Tag)
    //
    // Note: testGCThing, testPrimitive and testNumber which are checking for
    // inequalities should use unsigned comparisons (as done by default) in
    // order to keep the same relation order after the sign extension, i.e.
    // using Above or Below which are based on the carry flag.
    uint32_t hiShift = JSVAL_TAG_SHIFT - 32;
    int32_t seTag = int32_t(ref.value);
    // Shift left then arithmetic-shift right to replicate the tag's top bit
    // across the high bits, i.e. sign-extend the tag.
    seTag = (seTag << hiShift) >> hiShift;
    MOZ_ASSERT(seTag < 0);
    int32_t negTag = -seTag;
    // Check that negTag is encodable as a 12-bit immediate value.
    MOZ_ASSERT((negTag & ~0xFFF) == 0);
    cmn32(tag, Imm32(negTag));
  }
1626
  // Register-based tests.
  //
  // Each helper emits a cmpTag against the relevant tag and returns the
  // condition the caller should branch on. Helpers that test a tag *range*
  // (double, number, gc-thing, primitive) return unsigned conditions; see
  // the comment in cmpTag for why.
  Condition testUndefined(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_UNDEFINED));
    return cond;
  }
  Condition testInt32(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_INT32));
    return cond;
  }
  Condition testBoolean(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_BOOLEAN));
    return cond;
  }
  Condition testNull(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_NULL));
    return cond;
  }
  Condition testString(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_STRING));
    return cond;
  }
  Condition testSymbol(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_SYMBOL));
    return cond;
  }
  Condition testBigInt(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_BIGINT));
    return cond;
  }
  Condition testObject(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_OBJECT));
    return cond;
  }
  Condition testDouble(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE));
    // Requires unsigned comparison due to cmpTag internals.
    return (cond == Equal) ? BelowOrEqual : Above;
  }
  Condition testNumber(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JS::detail::ValueUpperInclNumberTag));
    // Requires unsigned comparison due to cmpTag internals.
    return (cond == Equal) ? BelowOrEqual : Above;
  }
  Condition testGCThing(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag));
    // Requires unsigned comparison due to cmpTag internals.
    return (cond == Equal) ? AboveOrEqual : Below;
  }
  Condition testMagic(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JSVAL_TAG_MAGIC));
    return cond;
  }
  Condition testPrimitive(Condition cond, Register tag) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    cmpTag(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag));
    // Requires unsigned comparison due to cmpTag internals.
    return (cond == Equal) ? Below : AboveOrEqual;
  }
  // An "error" value is represented with a magic tag.
  Condition testError(Condition cond, Register tag) {
    return testMagic(cond, tag);
  }
1700
  // ValueOperand-based tests.
  //
  // Each helper extracts the sign-extended tag into a scratch register with
  // splitSignExtTag and then defers to the Register-based test above.
  Condition testInt32(Condition cond, const ValueOperand& value) {
    // The incoming ValueOperand may use scratch registers.
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != value.valueReg());

    splitSignExtTag(value, scratch);
    return testInt32(cond, scratch);
  }
  Condition testBoolean(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testBoolean(cond, scratch);
  }
  Condition testDouble(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testDouble(cond, scratch);
  }
  Condition testNull(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testNull(cond, scratch);
  }
  Condition testUndefined(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testUndefined(cond, scratch);
  }
  Condition testString(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testString(cond, scratch);
  }
  Condition testSymbol(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testSymbol(cond, scratch);
  }
  Condition testBigInt(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testBigInt(cond, scratch);
  }
  Condition testObject(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testObject(cond, scratch);
  }
  Condition testNumber(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testNumber(cond, scratch);
  }
  Condition testPrimitive(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testPrimitive(cond, scratch);
  }
  Condition testMagic(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testMagic(cond, scratch);
  }
  Condition testGCThing(Condition cond, const ValueOperand& value) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(value.valueReg() != scratch);
    splitSignExtTag(value, scratch);
    return testGCThing(cond, scratch);
  }
  Condition testError(Condition cond, const ValueOperand& value) {
    return testMagic(cond, value);
  }
1798
  // Address-based tests.
  //
  // Each helper loads the sign-extended tag from memory into a scratch
  // register via splitSignExtTag, then defers to the Register-based test.
  Condition testGCThing(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testGCThing(cond, scratch);
  }
  Condition testMagic(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testMagic(cond, scratch);
  }
  Condition testInt32(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testInt32(cond, scratch);
  }
  Condition testDouble(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testDouble(cond, scratch);
  }
  Condition testBoolean(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testBoolean(cond, scratch);
  }
  Condition testNull(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testNull(cond, scratch);
  }
  Condition testUndefined(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testUndefined(cond, scratch);
  }
  Condition testString(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testString(cond, scratch);
  }
  Condition testSymbol(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testSymbol(cond, scratch);
  }
  Condition testBigInt(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testBigInt(cond, scratch);
  }
  Condition testObject(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testObject(cond, scratch);
  }
  Condition testNumber(Condition cond, const Address& address) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(address.base != scratch);
    splitSignExtTag(address, scratch);
    return testNumber(cond, scratch);
  }
1884
  // BaseIndex-based tests.
  //
  // Same pattern as the Address-based tests; both the base and the index
  // register must differ from the acquired scratch register.
  Condition testUndefined(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testUndefined(cond, scratch);
  }
  Condition testNull(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testNull(cond, scratch);
  }
  Condition testBoolean(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testBoolean(cond, scratch);
  }
  Condition testString(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testString(cond, scratch);
  }
  Condition testSymbol(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testSymbol(cond, scratch);
  }
  Condition testBigInt(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testBigInt(cond, scratch);
  }
  Condition testInt32(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testInt32(cond, scratch);
  }
  Condition testObject(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testObject(cond, scratch);
  }
  Condition testDouble(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testDouble(cond, scratch);
  }
  Condition testMagic(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testMagic(cond, scratch);
  }
  Condition testGCThing(Condition cond, const BaseIndex& src) {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(src.base != scratch);
    MOZ_ASSERT(src.index != scratch);
    splitSignExtTag(src, scratch);
    return testGCThing(cond, scratch);
  }
1974
  // Truthiness tests on the 32-bit payload: TST sets Z iff the payload is
  // zero, so "truthy" maps to NonZero and "falsy" to Zero.
  Condition testInt32Truthy(bool truthy, const ValueOperand& operand) {
    ARMRegister payload32(operand.valueReg(), 32);
    Tst(payload32, payload32);
    return truthy ? NonZero : Zero;
  }

  Condition testBooleanTruthy(bool truthy, const ValueOperand& operand) {
    ARMRegister payload32(operand.valueReg(), 32);
    Tst(payload32, payload32);
    return truthy ? NonZero : Zero;
  }

  Condition testBigIntTruthy(bool truthy, const ValueOperand& value);
  Condition testStringTruthy(bool truthy, const ValueOperand& value);
1989
  // Convert a boxed number held in src into a double in dest: doubles are
  // bit-moved as-is, int32 payloads are converted with Scvtf.
  void int32OrDouble(Register src, ARMFPRegister dest) {
    Label isInt32;
    Label join;
    testInt32(Equal, ValueOperand(src));
    B(&isInt32, Equal);
    // is double, move the bits as is
    Fmov(dest, ARMRegister(src, 64));
    B(&join);
    bind(&isInt32);
    // is int32, do a conversion while moving
    Scvtf(dest, ARMRegister(src, 32));
    bind(&join);
  }
  // Load a value from memory and unbox it into dest. Float destinations go
  // through int32OrDouble (the value may be int32 or double); GPR
  // destinations unbox according to the MIRType.
  void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
    if (dest.isFloat()) {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      MOZ_ASSERT(scratch64.asUnsized() != address.base);
      Ldr(scratch64, toMemOperand(address));
      int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
    } else {
      unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
    }
  }

  void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
    if (dest.isFloat()) {
      vixl::UseScratchRegisterScope temps(this);
      const ARMRegister scratch64 = temps.AcquireX();
      MOZ_ASSERT(scratch64.asUnsized() != address.base);
      MOZ_ASSERT(scratch64.asUnsized() != address.index);
      doBaseIndex(scratch64, address, vixl::LDR_x);
      int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64));
    } else {
      unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
    }
  }
2027
  // Not implemented on this platform; crashes if reached.
  void loadInstructionPointerAfterCall(Register dest) {
    MOZ_CRASH("loadInstructionPointerAfterCall");
  }

  // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
  CodeOffset toggledJump(Label* label) {
    BufferOffset offset = b(label, Always);
    CodeOffset ret(offset.getOffset());
    return ret;
  }

  // load: offset to the load instruction obtained by movePatchablePtr().
  void writeDataRelocation(ImmGCPtr ptr, BufferOffset load) {
    // Raw GC pointer relocations and Value relocations both end up in
    // Assembler::TraceDataRelocations.
    if (ptr.value) {
      // Remember that this code embeds a nursery pointer so the GC can
      // patch it if the referent moves.
      if (gc::IsInsideNursery(ptr.value)) {
        embedsNurseryPointers_ = true;
      }
      dataRelocations_.writeUnsigned(load.getOffset());
    }
  }
  void writeDataRelocation(const Value& val, BufferOffset load) {
    // Raw GC pointer relocations and Value relocations both end up in
    // Assembler::TraceDataRelocations.
    if (val.isGCThing()) {
      gc::Cell* cell = val.toGCThing();
      if (cell && gc::IsInsideNursery(cell)) {
        embedsNurseryPointers_ = true;
      }
      dataRelocations_.writeUnsigned(load.getOffset());
    }
  }
2061
  // Compute base + offset (and optionally + (index << scale)) into dest
  // without touching memory.
  void computeEffectiveAddress(const Address& address, Register dest) {
    Add(ARMRegister(dest, 64), toARMRegister(address.base, 64),
        Operand(address.offset));
  }
  void computeEffectiveAddress(const Address& address, RegisterOrSP dest) {
    Add(toARMRegister(dest, 64), toARMRegister(address.base, 64),
        Operand(address.offset));
  }
  void computeEffectiveAddress(const BaseIndex& address, Register dest) {
    ARMRegister dest64(dest, 64);
    ARMRegister base64 = toARMRegister(address.base, 64);
    ARMRegister index64(address.index, 64);

    Add(dest64, base64, Operand(index64, vixl::LSL, address.scale));
    if (address.offset) {
      Add(dest64, dest64, Operand(address.offset));
    }
  }

 public:
  void handleFailureWithHandlerTail(Label* profilerExitTail);

  void profilerEnterFrame(Register framePtr, Register scratch);
  void profilerEnterFrame(RegisterOrSP framePtr, Register scratch);
  void profilerExitFrame();
  // On arm64 payload and tag share the same 64-bit word, so both views of a
  // Value address are the address itself.
  Address ToPayload(Address value) { return value; }
  Address ToType(Address value) { return value; }
2089
  void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase,
                    Register ptr, AnyRegister outany, Register64 out64);
  void wasmLoadImpl(const wasm::MemoryAccessDesc& access, MemOperand srcAddr,
                    AnyRegister outany, Register64 out64);
  void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister valany,
                     Register64 val64, Register memoryBase, Register ptr);
  void wasmStoreImpl(const wasm::MemoryAccessDesc& access, MemOperand destAddr,
                     AnyRegister valany, Register64 val64);
  // The complete address is in `address`, and `access` is used for its type
  // attributes only; its `offset` is ignored.
  void wasmLoadAbsolute(const wasm::MemoryAccessDesc& access,
                        Register memoryBase, uint64_t address, AnyRegister out,
                        Register64 out64);
  void wasmStoreAbsolute(const wasm::MemoryAccessDesc& access,
                         AnyRegister value, Register64 value64,
                         Register memoryBase, uint64_t address);

  // Emit a BLR or NOP instruction. ToggleCall can be used to patch
  // this instruction.
  CodeOffset toggledCall(JitCode* target, bool enabled) {
    // The returned offset must be to the first instruction generated,
    // for the debugger to match offset with Baseline's pcMappingEntries_.
    BufferOffset offset = nextOffset();

    // It is unclear why this sync is necessary:
    // * PSP and SP have been observed to be different in testcase
    //   tests/cacheir/bug1448136.js
    // * Removing the sync causes no failures in all of jit-tests.
    syncStackPtr();

    BufferOffset loadOffset;
    {
      vixl::UseScratchRegisterScope temps(this);

      // The register used for the load is hardcoded, so that ToggleCall
      // can patch in the branch instruction easily. This could be changed,
      // but then ToggleCall must read the target register from the load.
      MOZ_ASSERT(temps.IsAvailable(ScratchReg2_64));
      temps.Exclude(ScratchReg2_64);

      loadOffset = immPool64(ScratchReg2_64, uint64_t(target->raw()));

      // "enabled" selects BLR (live call) vs NOP (disabled call site);
      // ToggleCall flips between the two afterwards.
      if (enabled) {
        blr(ScratchReg2_64);
      } else {
        nop();
      }
    }

    addPendingJump(loadOffset, ImmPtr(target->raw()), RelocationKind::JITCODE);
    CodeOffset ret(offset.getOffset());
    return ret;
  }
2143
ToggledCallSize(uint8_t * code)2144 static size_t ToggledCallSize(uint8_t* code) {
2145 // The call site is a sequence of two or three instructions:
2146 //
2147 // syncStack (optional)
2148 // ldr/adr
2149 // nop/blr
2150 //
2151 // Flushed constant pools can appear before any of the instructions.
2152
2153 const Instruction* cur = (const Instruction*)code;
2154 cur = cur->skipPool();
2155 if (cur->IsStackPtrSync()) cur = cur->NextInstruction();
2156 cur = cur->skipPool();
2157 cur = cur->NextInstruction(); // LDR/ADR
2158 cur = cur->skipPool();
2159 cur = cur->NextInstruction(); // NOP/BLR
2160 return (uint8_t*)cur - code;
2161 }
2162
  // Debug-only check that `reg` is StackAlignment-aligned; emits a
  // breakpoint on the misaligned path. Compiles to nothing in release.
  void checkARMRegAlignment(const ARMRegister& reg) {
#ifdef DEBUG
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch64 = temps.AcquireX();
    MOZ_ASSERT_IF(!reg.IsSP(), scratch64.asUnsized() != reg.asUnsized());
    Label aligned;
    Mov(scratch64, reg);
    Tst(scratch64, Operand(StackAlignment - 1));
    B(Zero, &aligned);
    breakpoint();
    bind(&aligned);
    Mov(scratch64, vixl::xzr);  // Clear the scratch register for sanity.
#endif
  }

  void checkStackAlignment() {
#ifdef DEBUG
    checkARMRegAlignment(GetStackPointer64());

    // If another register is being used to track pushes, check sp explicitly.
    if (!GetStackPointer64().Is(vixl::sp)) {
      checkARMRegAlignment(vixl::sp);
    }
#endif
  }
2188
  void abiret() {
    syncStackPtr();  // SP is always used to transmit the stack between calls.
    vixl::MacroAssembler::Ret(vixl::lr);
  }

  // Not implemented on this platform; crashes if reached.
  void clampCheck(Register r, Label* handleNotAnInt) {
    MOZ_CRASH("clampCheck");
  }

  // Not implemented on this platform; crashes if reached.
  void stackCheck(ImmWord limitAddr, Label* label) { MOZ_CRASH("stackCheck"); }

  // Load-increment-store the 32-bit payload at addr (read-modify-write;
  // not atomic).
  void incrementInt32Value(const Address& addr) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch32 = temps.AcquireW();
    MOZ_ASSERT(scratch32.asUnsized() != addr.base);

    load32(addr, scratch32.asUnsized());
    Add(scratch32, scratch32, Operand(1));
    store32(scratch32.asUnsized(), addr);
  }
2209
2210 void breakpoint();
2211
2212 // Emits a simulator directive to save the current sp on an internal stack.
simulatorMarkSP()2213 void simulatorMarkSP() {
2214 #ifdef JS_SIMULATOR_ARM64
2215 svc(vixl::kMarkStackPointer);
2216 #endif
2217 }
2218
  // Emits a simulator directive to pop from its internal stack
  // and assert that the value is equal to the current sp.
  // No-op outside of ARM64 simulator builds.
  void simulatorCheckSP() {
#ifdef JS_SIMULATOR_ARM64
    svc(vixl::kCheckStackPointer);
#endif
  }
2226
2227 // Overwrites the payload bits of a dest register containing a Value.
movePayload(Register src,Register dest)2228 void movePayload(Register src, Register dest) {
2229 // Bfxil cannot be used with the zero register as a source.
2230 if (src == rzr) {
2231 And(ARMRegister(dest, 64), ARMRegister(dest, 64),
2232 Operand(JS::detail::ValueTagMask));
2233 } else {
2234 Bfxil(ARMRegister(dest, 64), ARMRegister(src, 64), 0, JSVAL_TAG_SHIFT);
2235 }
2236 }
2237
 protected:
  // Builds a fake exit frame for out-of-line code; defined out-of-line.
  bool buildOOLFakeExitFrame(void* fakeReturnAddr);
};
2241
2242 // See documentation for ScratchTagScope and ScratchTagScopeRelease in
2243 // MacroAssembler-x64.h.
2244
2245 class ScratchTagScope {
2246 vixl::UseScratchRegisterScope temps_;
2247 ARMRegister scratch64_;
2248 bool owned_;
2249 mozilla::DebugOnly<bool> released_;
2250
2251 public:
ScratchTagScope(MacroAssemblerCompat & masm,const ValueOperand &)2252 ScratchTagScope(MacroAssemblerCompat& masm, const ValueOperand&)
2253 : temps_(&masm), owned_(true), released_(false) {
2254 scratch64_ = temps_.AcquireX();
2255 }
2256
Register()2257 operator Register() {
2258 MOZ_ASSERT(!released_);
2259 return scratch64_.asUnsized();
2260 }
2261
release()2262 void release() {
2263 MOZ_ASSERT(!released_);
2264 released_ = true;
2265 if (owned_) {
2266 temps_.Release(scratch64_);
2267 owned_ = false;
2268 }
2269 }
2270
reacquire()2271 void reacquire() {
2272 MOZ_ASSERT(released_);
2273 released_ = false;
2274 }
2275 };
2276
// RAII helper that releases a ScratchTagScope for the duration of its own
// lifetime and reacquires it on destruction.
class ScratchTagScopeRelease {
  ScratchTagScope* ts_;

 public:
  explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
    ts_->release();
  }
  ~ScratchTagScopeRelease() { ts_->reacquire(); }
};
2286
// Extracts the tag of |value| into |tag|; delegates to splitSignExtTag.
inline void MacroAssemblerCompat::splitTagForTest(const ValueOperand& value,
                                                 ScratchTagScope& tag) {
  splitSignExtTag(value, tag);
}
2291
2292 typedef MacroAssemblerCompat MacroAssemblerSpecific;
2293
2294 } // namespace jit
2295 } // namespace js
2296
2297 #endif // jit_arm64_MacroAssembler_arm64_h
2298