/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_arm64_MacroAssembler_arm64_inl_h
#define jit_arm64_MacroAssembler_arm64_inl_h

#include "jit/arm64/MacroAssembler-arm64.h"

namespace js {
namespace jit {

//{{{ check_macroassembler_style

move64(Register64 src,Register64 dest)17 void MacroAssembler::move64(Register64 src, Register64 dest) {
18 Mov(ARMRegister(dest.reg, 64), ARMRegister(src.reg, 64));
19 }
20
move64(Imm64 imm,Register64 dest)21 void MacroAssembler::move64(Imm64 imm, Register64 dest) {
22 Mov(ARMRegister(dest.reg, 64), imm.value);
23 }
24
moveFloat32ToGPR(FloatRegister src,Register dest)25 void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
26 Fmov(ARMRegister(dest, 32), ARMFPRegister(src, 32));
27 }
28
moveGPRToFloat32(Register src,FloatRegister dest)29 void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
30 Fmov(ARMFPRegister(dest, 32), ARMRegister(src, 32));
31 }
32
move8SignExtend(Register src,Register dest)33 void MacroAssembler::move8SignExtend(Register src, Register dest) {
34 Sxtb(ARMRegister(dest, 32), ARMRegister(src, 32));
35 }
36
move16SignExtend(Register src,Register dest)37 void MacroAssembler::move16SignExtend(Register src, Register dest) {
38 Sxth(ARMRegister(dest, 32), ARMRegister(src, 32));
39 }
40
moveDoubleToGPR64(FloatRegister src,Register64 dest)41 void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
42 Fmov(ARMRegister(dest.reg, 64), ARMFPRegister(src, 64));
43 }
44
moveGPR64ToDouble(Register64 src,FloatRegister dest)45 void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
46 Fmov(ARMFPRegister(dest, 64), ARMRegister(src.reg, 64));
47 }
48
move64To32(Register64 src,Register dest)49 void MacroAssembler::move64To32(Register64 src, Register dest) {
50 Mov(ARMRegister(dest, 32), ARMRegister(src.reg, 32));
51 }
52
move32To64ZeroExtend(Register src,Register64 dest)53 void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
54 Mov(ARMRegister(dest.reg, 32), ARMRegister(src, 32));
55 }
56
move8To64SignExtend(Register src,Register64 dest)57 void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
58 Sxtb(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
59 }
60
move16To64SignExtend(Register src,Register64 dest)61 void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
62 Sxth(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
63 }
64
move32To64SignExtend(Register src,Register64 dest)65 void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
66 Sxtw(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
67 }
68
move32ZeroExtendToPtr(Register src,Register dest)69 void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
70 Mov(ARMRegister(dest, 32), ARMRegister(src, 32));
71 }
72
73 // ===============================================================
74 // Load instructions
75
load32SignExtendToPtr(const Address & src,Register dest)76 void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
77 load32(src, dest);
78 move32To64SignExtend(dest, Register64(dest));
79 }
80
loadAbiReturnAddress(Register dest)81 void MacroAssembler::loadAbiReturnAddress(Register dest) { movePtr(lr, dest); }
82
83 // ===============================================================
84 // Logical instructions
85
not32(Register reg)86 void MacroAssembler::not32(Register reg) {
87 Orn(ARMRegister(reg, 32), vixl::wzr, ARMRegister(reg, 32));
88 }
89
and32(Register src,Register dest)90 void MacroAssembler::and32(Register src, Register dest) {
91 And(ARMRegister(dest, 32), ARMRegister(dest, 32),
92 Operand(ARMRegister(src, 32)));
93 }
94
and32(Imm32 imm,Register dest)95 void MacroAssembler::and32(Imm32 imm, Register dest) {
96 And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
97 }
98
and32(Imm32 imm,Register src,Register dest)99 void MacroAssembler::and32(Imm32 imm, Register src, Register dest) {
100 And(ARMRegister(dest, 32), ARMRegister(src, 32), Operand(imm.value));
101 }
102
and32(Imm32 imm,const Address & dest)103 void MacroAssembler::and32(Imm32 imm, const Address& dest) {
104 vixl::UseScratchRegisterScope temps(this);
105 const ARMRegister scratch32 = temps.AcquireW();
106 MOZ_ASSERT(scratch32.asUnsized() != dest.base);
107 load32(dest, scratch32.asUnsized());
108 And(scratch32, scratch32, Operand(imm.value));
109 store32(scratch32.asUnsized(), dest);
110 }
111
and32(const Address & src,Register dest)112 void MacroAssembler::and32(const Address& src, Register dest) {
113 vixl::UseScratchRegisterScope temps(this);
114 const ARMRegister scratch32 = temps.AcquireW();
115 MOZ_ASSERT(scratch32.asUnsized() != src.base);
116 load32(src, scratch32.asUnsized());
117 And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
118 }
119
andPtr(Register src,Register dest)120 void MacroAssembler::andPtr(Register src, Register dest) {
121 And(ARMRegister(dest, 64), ARMRegister(dest, 64),
122 Operand(ARMRegister(src, 64)));
123 }
124
andPtr(Imm32 imm,Register dest)125 void MacroAssembler::andPtr(Imm32 imm, Register dest) {
126 And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
127 }
128
and64(Imm64 imm,Register64 dest)129 void MacroAssembler::and64(Imm64 imm, Register64 dest) {
130 vixl::UseScratchRegisterScope temps(this);
131 const Register scratch = temps.AcquireX().asUnsized();
132 mov(ImmWord(imm.value), scratch);
133 andPtr(scratch, dest.reg);
134 }
135
and64(Register64 src,Register64 dest)136 void MacroAssembler::and64(Register64 src, Register64 dest) {
137 And(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
138 ARMRegister(src.reg, 64));
139 }
140
or64(Imm64 imm,Register64 dest)141 void MacroAssembler::or64(Imm64 imm, Register64 dest) {
142 vixl::UseScratchRegisterScope temps(this);
143 const Register scratch = temps.AcquireX().asUnsized();
144 mov(ImmWord(imm.value), scratch);
145 orPtr(scratch, dest.reg);
146 }
147
xor64(Imm64 imm,Register64 dest)148 void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
149 vixl::UseScratchRegisterScope temps(this);
150 const Register scratch = temps.AcquireX().asUnsized();
151 mov(ImmWord(imm.value), scratch);
152 xorPtr(scratch, dest.reg);
153 }
154
or32(Imm32 imm,Register dest)155 void MacroAssembler::or32(Imm32 imm, Register dest) {
156 Orr(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
157 }
158
or32(Register src,Register dest)159 void MacroAssembler::or32(Register src, Register dest) {
160 Orr(ARMRegister(dest, 32), ARMRegister(dest, 32),
161 Operand(ARMRegister(src, 32)));
162 }
163
or32(Imm32 imm,const Address & dest)164 void MacroAssembler::or32(Imm32 imm, const Address& dest) {
165 vixl::UseScratchRegisterScope temps(this);
166 const ARMRegister scratch32 = temps.AcquireW();
167 MOZ_ASSERT(scratch32.asUnsized() != dest.base);
168 load32(dest, scratch32.asUnsized());
169 Orr(scratch32, scratch32, Operand(imm.value));
170 store32(scratch32.asUnsized(), dest);
171 }
172
orPtr(Register src,Register dest)173 void MacroAssembler::orPtr(Register src, Register dest) {
174 Orr(ARMRegister(dest, 64), ARMRegister(dest, 64),
175 Operand(ARMRegister(src, 64)));
176 }
177
orPtr(Imm32 imm,Register dest)178 void MacroAssembler::orPtr(Imm32 imm, Register dest) {
179 Orr(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
180 }
181
or64(Register64 src,Register64 dest)182 void MacroAssembler::or64(Register64 src, Register64 dest) {
183 orPtr(src.reg, dest.reg);
184 }
185
xor64(Register64 src,Register64 dest)186 void MacroAssembler::xor64(Register64 src, Register64 dest) {
187 xorPtr(src.reg, dest.reg);
188 }
189
xor32(Register src,Register dest)190 void MacroAssembler::xor32(Register src, Register dest) {
191 Eor(ARMRegister(dest, 32), ARMRegister(dest, 32),
192 Operand(ARMRegister(src, 32)));
193 }
194
xor32(Imm32 imm,Register dest)195 void MacroAssembler::xor32(Imm32 imm, Register dest) {
196 Eor(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
197 }
198
xorPtr(Register src,Register dest)199 void MacroAssembler::xorPtr(Register src, Register dest) {
200 Eor(ARMRegister(dest, 64), ARMRegister(dest, 64),
201 Operand(ARMRegister(src, 64)));
202 }
203
xorPtr(Imm32 imm,Register dest)204 void MacroAssembler::xorPtr(Imm32 imm, Register dest) {
205 Eor(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
206 }
207
208 // ===============================================================
209 // Swap instructions
210
byteSwap16SignExtend(Register reg)211 void MacroAssembler::byteSwap16SignExtend(Register reg) {
212 rev16(ARMRegister(reg, 32), ARMRegister(reg, 32));
213 sxth(ARMRegister(reg, 32), ARMRegister(reg, 32));
214 }
215
byteSwap16ZeroExtend(Register reg)216 void MacroAssembler::byteSwap16ZeroExtend(Register reg) {
217 rev16(ARMRegister(reg, 32), ARMRegister(reg, 32));
218 uxth(ARMRegister(reg, 32), ARMRegister(reg, 32));
219 }
220
byteSwap32(Register reg)221 void MacroAssembler::byteSwap32(Register reg) {
222 rev(ARMRegister(reg, 32), ARMRegister(reg, 32));
223 }
224
byteSwap64(Register64 reg)225 void MacroAssembler::byteSwap64(Register64 reg) {
226 rev(ARMRegister(reg.reg, 64), ARMRegister(reg.reg, 64));
227 }
228
229 // ===============================================================
230 // Arithmetic functions
231
add32(Register src,Register dest)232 void MacroAssembler::add32(Register src, Register dest) {
233 Add(ARMRegister(dest, 32), ARMRegister(dest, 32),
234 Operand(ARMRegister(src, 32)));
235 }
236
add32(Imm32 imm,Register dest)237 void MacroAssembler::add32(Imm32 imm, Register dest) {
238 Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
239 }
240
add32(Imm32 imm,const Address & dest)241 void MacroAssembler::add32(Imm32 imm, const Address& dest) {
242 vixl::UseScratchRegisterScope temps(this);
243 const ARMRegister scratch32 = temps.AcquireW();
244 MOZ_ASSERT(scratch32.asUnsized() != dest.base);
245
246 Ldr(scratch32, toMemOperand(dest));
247 Add(scratch32, scratch32, Operand(imm.value));
248 Str(scratch32, toMemOperand(dest));
249 }
250
addPtr(Register src,Register dest)251 void MacroAssembler::addPtr(Register src, Register dest) {
252 addPtr(src, dest, dest);
253 }
254
addPtr(Register src1,Register src2,Register dest)255 void MacroAssembler::addPtr(Register src1, Register src2, Register dest) {
256 Add(ARMRegister(dest, 64), ARMRegister(src1, 64),
257 Operand(ARMRegister(src2, 64)));
258 }
259
addPtr(Imm32 imm,Register dest)260 void MacroAssembler::addPtr(Imm32 imm, Register dest) {
261 addPtr(imm, dest, dest);
262 }
263
addPtr(Imm32 imm,Register src,Register dest)264 void MacroAssembler::addPtr(Imm32 imm, Register src, Register dest) {
265 Add(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(imm.value));
266 }
267
addPtr(ImmWord imm,Register dest)268 void MacroAssembler::addPtr(ImmWord imm, Register dest) {
269 Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
270 }
271
addPtr(Imm32 imm,const Address & dest)272 void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
273 vixl::UseScratchRegisterScope temps(this);
274 const ARMRegister scratch64 = temps.AcquireX();
275 MOZ_ASSERT(scratch64.asUnsized() != dest.base);
276
277 Ldr(scratch64, toMemOperand(dest));
278 Add(scratch64, scratch64, Operand(imm.value));
279 Str(scratch64, toMemOperand(dest));
280 }
281
addPtr(const Address & src,Register dest)282 void MacroAssembler::addPtr(const Address& src, Register dest) {
283 vixl::UseScratchRegisterScope temps(this);
284 const ARMRegister scratch64 = temps.AcquireX();
285 MOZ_ASSERT(scratch64.asUnsized() != src.base);
286
287 Ldr(scratch64, toMemOperand(src));
288 Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
289 }
290
add64(Register64 src,Register64 dest)291 void MacroAssembler::add64(Register64 src, Register64 dest) {
292 addPtr(src.reg, dest.reg);
293 }
294
add64(Imm32 imm,Register64 dest)295 void MacroAssembler::add64(Imm32 imm, Register64 dest) {
296 Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
297 }
298
add64(Imm64 imm,Register64 dest)299 void MacroAssembler::add64(Imm64 imm, Register64 dest) {
300 Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
301 }
302
sub32FromStackPtrWithPatch(Register dest)303 CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
304 vixl::UseScratchRegisterScope temps(this);
305 const ARMRegister scratch = temps.AcquireX();
306 AutoForbidPoolsAndNops afp(this,
307 /* max number of instructions in scope = */ 3);
308 CodeOffset offs = CodeOffset(currentOffset());
309 movz(scratch, 0, 0);
310 movk(scratch, 0, 16);
311 Sub(ARMRegister(dest, 64), sp, scratch);
312 return offs;
313 }
314
patchSub32FromStackPtr(CodeOffset offset,Imm32 imm)315 void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
316 Instruction* i1 = getInstructionAt(BufferOffset(offset.offset()));
317 MOZ_ASSERT(i1->IsMovz());
318 i1->SetInstructionBits(i1->InstructionBits() |
319 ImmMoveWide(uint16_t(imm.value)));
320
321 Instruction* i2 = getInstructionAt(BufferOffset(offset.offset() + 4));
322 MOZ_ASSERT(i2->IsMovk());
323 i2->SetInstructionBits(i2->InstructionBits() |
324 ImmMoveWide(uint16_t(imm.value >> 16)));
325 }
326
addDouble(FloatRegister src,FloatRegister dest)327 void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
328 fadd(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
329 ARMFPRegister(src, 64));
330 }
331
addFloat32(FloatRegister src,FloatRegister dest)332 void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
333 fadd(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
334 ARMFPRegister(src, 32));
335 }
336
sub32(Imm32 imm,Register dest)337 void MacroAssembler::sub32(Imm32 imm, Register dest) {
338 Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
339 }
340
sub32(Register src,Register dest)341 void MacroAssembler::sub32(Register src, Register dest) {
342 Sub(ARMRegister(dest, 32), ARMRegister(dest, 32),
343 Operand(ARMRegister(src, 32)));
344 }
345
sub32(const Address & src,Register dest)346 void MacroAssembler::sub32(const Address& src, Register dest) {
347 vixl::UseScratchRegisterScope temps(this);
348 const ARMRegister scratch32 = temps.AcquireW();
349 MOZ_ASSERT(scratch32.asUnsized() != src.base);
350 load32(src, scratch32.asUnsized());
351 Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
352 }
353
subPtr(Register src,Register dest)354 void MacroAssembler::subPtr(Register src, Register dest) {
355 Sub(ARMRegister(dest, 64), ARMRegister(dest, 64),
356 Operand(ARMRegister(src, 64)));
357 }
358
subPtr(Register src,const Address & dest)359 void MacroAssembler::subPtr(Register src, const Address& dest) {
360 vixl::UseScratchRegisterScope temps(this);
361 const ARMRegister scratch64 = temps.AcquireX();
362 MOZ_ASSERT(scratch64.asUnsized() != dest.base);
363
364 Ldr(scratch64, toMemOperand(dest));
365 Sub(scratch64, scratch64, Operand(ARMRegister(src, 64)));
366 Str(scratch64, toMemOperand(dest));
367 }
368
subPtr(Imm32 imm,Register dest)369 void MacroAssembler::subPtr(Imm32 imm, Register dest) {
370 Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
371 }
372
subPtr(const Address & addr,Register dest)373 void MacroAssembler::subPtr(const Address& addr, Register dest) {
374 vixl::UseScratchRegisterScope temps(this);
375 const ARMRegister scratch64 = temps.AcquireX();
376 MOZ_ASSERT(scratch64.asUnsized() != addr.base);
377
378 Ldr(scratch64, toMemOperand(addr));
379 Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
380 }
381
sub64(Register64 src,Register64 dest)382 void MacroAssembler::sub64(Register64 src, Register64 dest) {
383 Sub(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
384 ARMRegister(src.reg, 64));
385 }
386
sub64(Imm64 imm,Register64 dest)387 void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
388 Sub(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
389 }
390
subDouble(FloatRegister src,FloatRegister dest)391 void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
392 fsub(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
393 ARMFPRegister(src, 64));
394 }
395
subFloat32(FloatRegister src,FloatRegister dest)396 void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
397 fsub(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
398 ARMFPRegister(src, 32));
399 }
400
mul32(Register rhs,Register srcDest)401 void MacroAssembler::mul32(Register rhs, Register srcDest) {
402 mul32(srcDest, rhs, srcDest, nullptr);
403 }
404
mul32(Register src1,Register src2,Register dest,Label * onOver)405 void MacroAssembler::mul32(Register src1, Register src2, Register dest,
406 Label* onOver) {
407 Smull(ARMRegister(dest, 64), ARMRegister(src1, 32), ARMRegister(src2, 32));
408 if (onOver) {
409 Cmp(ARMRegister(dest, 64), Operand(ARMRegister(dest, 32), vixl::SXTW));
410 B(onOver, NotEqual);
411 }
412
413 // Clear upper 32 bits.
414 Mov(ARMRegister(dest, 32), ARMRegister(dest, 32));
415 }
416
mul64(Imm64 imm,const Register64 & dest)417 void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
418 vixl::UseScratchRegisterScope temps(this);
419 const ARMRegister scratch64 = temps.AcquireX();
420 MOZ_ASSERT(dest.reg != scratch64.asUnsized());
421 mov(ImmWord(imm.value), scratch64.asUnsized());
422 Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), scratch64);
423 }
424
mul64(const Register64 & src,const Register64 & dest,const Register temp)425 void MacroAssembler::mul64(const Register64& src, const Register64& dest,
426 const Register temp) {
427 MOZ_ASSERT(temp == Register::Invalid());
428 Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
429 ARMRegister(src.reg, 64));
430 }
431
mulBy3(Register src,Register dest)432 void MacroAssembler::mulBy3(Register src, Register dest) {
433 ARMRegister xdest(dest, 64);
434 ARMRegister xsrc(src, 64);
435 Add(xdest, xsrc, Operand(xsrc, vixl::LSL, 1));
436 }
437
mulFloat32(FloatRegister src,FloatRegister dest)438 void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
439 fmul(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
440 ARMFPRegister(src, 32));
441 }
442
mulDouble(FloatRegister src,FloatRegister dest)443 void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
444 fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
445 ARMFPRegister(src, 64));
446 }
447
mulDoublePtr(ImmPtr imm,Register temp,FloatRegister dest)448 void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
449 FloatRegister dest) {
450 vixl::UseScratchRegisterScope temps(this);
451 const Register scratch = temps.AcquireX().asUnsized();
452 MOZ_ASSERT(temp != scratch);
453 movePtr(imm, scratch);
454 const ARMFPRegister scratchDouble = temps.AcquireD();
455 Ldr(scratchDouble, MemOperand(Address(scratch, 0)));
456 fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), scratchDouble);
457 }
458
quotient32(Register rhs,Register srcDest,bool isUnsigned)459 void MacroAssembler::quotient32(Register rhs, Register srcDest,
460 bool isUnsigned) {
461 if (isUnsigned) {
462 Udiv(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32),
463 ARMRegister(rhs, 32));
464 } else {
465 Sdiv(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32),
466 ARMRegister(rhs, 32));
467 }
468 }
469
470 // This does not deal with x % 0 or INT_MIN % -1, the caller needs to filter
471 // those cases when they may occur.
472
remainder32(Register rhs,Register srcDest,bool isUnsigned)473 void MacroAssembler::remainder32(Register rhs, Register srcDest,
474 bool isUnsigned) {
475 vixl::UseScratchRegisterScope temps(this);
476 ARMRegister scratch = temps.AcquireW();
477 if (isUnsigned) {
478 Udiv(scratch, ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
479 } else {
480 Sdiv(scratch, ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
481 }
482 Mul(scratch, scratch, ARMRegister(rhs, 32));
483 Sub(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32), scratch);
484 }
485
divFloat32(FloatRegister src,FloatRegister dest)486 void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
487 fdiv(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
488 ARMFPRegister(src, 32));
489 }
490
divDouble(FloatRegister src,FloatRegister dest)491 void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
492 fdiv(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
493 ARMFPRegister(src, 64));
494 }
495
inc64(AbsoluteAddress dest)496 void MacroAssembler::inc64(AbsoluteAddress dest) {
497 vixl::UseScratchRegisterScope temps(this);
498 const ARMRegister scratchAddr64 = temps.AcquireX();
499 const ARMRegister scratch64 = temps.AcquireX();
500
501 Mov(scratchAddr64, uint64_t(dest.addr));
502 Ldr(scratch64, MemOperand(scratchAddr64, 0));
503 Add(scratch64, scratch64, Operand(1));
504 Str(scratch64, MemOperand(scratchAddr64, 0));
505 }
506
neg32(Register reg)507 void MacroAssembler::neg32(Register reg) {
508 Negs(ARMRegister(reg, 32), Operand(ARMRegister(reg, 32)));
509 }
510
neg64(Register64 reg)511 void MacroAssembler::neg64(Register64 reg) { negPtr(reg.reg); }
512
negPtr(Register reg)513 void MacroAssembler::negPtr(Register reg) {
514 Negs(ARMRegister(reg, 64), Operand(ARMRegister(reg, 64)));
515 }
516
negateFloat(FloatRegister reg)517 void MacroAssembler::negateFloat(FloatRegister reg) {
518 fneg(ARMFPRegister(reg, 32), ARMFPRegister(reg, 32));
519 }
520
negateDouble(FloatRegister reg)521 void MacroAssembler::negateDouble(FloatRegister reg) {
522 fneg(ARMFPRegister(reg, 64), ARMFPRegister(reg, 64));
523 }
524
absFloat32(FloatRegister src,FloatRegister dest)525 void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
526 fabs(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
527 }
528
absDouble(FloatRegister src,FloatRegister dest)529 void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
530 fabs(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
531 }
532
sqrtFloat32(FloatRegister src,FloatRegister dest)533 void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
534 fsqrt(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
535 }
536
sqrtDouble(FloatRegister src,FloatRegister dest)537 void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
538 fsqrt(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
539 }
540
minFloat32(FloatRegister other,FloatRegister srcDest,bool handleNaN)541 void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
542 bool handleNaN) {
543 MOZ_ASSERT(handleNaN); // Always true for wasm
544 fmin(ARMFPRegister(srcDest, 32), ARMFPRegister(srcDest, 32),
545 ARMFPRegister(other, 32));
546 }
547
minDouble(FloatRegister other,FloatRegister srcDest,bool handleNaN)548 void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
549 bool handleNaN) {
550 MOZ_ASSERT(handleNaN); // Always true for wasm
551 fmin(ARMFPRegister(srcDest, 64), ARMFPRegister(srcDest, 64),
552 ARMFPRegister(other, 64));
553 }
554
maxFloat32(FloatRegister other,FloatRegister srcDest,bool handleNaN)555 void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
556 bool handleNaN) {
557 MOZ_ASSERT(handleNaN); // Always true for wasm
558 fmax(ARMFPRegister(srcDest, 32), ARMFPRegister(srcDest, 32),
559 ARMFPRegister(other, 32));
560 }
561
maxDouble(FloatRegister other,FloatRegister srcDest,bool handleNaN)562 void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
563 bool handleNaN) {
564 MOZ_ASSERT(handleNaN); // Always true for wasm
565 fmax(ARMFPRegister(srcDest, 64), ARMFPRegister(srcDest, 64),
566 ARMFPRegister(other, 64));
567 }
568
569 // ===============================================================
570 // Shift functions
571
lshiftPtr(Imm32 imm,Register dest)572 void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
573 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
574 Lsl(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
575 }
576
lshift64(Imm32 imm,Register64 dest)577 void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
578 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
579 lshiftPtr(imm, dest.reg);
580 }
581
lshift64(Register shift,Register64 srcDest)582 void MacroAssembler::lshift64(Register shift, Register64 srcDest) {
583 Lsl(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
584 ARMRegister(shift, 64));
585 }
586
lshift32(Register shift,Register dest)587 void MacroAssembler::lshift32(Register shift, Register dest) {
588 Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
589 }
590
flexibleLshift32(Register src,Register dest)591 void MacroAssembler::flexibleLshift32(Register src, Register dest) {
592 lshift32(src, dest);
593 }
594
lshift32(Imm32 imm,Register dest)595 void MacroAssembler::lshift32(Imm32 imm, Register dest) {
596 MOZ_ASSERT(0 <= imm.value && imm.value < 32);
597 Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
598 }
599
rshiftPtr(Imm32 imm,Register dest)600 void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
601 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
602 Lsr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
603 }
604
rshiftPtr(Imm32 imm,Register src,Register dest)605 void MacroAssembler::rshiftPtr(Imm32 imm, Register src, Register dest) {
606 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
607 Lsr(ARMRegister(dest, 64), ARMRegister(src, 64), imm.value);
608 }
609
rshift32(Register shift,Register dest)610 void MacroAssembler::rshift32(Register shift, Register dest) {
611 Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
612 }
613
flexibleRshift32(Register src,Register dest)614 void MacroAssembler::flexibleRshift32(Register src, Register dest) {
615 rshift32(src, dest);
616 }
617
rshift32(Imm32 imm,Register dest)618 void MacroAssembler::rshift32(Imm32 imm, Register dest) {
619 MOZ_ASSERT(0 <= imm.value && imm.value < 32);
620 Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
621 }
622
rshiftPtrArithmetic(Imm32 imm,Register dest)623 void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
624 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
625 Asr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
626 }
627
rshift32Arithmetic(Register shift,Register dest)628 void MacroAssembler::rshift32Arithmetic(Register shift, Register dest) {
629 Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
630 }
631
rshift32Arithmetic(Imm32 imm,Register dest)632 void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
633 MOZ_ASSERT(0 <= imm.value && imm.value < 32);
634 Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
635 }
636
flexibleRshift32Arithmetic(Register src,Register dest)637 void MacroAssembler::flexibleRshift32Arithmetic(Register src, Register dest) {
638 rshift32Arithmetic(src, dest);
639 }
640
rshift64(Imm32 imm,Register64 dest)641 void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
642 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
643 rshiftPtr(imm, dest.reg);
644 }
645
rshift64(Register shift,Register64 srcDest)646 void MacroAssembler::rshift64(Register shift, Register64 srcDest) {
647 Lsr(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
648 ARMRegister(shift, 64));
649 }
650
rshift64Arithmetic(Imm32 imm,Register64 dest)651 void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
652 Asr(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), imm.value);
653 }
654
rshift64Arithmetic(Register shift,Register64 srcDest)655 void MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest) {
656 Asr(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
657 ARMRegister(shift, 64));
658 }
659
660 // ===============================================================
661 // Condition functions
662
663 template <typename T1, typename T2>
cmp32Set(Condition cond,T1 lhs,T2 rhs,Register dest)664 void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
665 cmp32(lhs, rhs);
666 emitSet(cond, dest);
667 }
668
669 template <typename T1, typename T2>
cmpPtrSet(Condition cond,T1 lhs,T2 rhs,Register dest)670 void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
671 cmpPtr(lhs, rhs);
672 emitSet(cond, dest);
673 }
674
675 // ===============================================================
676 // Rotation functions
677
rotateLeft(Imm32 count,Register input,Register dest)678 void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
679 Ror(ARMRegister(dest, 32), ARMRegister(input, 32), (32 - count.value) & 31);
680 }
681
rotateLeft(Register count,Register input,Register dest)682 void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
683 vixl::UseScratchRegisterScope temps(this);
684 const ARMRegister scratch = temps.AcquireW();
685 // Really 32 - count, but the upper bits of the result are ignored.
686 Neg(scratch, ARMRegister(count, 32));
687 Ror(ARMRegister(dest, 32), ARMRegister(input, 32), scratch);
688 }
689
rotateRight(Imm32 count,Register input,Register dest)690 void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
691 Ror(ARMRegister(dest, 32), ARMRegister(input, 32), count.value & 31);
692 }
693
rotateRight(Register count,Register input,Register dest)694 void MacroAssembler::rotateRight(Register count, Register input,
695 Register dest) {
696 Ror(ARMRegister(dest, 32), ARMRegister(input, 32), ARMRegister(count, 32));
697 }
698
rotateLeft64(Register count,Register64 input,Register64 dest,Register temp)699 void MacroAssembler::rotateLeft64(Register count, Register64 input,
700 Register64 dest, Register temp) {
701 MOZ_ASSERT(temp == Register::Invalid());
702
703 vixl::UseScratchRegisterScope temps(this);
704 const ARMRegister scratch = temps.AcquireX();
705 // Really 64 - count, but the upper bits of the result are ignored.
706 Neg(scratch, ARMRegister(count, 64));
707 Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), scratch);
708 }
709
rotateLeft64(Imm32 count,Register64 input,Register64 dest,Register temp)710 void MacroAssembler::rotateLeft64(Imm32 count, Register64 input,
711 Register64 dest, Register temp) {
712 MOZ_ASSERT(temp == Register::Invalid());
713
714 Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64),
715 (64 - count.value) & 63);
716 }
717
rotateRight64(Register count,Register64 input,Register64 dest,Register temp)718 void MacroAssembler::rotateRight64(Register count, Register64 input,
719 Register64 dest, Register temp) {
720 MOZ_ASSERT(temp == Register::Invalid());
721
722 Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64),
723 ARMRegister(count, 64));
724 }
725
rotateRight64(Imm32 count,Register64 input,Register64 dest,Register temp)726 void MacroAssembler::rotateRight64(Imm32 count, Register64 input,
727 Register64 dest, Register temp) {
728 MOZ_ASSERT(temp == Register::Invalid());
729
730 Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), count.value & 63);
731 }
732
733 // ===============================================================
734 // Bit counting functions
735
clz32(Register src,Register dest,bool knownNotZero)736 void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
737 Clz(ARMRegister(dest, 32), ARMRegister(src, 32));
738 }
739
ctz32(Register src,Register dest,bool knownNotZero)740 void MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) {
741 Rbit(ARMRegister(dest, 32), ARMRegister(src, 32));
742 Clz(ARMRegister(dest, 32), ARMRegister(dest, 32));
743 }
744
// Count leading zero bits of the 64-bit value in |src| (64 for a zero input).
void MacroAssembler::clz64(Register64 src, Register dest) {
  Clz(ARMRegister(dest, 64), ARMRegister(src.reg, 64));
}
748
// Count trailing zero bits of the 64-bit value in |src|, via the same
// bit-reverse + CLZ trick as ctz32.
void MacroAssembler::ctz64(Register64 src, Register dest) {
  Rbit(ARMRegister(dest, 64), ARMRegister(src.reg, 64));
  Clz(ARMRegister(dest, 64), ARMRegister(dest, 64));
}
753
popcnt32(Register src_,Register dest_,Register tmp_)754 void MacroAssembler::popcnt32(Register src_, Register dest_, Register tmp_) {
755 MOZ_ASSERT(tmp_ != Register::Invalid());
756
757 // Equivalent to mozilla::CountPopulation32().
758
759 ARMRegister src(src_, 32);
760 ARMRegister dest(dest_, 32);
761 ARMRegister tmp(tmp_, 32);
762
763 Mov(tmp, src);
764 if (src_ != dest_) {
765 Mov(dest, src);
766 }
767 Lsr(dest, dest, 1);
768 And(dest, dest, 0x55555555);
769 Sub(dest, tmp, dest);
770 Lsr(tmp, dest, 2);
771 And(tmp, tmp, 0x33333333);
772 And(dest, dest, 0x33333333);
773 Add(dest, tmp, dest);
774 Add(dest, dest, Operand(dest, vixl::LSR, 4));
775 And(dest, dest, 0x0F0F0F0F);
776 Add(dest, dest, Operand(dest, vixl::LSL, 8));
777 Add(dest, dest, Operand(dest, vixl::LSL, 16));
778 Lsr(dest, dest, 24);
779 }
780
// Population count of the 64-bit value in |src_|, written to |dest_|, using
// the SWAR reduction (see popcnt32 for the 32-bit variant).
void MacroAssembler::popcnt64(Register64 src_, Register64 dest_,
                              Register tmp_) {
  MOZ_ASSERT(tmp_ != Register::Invalid());

  // Equivalent to mozilla::CountPopulation64(), though likely more efficient.

  ARMRegister src(src_.reg, 64);
  ARMRegister dest(dest_.reg, 64);
  ARMRegister tmp(tmp_, 64);

  // Preserve a copy of the input while the accumulator is built up.
  Mov(tmp, src);
  if (src_ != dest_) {
    Mov(dest, src);
  }
  // dest = src - ((src >> 1) & 0x5555...): per-2-bit counts.
  Lsr(dest, dest, 1);
  And(dest, dest, 0x5555555555555555);
  Sub(dest, tmp, dest);
  // Fold 2-bit counts into 4-bit counts.
  Lsr(tmp, dest, 2);
  And(tmp, tmp, 0x3333333333333333);
  And(dest, dest, 0x3333333333333333);
  Add(dest, tmp, dest);
  // Fold into byte counts, then sum all bytes into the top byte and shift
  // it down.
  Add(dest, dest, Operand(dest, vixl::LSR, 4));
  And(dest, dest, 0x0F0F0F0F0F0F0F0F);
  Add(dest, dest, Operand(dest, vixl::LSL, 8));
  Add(dest, dest, Operand(dest, vixl::LSL, 16));
  Add(dest, dest, Operand(dest, vixl::LSL, 32));
  Lsr(dest, dest, 56);
}
809
810 // ===============================================================
811 // Branch functions
812
// Compare two 32-bit registers and branch on |cond|. Generic over the
// label-like type L.
template <class L>
void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
                              L label) {
  cmp32(lhs, rhs);
  B(label, cond);
}
819
// Compare a 32-bit register against an immediate and branch on |cond|.
template <class L>
void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm,
                              L label) {
  cmp32(lhs, imm);
  B(label, cond);
}
826
// Compare |lhs| against the 32-bit value loaded from memory at |rhs|.
void MacroAssembler::branch32(Condition cond, Register lhs, const Address& rhs,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  // The scratch register must not alias the operands we still need.
  MOZ_ASSERT(scratch != lhs);
  MOZ_ASSERT(scratch != rhs.base);
  load32(rhs, scratch);
  branch32(cond, lhs, scratch, label);
}
836
// Compare the 32-bit value loaded from memory at |lhs| against |rhs|.
void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  // The scratch register must not alias the operands we still need.
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != rhs);
  load32(lhs, scratch);
  branch32(cond, scratch, rhs, label);
}
846
// Compare the 32-bit value loaded from memory at |lhs| against an immediate.
void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 imm,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  load32(lhs, scratch);
  branch32(cond, scratch, imm, label);
}
855
branch32(Condition cond,const AbsoluteAddress & lhs,Register rhs,Label * label)856 void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
857 Register rhs, Label* label) {
858 vixl::UseScratchRegisterScope temps(this);
859 const Register scratch = temps.AcquireX().asUnsized();
860 movePtr(ImmPtr(lhs.addr), scratch);
861 branch32(cond, Address(scratch, 0), rhs, label);
862 }
863
// Compare the 32-bit value at absolute address |lhs| against an immediate.
void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
                              Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  load32(lhs, scratch);
  branch32(cond, scratch, rhs, label);
}
871
// Compare the 32-bit value at base+index address |lhs| against an immediate.
void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  // The scratch register must not alias either component of the address.
  MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
  MOZ_ASSERT(scratch32.asUnsized() != lhs.index);
  // LDR_w: 32-bit load from the computed base+index address.
  doBaseIndex(scratch32, lhs, vixl::LDR_w);
  branch32(cond, scratch32.asUnsized(), rhs, label);
}
881
// Compare the 32-bit value at a wasm symbolic address against an immediate.
// The symbolic address is materialized into a scratch register first.
void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
                              Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  movePtr(lhs, scratch);
  branch32(cond, Address(scratch, 0), rhs, label);
}
889
// Compare |lhs| against a 64-bit immediate; jump to |success| when |cond|
// holds. If |fail| is non-null, jump there otherwise; a null |fail| falls
// through.
void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
                              Label* success, Label* fail) {
  Cmp(ARMRegister(lhs.reg, 64), val.value);
  B(success, cond);
  if (fail) {
    B(fail);
  }
}
898
// Compare two 64-bit registers; jump to |success| when |cond| holds. If
// |fail| is non-null, jump there otherwise; a null |fail| falls through.
void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
                              Label* success, Label* fail) {
  Cmp(ARMRegister(lhs.reg, 64), ARMRegister(rhs.reg, 64));
  B(success, cond);
  if (fail) {
    B(fail);
  }
}
907
// Compare the 64-bit value in memory at |lhs| against an immediate. Only
// NotEqual is supported by this overload.
void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
                              Label* label) {
  MOZ_ASSERT(cond == Assembler::NotEqual,
             "other condition codes not supported");

  branchPtr(cond, lhs, ImmWord(val.value), label);
}
915
// Compare two 64-bit values in memory, using the caller-supplied |scratch|
// to hold the loaded right-hand side. Only NotEqual is supported.
void MacroAssembler::branch64(Condition cond, const Address& lhs,
                              const Address& rhs, Register scratch,
                              Label* label) {
  MOZ_ASSERT(cond == Assembler::NotEqual,
             "other condition codes not supported");
  // |scratch| must not alias either address base register.
  MOZ_ASSERT(lhs.base != scratch);
  MOZ_ASSERT(rhs.base != scratch);

  loadPtr(rhs, scratch);
  branchPtr(cond, lhs, scratch, label);
}
927
// Compare two pointer-sized (64-bit) registers and branch on |cond|.
template <class L>
void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
                               L label) {
  Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
  B(label, cond);
}
934
// Compare a pointer-sized register against a 32-bit immediate and branch.
void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
                               Label* label) {
  cmpPtr(lhs, rhs);
  B(label, cond);
}
940
// Compare a pointer-sized register against an immediate pointer and branch.
void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
                               Label* label) {
  cmpPtr(lhs, rhs);
  B(label, cond);
}
946
// Compare a pointer-sized register against a GC-thing pointer. The GC
// pointer is materialized into a scratch register (via movePtr, which can
// record it for tracing) before the comparison.
void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs);
  movePtr(rhs, scratch);
  branchPtr(cond, lhs, scratch, label);
}
955
// Compare a pointer-sized register against a word-sized immediate and branch.
void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
                               Label* label) {
  cmpPtr(lhs, rhs);
  B(label, cond);
}
961
// Compare the pointer loaded from memory at |lhs| against |rhs| and branch.
template <class L>
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
                               L label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  // The scratch register must not alias the operands we still need.
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != rhs);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
972
// Compare the pointer loaded from memory at |lhs| against an immediate
// pointer and branch.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
981
branchPtr(Condition cond,const Address & lhs,ImmGCPtr rhs,Label * label)982 void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
983 Label* label) {
984 vixl::UseScratchRegisterScope temps(this);
985 const ARMRegister scratch1_64 = temps.AcquireX();
986 const ARMRegister scratch2_64 = temps.AcquireX();
987 MOZ_ASSERT(scratch1_64.asUnsized() != lhs.base);
988 MOZ_ASSERT(scratch2_64.asUnsized() != lhs.base);
989
990 movePtr(rhs, scratch1_64.asUnsized());
991 loadPtr(lhs, scratch2_64.asUnsized());
992 branchPtr(cond, scratch2_64.asUnsized(), scratch1_64.asUnsized(), label);
993 }
994
// Compare the pointer loaded from memory at |lhs| against a word-sized
// immediate and branch.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
1003
// Compare the pointer at absolute address |lhs| against |rhs| and branch.
void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
                               Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  // |rhs| is still needed after the load; it must not be the scratch.
  MOZ_ASSERT(scratch != rhs);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
1012
// Compare the pointer at absolute address |lhs| against a word-sized
// immediate and branch.
void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
                               ImmWord rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
1020
// Compare the pointer at a wasm symbolic address against |rhs| and branch.
void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
                               Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != rhs);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
1029
// Compare the pointer at base+index address |lhs| against a word-sized
// immediate and branch.
void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
                               ImmWord rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  // The scratch must not alias either component of the address.
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != lhs.index);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
1039
// On ARM64 private values need no unboxing, so this is a plain branchPtr.
void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
                                      Register rhs, Label* label) {
  branchPtr(cond, lhs, rhs, label);
}
1044
// Branch on a 32-bit float comparison. The FP compare sets the Overflow (V)
// flag when the operands are unordered (one is NaN), so the conditions that
// care about unordered results need extra branches.
void MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs,
                                 FloatRegister rhs, Label* label) {
  compareFloat(cond, lhs, rhs);
  switch (cond) {
    case DoubleNotEqual: {
      Label unordered;
      // not equal *and* ordered
      branch(Overflow, &unordered);
      branch(NotEqual, label);
      bind(&unordered);
      break;
    }
    case DoubleEqualOrUnordered:
      // Taken when the operands are unordered or compare equal.
      branch(Overflow, label);
      branch(Equal, label);
      break;
    default:
      branch(Condition(cond), label);
  }
}
1065
// Truncate |src| to a 64-bit integer, failing only when the float is out of
// the 64-bit range (or NaN); otherwise keep the low 32 bits, i.e. the result
// may be the value mod 2^32.
void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
                                                         Register dest,
                                                         Label* fail) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();

  ARMFPRegister src32(src, 32);
  ARMRegister dest64(dest, 64);

  MOZ_ASSERT(!scratch64.Is(dest64));

  // Fcvtzs saturates out-of-range (and NaN) inputs to INT64_MIN/INT64_MAX.
  Fcvtzs(dest64, src32);
  // Adding 0x7fff...ffff maps exactly the two saturated results to the top
  // two unsigned values, so Cmn/Above detects them and branches to |fail|.
  Add(scratch64, dest64, Operand(0x7fffffffffffffff));
  Cmn(scratch64, 3);
  B(fail, Assembler::Above);
  // Keep only the low 32 bits of the truncated value.
  And(dest64, dest64, Operand(0xffffffff));
}
1083
// Truncate |src| to an int32, branching to |fail| when the conversion is
// not exact/in range (see convertFloat32ToInt32).
// NOTE(review): the final |false| argument is forwarded to
// convertFloat32ToInt32 — presumably a negative-zero-check flag; confirm
// against its definition.
void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
                                                  Register dest, Label* fail) {
  convertFloat32ToInt32(src, dest, fail, false);
}
1088
// Branch on a 64-bit double comparison. As in branchFloat, the Overflow (V)
// flag signals an unordered (NaN) comparison, so the unordered-sensitive
// conditions need extra branches.
void MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs,
                                  FloatRegister rhs, Label* label) {
  compareDouble(cond, lhs, rhs);
  switch (cond) {
    case DoubleNotEqual: {
      Label unordered;
      // not equal *and* ordered
      branch(Overflow, &unordered);
      branch(NotEqual, label);
      bind(&unordered);
      break;
    }
    case DoubleEqualOrUnordered:
      // Taken when the operands are unordered or compare equal.
      branch(Overflow, label);
      branch(Equal, label);
      break;
    default:
      branch(Condition(cond), label);
  }
}
1109
// Truncate |src| to a 64-bit integer, failing only when the double is out of
// the 64-bit range (or NaN); otherwise keep the low 32 bits, i.e. the result
// may be the value mod 2^32.
void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
                                                        Register dest,
                                                        Label* fail) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();

  // An out of range integer will be saturated to the destination size.
  ARMFPRegister src64(src, 64);
  ARMRegister dest64(dest, 64);

  MOZ_ASSERT(!scratch64.Is(dest64));

  Fcvtzs(dest64, src64);
  // Adding 0x7fff...ffff maps exactly the two saturated results
  // (INT64_MIN/INT64_MAX) to the top two unsigned values, so Cmn/Above
  // detects them and branches to |fail|.
  Add(scratch64, dest64, Operand(0x7fffffffffffffff));
  Cmn(scratch64, 3);
  B(fail, Assembler::Above);
  // Keep only the low 32 bits of the truncated value.
  And(dest64, dest64, Operand(0xffffffff));
}
1128
// Truncate |src| to an int32, branching to |fail| when the conversion is
// not exact/in range (see convertDoubleToInt32).
// NOTE(review): the final |false| argument is forwarded to
// convertDoubleToInt32 — presumably a negative-zero-check flag; confirm
// against its definition.
void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
                                                 Register dest, Label* fail) {
  convertDoubleToInt32(src, dest, fail, false);
}
1133
// dest += src (32-bit, setting condition flags), then branch on |cond|.
template <typename T>
void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
                                 Label* label) {
  adds32(src, dest);
  B(label, cond);
}
1140
// dest -= src (32-bit, setting condition flags), then branch on |cond|.
template <typename T>
void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
                                 Label* label) {
  subs32(src, dest);
  branch(cond, label);
}
1147
1148 template <typename T>
branchMul32(Condition cond,T src,Register dest,Label * label)1149 void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
1150 Label* label) {
1151 MOZ_ASSERT(cond == Assembler::Overflow);
1152 vixl::UseScratchRegisterScope temps(this);
1153 mul32(src, dest, dest, label);
1154 }
1155
// dest >>= src (32-bit), then branch on whether the result is zero. Only
// Zero/NonZero are supported; the shift itself does not set flags, so the
// result is compared against zero explicitly.
template <typename T>
void MacroAssembler::branchRshift32(Condition cond, T src, Register dest,
                                    Label* label) {
  MOZ_ASSERT(cond == Zero || cond == NonZero);
  rshift32(src, dest);
  branch32(cond == Zero ? Equal : NotEqual, dest, Imm32(0), label);
}
1163
// reg = -reg (32-bit), branching to |label| on overflow (the only supported
// condition; overflow occurs when negating INT32_MIN).
void MacroAssembler::branchNeg32(Condition cond, Register reg, Label* label) {
  MOZ_ASSERT(cond == Overflow);
  neg32(reg);
  B(label, cond);
}
1169
// lhs -= rhs (pointer-sized, setting flags), then branch on |cond|.
void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
                                  Label* label) {
  Subs(ARMRegister(lhs, 64), ARMRegister(lhs, 64), Operand(rhs.value));
  B(cond, label);
}
1175
// Branch on a bitwise test of two 32-bit registers. Only flag-based
// conditions make sense here.
template <class L>
void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
                                  L label) {
  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
             cond == NotSigned);
  // x86 prefers |test foo, foo| to |cmp foo, #0|.
  // Convert the former to the latter for ARM.
  if (lhs == rhs && (cond == Zero || cond == NonZero)) {
    cmp32(lhs, Imm32(0));
  } else {
    test32(lhs, rhs);
  }
  B(label, cond);
}
1190
// Branch on a bitwise test of a 32-bit register against an immediate mask.
template <class L>
void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
                                  L label) {
  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
             cond == NotSigned);
  test32(lhs, rhs);
  B(label, cond);
}
1199
// Load the 32-bit value at |lhs| and branch on a bitwise test against |rhs|.
void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
                                  Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  load32(lhs, scratch);
  branchTest32(cond, scratch, rhs, label);
}
1208
// Load the 32-bit value at absolute address |lhs| and branch on a bitwise
// test against |rhs|.
void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
                                  Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  load32(lhs, scratch);
  branchTest32(cond, scratch, rhs, label);
}
1216
// Branch on a 64-bit bitwise test of two pointer-sized registers.
template <class L>
void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
                                   L label) {
  Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64)));
  B(label, cond);
}
1223
// Branch on a 64-bit bitwise test of a register against an immediate mask.
void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
                                   Label* label) {
  Tst(ARMRegister(lhs, 64), Operand(rhs.value));
  B(label, cond);
}
1229
// Load the pointer at |lhs| and branch on a bitwise test against |rhs|.
void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
                                   Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  loadPtr(lhs, scratch);
  branchTestPtr(cond, scratch, rhs, label);
}
1238
// 64-bit bitwise test and branch. On ARM64 a Register64 is a single
// register, so this is just branchTestPtr; |temp| is unused here.
template <class L>
void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
                                  Register64 rhs, Register temp, L label) {
  branchTestPtr(cond, lhs.reg, rhs.reg, label);
}
1244
// Tag-register form; forwards to branchTestUndefinedImpl.
void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
                                         Label* label) {
  branchTestUndefinedImpl(cond, tag, label);
}
1249
// Address form; forwards to branchTestUndefinedImpl.
void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
                                         Label* label) {
  branchTestUndefinedImpl(cond, address, label);
}
1254
// BaseIndex form; forwards to branchTestUndefinedImpl.
void MacroAssembler::branchTestUndefined(Condition cond,
                                         const BaseIndex& address,
                                         Label* label) {
  branchTestUndefinedImpl(cond, address, label);
}
1260
// ValueOperand form; forwards to branchTestUndefinedImpl.
void MacroAssembler::branchTestUndefined(Condition cond,
                                         const ValueOperand& value,
                                         Label* label) {
  branchTestUndefinedImpl(cond, value, label);
}
1266
// Shared helper: materialize the undefined-tag test's condition and branch.
template <typename T>
void MacroAssembler::branchTestUndefinedImpl(Condition cond, const T& t,
                                             Label* label) {
  Condition c = testUndefined(cond, t);
  B(label, c);
}
1273
// Tag-register form; forwards to branchTestInt32Impl.
void MacroAssembler::branchTestInt32(Condition cond, Register tag,
                                     Label* label) {
  branchTestInt32Impl(cond, tag, label);
}
1278
// Address form; forwards to branchTestInt32Impl.
void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
                                     Label* label) {
  branchTestInt32Impl(cond, address, label);
}
1283
// BaseIndex form; forwards to branchTestInt32Impl.
void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
                                     Label* label) {
  branchTestInt32Impl(cond, address, label);
}
1288
// ValueOperand form; forwards to branchTestInt32Impl.
void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
                                     Label* label) {
  branchTestInt32Impl(cond, value, label);
}
1293
// Shared helper: materialize the int32-tag test's condition and branch.
template <typename T>
void MacroAssembler::branchTestInt32Impl(Condition cond, const T& t,
                                         Label* label) {
  Condition c = testInt32(cond, t);
  B(label, c);
}
1300
// Branch if the boxed int32 in |value| is truthy (or falsy when |truthy| is
// false); the test itself is delegated to testInt32Truthy.
void MacroAssembler::branchTestInt32Truthy(bool truthy,
                                           const ValueOperand& value,
                                           Label* label) {
  Condition c = testInt32Truthy(truthy, value);
  B(label, c);
}
1307
// Tag-register form; forwards to branchTestDoubleImpl.
void MacroAssembler::branchTestDouble(Condition cond, Register tag,
                                      Label* label) {
  branchTestDoubleImpl(cond, tag, label);
}
1312
// Address form; forwards to branchTestDoubleImpl.
void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
                                      Label* label) {
  branchTestDoubleImpl(cond, address, label);
}
1317
// BaseIndex form; forwards to branchTestDoubleImpl.
void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestDoubleImpl(cond, address, label);
}
1322
// ValueOperand form; forwards to branchTestDoubleImpl.
void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestDoubleImpl(cond, value, label);
}
1327
// Shared helper: materialize the double-tag test's condition and branch.
template <typename T>
void MacroAssembler::branchTestDoubleImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testDouble(cond, t);
  B(label, c);
}
1334
// Branch if the double in |reg| is truthy (or falsy when |truthy| is false).
// Falsy doubles are zero and NaN; Fcmp against 0.0 sets Z for zero and the
// Overflow (V) flag for the unordered (NaN) case.
void MacroAssembler::branchTestDoubleTruthy(bool truthy, FloatRegister reg,
                                            Label* label) {
  Fcmp(ARMFPRegister(reg, 64), 0.0);
  if (!truthy) {
    // falsy values are zero, and NaN.
    branch(Zero, label);
    branch(Overflow, label);
  } else {
    // Truthy values are non-zero and not NaN: jump over the falsy cases,
    // then branch unconditionally.
    Label onFalse;
    branch(Zero, &onFalse);
    branch(Overflow, &onFalse);
    B(label);
    bind(&onFalse);
  }
}
1352
// Tag-register form; forwards to branchTestNumberImpl.
void MacroAssembler::branchTestNumber(Condition cond, Register tag,
                                      Label* label) {
  branchTestNumberImpl(cond, tag, label);
}
1357
// ValueOperand form; forwards to branchTestNumberImpl.
void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestNumberImpl(cond, value, label);
}
1362
// Shared helper: materialize the number test's condition and branch.
template <typename T>
void MacroAssembler::branchTestNumberImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testNumber(cond, t);
  B(label, c);
}
1369
// Tag-register form; forwards to branchTestBooleanImpl.
void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
                                       Label* label) {
  branchTestBooleanImpl(cond, tag, label);
}
1374
// Address form; forwards to branchTestBooleanImpl.
void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
                                       Label* label) {
  branchTestBooleanImpl(cond, address, label);
}
1379
// BaseIndex form; forwards to branchTestBooleanImpl.
void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
                                       Label* label) {
  branchTestBooleanImpl(cond, address, label);
}
1384
// ValueOperand form; forwards to branchTestBooleanImpl.
void MacroAssembler::branchTestBoolean(Condition cond,
                                       const ValueOperand& value,
                                       Label* label) {
  branchTestBooleanImpl(cond, value, label);
}
1390
// Shared helper: materialize the boolean-tag test's condition and branch.
template <typename T>
void MacroAssembler::branchTestBooleanImpl(Condition cond, const T& tag,
                                           Label* label) {
  Condition c = testBoolean(cond, tag);
  B(label, c);
}
1397
// Branch if the boxed boolean in |value| is truthy (or falsy when |truthy|
// is false); the test itself is delegated to testBooleanTruthy.
void MacroAssembler::branchTestBooleanTruthy(bool truthy,
                                             const ValueOperand& value,
                                             Label* label) {
  Condition c = testBooleanTruthy(truthy, value);
  B(label, c);
}
1404
// Tag-register form; forwards to branchTestStringImpl.
void MacroAssembler::branchTestString(Condition cond, Register tag,
                                      Label* label) {
  branchTestStringImpl(cond, tag, label);
}
1409
// Address form; forwards to branchTestStringImpl.
void MacroAssembler::branchTestString(Condition cond, const Address& address,
                                      Label* label) {
  branchTestStringImpl(cond, address, label);
}
1414
// BaseIndex form; forwards to branchTestStringImpl.
void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestStringImpl(cond, address, label);
}
1419
// ValueOperand form; forwards to branchTestStringImpl.
void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestStringImpl(cond, value, label);
}
1424
// Shared helper: materialize the string-tag test's condition and branch.
template <typename T>
void MacroAssembler::branchTestStringImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testString(cond, t);
  B(label, c);
}
1431
// Branch if the boxed string in |value| is truthy (or falsy when |truthy|
// is false); the test itself is delegated to testStringTruthy.
void MacroAssembler::branchTestStringTruthy(bool truthy,
                                            const ValueOperand& value,
                                            Label* label) {
  Condition c = testStringTruthy(truthy, value);
  B(label, c);
}
1438
// Tag-register form; forwards to branchTestSymbolImpl.
void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
                                      Label* label) {
  branchTestSymbolImpl(cond, tag, label);
}
1443
// Address form; forwards to branchTestSymbolImpl.
void MacroAssembler::branchTestSymbol(Condition cond, const Address& address,
                                      Label* label) {
  branchTestSymbolImpl(cond, address, label);
}
1448
// BaseIndex form; forwards to branchTestSymbolImpl.
void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestSymbolImpl(cond, address, label);
}
1453
// ValueOperand form; forwards to branchTestSymbolImpl.
void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestSymbolImpl(cond, value, label);
}
1458
// Shared helper: materialize the symbol-tag test's condition and branch.
template <typename T>
void MacroAssembler::branchTestSymbolImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testSymbol(cond, t);
  B(label, c);
}
1465
// Tag-register form; forwards to branchTestBigIntImpl.
void MacroAssembler::branchTestBigInt(Condition cond, Register tag,
                                      Label* label) {
  branchTestBigIntImpl(cond, tag, label);
}
1470
// Address form; forwards to branchTestBigIntImpl.
void MacroAssembler::branchTestBigInt(Condition cond, const Address& address,
                                      Label* label) {
  branchTestBigIntImpl(cond, address, label);
}
1475
// BaseIndex form; forwards to branchTestBigIntImpl.
void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestBigIntImpl(cond, address, label);
}
1480
// ValueOperand form; forwards to branchTestBigIntImpl.
void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestBigIntImpl(cond, value, label);
}
1485
// Shared helper: materialize the BigInt-tag test's condition and branch.
template <typename T>
void MacroAssembler::branchTestBigIntImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testBigInt(cond, t);
  B(label, c);
}
1492
// Branch if the boxed BigInt in |value| is truthy (or falsy when |truthy|
// is false); the test itself is delegated to testBigIntTruthy.
void MacroAssembler::branchTestBigIntTruthy(bool truthy,
                                            const ValueOperand& value,
                                            Label* label) {
  Condition c = testBigIntTruthy(truthy, value);
  B(label, c);
}
1499
// Tag-register form; forwards to branchTestNullImpl.
void MacroAssembler::branchTestNull(Condition cond, Register tag,
                                    Label* label) {
  branchTestNullImpl(cond, tag, label);
}
1504
// Address form; forwards to branchTestNullImpl.
void MacroAssembler::branchTestNull(Condition cond, const Address& address,
                                    Label* label) {
  branchTestNullImpl(cond, address, label);
}
1509
// BaseIndex form; forwards to branchTestNullImpl.
void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
                                    Label* label) {
  branchTestNullImpl(cond, address, label);
}
1514
// ValueOperand form; forwards to branchTestNullImpl.
void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
                                    Label* label) {
  branchTestNullImpl(cond, value, label);
}
1519
// Shared helper: materialize the null-tag test's condition and branch.
template <typename T>
void MacroAssembler::branchTestNullImpl(Condition cond, const T& t,
                                        Label* label) {
  Condition c = testNull(cond, t);
  B(label, c);
}
1526
// Tag-register form; forwards to branchTestObjectImpl.
void MacroAssembler::branchTestObject(Condition cond, Register tag,
                                      Label* label) {
  branchTestObjectImpl(cond, tag, label);
}
1531
// Address form; forwards to branchTestObjectImpl.
void MacroAssembler::branchTestObject(Condition cond, const Address& address,
                                      Label* label) {
  branchTestObjectImpl(cond, address, label);
}
1536
// BaseIndex form; forwards to branchTestObjectImpl.
void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestObjectImpl(cond, address, label);
}
1541
// Branch on an object test of the boxed |value|; forwards to the shared
// branchTestObjectImpl.
void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestObjectImpl(cond, value, label);
}
1546
1547 template <typename T>
branchTestObjectImpl(Condition cond,const T & t,Label * label)1548 void MacroAssembler::branchTestObjectImpl(Condition cond, const T& t,
1549 Label* label) {
1550 Condition c = testObject(cond, t);
1551 B(label, c);
1552 }
1553
// Branch on a GC-thing test of the value at |address|; forwards to the
// shared branchTestGCThingImpl.
void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
                                       Label* label) {
  branchTestGCThingImpl(cond, address, label);
}
1558
// Branch on a GC-thing test of the value at the base+index |address|;
// forwards to the shared branchTestGCThingImpl.
void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
                                       Label* label) {
  branchTestGCThingImpl(cond, address, label);
}
1563
// Branch on a GC-thing test of the boxed |value|; forwards to the shared
// branchTestGCThingImpl.
void MacroAssembler::branchTestGCThing(Condition cond,
                                       const ValueOperand& value,
                                       Label* label) {
  branchTestGCThingImpl(cond, value, label);
}
1569
1570 template <typename T>
branchTestGCThingImpl(Condition cond,const T & src,Label * label)1571 void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& src,
1572 Label* label) {
1573 Condition c = testGCThing(cond, src);
1574 B(label, c);
1575 }
1576
// Branch on a primitive-tag test of |tag|; forwards to the shared
// branchTestPrimitiveImpl.
void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
                                         Label* label) {
  branchTestPrimitiveImpl(cond, tag, label);
}
1581
// Branch on a primitive test of the boxed |value|; forwards to the shared
// branchTestPrimitiveImpl.
void MacroAssembler::branchTestPrimitive(Condition cond,
                                         const ValueOperand& value,
                                         Label* label) {
  branchTestPrimitiveImpl(cond, value, label);
}
1587
1588 template <typename T>
branchTestPrimitiveImpl(Condition cond,const T & t,Label * label)1589 void MacroAssembler::branchTestPrimitiveImpl(Condition cond, const T& t,
1590 Label* label) {
1591 Condition c = testPrimitive(cond, t);
1592 B(label, c);
1593 }
1594
// Branch on a magic-tag test of |tag|; forwards to the shared
// branchTestMagicImpl.
void MacroAssembler::branchTestMagic(Condition cond, Register tag,
                                     Label* label) {
  branchTestMagicImpl(cond, tag, label);
}
1599
// Branch on a magic test of the value at |address|; forwards to the shared
// branchTestMagicImpl.
void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
                                     Label* label) {
  branchTestMagicImpl(cond, address, label);
}
1604
// Branch on a magic test of the value at the base+index |address|; forwards
// to the shared branchTestMagicImpl.
void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
                                     Label* label) {
  branchTestMagicImpl(cond, address, label);
}
1609
// Branch on a magic test of the boxed |value|. Templated on the label type
// (L) so both Label* and wasm-style labels work; forwards to the shared
// branchTestMagicImpl.
template <class L>
void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
                                     L label) {
  branchTestMagicImpl(cond, value, label);
}
1615
1616 template <typename T, class L>
branchTestMagicImpl(Condition cond,const T & t,L label)1617 void MacroAssembler::branchTestMagicImpl(Condition cond, const T& t, L label) {
1618 Condition c = testMagic(cond, t);
1619 B(label, c);
1620 }
1621
branchTestMagic(Condition cond,const Address & valaddr,JSWhyMagic why,Label * label)1622 void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
1623 JSWhyMagic why, Label* label) {
1624 uint64_t magic = MagicValue(why).asRawBits();
1625 cmpPtr(valaddr, ImmWord(magic));
1626 B(label, cond);
1627 }
1628
// Load a code pointer from |addr| into a scratch register and jump to it
// (indirect branch via BR).
void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
  vixl::UseScratchRegisterScope temps(&this->asVIXL());
  const ARMRegister scratch64 = temps.AcquireX();
  loadPtr(addr, scratch64.asUnsized());
  Br(scratch64);
}
1635
// Compare |lhs| with |rhs| (32-bit) and, if |cond| holds, move |src| into
// |dest| via CSEL. |dest| is left unchanged when the condition fails.
void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
                                 Register src, Register dest) {
  cmp32(lhs, rhs);
  Csel(ARMRegister(dest, 32), ARMRegister(src, 32), ARMRegister(dest, 32),
       cond);
}
1642
// Compare |lhs| with the 32-bit value at |rhs| and, if |cond| holds, move
// |src| into |dest| via CSEL. |dest| is left unchanged when the condition
// fails.
void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
                                 const Address& rhs, Register src,
                                 Register dest) {
  cmp32(lhs, rhs);
  Csel(ARMRegister(dest, 32), ARMRegister(src, 32), ARMRegister(dest, 32),
       cond);
}
1650
// Conditional 32-bit load (memory rhs variant): not yet implemented on
// ARM64; crashes if reached.
void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
                                 const Address& rhs, const Address& src,
                                 Register dest) {
  MOZ_CRASH("NYI");
}
1656
// Conditional 32-bit load (register rhs variant): not yet implemented on
// ARM64; crashes if reached.
void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Register rhs,
                                 const Address& src, Register dest) {
  MOZ_CRASH("NYI");
}
1661
// Compare |lhs| with immediate |rhs| (32-bit compare) and, if |cond| holds,
// move the full 64-bit |src| into |dest| via CSEL; |dest| is unchanged
// otherwise.
void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
                                  Register src, Register dest) {
  cmp32(lhs, rhs);
  Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
       cond);
}
1668
// If the 32-bit value at |lhs| compares |cond| against |rhs|, load the
// pointer at |src| into |dest|; otherwise leave |dest| unchanged.
void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
                                  const Address& src, Register dest) {
  // ARM64 does not support conditional loads, so we use a branch with a CSel
  // (to prevent Spectre attacks).
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();
  Label done;
  // Inverted branch skips the load entirely on the "false" path.
  branch32(Assembler::InvertCondition(cond), lhs, rhs, &done);
  loadPtr(src, scratch64.asUnsized());
  // The CSel makes the register update architecturally dependent on the
  // condition even under speculation.
  Csel(ARMRegister(dest, 64), scratch64, ARMRegister(dest, 64), cond);
  bind(&done);
}
1681
// If testing the 32-bit value at |addr| against |mask| satisfies |cond|
// (Zero/NonZero only), load the pointer at |src| into |dest|; otherwise
// leave |dest| unchanged.
void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
                                   Imm32 mask, const Address& src,
                                   Register dest) {
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);

  // ARM64 does not support conditional loads, so we use a branch with a CSel
  // (to prevent Spectre attacks).
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();
  Label done;
  // Inverted branch skips the load entirely on the "false" path.
  branchTest32(Assembler::InvertCondition(cond), addr, mask, &done);
  loadPtr(src, scratch64.asUnsized());
  // The CSel keeps the update condition-dependent even under speculation.
  Csel(ARMRegister(dest, 64), scratch64, ARMRegister(dest, 64), cond);
  bind(&done);
}
1697
// Test the 32-bit value at |addr| against |mask| and, if |cond| (Zero or
// NonZero) holds, move the 64-bit |src| into |dest| via CSEL; |dest| is
// unchanged otherwise.
void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
                                   Imm32 mask, Register src, Register dest) {
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
  test32(addr, mask);
  Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
       cond);
}
1705
// Conditionally move |src| into |dest| based on the currently-set flags
// (no comparison is emitted here; the caller must have set the flags).
// Used as a Spectre-mitigation select.
void MacroAssembler::spectreMovePtr(Condition cond, Register src,
                                    Register dest) {
  Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
       cond);
}
1711
// Zero |dest| when |cond| holds: the CSel keeps |dest| when the inverted
// condition is true and selects xzr otherwise. The unnamed Register
// parameter (a scratch on other platforms) is unused on ARM64.
void MacroAssembler::spectreZeroRegister(Condition cond, Register,
                                         Register dest) {
  Csel(ARMRegister(dest, 64), ARMRegister(dest, 64), vixl::xzr,
       Assembler::InvertCondition(cond));
}
1717
// Unsigned bounds check: branch to |failure| when |length| <= |index|
// (i.e. index is out of bounds; the unsigned compare also rejects negative
// indices). With spectreIndexMasking enabled, additionally zero |index| on
// the speculative out-of-bounds path via CSel (flags are still set by the
// branch's comparison). |maybeScratch| is unused on ARM64.
void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
                                          Register maybeScratch,
                                          Label* failure) {
  MOZ_ASSERT(length != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  branch32(Assembler::BelowOrEqual, length, index, failure);

  if (JitOptions.spectreIndexMasking) {
    Csel(ARMRegister(index, 32), ARMRegister(index, 32), vixl::wzr,
         Assembler::Above);
  }
}
1731
// Unsigned bounds check against an in-memory length: branch to |failure|
// when the value at |length| <= |index|. With spectreIndexMasking enabled,
// additionally zero |index| on the speculative out-of-bounds path via CSel
// (flags are still set by the branch's comparison). |maybeScratch| is
// unused on ARM64.
void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
                                          Register maybeScratch,
                                          Label* failure) {
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  branch32(Assembler::BelowOrEqual, length, index, failure);

  if (JitOptions.spectreIndexMasking) {
    Csel(ARMRegister(index, 32), ARMRegister(index, 32), vixl::wzr,
         Assembler::Above);
  }
}
1746
1747 // ========================================================================
1748 // Memory access primitives.
// Store a double to |dest| without NaN canonicalization.
void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
                                                const Address& dest) {
  Str(ARMFPRegister(src, 64), toMemOperand(dest));
}
// Store a double to a base+index address without NaN canonicalization.
void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
                                                const BaseIndex& dest) {
  doBaseIndex(ARMFPRegister(src, 64), dest, vixl::STR_d);
}
1757
// Store a float32 to |addr| without NaN canonicalization.
void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
                                                 const Address& addr) {
  Str(ARMFPRegister(src, 32), toMemOperand(addr));
}
// Store a float32 to a base+index address without NaN canonicalization.
void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
                                                 const BaseIndex& addr) {
  doBaseIndex(ARMFPRegister(src, 32), addr, vixl::STR_s);
}
1766
// SIMD float32x3 store: not yet implemented on ARM64; crashes if reached.
void MacroAssembler::storeFloat32x3(FloatRegister src, const Address& dest) {
  MOZ_CRASH("NYI");
}
// SIMD float32x3 store (base+index): not yet implemented on ARM64; crashes
// if reached.
void MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest) {
  MOZ_CRASH("NYI");
}
1773
memoryBarrier(MemoryBarrierBits barrier)1774 void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
1775 if (barrier == MembarStoreStore) {
1776 Dmb(vixl::InnerShareable, vixl::BarrierWrites);
1777 } else if (barrier == MembarLoadLoad) {
1778 Dmb(vixl::InnerShareable, vixl::BarrierReads);
1779 } else if (barrier) {
1780 Dmb(vixl::InnerShareable, vixl::BarrierAll);
1781 }
1782 }
1783
1784 // ===============================================================
1785 // Clamping functions.
1786
// Clamp the signed 32-bit value in |reg| to the range [0, 255].
void MacroAssembler::clampIntToUint8(Register reg) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  const ARMRegister reg32(reg, 32);
  MOZ_ASSERT(!scratch32.Is(reg32));

  // Compare reg against its own low byte zero-extended (UXTB). In-range
  // values compare equal; negative values compare less; values > 255
  // compare greater.
  Cmp(reg32, Operand(reg32, vixl::UXTB));
  // Negative => clamp to 0.
  Csel(reg32, reg32, vixl::wzr, Assembler::GreaterThanOrEqual);
  // Mov does not affect flags, so the next CSel still uses the original
  // comparison: too large => clamp to 0xff.
  Mov(scratch32, Operand(0xff));
  Csel(reg32, reg32, scratch32, Assembler::LessThanOrEqual);
}
1798
// Unbox the pointer payload of |src| into |dest|, branching to |fail| when
// the value's tag is not |type|. XOR with the shifted tag clears the tag
// bits exactly when it matches, so any bits left above JSVAL_TAG_SHIFT
// signal a mismatch.
void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
                                      JSValueType type, Label* fail) {
  MOZ_ASSERT(type == JSVAL_TYPE_OBJECT || type == JSVAL_TYPE_STRING ||
             type == JSVAL_TYPE_SYMBOL || type == JSVAL_TYPE_BIGINT);
  // dest := src XOR mask
  // fail if dest >> JSVAL_TAG_SHIFT != 0
  const ARMRegister src64(src.valueReg(), 64);
  const ARMRegister dest64(dest, 64);
  Eor(dest64, src64, Operand(JSVAL_TYPE_TO_SHIFTED_TAG(type)));
  Cmp(vixl::xzr, Operand(dest64, vixl::LSR, JSVAL_TAG_SHIFT));
  j(Assembler::NotEqual, fail);
}
1811
// Load the value at |src| and fallibly unbox its pointer payload into
// |dest| (|dest| doubles as the temporary value register).
void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
                                      JSValueType type, Label* fail) {
  loadValue(src, ValueOperand(dest));
  fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
}
1817
// Load the value at the base+index |src| and fallibly unbox its pointer
// payload into |dest| (|dest| doubles as the temporary value register).
void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
                                      JSValueType type, Label* fail) {
  loadValue(src, ValueOperand(dest));
  fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
}
1823
1824 //}}} check_macroassembler_style
1825 // ===============================================================
1826
// Add |src| to the (pseudo) stack pointer.
// NOTE(review): unlike subFromStackPtr, no syncStackPtr() here — presumably
// raising the pseudo-SP cannot violate the real-sp invariant; confirm
// against the platform's SP-sync rules.
void MacroAssemblerCompat::addToStackPtr(Register src) {
  Add(GetStackPointer64(), GetStackPointer64(), ARMRegister(src, 64));
}
1830
// Add the immediate |imm| to the (pseudo) stack pointer.
// NOTE(review): no syncStackPtr() here, in contrast to subFromStackPtr —
// confirm this matches the platform's SP-sync rules.
void MacroAssemblerCompat::addToStackPtr(Imm32 imm) {
  Add(GetStackPointer64(), GetStackPointer64(), Operand(imm.value));
}
1834
// Add the 64-bit value loaded from |src| to the (pseudo) stack pointer.
void MacroAssemblerCompat::addToStackPtr(const Address& src) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch = temps.AcquireX();
  Ldr(scratch, toMemOperand(src));
  Add(GetStackPointer64(), GetStackPointer64(), scratch);
}
1841
// dest += stackPointer (the stack pointer itself is not modified).
void MacroAssemblerCompat::addStackPtrTo(Register dest) {
  Add(ARMRegister(dest, 64), ARMRegister(dest, 64), GetStackPointer64());
}
1845
// Subtract |src| from the (pseudo) stack pointer, then sync the real sp so
// it does not sit above the pseudo-SP.
void MacroAssemblerCompat::subFromStackPtr(Register src) {
  Sub(GetStackPointer64(), GetStackPointer64(), ARMRegister(src, 64));
  syncStackPtr();
}
1850
// Subtract the immediate |imm| from the (pseudo) stack pointer, then sync
// the real sp.
void MacroAssemblerCompat::subFromStackPtr(Imm32 imm) {
  Sub(GetStackPointer64(), GetStackPointer64(), Operand(imm.value));
  syncStackPtr();
}
1855
// dest -= stackPointer (the stack pointer itself is not modified).
void MacroAssemblerCompat::subStackPtrFrom(Register dest) {
  Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), GetStackPointer64());
}
1859
// stackPointer &= imm. When the real sp is the stack pointer it must be
// routed through a scratch register (sp is not a general And operand).
void MacroAssemblerCompat::andToStackPtr(Imm32 imm) {
  if (sp.Is(GetStackPointer64())) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Mov(scratch, sp);
    And(sp, scratch, Operand(imm.value));
    // syncStackPtr() not needed since our SP is the real SP.
  } else {
    And(GetStackPointer64(), GetStackPointer64(), Operand(imm.value));
    syncStackPtr();
  }
}
1872
// dest &= stackPointer (the stack pointer itself is not modified).
void MacroAssemblerCompat::andStackPtrTo(Register dest) {
  And(ARMRegister(dest, 64), ARMRegister(dest, 64), GetStackPointer64());
}
1876
// stackPointer = src, then sync the real sp with the pseudo-SP.
void MacroAssemblerCompat::moveToStackPtr(Register src) {
  Mov(GetStackPointer64(), ARMRegister(src, 64));
  syncStackPtr();
}
1881
// dest = stackPointer.
void MacroAssemblerCompat::moveStackPtrTo(Register dest) {
  Mov(ARMRegister(dest, 64), GetStackPointer64());
}
1885
// stackPointer = *src. When the real sp is the stack pointer the load goes
// through a scratch register (sp cannot be a load destination here);
// otherwise load the pseudo-SP directly and sync the real sp.
void MacroAssemblerCompat::loadStackPtr(const Address& src) {
  if (sp.Is(GetStackPointer64())) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Ldr(scratch, toMemOperand(src));
    Mov(sp, scratch);
    // syncStackPtr() not needed since our SP is the real SP.
  } else {
    Ldr(GetStackPointer64(), toMemOperand(src));
    syncStackPtr();
  }
}
1898
// *dest = stackPointer. When the real sp is the stack pointer it is copied
// through a scratch register before the store.
void MacroAssemblerCompat::storeStackPtr(const Address& dest) {
  if (sp.Is(GetStackPointer64())) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Mov(scratch, sp);
    Str(scratch, toMemOperand(dest));
  } else {
    Str(GetStackPointer64(), toMemOperand(dest));
  }
}
1909
// Branch to |label| when (stackPointer & rhs) satisfies |cond|. When the
// real sp is the stack pointer it is copied through a scratch register
// first — presumably because sp is not a valid Tst operand; confirm.
void MacroAssemblerCompat::branchTestStackPtr(Condition cond, Imm32 rhs,
                                              Label* label) {
  if (sp.Is(GetStackPointer64())) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Mov(scratch, sp);
    Tst(scratch, Operand(rhs.value));
  } else {
    Tst(GetStackPointer64(), Operand(rhs.value));
  }
  B(label, cond);
}
1922
// Branch to |label| when stackPointer compares |cond| against |rhs_|. When
// the real sp is the stack pointer it is copied through a scratch register
// before the Cmp.
void MacroAssemblerCompat::branchStackPtr(Condition cond, Register rhs_,
                                          Label* label) {
  ARMRegister rhs(rhs_, 64);
  if (sp.Is(GetStackPointer64())) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Mov(scratch, sp);
    Cmp(scratch, rhs);
  } else {
    Cmp(GetStackPointer64(), rhs);
  }
  B(label, cond);
}
1936
// Branch comparing the value loaded from |lhs| against the stack pointer
// (stack pointer on the right-hand side of the comparison).
// NOTE(review): InvertCondition negates the condition rather than commuting
// it for the swapped operands; for strict/non-strict ordering conditions
// these differ at equality — confirm callers only rely on conditions where
// that edge is irrelevant.
void MacroAssemblerCompat::branchStackPtrRhs(Condition cond, Address lhs,
                                             Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch = temps.AcquireX();
  Ldr(scratch, toMemOperand(lhs));
  // Cmp disallows SP as the rhs, so flip the operands and invert the
  // condition.
  Cmp(GetStackPointer64(), scratch);
  B(label, Assembler::InvertCondition(cond));
}
1947
// Branch comparing the value loaded from the absolute address |lhs| against
// the stack pointer (stack pointer on the right-hand side).
// NOTE(review): as in the Address overload, InvertCondition negates rather
// than commutes the comparison — confirm callers' conditions tolerate the
// equality edge.
void MacroAssemblerCompat::branchStackPtrRhs(Condition cond,
                                             AbsoluteAddress lhs,
                                             Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch = temps.AcquireX();
  loadPtr(lhs, scratch.asUnsized());
  // Cmp disallows SP as the rhs, so flip the operands and invert the
  // condition.
  Cmp(GetStackPointer64(), scratch);
  B(label, Assembler::InvertCondition(cond));
}
1959
// If source is a double, load into dest.
// If source is int32, convert to double and store in dest.
// Else, branch to failure.
void MacroAssemblerCompat::ensureDouble(const ValueOperand& source,
                                        FloatRegister dest, Label* failure) {
  Label isDouble, done;

  {
    // Scope the tag scratch so it is released before the conversion below.
    ScratchTagScope tag(asMasm(), source);
    splitTagForTest(source, tag);
    asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
    // Not a double and not an int32: bail out.
    asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
  }

  // Int32 path: convert the payload to a double.
  convertInt32ToDouble(source.valueReg(), dest);
  jump(&done);

  // Double path: unbox directly.
  bind(&isDouble);
  unboxDouble(source, dest);

  bind(&done);
}
1982
// Unbox |src| into |dest|. For a float destination, accept either an int32
// (converted to double) or a double payload; for a GPR destination, unbox
// the non-double payload according to |type|.
void MacroAssemblerCompat::unboxValue(const ValueOperand& src, AnyRegister dest,
                                      JSValueType type) {
  if (dest.isFloat()) {
    Label notInt32, end;
    asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
    // Int32 payload: convert to double.
    convertInt32ToDouble(src.valueReg(), dest.fpu());
    jump(&end);
    // Otherwise assume a double payload.
    bind(&notInt32);
    unboxDouble(src, dest.fpu());
    bind(&end);
  } else {
    unboxNonDouble(src, dest.gpr(), type);
  }
}
1997
1998 } // namespace jit
1999 } // namespace js
2000
2001 #endif /* jit_arm64_MacroAssembler_arm64_inl_h */
2002