1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #ifndef jit_arm64_MacroAssembler_arm64_inl_h
8 #define jit_arm64_MacroAssembler_arm64_inl_h
9
10 #include "jit/arm64/MacroAssembler-arm64.h"
11
12 namespace js {
13 namespace jit {
14
15 //{{{ check_macroassembler_style
16
move64(Register64 src,Register64 dest)17 void MacroAssembler::move64(Register64 src, Register64 dest) {
18 Mov(ARMRegister(dest.reg, 64), ARMRegister(src.reg, 64));
19 }
20
move64(Imm64 imm,Register64 dest)21 void MacroAssembler::move64(Imm64 imm, Register64 dest) {
22 Mov(ARMRegister(dest.reg, 64), imm.value);
23 }
24
moveFloat32ToGPR(FloatRegister src,Register dest)25 void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
26 Fmov(ARMRegister(dest, 32), ARMFPRegister(src, 32));
27 }
28
moveGPRToFloat32(Register src,FloatRegister dest)29 void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
30 Fmov(ARMFPRegister(dest, 32), ARMRegister(src, 32));
31 }
32
move8SignExtend(Register src,Register dest)33 void MacroAssembler::move8SignExtend(Register src, Register dest) {
34 Sxtb(ARMRegister(dest, 32), ARMRegister(src, 32));
35 }
36
move16SignExtend(Register src,Register dest)37 void MacroAssembler::move16SignExtend(Register src, Register dest) {
38 Sxth(ARMRegister(dest, 32), ARMRegister(src, 32));
39 }
40
moveDoubleToGPR64(FloatRegister src,Register64 dest)41 void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
42 Fmov(ARMRegister(dest.reg, 64), ARMFPRegister(src, 64));
43 }
44
moveGPR64ToDouble(Register64 src,FloatRegister dest)45 void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
46 Fmov(ARMFPRegister(dest, 64), ARMRegister(src.reg, 64));
47 }
48
move64To32(Register64 src,Register dest)49 void MacroAssembler::move64To32(Register64 src, Register dest) {
50 Mov(ARMRegister(dest, 32), ARMRegister(src.reg, 32));
51 }
52
move32To64ZeroExtend(Register src,Register64 dest)53 void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
54 Mov(ARMRegister(dest.reg, 32), ARMRegister(src, 32));
55 }
56
move8To64SignExtend(Register src,Register64 dest)57 void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
58 Sxtb(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
59 }
60
move16To64SignExtend(Register src,Register64 dest)61 void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
62 Sxth(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
63 }
64
move32To64SignExtend(Register src,Register64 dest)65 void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
66 Sxtw(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
67 }
68
69 // ===============================================================
70 // Logical instructions
71
not32(Register reg)72 void MacroAssembler::not32(Register reg) {
73 Orn(ARMRegister(reg, 32), vixl::wzr, ARMRegister(reg, 32));
74 }
75
and32(Register src,Register dest)76 void MacroAssembler::and32(Register src, Register dest) {
77 And(ARMRegister(dest, 32), ARMRegister(dest, 32),
78 Operand(ARMRegister(src, 32)));
79 }
80
and32(Imm32 imm,Register dest)81 void MacroAssembler::and32(Imm32 imm, Register dest) {
82 And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
83 }
84
and32(Imm32 imm,Register src,Register dest)85 void MacroAssembler::and32(Imm32 imm, Register src, Register dest) {
86 And(ARMRegister(dest, 32), ARMRegister(src, 32), Operand(imm.value));
87 }
88
and32(Imm32 imm,const Address & dest)89 void MacroAssembler::and32(Imm32 imm, const Address& dest) {
90 vixl::UseScratchRegisterScope temps(this);
91 const ARMRegister scratch32 = temps.AcquireW();
92 MOZ_ASSERT(scratch32.asUnsized() != dest.base);
93 load32(dest, scratch32.asUnsized());
94 And(scratch32, scratch32, Operand(imm.value));
95 store32(scratch32.asUnsized(), dest);
96 }
97
and32(const Address & src,Register dest)98 void MacroAssembler::and32(const Address& src, Register dest) {
99 vixl::UseScratchRegisterScope temps(this);
100 const ARMRegister scratch32 = temps.AcquireW();
101 MOZ_ASSERT(scratch32.asUnsized() != src.base);
102 load32(src, scratch32.asUnsized());
103 And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
104 }
105
andPtr(Register src,Register dest)106 void MacroAssembler::andPtr(Register src, Register dest) {
107 And(ARMRegister(dest, 64), ARMRegister(dest, 64),
108 Operand(ARMRegister(src, 64)));
109 }
110
andPtr(Imm32 imm,Register dest)111 void MacroAssembler::andPtr(Imm32 imm, Register dest) {
112 And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
113 }
114
and64(Imm64 imm,Register64 dest)115 void MacroAssembler::and64(Imm64 imm, Register64 dest) {
116 vixl::UseScratchRegisterScope temps(this);
117 const Register scratch = temps.AcquireX().asUnsized();
118 mov(ImmWord(imm.value), scratch);
119 andPtr(scratch, dest.reg);
120 }
121
and64(Register64 src,Register64 dest)122 void MacroAssembler::and64(Register64 src, Register64 dest) {
123 And(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
124 ARMRegister(src.reg, 64));
125 }
126
or64(Imm64 imm,Register64 dest)127 void MacroAssembler::or64(Imm64 imm, Register64 dest) {
128 vixl::UseScratchRegisterScope temps(this);
129 const Register scratch = temps.AcquireX().asUnsized();
130 mov(ImmWord(imm.value), scratch);
131 orPtr(scratch, dest.reg);
132 }
133
xor64(Imm64 imm,Register64 dest)134 void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
135 vixl::UseScratchRegisterScope temps(this);
136 const Register scratch = temps.AcquireX().asUnsized();
137 mov(ImmWord(imm.value), scratch);
138 xorPtr(scratch, dest.reg);
139 }
140
or32(Imm32 imm,Register dest)141 void MacroAssembler::or32(Imm32 imm, Register dest) {
142 Orr(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
143 }
144
or32(Register src,Register dest)145 void MacroAssembler::or32(Register src, Register dest) {
146 Orr(ARMRegister(dest, 32), ARMRegister(dest, 32),
147 Operand(ARMRegister(src, 32)));
148 }
149
or32(Imm32 imm,const Address & dest)150 void MacroAssembler::or32(Imm32 imm, const Address& dest) {
151 vixl::UseScratchRegisterScope temps(this);
152 const ARMRegister scratch32 = temps.AcquireW();
153 MOZ_ASSERT(scratch32.asUnsized() != dest.base);
154 load32(dest, scratch32.asUnsized());
155 Orr(scratch32, scratch32, Operand(imm.value));
156 store32(scratch32.asUnsized(), dest);
157 }
158
orPtr(Register src,Register dest)159 void MacroAssembler::orPtr(Register src, Register dest) {
160 Orr(ARMRegister(dest, 64), ARMRegister(dest, 64),
161 Operand(ARMRegister(src, 64)));
162 }
163
orPtr(Imm32 imm,Register dest)164 void MacroAssembler::orPtr(Imm32 imm, Register dest) {
165 Orr(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
166 }
167
or64(Register64 src,Register64 dest)168 void MacroAssembler::or64(Register64 src, Register64 dest) {
169 orPtr(src.reg, dest.reg);
170 }
171
xor64(Register64 src,Register64 dest)172 void MacroAssembler::xor64(Register64 src, Register64 dest) {
173 xorPtr(src.reg, dest.reg);
174 }
175
xor32(Register src,Register dest)176 void MacroAssembler::xor32(Register src, Register dest) {
177 Eor(ARMRegister(dest, 32), ARMRegister(dest, 32),
178 Operand(ARMRegister(src, 32)));
179 }
180
xor32(Imm32 imm,Register dest)181 void MacroAssembler::xor32(Imm32 imm, Register dest) {
182 Eor(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
183 }
184
xorPtr(Register src,Register dest)185 void MacroAssembler::xorPtr(Register src, Register dest) {
186 Eor(ARMRegister(dest, 64), ARMRegister(dest, 64),
187 Operand(ARMRegister(src, 64)));
188 }
189
xorPtr(Imm32 imm,Register dest)190 void MacroAssembler::xorPtr(Imm32 imm, Register dest) {
191 Eor(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
192 }
193
194 // ===============================================================
195 // Arithmetic functions
196
add32(Register src,Register dest)197 void MacroAssembler::add32(Register src, Register dest) {
198 Add(ARMRegister(dest, 32), ARMRegister(dest, 32),
199 Operand(ARMRegister(src, 32)));
200 }
201
add32(Imm32 imm,Register dest)202 void MacroAssembler::add32(Imm32 imm, Register dest) {
203 Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
204 }
205
add32(Imm32 imm,const Address & dest)206 void MacroAssembler::add32(Imm32 imm, const Address& dest) {
207 vixl::UseScratchRegisterScope temps(this);
208 const ARMRegister scratch32 = temps.AcquireW();
209 MOZ_ASSERT(scratch32.asUnsized() != dest.base);
210
211 Ldr(scratch32, toMemOperand(dest));
212 Add(scratch32, scratch32, Operand(imm.value));
213 Str(scratch32, toMemOperand(dest));
214 }
215
addPtr(Register src,Register dest)216 void MacroAssembler::addPtr(Register src, Register dest) {
217 addPtr(src, dest, dest);
218 }
219
addPtr(Register src1,Register src2,Register dest)220 void MacroAssembler::addPtr(Register src1, Register src2, Register dest) {
221 Add(ARMRegister(dest, 64), ARMRegister(src1, 64),
222 Operand(ARMRegister(src2, 64)));
223 }
224
addPtr(Imm32 imm,Register dest)225 void MacroAssembler::addPtr(Imm32 imm, Register dest) {
226 addPtr(imm, dest, dest);
227 }
228
addPtr(Imm32 imm,Register src,Register dest)229 void MacroAssembler::addPtr(Imm32 imm, Register src, Register dest) {
230 Add(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(imm.value));
231 }
232
addPtr(ImmWord imm,Register dest)233 void MacroAssembler::addPtr(ImmWord imm, Register dest) {
234 Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
235 }
236
addPtr(Imm32 imm,const Address & dest)237 void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
238 vixl::UseScratchRegisterScope temps(this);
239 const ARMRegister scratch64 = temps.AcquireX();
240 MOZ_ASSERT(scratch64.asUnsized() != dest.base);
241
242 Ldr(scratch64, toMemOperand(dest));
243 Add(scratch64, scratch64, Operand(imm.value));
244 Str(scratch64, toMemOperand(dest));
245 }
246
addPtr(const Address & src,Register dest)247 void MacroAssembler::addPtr(const Address& src, Register dest) {
248 vixl::UseScratchRegisterScope temps(this);
249 const ARMRegister scratch64 = temps.AcquireX();
250 MOZ_ASSERT(scratch64.asUnsized() != src.base);
251
252 Ldr(scratch64, toMemOperand(src));
253 Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
254 }
255
add64(Register64 src,Register64 dest)256 void MacroAssembler::add64(Register64 src, Register64 dest) {
257 addPtr(src.reg, dest.reg);
258 }
259
add64(Imm32 imm,Register64 dest)260 void MacroAssembler::add64(Imm32 imm, Register64 dest) {
261 Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
262 }
263
add64(Imm64 imm,Register64 dest)264 void MacroAssembler::add64(Imm64 imm, Register64 dest) {
265 Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
266 }
267
sub32FromStackPtrWithPatch(Register dest)268 CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
269 vixl::UseScratchRegisterScope temps(this);
270 const ARMRegister scratch = temps.AcquireX();
271 CodeOffset offs = CodeOffset(currentOffset());
272 movz(scratch, 0, 0);
273 movk(scratch, 0, 16);
274 Sub(ARMRegister(dest, 64), sp, scratch);
275 return offs;
276 }
277
patchSub32FromStackPtr(CodeOffset offset,Imm32 imm)278 void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
279 Instruction* i1 = getInstructionAt(BufferOffset(offset.offset()));
280 MOZ_ASSERT(i1->IsMovz());
281 i1->SetInstructionBits(i1->InstructionBits() |
282 ImmMoveWide(uint16_t(imm.value)));
283
284 Instruction* i2 = getInstructionAt(BufferOffset(offset.offset() + 4));
285 MOZ_ASSERT(i2->IsMovk());
286 i2->SetInstructionBits(i2->InstructionBits() |
287 ImmMoveWide(uint16_t(imm.value >> 16)));
288 }
289
addDouble(FloatRegister src,FloatRegister dest)290 void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
291 fadd(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
292 ARMFPRegister(src, 64));
293 }
294
addFloat32(FloatRegister src,FloatRegister dest)295 void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
296 fadd(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
297 ARMFPRegister(src, 32));
298 }
299
sub32(Imm32 imm,Register dest)300 void MacroAssembler::sub32(Imm32 imm, Register dest) {
301 Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
302 }
303
sub32(Register src,Register dest)304 void MacroAssembler::sub32(Register src, Register dest) {
305 Sub(ARMRegister(dest, 32), ARMRegister(dest, 32),
306 Operand(ARMRegister(src, 32)));
307 }
308
sub32(const Address & src,Register dest)309 void MacroAssembler::sub32(const Address& src, Register dest) {
310 vixl::UseScratchRegisterScope temps(this);
311 const ARMRegister scratch32 = temps.AcquireW();
312 MOZ_ASSERT(scratch32.asUnsized() != src.base);
313 load32(src, scratch32.asUnsized());
314 Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
315 }
316
subPtr(Register src,Register dest)317 void MacroAssembler::subPtr(Register src, Register dest) {
318 Sub(ARMRegister(dest, 64), ARMRegister(dest, 64),
319 Operand(ARMRegister(src, 64)));
320 }
321
subPtr(Register src,const Address & dest)322 void MacroAssembler::subPtr(Register src, const Address& dest) {
323 vixl::UseScratchRegisterScope temps(this);
324 const ARMRegister scratch64 = temps.AcquireX();
325 MOZ_ASSERT(scratch64.asUnsized() != dest.base);
326
327 Ldr(scratch64, toMemOperand(dest));
328 Sub(scratch64, scratch64, Operand(ARMRegister(src, 64)));
329 Str(scratch64, toMemOperand(dest));
330 }
331
subPtr(Imm32 imm,Register dest)332 void MacroAssembler::subPtr(Imm32 imm, Register dest) {
333 Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
334 }
335
subPtr(const Address & addr,Register dest)336 void MacroAssembler::subPtr(const Address& addr, Register dest) {
337 vixl::UseScratchRegisterScope temps(this);
338 const ARMRegister scratch64 = temps.AcquireX();
339 MOZ_ASSERT(scratch64.asUnsized() != addr.base);
340
341 Ldr(scratch64, toMemOperand(addr));
342 Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
343 }
344
sub64(Register64 src,Register64 dest)345 void MacroAssembler::sub64(Register64 src, Register64 dest) {
346 Sub(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
347 ARMRegister(src.reg, 64));
348 }
349
sub64(Imm64 imm,Register64 dest)350 void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
351 Sub(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
352 }
353
subDouble(FloatRegister src,FloatRegister dest)354 void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
355 fsub(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
356 ARMFPRegister(src, 64));
357 }
358
subFloat32(FloatRegister src,FloatRegister dest)359 void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
360 fsub(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
361 ARMFPRegister(src, 32));
362 }
363
mul32(Register rhs,Register srcDest)364 void MacroAssembler::mul32(Register rhs, Register srcDest) {
365 mul32(srcDest, rhs, srcDest, nullptr, nullptr);
366 }
367
mul32(Register src1,Register src2,Register dest,Label * onOver,Label * onZero)368 void MacroAssembler::mul32(Register src1, Register src2, Register dest,
369 Label* onOver, Label* onZero) {
370 Smull(ARMRegister(dest, 64), ARMRegister(src1, 32), ARMRegister(src2, 32));
371 if (onOver) {
372 Cmp(ARMRegister(dest, 64), Operand(ARMRegister(dest, 32), vixl::SXTW));
373 B(onOver, NotEqual);
374 }
375 if (onZero) Cbz(ARMRegister(dest, 32), onZero);
376
377 // Clear upper 32 bits.
378 Mov(ARMRegister(dest, 32), ARMRegister(dest, 32));
379 }
380
mul64(Imm64 imm,const Register64 & dest)381 void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
382 vixl::UseScratchRegisterScope temps(this);
383 const ARMRegister scratch64 = temps.AcquireX();
384 MOZ_ASSERT(dest.reg != scratch64.asUnsized());
385 mov(ImmWord(imm.value), scratch64.asUnsized());
386 Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), scratch64);
387 }
388
mul64(const Register64 & src,const Register64 & dest,const Register temp)389 void MacroAssembler::mul64(const Register64& src, const Register64& dest,
390 const Register temp) {
391 MOZ_ASSERT(temp == Register::Invalid());
392 Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
393 ARMRegister(src.reg, 64));
394 }
395
mulBy3(Register src,Register dest)396 void MacroAssembler::mulBy3(Register src, Register dest) {
397 ARMRegister xdest(dest, 64);
398 ARMRegister xsrc(src, 64);
399 Add(xdest, xsrc, Operand(xsrc, vixl::LSL, 1));
400 }
401
mulFloat32(FloatRegister src,FloatRegister dest)402 void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
403 fmul(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
404 ARMFPRegister(src, 32));
405 }
406
mulDouble(FloatRegister src,FloatRegister dest)407 void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
408 fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
409 ARMFPRegister(src, 64));
410 }
411
mulDoublePtr(ImmPtr imm,Register temp,FloatRegister dest)412 void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
413 FloatRegister dest) {
414 vixl::UseScratchRegisterScope temps(this);
415 const Register scratch = temps.AcquireX().asUnsized();
416 MOZ_ASSERT(temp != scratch);
417 movePtr(imm, scratch);
418 const ARMFPRegister scratchDouble = temps.AcquireD();
419 Ldr(scratchDouble, MemOperand(Address(scratch, 0)));
420 fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), scratchDouble);
421 }
422
quotient32(Register rhs,Register srcDest,bool isUnsigned)423 void MacroAssembler::quotient32(Register rhs, Register srcDest,
424 bool isUnsigned) {
425 if (isUnsigned)
426 Udiv(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32),
427 ARMRegister(rhs, 32));
428 else
429 Sdiv(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32),
430 ARMRegister(rhs, 32));
431 }
432
433 // This does not deal with x % 0 or INT_MIN % -1, the caller needs to filter
434 // those cases when they may occur.
435
remainder32(Register rhs,Register srcDest,bool isUnsigned)436 void MacroAssembler::remainder32(Register rhs, Register srcDest,
437 bool isUnsigned) {
438 vixl::UseScratchRegisterScope temps(this);
439 ARMRegister scratch = temps.AcquireW();
440 if (isUnsigned)
441 Udiv(scratch, ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
442 else
443 Sdiv(scratch, ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
444 Mul(scratch, scratch, ARMRegister(rhs, 32));
445 Sub(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32), scratch);
446 }
447
divFloat32(FloatRegister src,FloatRegister dest)448 void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
449 fdiv(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
450 ARMFPRegister(src, 32));
451 }
452
divDouble(FloatRegister src,FloatRegister dest)453 void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
454 fdiv(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
455 ARMFPRegister(src, 64));
456 }
457
inc64(AbsoluteAddress dest)458 void MacroAssembler::inc64(AbsoluteAddress dest) {
459 vixl::UseScratchRegisterScope temps(this);
460 const ARMRegister scratchAddr64 = temps.AcquireX();
461 const ARMRegister scratch64 = temps.AcquireX();
462
463 Mov(scratchAddr64, uint64_t(dest.addr));
464 Ldr(scratch64, MemOperand(scratchAddr64, 0));
465 Add(scratch64, scratch64, Operand(1));
466 Str(scratch64, MemOperand(scratchAddr64, 0));
467 }
468
neg32(Register reg)469 void MacroAssembler::neg32(Register reg) {
470 Negs(ARMRegister(reg, 32), Operand(ARMRegister(reg, 32)));
471 }
472
negateFloat(FloatRegister reg)473 void MacroAssembler::negateFloat(FloatRegister reg) {
474 fneg(ARMFPRegister(reg, 32), ARMFPRegister(reg, 32));
475 }
476
negateDouble(FloatRegister reg)477 void MacroAssembler::negateDouble(FloatRegister reg) {
478 fneg(ARMFPRegister(reg, 64), ARMFPRegister(reg, 64));
479 }
480
absFloat32(FloatRegister src,FloatRegister dest)481 void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
482 fabs(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
483 }
484
absDouble(FloatRegister src,FloatRegister dest)485 void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
486 fabs(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
487 }
488
sqrtFloat32(FloatRegister src,FloatRegister dest)489 void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
490 fsqrt(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
491 }
492
sqrtDouble(FloatRegister src,FloatRegister dest)493 void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
494 fsqrt(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
495 }
496
minFloat32(FloatRegister other,FloatRegister srcDest,bool handleNaN)497 void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
498 bool handleNaN) {
499 MOZ_ASSERT(handleNaN); // Always true for wasm
500 fmin(ARMFPRegister(srcDest, 32), ARMFPRegister(srcDest, 32),
501 ARMFPRegister(other, 32));
502 }
503
minDouble(FloatRegister other,FloatRegister srcDest,bool handleNaN)504 void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
505 bool handleNaN) {
506 MOZ_ASSERT(handleNaN); // Always true for wasm
507 fmin(ARMFPRegister(srcDest, 64), ARMFPRegister(srcDest, 64),
508 ARMFPRegister(other, 64));
509 }
510
maxFloat32(FloatRegister other,FloatRegister srcDest,bool handleNaN)511 void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
512 bool handleNaN) {
513 MOZ_ASSERT(handleNaN); // Always true for wasm
514 fmax(ARMFPRegister(srcDest, 32), ARMFPRegister(srcDest, 32),
515 ARMFPRegister(other, 32));
516 }
517
maxDouble(FloatRegister other,FloatRegister srcDest,bool handleNaN)518 void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
519 bool handleNaN) {
520 MOZ_ASSERT(handleNaN); // Always true for wasm
521 fmax(ARMFPRegister(srcDest, 64), ARMFPRegister(srcDest, 64),
522 ARMFPRegister(other, 64));
523 }
524
525 // ===============================================================
526 // Shift functions
527
lshiftPtr(Imm32 imm,Register dest)528 void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
529 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
530 Lsl(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
531 }
532
lshift64(Imm32 imm,Register64 dest)533 void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
534 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
535 lshiftPtr(imm, dest.reg);
536 }
537
lshift64(Register shift,Register64 srcDest)538 void MacroAssembler::lshift64(Register shift, Register64 srcDest) {
539 Lsl(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
540 ARMRegister(shift, 64));
541 }
542
lshift32(Register shift,Register dest)543 void MacroAssembler::lshift32(Register shift, Register dest) {
544 Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
545 }
546
lshift32(Imm32 imm,Register dest)547 void MacroAssembler::lshift32(Imm32 imm, Register dest) {
548 MOZ_ASSERT(0 <= imm.value && imm.value < 32);
549 Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
550 }
551
rshiftPtr(Imm32 imm,Register dest)552 void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
553 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
554 Lsr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
555 }
556
rshiftPtr(Imm32 imm,Register src,Register dest)557 void MacroAssembler::rshiftPtr(Imm32 imm, Register src, Register dest) {
558 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
559 Lsr(ARMRegister(dest, 64), ARMRegister(src, 64), imm.value);
560 }
561
rshift32(Register shift,Register dest)562 void MacroAssembler::rshift32(Register shift, Register dest) {
563 Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
564 }
565
rshift32(Imm32 imm,Register dest)566 void MacroAssembler::rshift32(Imm32 imm, Register dest) {
567 MOZ_ASSERT(0 <= imm.value && imm.value < 32);
568 Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
569 }
570
rshiftPtrArithmetic(Imm32 imm,Register dest)571 void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
572 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
573 Asr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
574 }
575
rshift32Arithmetic(Register shift,Register dest)576 void MacroAssembler::rshift32Arithmetic(Register shift, Register dest) {
577 Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
578 }
579
rshift32Arithmetic(Imm32 imm,Register dest)580 void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
581 MOZ_ASSERT(0 <= imm.value && imm.value < 32);
582 Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
583 }
584
rshift64(Imm32 imm,Register64 dest)585 void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
586 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
587 rshiftPtr(imm, dest.reg);
588 }
589
rshift64(Register shift,Register64 srcDest)590 void MacroAssembler::rshift64(Register shift, Register64 srcDest) {
591 Lsr(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
592 ARMRegister(shift, 64));
593 }
594
rshift64Arithmetic(Imm32 imm,Register64 dest)595 void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
596 Asr(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), imm.value);
597 }
598
rshift64Arithmetic(Register shift,Register64 srcDest)599 void MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest) {
600 Asr(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
601 ARMRegister(shift, 64));
602 }
603
604 // ===============================================================
605 // Condition functions
606
607 template <typename T1, typename T2>
cmp32Set(Condition cond,T1 lhs,T2 rhs,Register dest)608 void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
609 cmp32(lhs, rhs);
610 emitSet(cond, dest);
611 }
612
613 template <typename T1, typename T2>
cmpPtrSet(Condition cond,T1 lhs,T2 rhs,Register dest)614 void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
615 cmpPtr(lhs, rhs);
616 emitSet(cond, dest);
617 }
618
619 // ===============================================================
620 // Rotation functions
621
rotateLeft(Imm32 count,Register input,Register dest)622 void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
623 Ror(ARMRegister(dest, 32), ARMRegister(input, 32), (32 - count.value) & 31);
624 }
625
rotateLeft(Register count,Register input,Register dest)626 void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
627 vixl::UseScratchRegisterScope temps(this);
628 const ARMRegister scratch = temps.AcquireW();
629 // Really 32 - count, but the upper bits of the result are ignored.
630 Neg(scratch, ARMRegister(count, 32));
631 Ror(ARMRegister(dest, 32), ARMRegister(input, 32), scratch);
632 }
633
rotateRight(Imm32 count,Register input,Register dest)634 void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
635 Ror(ARMRegister(dest, 32), ARMRegister(input, 32), count.value & 31);
636 }
637
rotateRight(Register count,Register input,Register dest)638 void MacroAssembler::rotateRight(Register count, Register input,
639 Register dest) {
640 Ror(ARMRegister(dest, 32), ARMRegister(input, 32), ARMRegister(count, 32));
641 }
642
rotateLeft64(Register count,Register64 input,Register64 dest,Register temp)643 void MacroAssembler::rotateLeft64(Register count, Register64 input,
644 Register64 dest, Register temp) {
645 MOZ_ASSERT(temp == Register::Invalid());
646
647 vixl::UseScratchRegisterScope temps(this);
648 const ARMRegister scratch = temps.AcquireX();
649 // Really 64 - count, but the upper bits of the result are ignored.
650 Neg(scratch, ARMRegister(count, 64));
651 Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), scratch);
652 }
653
rotateLeft64(Imm32 count,Register64 input,Register64 dest,Register temp)654 void MacroAssembler::rotateLeft64(Imm32 count, Register64 input,
655 Register64 dest, Register temp) {
656 MOZ_ASSERT(temp == Register::Invalid());
657
658 Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64),
659 (64 - count.value) & 63);
660 }
661
rotateRight64(Register count,Register64 input,Register64 dest,Register temp)662 void MacroAssembler::rotateRight64(Register count, Register64 input,
663 Register64 dest, Register temp) {
664 MOZ_ASSERT(temp == Register::Invalid());
665
666 Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64),
667 ARMRegister(count, 64));
668 }
669
rotateRight64(Imm32 count,Register64 input,Register64 dest,Register temp)670 void MacroAssembler::rotateRight64(Imm32 count, Register64 input,
671 Register64 dest, Register temp) {
672 MOZ_ASSERT(temp == Register::Invalid());
673
674 Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), count.value & 63);
675 }
676
677 // ===============================================================
678 // Bit counting functions
679
clz32(Register src,Register dest,bool knownNotZero)680 void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
681 Clz(ARMRegister(dest, 32), ARMRegister(src, 32));
682 }
683
ctz32(Register src,Register dest,bool knownNotZero)684 void MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) {
685 Rbit(ARMRegister(dest, 32), ARMRegister(src, 32));
686 Clz(ARMRegister(dest, 32), ARMRegister(dest, 32));
687 }
688
clz64(Register64 src,Register dest)689 void MacroAssembler::clz64(Register64 src, Register dest) {
690 Clz(ARMRegister(dest, 64), ARMRegister(src.reg, 64));
691 }
692
ctz64(Register64 src,Register dest)693 void MacroAssembler::ctz64(Register64 src, Register dest) {
694 Rbit(ARMRegister(dest, 64), ARMRegister(src.reg, 64));
695 Clz(ARMRegister(dest, 64), ARMRegister(dest, 64));
696 }
697
popcnt32(Register src_,Register dest_,Register tmp_)698 void MacroAssembler::popcnt32(Register src_, Register dest_, Register tmp_) {
699 MOZ_ASSERT(tmp_ != Register::Invalid());
700
701 // Equivalent to mozilla::CountPopulation32().
702
703 ARMRegister src(src_, 32);
704 ARMRegister dest(dest_, 32);
705 ARMRegister tmp(tmp_, 32);
706
707 Mov(tmp, src);
708 if (src_ != dest_) Mov(dest, src);
709 Lsr(dest, dest, 1);
710 And(dest, dest, 0x55555555);
711 Sub(dest, tmp, dest);
712 Lsr(tmp, dest, 2);
713 And(tmp, tmp, 0x33333333);
714 And(dest, dest, 0x33333333);
715 Add(dest, tmp, dest);
716 Add(dest, dest, Operand(dest, vixl::LSR, 4));
717 And(dest, dest, 0x0F0F0F0F);
718 Add(dest, dest, Operand(dest, vixl::LSL, 8));
719 Add(dest, dest, Operand(dest, vixl::LSL, 16));
720 Lsr(dest, dest, 24);
721 }
722
popcnt64(Register64 src_,Register64 dest_,Register tmp_)723 void MacroAssembler::popcnt64(Register64 src_, Register64 dest_,
724 Register tmp_) {
725 MOZ_ASSERT(tmp_ != Register::Invalid());
726
727 // Equivalent to mozilla::CountPopulation64(), though likely more efficient.
728
729 ARMRegister src(src_.reg, 64);
730 ARMRegister dest(dest_.reg, 64);
731 ARMRegister tmp(tmp_, 64);
732
733 Mov(tmp, src);
734 if (src_ != dest_) Mov(dest, src);
735 Lsr(dest, dest, 1);
736 And(dest, dest, 0x5555555555555555);
737 Sub(dest, tmp, dest);
738 Lsr(tmp, dest, 2);
739 And(tmp, tmp, 0x3333333333333333);
740 And(dest, dest, 0x3333333333333333);
741 Add(dest, tmp, dest);
742 Add(dest, dest, Operand(dest, vixl::LSR, 4));
743 And(dest, dest, 0x0F0F0F0F0F0F0F0F);
744 Add(dest, dest, Operand(dest, vixl::LSL, 8));
745 Add(dest, dest, Operand(dest, vixl::LSL, 16));
746 Add(dest, dest, Operand(dest, vixl::LSL, 32));
747 Lsr(dest, dest, 56);
748 }
749
750 // ===============================================================
751 // Branch functions
752
// Compare two 32-bit registers and branch to |label| on |cond|.
template <class L>
void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
                              L label) {
  cmp32(lhs, rhs);
  B(label, cond);
}
759
// Compare a 32-bit register against an immediate and branch on |cond|.
template <class L>
void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm,
                              L label) {
  cmp32(lhs, imm);
  B(label, cond);
}
766
// Load 32 bits from memory into a scratch register, then compare-and-branch.
// The scratch must not alias the address base or the comparand (asserted).
void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != rhs);
  load32(lhs, scratch);
  branch32(cond, scratch, rhs, label);
}
776
// Load 32 bits from memory, then compare against an immediate and branch.
void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 imm,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  load32(lhs, scratch);
  branch32(cond, scratch, imm, label);
}
785
// Materialize an absolute address into a scratch register and dispatch to the
// Address-based overload.
void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
                              Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  movePtr(ImmPtr(lhs.addr), scratch);
  branch32(cond, Address(scratch, 0), rhs, label);
}
793
// Materialize an absolute address into a scratch register and dispatch to the
// Address/immediate overload.
void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
                              Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  movePtr(ImmPtr(lhs.addr), scratch);
  branch32(cond, Address(scratch, 0), rhs, label);
}
801
// Load 32 bits from a base+index address, then compare against an immediate
// and branch. The W scratch must not alias base or index (asserted).
void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
  MOZ_ASSERT(scratch32.asUnsized() != lhs.index);
  doBaseIndex(scratch32, lhs, vixl::LDR_w);
  branch32(cond, scratch32.asUnsized(), rhs, label);
}
811
// Materialize a wasm symbolic address into a scratch register and dispatch to
// the Address/immediate overload.
void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
                              Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  movePtr(lhs, scratch);
  branch32(cond, Address(scratch, 0), rhs, label);
}
819
// 64-bit compare against an immediate; branch to |success| on |cond|, and,
// when |fail| is non-null, unconditionally to |fail| otherwise.
void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
                              Label* success, Label* fail) {
  Cmp(ARMRegister(lhs.reg, 64), val.value);
  B(success, cond);
  if (fail) B(fail);
}
826
// 64-bit register-register compare; branch to |success| on |cond|, and, when
// |fail| is non-null, unconditionally to |fail| otherwise.
void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
                              Label* success, Label* fail) {
  Cmp(ARMRegister(lhs.reg, 64), ARMRegister(rhs.reg, 64));
  B(success, cond);
  if (fail) B(fail);
}
833
// 64-bit memory-vs-immediate compare-and-branch. Only NotEqual is supported
// (asserted); on this 64-bit target it reduces to a pointer-width branchPtr.
void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
                              Label* label) {
  MOZ_ASSERT(cond == Assembler::NotEqual,
             "other condition codes not supported");

  branchPtr(cond, lhs, ImmWord(val.value), label);
}
841
// 64-bit memory-vs-memory compare-and-branch. Only NotEqual is supported
// (asserted); |rhs| is loaded into the caller-provided |scratch|, which must
// not alias either address base.
void MacroAssembler::branch64(Condition cond, const Address& lhs,
                              const Address& rhs, Register scratch,
                              Label* label) {
  MOZ_ASSERT(cond == Assembler::NotEqual,
             "other condition codes not supported");
  MOZ_ASSERT(lhs.base != scratch);
  MOZ_ASSERT(rhs.base != scratch);

  loadPtr(rhs, scratch);
  branchPtr(cond, lhs, scratch, label);
}
853
// Pointer-width (64-bit) register-register compare-and-branch.
template <class L>
void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
                               L label) {
  Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
  B(label, cond);
}
860
// Pointer-width compare against a 32-bit immediate, then branch on |cond|.
void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
                               Label* label) {
  cmpPtr(lhs, rhs);
  B(label, cond);
}
866
// Pointer-width compare against an immediate pointer, then branch on |cond|.
void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
                               Label* label) {
  cmpPtr(lhs, rhs);
  B(label, cond);
}
872
// Compare against a GC-thing pointer. The immediate is materialized into a
// scratch register (movePtr records it for the GC), then compared.
void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs);
  movePtr(rhs, scratch);
  branchPtr(cond, lhs, scratch, label);
}
881
// Pointer-width compare against an immediate word, then branch on |cond|.
void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
                               Label* label) {
  cmpPtr(lhs, rhs);
  B(label, cond);
}
887
// Load a pointer from memory into a scratch register, then compare-and-branch
// against |rhs|. The scratch must not alias the base or the comparand.
template <class L>
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
                               L label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != rhs);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
898
// Load a pointer from memory, then compare against an immediate pointer.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
907
// Compare a loaded pointer against a GC-thing pointer. Needs two scratch
// registers: one for the materialized GC pointer, one for the loaded value.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch1_64 = temps.AcquireX();
  const ARMRegister scratch2_64 = temps.AcquireX();
  MOZ_ASSERT(scratch1_64.asUnsized() != lhs.base);
  MOZ_ASSERT(scratch2_64.asUnsized() != lhs.base);

  movePtr(rhs, scratch1_64.asUnsized());
  loadPtr(lhs, scratch2_64.asUnsized());
  branchPtr(cond, scratch2_64.asUnsized(), scratch1_64.asUnsized(), label);
}
920
// Load a pointer from memory, then compare against an immediate word.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
929
// Load a pointer from an absolute address, then compare against |rhs|.
void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
                               Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != rhs);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
938
// Load a pointer from an absolute address, then compare against an immediate.
void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
                               ImmWord rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
946
// Load a pointer via a wasm symbolic address, then compare against |rhs|.
void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
                               Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != rhs);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
955
// Load a pointer from a base+index address, then compare against an immediate
// word. The scratch must not alias base or index (asserted).
void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
                               ImmWord rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != lhs.index);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
965
// Compare and emit a patchable jump; returns the jump's code offset so the
// branch target can be repatched later.
template <typename T>
CodeOffsetJump MacroAssembler::branchPtrWithPatch(Condition cond, Register lhs,
                                                  T rhs, RepatchLabel* label) {
  cmpPtr(lhs, rhs);
  return jumpWithPatch(label, cond);
}
972
// Memory variant: load |lhs|, compare against |rhs|, then emit a patchable
// jump. Returns the jump's code offset for later repatching.
template <typename T>
CodeOffsetJump MacroAssembler::branchPtrWithPatch(Condition cond, Address lhs,
                                                  T rhs, RepatchLabel* label) {
  // The scratch register is unused after the condition codes are set, so the
  // scope is closed before emitting the jump.
  {
    vixl::UseScratchRegisterScope temps(this);
    const Register scratch = temps.AcquireX().asUnsized();
    MOZ_ASSERT(scratch != lhs.base);
    loadPtr(lhs, scratch);
    cmpPtr(scratch, rhs);
  }
  return jumpWithPatch(label, cond);
}
986
// Compare a stored private value against an unboxed pointer |rhs|. Rather
// than unboxing the memory operand, |rhs| is shifted right by 1 to match the
// stored (boxed) representation and compared directly.
void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
                                      Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  if (rhs != scratch) movePtr(rhs, scratch);
  // Instead of unboxing lhs, box rhs and do direct comparison with lhs.
  rshiftPtr(Imm32(1), scratch);
  branchPtr(cond, lhs, scratch, label);
}
996
// Single-precision floating-point compare-and-branch. After the compare, the
// Overflow (V) flag signals an unordered result (a NaN operand), so the two
// NaN-sensitive conditions are decomposed into explicit flag checks.
void MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs,
                                 FloatRegister rhs, Label* label) {
  compareFloat(cond, lhs, rhs);
  switch (cond) {
    case DoubleNotEqual: {
      Label unordered;
      // not equal *and* ordered
      branch(Overflow, &unordered);
      branch(NotEqual, label);
      bind(&unordered);
      break;
    }
    case DoubleEqualOrUnordered:
      // Taken when unordered (V set) or equal.
      branch(Overflow, label);
      branch(Equal, label);
      break;
    default:
      branch(Condition(cond), label);
  }
}
1017
// Truncate a float32 to an integer, branching to |fail| if the conversion
// saturated; otherwise keep the low 32 bits (i.e. the result modulo 2^32).
void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
                                                         Register dest,
                                                         Label* fail) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();

  ARMFPRegister src32(src, 32);
  ARMRegister dest64(dest, 64);

  MOZ_ASSERT(!scratch64.Is(dest64));

  // Fcvtzs saturates out-of-range inputs to INT64_MIN/INT64_MAX. Adding
  // 0x7fffffffffffffff maps exactly those two sentinels to -1 and -2; the
  // Cmn/Above pair then fails on them.
  Fcvtzs(dest64, src32);
  Add(scratch64, dest64, Operand(0x7fffffffffffffff));
  Cmn(scratch64, 3);
  B(fail, Assembler::Above);
  And(dest64, dest64, Operand(0xffffffff));
}
1035
// Truncate a float32 to an int32, branching to |fail| on inexact/out-of-range
// input; delegates to convertFloat32ToInt32.
void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
                                                  Register dest, Label* fail) {
  convertFloat32ToInt32(src, dest, fail);
}
1040
// Double-precision floating-point compare-and-branch. After the compare, the
// Overflow (V) flag signals an unordered result (a NaN operand), so the two
// NaN-sensitive conditions are decomposed into explicit flag checks.
void MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs,
                                  FloatRegister rhs, Label* label) {
  compareDouble(cond, lhs, rhs);
  switch (cond) {
    case DoubleNotEqual: {
      Label unordered;
      // not equal *and* ordered
      branch(Overflow, &unordered);
      branch(NotEqual, label);
      bind(&unordered);
      break;
    }
    case DoubleEqualOrUnordered:
      // Taken when unordered (V set) or equal.
      branch(Overflow, label);
      branch(Equal, label);
      break;
    default:
      branch(Condition(cond), label);
  }
}
1061
// Truncate a double to an integer, branching to |fail| if the conversion
// saturated; otherwise keep the low 32 bits (i.e. the result modulo 2^32).
void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
                                                        Register dest,
                                                        Label* fail) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();

  // An out of range integer will be saturated to the destination size.
  ARMFPRegister src64(src, 64);
  ARMRegister dest64(dest, 64);

  MOZ_ASSERT(!scratch64.Is(dest64));

  // Adding 0x7fffffffffffffff maps the saturation sentinels INT64_MIN and
  // INT64_MAX to -1 and -2; the Cmn/Above pair then fails on them.
  Fcvtzs(dest64, src64);
  Add(scratch64, dest64, Operand(0x7fffffffffffffff));
  Cmn(scratch64, 3);
  B(fail, Assembler::Above);
  And(dest64, dest64, Operand(0xffffffff));
}
1080
// Truncate a double to an int32, branching to |fail| on inexact/out-of-range
// input; delegates to convertDoubleToInt32.
void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
                                                 Register dest, Label* fail) {
  convertDoubleToInt32(src, dest, fail);
}
1085
// dest += src with flags set, then branch on |cond| (e.g. Overflow).
template <typename T, typename L>
void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
                                 L label) {
  adds32(src, dest);
  B(label, cond);
}
1092
// dest -= src with flags set, then branch on |cond| (e.g. Overflow).
template <typename T>
void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
                                 Label* label) {
  subs32(src, dest);
  branch(cond, label);
}
1099
// Decrement |lhs| by |rhs| (64-bit, flag-setting) and branch on |cond|.
void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
                                  Label* label) {
  Subs(ARMRegister(lhs, 64), ARMRegister(lhs, 64), Operand(rhs.value));
  // NOTE(review): argument order (cond, label) differs from the
  // B(label, cond) form used elsewhere in this file — presumably a distinct
  // overload; confirm against MacroAssembler-arm64.h.
  B(cond, label);
}
1105
// Test (bitwise AND, flags only) two 32-bit registers and branch. Only the
// flag-reading conditions are meaningful here (asserted).
template <class L>
void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
                                  L label) {
  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
             cond == NotSigned);
  // x86 prefers |test foo, foo| to |cmp foo, #0|.
  // Convert the former to the latter for ARM.
  if (lhs == rhs && (cond == Zero || cond == NonZero))
    cmp32(lhs, Imm32(0));
  else
    test32(lhs, rhs);
  B(label, cond);
}
1119
// Test a 32-bit register against an immediate mask and branch. Only the
// flag-reading conditions are meaningful here (asserted).
template <class L>
void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
                                  L label) {
  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
             cond == NotSigned);
  test32(lhs, rhs);
  B(label, cond);
}
1128
// Load 32 bits from memory into a scratch register, then test-and-branch.
void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
                                  Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  load32(lhs, scratch);
  branchTest32(cond, scratch, rhs, label);
}
1137
// Load 32 bits from an absolute address, then test-and-branch.
void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
                                  Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  load32(lhs, scratch);
  branchTest32(cond, scratch, rhs, label);
}
1145
// Pointer-width bitwise test (Tst sets flags only), then branch on |cond|.
template <class L>
void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
                                   L label) {
  Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64)));
  B(label, cond);
}
1152
// Pointer-width bitwise test against an immediate mask, then branch on |cond|.
void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
                                   Label* label) {
  Tst(ARMRegister(lhs, 64), Operand(rhs.value));
  B(label, cond);
}
1158
// Load a pointer from memory into a scratch register, then test-and-branch.
void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
                                   Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  loadPtr(lhs, scratch);
  branchTestPtr(cond, scratch, rhs, label);
}
1167
// 64-bit test-and-branch. |temp| is unused on this 64-bit target; the test
// reduces to a pointer-width branchTestPtr.
template <class L>
void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
                                  Register64 rhs, Register temp, L label) {
  branchTestPtr(cond, lhs.reg, rhs.reg, label);
}
1173
// Branch on the undefined-tag test of a tag register; forwards to the Impl.
void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
                                         Label* label) {
  branchTestUndefinedImpl(cond, tag, label);
}
1178
// Address overload; forwards to the shared Impl.
void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
                                         Label* label) {
  branchTestUndefinedImpl(cond, address, label);
}
1183
// BaseIndex overload; forwards to the shared Impl.
void MacroAssembler::branchTestUndefined(Condition cond,
                                         const BaseIndex& address,
                                         Label* label) {
  branchTestUndefinedImpl(cond, address, label);
}
1189
// ValueOperand overload; forwards to the shared Impl.
void MacroAssembler::branchTestUndefined(Condition cond,
                                         const ValueOperand& value,
                                         Label* label) {
  branchTestUndefinedImpl(cond, value, label);
}
1195
1196 template <typename T>
branchTestUndefinedImpl(Condition cond,const T & t,Label * label)1197 void MacroAssembler::branchTestUndefinedImpl(Condition cond, const T& t,
1198 Label* label) {
1199 Condition c = testUndefined(cond, t);
1200 B(label, c);
1201 }
1202
// Branch on the int32-tag test of a tag register; forwards to the Impl.
void MacroAssembler::branchTestInt32(Condition cond, Register tag,
                                     Label* label) {
  branchTestInt32Impl(cond, tag, label);
}
1207
// Address overload; forwards to the shared Impl.
void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
                                     Label* label) {
  branchTestInt32Impl(cond, address, label);
}
1212
// BaseIndex overload; forwards to the shared Impl.
void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
                                     Label* label) {
  branchTestInt32Impl(cond, address, label);
}
1217
// ValueOperand overload; forwards to the shared Impl.
void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
                                     Label* label) {
  branchTestInt32Impl(cond, value, label);
}
1222
1223 template <typename T>
branchTestInt32Impl(Condition cond,const T & t,Label * label)1224 void MacroAssembler::branchTestInt32Impl(Condition cond, const T& t,
1225 Label* label) {
1226 Condition c = testInt32(cond, t);
1227 B(label, c);
1228 }
1229
branchTestInt32Truthy(bool truthy,const ValueOperand & value,Label * label)1230 void MacroAssembler::branchTestInt32Truthy(bool truthy,
1231 const ValueOperand& value,
1232 Label* label) {
1233 Condition c = testInt32Truthy(truthy, value);
1234 B(label, c);
1235 }
1236
// Branch on the double-tag test of a tag register; forwards to the Impl.
void MacroAssembler::branchTestDouble(Condition cond, Register tag,
                                      Label* label) {
  branchTestDoubleImpl(cond, tag, label);
}
1241
// Address overload; forwards to the shared Impl.
void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
                                      Label* label) {
  branchTestDoubleImpl(cond, address, label);
}
1246
// BaseIndex overload; forwards to the shared Impl.
void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestDoubleImpl(cond, address, label);
}
1251
// ValueOperand overload; forwards to the shared Impl.
void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestDoubleImpl(cond, value, label);
}
1256
1257 template <typename T>
branchTestDoubleImpl(Condition cond,const T & t,Label * label)1258 void MacroAssembler::branchTestDoubleImpl(Condition cond, const T& t,
1259 Label* label) {
1260 Condition c = testDouble(cond, t);
1261 B(label, c);
1262 }
1263
// Branch on the truthiness of a double: falsy values are +/-0 and NaN. After
// Fcmp against 0.0, Zero means equal-to-zero and Overflow (V) means
// unordered, i.e. NaN.
void MacroAssembler::branchTestDoubleTruthy(bool truthy, FloatRegister reg,
                                            Label* label) {
  Fcmp(ARMFPRegister(reg, 64), 0.0);
  if (!truthy) {
    // falsy values are zero, and NaN.
    branch(Zero, label);
    branch(Overflow, label);
  } else {
    // truthy values are non-zero and not nan.
    // If it is overflow
    Label onFalse;
    branch(Zero, &onFalse);
    branch(Overflow, &onFalse);
    B(label);
    bind(&onFalse);
  }
}
1281
// Branch on the number-tag test of a tag register; forwards to the Impl.
void MacroAssembler::branchTestNumber(Condition cond, Register tag,
                                      Label* label) {
  branchTestNumberImpl(cond, tag, label);
}
1286
// ValueOperand overload; forwards to the shared Impl.
void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestNumberImpl(cond, value, label);
}
1291
1292 template <typename T>
branchTestNumberImpl(Condition cond,const T & t,Label * label)1293 void MacroAssembler::branchTestNumberImpl(Condition cond, const T& t,
1294 Label* label) {
1295 Condition c = testNumber(cond, t);
1296 B(label, c);
1297 }
1298
// Branch on the boolean-tag test of a tag register; forwards to the Impl.
void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
                                       Label* label) {
  branchTestBooleanImpl(cond, tag, label);
}
1303
// Address overload; forwards to the shared Impl.
void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
                                       Label* label) {
  branchTestBooleanImpl(cond, address, label);
}
1308
// BaseIndex overload; forwards to the shared Impl.
void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
                                       Label* label) {
  branchTestBooleanImpl(cond, address, label);
}
1313
// ValueOperand overload; forwards to the shared Impl.
void MacroAssembler::branchTestBoolean(Condition cond,
                                       const ValueOperand& value,
                                       Label* label) {
  branchTestBooleanImpl(cond, value, label);
}
1319
1320 template <typename T>
branchTestBooleanImpl(Condition cond,const T & tag,Label * label)1321 void MacroAssembler::branchTestBooleanImpl(Condition cond, const T& tag,
1322 Label* label) {
1323 Condition c = testBoolean(cond, tag);
1324 B(label, c);
1325 }
1326
branchTestBooleanTruthy(bool truthy,const ValueOperand & value,Label * label)1327 void MacroAssembler::branchTestBooleanTruthy(bool truthy,
1328 const ValueOperand& value,
1329 Label* label) {
1330 Condition c = testBooleanTruthy(truthy, value);
1331 B(label, c);
1332 }
1333
// Branch on the string-tag test of a tag register; forwards to the Impl.
void MacroAssembler::branchTestString(Condition cond, Register tag,
                                      Label* label) {
  branchTestStringImpl(cond, tag, label);
}
1338
// Address overload; forwards to the shared Impl.
void MacroAssembler::branchTestString(Condition cond, const Address& address,
                                      Label* label) {
  branchTestStringImpl(cond, address, label);
}
1343
// BaseIndex overload; forwards to the shared Impl.
void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestStringImpl(cond, address, label);
}
1348
// ValueOperand overload; forwards to the shared Impl.
void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestStringImpl(cond, value, label);
}
1353
1354 template <typename T>
branchTestStringImpl(Condition cond,const T & t,Label * label)1355 void MacroAssembler::branchTestStringImpl(Condition cond, const T& t,
1356 Label* label) {
1357 Condition c = testString(cond, t);
1358 B(label, c);
1359 }
1360
branchTestStringTruthy(bool truthy,const ValueOperand & value,Label * label)1361 void MacroAssembler::branchTestStringTruthy(bool truthy,
1362 const ValueOperand& value,
1363 Label* label) {
1364 Condition c = testStringTruthy(truthy, value);
1365 B(label, c);
1366 }
1367
// Branch on the symbol-tag test of a tag register; forwards to the Impl.
void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
                                      Label* label) {
  branchTestSymbolImpl(cond, tag, label);
}
1372
// BaseIndex overload; forwards to the shared Impl.
void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestSymbolImpl(cond, address, label);
}
1377
// ValueOperand overload; forwards to the shared Impl.
void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestSymbolImpl(cond, value, label);
}
1382
1383 template <typename T>
branchTestSymbolImpl(Condition cond,const T & t,Label * label)1384 void MacroAssembler::branchTestSymbolImpl(Condition cond, const T& t,
1385 Label* label) {
1386 Condition c = testSymbol(cond, t);
1387 B(label, c);
1388 }
1389
// Branch on the null-tag test of a tag register; forwards to the Impl.
void MacroAssembler::branchTestNull(Condition cond, Register tag,
                                    Label* label) {
  branchTestNullImpl(cond, tag, label);
}
1394
// Address overload; forwards to the shared Impl.
void MacroAssembler::branchTestNull(Condition cond, const Address& address,
                                    Label* label) {
  branchTestNullImpl(cond, address, label);
}
1399
// BaseIndex overload; forwards to the shared Impl.
void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
                                    Label* label) {
  branchTestNullImpl(cond, address, label);
}
1404
// ValueOperand overload; forwards to the shared Impl.
void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
                                    Label* label) {
  branchTestNullImpl(cond, value, label);
}
1409
1410 template <typename T>
branchTestNullImpl(Condition cond,const T & t,Label * label)1411 void MacroAssembler::branchTestNullImpl(Condition cond, const T& t,
1412 Label* label) {
1413 Condition c = testNull(cond, t);
1414 B(label, c);
1415 }
1416
// Branch on the object-tag test of a tag register; forwards to the Impl.
void MacroAssembler::branchTestObject(Condition cond, Register tag,
                                      Label* label) {
  branchTestObjectImpl(cond, tag, label);
}
1421
// Address overload; forwards to the shared Impl.
void MacroAssembler::branchTestObject(Condition cond, const Address& address,
                                      Label* label) {
  branchTestObjectImpl(cond, address, label);
}
1426
// BaseIndex overload; forwards to the shared Impl.
void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestObjectImpl(cond, address, label);
}
1431
// ValueOperand overload; forwards to the shared Impl.
void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestObjectImpl(cond, value, label);
}
1436
1437 template <typename T>
branchTestObjectImpl(Condition cond,const T & t,Label * label)1438 void MacroAssembler::branchTestObjectImpl(Condition cond, const T& t,
1439 Label* label) {
1440 Condition c = testObject(cond, t);
1441 B(label, c);
1442 }
1443
// Branch on the GC-thing test of a boxed value in memory; forwards to Impl.
void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
                                       Label* label) {
  branchTestGCThingImpl(cond, address, label);
}
1448
// BaseIndex overload; forwards to the shared Impl.
void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
                                       Label* label) {
  branchTestGCThingImpl(cond, address, label);
}
1453
1454 template <typename T>
branchTestGCThingImpl(Condition cond,const T & src,Label * label)1455 void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& src,
1456 Label* label) {
1457 Condition c = testGCThing(cond, src);
1458 B(label, c);
1459 }
1460
// Branch on the primitive test of a tag register; forwards to the Impl.
void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
                                         Label* label) {
  branchTestPrimitiveImpl(cond, tag, label);
}
1465
// ValueOperand overload; forwards to the shared Impl.
void MacroAssembler::branchTestPrimitive(Condition cond,
                                         const ValueOperand& value,
                                         Label* label) {
  branchTestPrimitiveImpl(cond, value, label);
}
1471
1472 template <typename T>
branchTestPrimitiveImpl(Condition cond,const T & t,Label * label)1473 void MacroAssembler::branchTestPrimitiveImpl(Condition cond, const T& t,
1474 Label* label) {
1475 Condition c = testPrimitive(cond, t);
1476 B(label, c);
1477 }
1478
// Branch on the magic-tag test of a tag register; forwards to the Impl.
void MacroAssembler::branchTestMagic(Condition cond, Register tag,
                                     Label* label) {
  branchTestMagicImpl(cond, tag, label);
}
1483
// Address overload; forwards to the shared Impl.
void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
                                     Label* label) {
  branchTestMagicImpl(cond, address, label);
}
1488
// BaseIndex overload; forwards to the shared Impl.
void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
                                     Label* label) {
  branchTestMagicImpl(cond, address, label);
}
1493
// ValueOperand overload (label type templated); forwards to the shared Impl.
template <class L>
void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
                                     L label) {
  branchTestMagicImpl(cond, value, label);
}
1499
1500 template <typename T, class L>
branchTestMagicImpl(Condition cond,const T & t,L label)1501 void MacroAssembler::branchTestMagicImpl(Condition cond, const T& t, L label) {
1502 Condition c = testMagic(cond, t);
1503 B(label, c);
1504 }
1505
// Compare the full boxed value in memory against the specific magic value
// MagicValue(why), then branch on |cond|.
void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
                                     JSWhyMagic why, Label* label) {
  uint64_t magic = MagicValue(why).asRawBits();
  cmpPtr(valaddr, ImmWord(magic));
  B(label, cond);
}
1512
// Indirect branch through a computed address. Not implemented on ARM64;
// crashes if reached.
void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
  // Not used by Rabaldr.
  MOZ_CRASH("NYI - branchToComputedAddress");
}
1517
// Conditional 32-bit move: dest = (lhs <cond> rhs) ? src : dest, via Csel.
void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
                                 Register src, Register dest) {
  cmp32(lhs, rhs);
  Csel(ARMRegister(dest, 32), ARMRegister(src, 32), ARMRegister(dest, 32),
       cond);
}
1524
// Conditional 32-bit move with a memory comparand:
// dest = (lhs <cond> *rhs) ? src : dest, via Csel.
void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
                                 const Address& rhs, Register src,
                                 Register dest) {
  cmp32(lhs, rhs);
  Csel(ARMRegister(dest, 32), ARMRegister(src, 32), ARMRegister(dest, 32),
       cond);
}
1532
// 32-bit compare of |lhs| against immediate |rhs|, then a 64-bit
// conditional select: dest = cond ? src : dest.
void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
                                  Register src, Register dest) {
  cmp32(lhs, rhs);
  Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
       cond);
}
1539
// If (*addr & mask) satisfies |cond| (Zero/NonZero), load the pointer at
// |src| into |dest|; otherwise leave |dest| unchanged.
void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
                                   Imm32 mask, const Address& src,
                                   Register dest) {
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);

  // ARM64 does not support conditional loads, so we use a branch with a CSel
  // (to prevent Spectre attacks).
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();
  Label done;
  // Skip the load entirely when the test fails architecturally.
  branchTest32(Assembler::InvertCondition(cond), addr, mask, &done);
  loadPtr(src, scratch64.asUnsized());
  // The Csel re-checks |cond| on the flags from branchTest32, so a
  // misspeculated fall-through still leaves |dest| untouched.
  Csel(ARMRegister(dest, 64), scratch64, ARMRegister(dest, 64), cond);
  bind(&done);
}
1555
// dest = ((*addr & mask) satisfies cond) ? src : dest, where |cond| is
// Zero or NonZero. Branch-free: test32 sets flags, Csel selects.
void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
                                   Imm32 mask, Register src, Register dest) {
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
  test32(addr, mask);
  Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
       cond);
}
1563
// Branch-free 64-bit conditional move: dest = cond ? src : dest.
// Consumes the condition flags set by the caller's preceding comparison.
void MacroAssembler::spectreMovePtr(Condition cond, Register src,
                                    Register dest) {
  Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
       cond);
}
1569
// Zero |dest| when |cond| holds, keep it otherwise, without a branch:
// select dest on the inverted condition, xzr otherwise. The unnamed
// Register parameter (a scratch in the cross-platform signature) is not
// needed by this implementation.
void MacroAssembler::spectreZeroRegister(Condition cond, Register,
                                         Register dest) {
  Csel(ARMRegister(dest, 64), ARMRegister(dest, 64), vixl::xzr,
       Assembler::InvertCondition(cond));
}
1575
// Unsigned bounds check: branch to |failure| unless index < length.
// |maybeScratch| is unused by this implementation; the asserts only
// guarantee it does not alias the inputs.
void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
                                          Register maybeScratch,
                                          Label* failure) {
  MOZ_ASSERT(length != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  branch32(Assembler::BelowOrEqual, length, index, failure);

  // The flags from branch32's comparison are still live here: on the
  // fall-through path length > index (Above) holds architecturally, so the
  // Csel keeps |index|; under misspeculation of the branch it zeroes it.
  if (JitOptions.spectreIndexMasking)
    Csel(ARMRegister(index, 32), ARMRegister(index, 32), vixl::wzr,
         Assembler::Above);
}
1588
spectreBoundsCheck32(Register index,const Address & length,Register maybeScratch,Label * failure)1589 void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
1590 Register maybeScratch,
1591 Label* failure) {
1592 MOZ_ASSERT(index != length.base);
1593 MOZ_ASSERT(length.base != maybeScratch);
1594 MOZ_ASSERT(index != maybeScratch);
1595
1596 branch32(Assembler::BelowOrEqual, length, index, failure);
1597
1598 if (JitOptions.spectreIndexMasking)
1599 Csel(ARMRegister(index, 32), ARMRegister(index, 32), vixl::wzr,
1600 Assembler::Above);
1601 }
1602
1603 // ========================================================================
1604 // Memory access primitives.
// Store a 64-bit float to |dest| without NaN canonicalization.
void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
                                                const Address& dest) {
  Str(ARMFPRegister(src, 64), toMemOperand(dest));
}
// As above, for a base+index destination (STR_d via doBaseIndex).
void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
                                                const BaseIndex& dest) {
  doBaseIndex(ARMFPRegister(src, 64), dest, vixl::STR_d);
}
1613
// Store a 32-bit float to |addr| without NaN canonicalization.
void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
                                                 const Address& addr) {
  Str(ARMFPRegister(src, 32), toMemOperand(addr));
}
// As above, for a base+index destination (STR_s via doBaseIndex).
void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
                                                 const BaseIndex& addr) {
  doBaseIndex(ARMFPRegister(src, 32), addr, vixl::STR_s);
}
1622
// SIMD float32x3 store: not implemented on ARM64.
void MacroAssembler::storeFloat32x3(FloatRegister src, const Address& dest) {
  MOZ_CRASH("NYI");
}
// SIMD float32x3 store (base+index): not implemented on ARM64.
void MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest) {
  MOZ_CRASH("NYI");
}
1629
memoryBarrier(MemoryBarrierBits barrier)1630 void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
1631 if (barrier == MembarStoreStore)
1632 Dmb(vixl::InnerShareable, vixl::BarrierWrites);
1633 else if (barrier == MembarLoadLoad)
1634 Dmb(vixl::InnerShareable, vixl::BarrierReads);
1635 else if (barrier)
1636 Dmb(vixl::InnerShareable, vixl::BarrierAll);
1637 }
1638
1639 // ===============================================================
1640 // Clamping functions.
1641
// Clamp the signed 32-bit value in |reg| to the range [0, 255] in place.
void MacroAssembler::clampIntToUint8(Register reg) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  const ARMRegister reg32(reg, 32);
  MOZ_ASSERT(!scratch32.Is(reg32));

  // Compare reg against its own zero-extended low byte. Both Csels below
  // reuse the flags from this single Cmp, so their order matters.
  Cmp(reg32, Operand(reg32, vixl::UXTB));
  // reg < uxtb(reg) only when reg is negative -> clamp to 0.
  Csel(reg32, reg32, vixl::wzr, Assembler::GreaterThanOrEqual);
  // reg > uxtb(reg) only when reg > 255 -> clamp to 0xff.
  Mov(scratch32, Operand(0xff));
  Csel(reg32, reg32, scratch32, Assembler::LessThanOrEqual);
}
1653
1654 // ========================================================================
1655 // wasm support
1656
// Explicit wasm bounds check (register limit): unused on this port.
template <class L>
void MacroAssembler::wasmBoundsCheck(Condition cond, Register index,
                                     Register boundsCheckLimit, L label) {
  // Not used on ARM64, we rely on signal handling instead
  MOZ_CRASH("NYI - wasmBoundsCheck");
}
1663
// Explicit wasm bounds check (memory limit): unused on this port.
template <class L>
void MacroAssembler::wasmBoundsCheck(Condition cond, Register index,
                                     Address boundsCheckLimit, L label) {
  // Not used on ARM64, we rely on signal handling instead
  MOZ_CRASH("NYI - wasmBoundsCheck");
}
1670
1671 //}}} check_macroassembler_style
1672 // ===============================================================
1673
// Add |src| to the (pseudo) stack pointer.
// NOTE(review): no syncStackPtr() here, unlike subFromStackPtr — presumably
// because growing the stack downward is the case that must be visible to
// the real SP; confirm callers sync when required.
void MacroAssemblerCompat::addToStackPtr(Register src) {
  Add(GetStackPointer64(), GetStackPointer64(), ARMRegister(src, 64));
}
1677
// Add an immediate to the (pseudo) stack pointer (no sync; see the
// register overload).
void MacroAssemblerCompat::addToStackPtr(Imm32 imm) {
  Add(GetStackPointer64(), GetStackPointer64(), Operand(imm.value));
}
1681
// Add a value loaded from memory to the (pseudo) stack pointer, going
// through a scratch register since Add takes register/immediate operands.
void MacroAssemblerCompat::addToStackPtr(const Address& src) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch = temps.AcquireX();
  Ldr(scratch, toMemOperand(src));
  Add(GetStackPointer64(), GetStackPointer64(), scratch);
}
1688
// dest += stackPointer (the stack pointer itself is not modified).
void MacroAssemblerCompat::addStackPtrTo(Register dest) {
  Add(ARMRegister(dest, 64), ARMRegister(dest, 64), GetStackPointer64());
}
1692
// Subtract |src| from the (pseudo) stack pointer, then sync the real SP
// so the machine stack pointer reflects the new depth.
void MacroAssemblerCompat::subFromStackPtr(Register src) {
  Sub(GetStackPointer64(), GetStackPointer64(), ARMRegister(src, 64));
  syncStackPtr();
}
1697
// Subtract an immediate from the (pseudo) stack pointer and sync the
// real SP.
void MacroAssemblerCompat::subFromStackPtr(Imm32 imm) {
  Sub(GetStackPointer64(), GetStackPointer64(), Operand(imm.value));
  syncStackPtr();
}
1702
// dest -= stackPointer (the stack pointer itself is not modified).
void MacroAssemblerCompat::subStackPtrFrom(Register dest) {
  Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), GetStackPointer64());
}
1706
// stackPointer &= imm (typically used for alignment masks). When the
// pseudo-SP is the real SP it cannot be an And source operand, so the
// value is staged through a scratch register.
void MacroAssemblerCompat::andToStackPtr(Imm32 imm) {
  if (sp.Is(GetStackPointer64())) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Mov(scratch, sp);
    And(sp, scratch, Operand(imm.value));
    // syncStackPtr() not needed since our SP is the real SP.
  } else {
    And(GetStackPointer64(), GetStackPointer64(), Operand(imm.value));
    syncStackPtr();
  }
}
1719
// dest &= stackPointer (the stack pointer itself is not modified).
void MacroAssemblerCompat::andStackPtrTo(Register dest) {
  And(ARMRegister(dest, 64), ARMRegister(dest, 64), GetStackPointer64());
}
1723
// stackPointer = src, then sync the real SP with the pseudo-SP.
void MacroAssemblerCompat::moveToStackPtr(Register src) {
  Mov(GetStackPointer64(), ARMRegister(src, 64));
  syncStackPtr();
}
1728
// dest = stackPointer.
void MacroAssemblerCompat::moveStackPtrTo(Register dest) {
  Mov(ARMRegister(dest, 64), GetStackPointer64());
}
1732
// Load a new stack pointer value from memory. When the pseudo-SP is the
// real SP, Ldr cannot target SP directly, so load into a scratch and Mov;
// otherwise load the pseudo-SP and sync the real SP.
void MacroAssemblerCompat::loadStackPtr(const Address& src) {
  if (sp.Is(GetStackPointer64())) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Ldr(scratch, toMemOperand(src));
    Mov(sp, scratch);
    // syncStackPtr() not needed since our SP is the real SP.
  } else {
    Ldr(GetStackPointer64(), toMemOperand(src));
    syncStackPtr();
  }
}
1745
// Store the current stack pointer to memory. The real SP cannot be a Str
// source, so it is copied through a scratch register first.
void MacroAssemblerCompat::storeStackPtr(const Address& dest) {
  if (sp.Is(GetStackPointer64())) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Mov(scratch, sp);
    Str(scratch, toMemOperand(dest));
  } else {
    Str(GetStackPointer64(), toMemOperand(dest));
  }
}
1756
// Branch on (stackPointer & rhs) satisfying |cond|. Tst cannot take the
// real SP as an operand, so it is copied through a scratch when needed.
void MacroAssemblerCompat::branchTestStackPtr(Condition cond, Imm32 rhs,
                                              Label* label) {
  if (sp.Is(GetStackPointer64())) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Mov(scratch, sp);
    Tst(scratch, Operand(rhs.value));
  } else {
    Tst(GetStackPointer64(), Operand(rhs.value));
  }
  B(label, cond);
}
1769
// Branch on |stackPointer cond rhs|. Cmp cannot take the real SP as its
// lhs here, so it is copied through a scratch when the pseudo-SP is SP.
void MacroAssemblerCompat::branchStackPtr(Condition cond, Register rhs_,
                                          Label* label) {
  ARMRegister rhs(rhs_, 64);
  if (sp.Is(GetStackPointer64())) {
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Mov(scratch, sp);
    Cmp(scratch, rhs);
  } else {
    Cmp(GetStackPointer64(), rhs);
  }
  B(label, cond);
}
1783
// Branch on |*lhs cond stackPointer| (stack pointer is the RIGHT-hand
// operand, hence the name).
// NOTE(review): flipping Cmp operands normally requires the *reversed*
// condition (e.g. LT<->GT), while InvertCondition yields the negation
// (LT<->GE); the two agree only for Equal/NotEqual. Confirm callers only
// pass conditions for which this inversion is correct.
void MacroAssemblerCompat::branchStackPtrRhs(Condition cond, Address lhs,
                                             Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch = temps.AcquireX();
  Ldr(scratch, toMemOperand(lhs));
  // Cmp disallows SP as the rhs, so flip the operands and invert the
  // condition.
  Cmp(GetStackPointer64(), scratch);
  B(label, Assembler::InvertCondition(cond));
}
1794
1795 // If source is a double, load into dest.
1796 // If source is int32, convert to double and store in dest.
1797 // Else, branch to failure.
// Coerce |source| into the double register |dest|:
//  - double  -> unbox into dest;
//  - int32   -> convert to double into dest;
//  - others  -> branch to |failure| (dest unmodified on that path).
void MacroAssemblerCompat::ensureDouble(const ValueOperand& source,
                                        FloatRegister dest, Label* failure) {
  Label isDouble, done;

  {
    // Scope the tag scratch so it is released before the conversion code.
    ScratchTagScope tag(asMasm(), source);
    splitTagForTest(source, tag);
    asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
    asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
  }

  // Fall-through: tag was int32.
  convertInt32ToDouble(source.valueReg(), dest);
  jump(&done);

  bind(&isDouble);
  unboxDouble(source, dest);

  bind(&done);
}
1817
// Unbox |src| into |dest|. For a float destination, accept either an
// int32 (converted to double) or a double (unboxed directly); for a GPR
// destination, unbox as the given non-double |type|.
void MacroAssemblerCompat::unboxValue(const ValueOperand& src, AnyRegister dest,
                                      JSValueType type) {
  if (dest.isFloat()) {
    Label notInt32, end;
    asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
    convertInt32ToDouble(src.valueReg(), dest.fpu());
    jump(&end);
    bind(&notInt32);
    unboxDouble(src, dest.fpu());
    bind(&end);
  } else {
    unboxNonDouble(src, dest.gpr(), type);
  }
}
1832
1833 } // namespace jit
1834 } // namespace js
1835
1836 #endif /* jit_arm64_MacroAssembler_arm64_inl_h */
1837