1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #ifndef jit_arm64_MacroAssembler_arm64_inl_h
8 #define jit_arm64_MacroAssembler_arm64_inl_h
9
10 #include "jit/arm64/MacroAssembler-arm64.h"
11
12 namespace js {
13 namespace jit {
14
15 //{{{ check_macroassembler_style
16
move64(Register64 src,Register64 dest)17 void MacroAssembler::move64(Register64 src, Register64 dest) {
18 Mov(ARMRegister(dest.reg, 64), ARMRegister(src.reg, 64));
19 }
20
move64(Imm64 imm,Register64 dest)21 void MacroAssembler::move64(Imm64 imm, Register64 dest) {
22 Mov(ARMRegister(dest.reg, 64), imm.value);
23 }
24
moveFloat32ToGPR(FloatRegister src,Register dest)25 void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
26 Fmov(ARMRegister(dest, 32), ARMFPRegister(src, 32));
27 }
28
moveGPRToFloat32(Register src,FloatRegister dest)29 void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
30 Fmov(ARMFPRegister(dest, 32), ARMRegister(src, 32));
31 }
32
move8SignExtend(Register src,Register dest)33 void MacroAssembler::move8SignExtend(Register src, Register dest) {
34 Sxtb(ARMRegister(dest, 32), ARMRegister(src, 32));
35 }
36
move16SignExtend(Register src,Register dest)37 void MacroAssembler::move16SignExtend(Register src, Register dest) {
38 Sxth(ARMRegister(dest, 32), ARMRegister(src, 32));
39 }
40
moveDoubleToGPR64(FloatRegister src,Register64 dest)41 void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
42 Fmov(ARMRegister(dest.reg, 64), ARMFPRegister(src, 64));
43 }
44
moveGPR64ToDouble(Register64 src,FloatRegister dest)45 void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
46 Fmov(ARMFPRegister(dest, 64), ARMRegister(src.reg, 64));
47 }
48
move64To32(Register64 src,Register dest)49 void MacroAssembler::move64To32(Register64 src, Register dest) {
50 Mov(ARMRegister(dest, 32), ARMRegister(src.reg, 32));
51 }
52
move32To64ZeroExtend(Register src,Register64 dest)53 void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
54 Mov(ARMRegister(dest.reg, 32), ARMRegister(src, 32));
55 }
56
move8To64SignExtend(Register src,Register64 dest)57 void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
58 Sxtb(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
59 }
60
move16To64SignExtend(Register src,Register64 dest)61 void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
62 Sxth(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
63 }
64
move32To64SignExtend(Register src,Register64 dest)65 void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
66 Sxtw(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
67 }
68
move32SignExtendToPtr(Register src,Register dest)69 void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
70 Sxtw(ARMRegister(dest, 64), ARMRegister(src, 32));
71 }
72
move32ZeroExtendToPtr(Register src,Register dest)73 void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
74 Mov(ARMRegister(dest, 32), ARMRegister(src, 32));
75 }
76
77 // ===============================================================
78 // Load instructions
79
load32SignExtendToPtr(const Address & src,Register dest)80 void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
81 load32(src, dest);
82 move32To64SignExtend(dest, Register64(dest));
83 }
84
loadAbiReturnAddress(Register dest)85 void MacroAssembler::loadAbiReturnAddress(Register dest) { movePtr(lr, dest); }
86
87 // ===============================================================
88 // Logical instructions
89
not32(Register reg)90 void MacroAssembler::not32(Register reg) {
91 Orn(ARMRegister(reg, 32), vixl::wzr, ARMRegister(reg, 32));
92 }
93
notPtr(Register reg)94 void MacroAssembler::notPtr(Register reg) {
95 Orn(ARMRegister(reg, 64), vixl::xzr, ARMRegister(reg, 64));
96 }
97
and32(Register src,Register dest)98 void MacroAssembler::and32(Register src, Register dest) {
99 And(ARMRegister(dest, 32), ARMRegister(dest, 32),
100 Operand(ARMRegister(src, 32)));
101 }
102
and32(Imm32 imm,Register dest)103 void MacroAssembler::and32(Imm32 imm, Register dest) {
104 And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
105 }
106
and32(Imm32 imm,Register src,Register dest)107 void MacroAssembler::and32(Imm32 imm, Register src, Register dest) {
108 And(ARMRegister(dest, 32), ARMRegister(src, 32), Operand(imm.value));
109 }
110
and32(Imm32 imm,const Address & dest)111 void MacroAssembler::and32(Imm32 imm, const Address& dest) {
112 vixl::UseScratchRegisterScope temps(this);
113 const ARMRegister scratch32 = temps.AcquireW();
114 MOZ_ASSERT(scratch32.asUnsized() != dest.base);
115 load32(dest, scratch32.asUnsized());
116 And(scratch32, scratch32, Operand(imm.value));
117 store32(scratch32.asUnsized(), dest);
118 }
119
and32(const Address & src,Register dest)120 void MacroAssembler::and32(const Address& src, Register dest) {
121 vixl::UseScratchRegisterScope temps(this);
122 const ARMRegister scratch32 = temps.AcquireW();
123 MOZ_ASSERT(scratch32.asUnsized() != src.base);
124 load32(src, scratch32.asUnsized());
125 And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
126 }
127
andPtr(Register src,Register dest)128 void MacroAssembler::andPtr(Register src, Register dest) {
129 And(ARMRegister(dest, 64), ARMRegister(dest, 64),
130 Operand(ARMRegister(src, 64)));
131 }
132
andPtr(Imm32 imm,Register dest)133 void MacroAssembler::andPtr(Imm32 imm, Register dest) {
134 And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
135 }
136
and64(Imm64 imm,Register64 dest)137 void MacroAssembler::and64(Imm64 imm, Register64 dest) {
138 And(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
139 }
140
and64(Register64 src,Register64 dest)141 void MacroAssembler::and64(Register64 src, Register64 dest) {
142 And(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
143 ARMRegister(src.reg, 64));
144 }
145
or64(Imm64 imm,Register64 dest)146 void MacroAssembler::or64(Imm64 imm, Register64 dest) {
147 Orr(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
148 }
149
or32(Imm32 imm,Register dest)150 void MacroAssembler::or32(Imm32 imm, Register dest) {
151 Orr(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
152 }
153
or32(Register src,Register dest)154 void MacroAssembler::or32(Register src, Register dest) {
155 Orr(ARMRegister(dest, 32), ARMRegister(dest, 32),
156 Operand(ARMRegister(src, 32)));
157 }
158
or32(Imm32 imm,const Address & dest)159 void MacroAssembler::or32(Imm32 imm, const Address& dest) {
160 vixl::UseScratchRegisterScope temps(this);
161 const ARMRegister scratch32 = temps.AcquireW();
162 MOZ_ASSERT(scratch32.asUnsized() != dest.base);
163 load32(dest, scratch32.asUnsized());
164 Orr(scratch32, scratch32, Operand(imm.value));
165 store32(scratch32.asUnsized(), dest);
166 }
167
orPtr(Register src,Register dest)168 void MacroAssembler::orPtr(Register src, Register dest) {
169 Orr(ARMRegister(dest, 64), ARMRegister(dest, 64),
170 Operand(ARMRegister(src, 64)));
171 }
172
orPtr(Imm32 imm,Register dest)173 void MacroAssembler::orPtr(Imm32 imm, Register dest) {
174 Orr(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
175 }
176
or64(Register64 src,Register64 dest)177 void MacroAssembler::or64(Register64 src, Register64 dest) {
178 orPtr(src.reg, dest.reg);
179 }
180
xor64(Register64 src,Register64 dest)181 void MacroAssembler::xor64(Register64 src, Register64 dest) {
182 xorPtr(src.reg, dest.reg);
183 }
184
xor32(Register src,Register dest)185 void MacroAssembler::xor32(Register src, Register dest) {
186 Eor(ARMRegister(dest, 32), ARMRegister(dest, 32),
187 Operand(ARMRegister(src, 32)));
188 }
189
xor32(Imm32 imm,Register dest)190 void MacroAssembler::xor32(Imm32 imm, Register dest) {
191 Eor(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
192 }
193
xor32(Imm32 imm,const Address & dest)194 void MacroAssembler::xor32(Imm32 imm, const Address& dest) {
195 vixl::UseScratchRegisterScope temps(this);
196 const ARMRegister scratch32 = temps.AcquireW();
197 MOZ_ASSERT(scratch32.asUnsized() != dest.base);
198 load32(dest, scratch32.asUnsized());
199 Eor(scratch32, scratch32, Operand(imm.value));
200 store32(scratch32.asUnsized(), dest);
201 }
202
xor32(const Address & src,Register dest)203 void MacroAssembler::xor32(const Address& src, Register dest) {
204 vixl::UseScratchRegisterScope temps(this);
205 const ARMRegister scratch32 = temps.AcquireW();
206 MOZ_ASSERT(scratch32.asUnsized() != src.base);
207 load32(src, scratch32.asUnsized());
208 Eor(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
209 }
210
xorPtr(Register src,Register dest)211 void MacroAssembler::xorPtr(Register src, Register dest) {
212 Eor(ARMRegister(dest, 64), ARMRegister(dest, 64),
213 Operand(ARMRegister(src, 64)));
214 }
215
xorPtr(Imm32 imm,Register dest)216 void MacroAssembler::xorPtr(Imm32 imm, Register dest) {
217 Eor(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
218 }
219
xor64(Imm64 imm,Register64 dest)220 void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
221 Eor(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
222 }
223
224 // ===============================================================
225 // Swap instructions
226
byteSwap16SignExtend(Register reg)227 void MacroAssembler::byteSwap16SignExtend(Register reg) {
228 rev16(ARMRegister(reg, 32), ARMRegister(reg, 32));
229 sxth(ARMRegister(reg, 32), ARMRegister(reg, 32));
230 }
231
byteSwap16ZeroExtend(Register reg)232 void MacroAssembler::byteSwap16ZeroExtend(Register reg) {
233 rev16(ARMRegister(reg, 32), ARMRegister(reg, 32));
234 uxth(ARMRegister(reg, 32), ARMRegister(reg, 32));
235 }
236
byteSwap32(Register reg)237 void MacroAssembler::byteSwap32(Register reg) {
238 rev(ARMRegister(reg, 32), ARMRegister(reg, 32));
239 }
240
byteSwap64(Register64 reg)241 void MacroAssembler::byteSwap64(Register64 reg) {
242 rev(ARMRegister(reg.reg, 64), ARMRegister(reg.reg, 64));
243 }
244
245 // ===============================================================
246 // Arithmetic functions
247
add32(Register src,Register dest)248 void MacroAssembler::add32(Register src, Register dest) {
249 Add(ARMRegister(dest, 32), ARMRegister(dest, 32),
250 Operand(ARMRegister(src, 32)));
251 }
252
add32(Imm32 imm,Register dest)253 void MacroAssembler::add32(Imm32 imm, Register dest) {
254 Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
255 }
256
add32(Imm32 imm,const Address & dest)257 void MacroAssembler::add32(Imm32 imm, const Address& dest) {
258 vixl::UseScratchRegisterScope temps(this);
259 const ARMRegister scratch32 = temps.AcquireW();
260 MOZ_ASSERT(scratch32.asUnsized() != dest.base);
261
262 Ldr(scratch32, toMemOperand(dest));
263 Add(scratch32, scratch32, Operand(imm.value));
264 Str(scratch32, toMemOperand(dest));
265 }
266
addPtr(Register src,Register dest)267 void MacroAssembler::addPtr(Register src, Register dest) {
268 addPtr(src, dest, dest);
269 }
270
addPtr(Register src1,Register src2,Register dest)271 void MacroAssembler::addPtr(Register src1, Register src2, Register dest) {
272 Add(ARMRegister(dest, 64), ARMRegister(src1, 64),
273 Operand(ARMRegister(src2, 64)));
274 }
275
addPtr(Imm32 imm,Register dest)276 void MacroAssembler::addPtr(Imm32 imm, Register dest) {
277 addPtr(imm, dest, dest);
278 }
279
addPtr(Imm32 imm,Register src,Register dest)280 void MacroAssembler::addPtr(Imm32 imm, Register src, Register dest) {
281 Add(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(imm.value));
282 }
283
addPtr(ImmWord imm,Register dest)284 void MacroAssembler::addPtr(ImmWord imm, Register dest) {
285 Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
286 }
287
addPtr(Imm32 imm,const Address & dest)288 void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
289 vixl::UseScratchRegisterScope temps(this);
290 const ARMRegister scratch64 = temps.AcquireX();
291 MOZ_ASSERT(scratch64.asUnsized() != dest.base);
292
293 Ldr(scratch64, toMemOperand(dest));
294 Add(scratch64, scratch64, Operand(imm.value));
295 Str(scratch64, toMemOperand(dest));
296 }
297
addPtr(const Address & src,Register dest)298 void MacroAssembler::addPtr(const Address& src, Register dest) {
299 vixl::UseScratchRegisterScope temps(this);
300 const ARMRegister scratch64 = temps.AcquireX();
301 MOZ_ASSERT(scratch64.asUnsized() != src.base);
302
303 Ldr(scratch64, toMemOperand(src));
304 Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
305 }
306
add64(Register64 src,Register64 dest)307 void MacroAssembler::add64(Register64 src, Register64 dest) {
308 addPtr(src.reg, dest.reg);
309 }
310
add64(Imm32 imm,Register64 dest)311 void MacroAssembler::add64(Imm32 imm, Register64 dest) {
312 Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
313 }
314
add64(Imm64 imm,Register64 dest)315 void MacroAssembler::add64(Imm64 imm, Register64 dest) {
316 Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
317 }
318
sub32FromStackPtrWithPatch(Register dest)319 CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
320 vixl::UseScratchRegisterScope temps(this);
321 const ARMRegister scratch = temps.AcquireX();
322 AutoForbidPoolsAndNops afp(this,
323 /* max number of instructions in scope = */ 3);
324 CodeOffset offs = CodeOffset(currentOffset());
325 movz(scratch, 0, 0);
326 movk(scratch, 0, 16);
327 Sub(ARMRegister(dest, 64), sp, scratch);
328 return offs;
329 }
330
patchSub32FromStackPtr(CodeOffset offset,Imm32 imm)331 void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
332 Instruction* i1 = getInstructionAt(BufferOffset(offset.offset()));
333 MOZ_ASSERT(i1->IsMovz());
334 i1->SetInstructionBits(i1->InstructionBits() |
335 ImmMoveWide(uint16_t(imm.value)));
336
337 Instruction* i2 = getInstructionAt(BufferOffset(offset.offset() + 4));
338 MOZ_ASSERT(i2->IsMovk());
339 i2->SetInstructionBits(i2->InstructionBits() |
340 ImmMoveWide(uint16_t(imm.value >> 16)));
341 }
342
addDouble(FloatRegister src,FloatRegister dest)343 void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
344 fadd(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
345 ARMFPRegister(src, 64));
346 }
347
addFloat32(FloatRegister src,FloatRegister dest)348 void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
349 fadd(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
350 ARMFPRegister(src, 32));
351 }
352
sub32(Imm32 imm,Register dest)353 void MacroAssembler::sub32(Imm32 imm, Register dest) {
354 Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
355 }
356
sub32(Register src,Register dest)357 void MacroAssembler::sub32(Register src, Register dest) {
358 Sub(ARMRegister(dest, 32), ARMRegister(dest, 32),
359 Operand(ARMRegister(src, 32)));
360 }
361
sub32(const Address & src,Register dest)362 void MacroAssembler::sub32(const Address& src, Register dest) {
363 vixl::UseScratchRegisterScope temps(this);
364 const ARMRegister scratch32 = temps.AcquireW();
365 MOZ_ASSERT(scratch32.asUnsized() != src.base);
366 load32(src, scratch32.asUnsized());
367 Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
368 }
369
subPtr(Register src,Register dest)370 void MacroAssembler::subPtr(Register src, Register dest) {
371 Sub(ARMRegister(dest, 64), ARMRegister(dest, 64),
372 Operand(ARMRegister(src, 64)));
373 }
374
subPtr(Register src,const Address & dest)375 void MacroAssembler::subPtr(Register src, const Address& dest) {
376 vixl::UseScratchRegisterScope temps(this);
377 const ARMRegister scratch64 = temps.AcquireX();
378 MOZ_ASSERT(scratch64.asUnsized() != dest.base);
379
380 Ldr(scratch64, toMemOperand(dest));
381 Sub(scratch64, scratch64, Operand(ARMRegister(src, 64)));
382 Str(scratch64, toMemOperand(dest));
383 }
384
subPtr(Imm32 imm,Register dest)385 void MacroAssembler::subPtr(Imm32 imm, Register dest) {
386 Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
387 }
388
subPtr(const Address & addr,Register dest)389 void MacroAssembler::subPtr(const Address& addr, Register dest) {
390 vixl::UseScratchRegisterScope temps(this);
391 const ARMRegister scratch64 = temps.AcquireX();
392 MOZ_ASSERT(scratch64.asUnsized() != addr.base);
393
394 Ldr(scratch64, toMemOperand(addr));
395 Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
396 }
397
sub64(Register64 src,Register64 dest)398 void MacroAssembler::sub64(Register64 src, Register64 dest) {
399 Sub(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
400 ARMRegister(src.reg, 64));
401 }
402
sub64(Imm64 imm,Register64 dest)403 void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
404 Sub(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
405 }
406
subDouble(FloatRegister src,FloatRegister dest)407 void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
408 fsub(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
409 ARMFPRegister(src, 64));
410 }
411
subFloat32(FloatRegister src,FloatRegister dest)412 void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
413 fsub(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
414 ARMFPRegister(src, 32));
415 }
416
mul32(Register rhs,Register srcDest)417 void MacroAssembler::mul32(Register rhs, Register srcDest) {
418 mul32(srcDest, rhs, srcDest, nullptr);
419 }
420
mul32(Register src1,Register src2,Register dest,Label * onOver)421 void MacroAssembler::mul32(Register src1, Register src2, Register dest,
422 Label* onOver) {
423 if (onOver) {
424 Smull(ARMRegister(dest, 64), ARMRegister(src1, 32), ARMRegister(src2, 32));
425 Cmp(ARMRegister(dest, 64), Operand(ARMRegister(dest, 32), vixl::SXTW));
426 B(onOver, NotEqual);
427
428 // Clear upper 32 bits.
429 Mov(ARMRegister(dest, 32), ARMRegister(dest, 32));
430 } else {
431 Mul(ARMRegister(dest, 32), ARMRegister(src1, 32), ARMRegister(src2, 32));
432 }
433 }
434
mulPtr(Register rhs,Register srcDest)435 void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
436 Mul(ARMRegister(srcDest, 64), ARMRegister(srcDest, 64), ARMRegister(rhs, 64));
437 }
438
mul64(Imm64 imm,const Register64 & dest)439 void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
440 vixl::UseScratchRegisterScope temps(this);
441 const ARMRegister scratch64 = temps.AcquireX();
442 MOZ_ASSERT(dest.reg != scratch64.asUnsized());
443 mov(ImmWord(imm.value), scratch64.asUnsized());
444 Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), scratch64);
445 }
446
mul64(const Register64 & src,const Register64 & dest,const Register temp)447 void MacroAssembler::mul64(const Register64& src, const Register64& dest,
448 const Register temp) {
449 MOZ_ASSERT(temp == Register::Invalid());
450 Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
451 ARMRegister(src.reg, 64));
452 }
453
mul64(const Register64 & src1,const Register64 & src2,const Register64 & dest)454 void MacroAssembler::mul64(const Register64& src1, const Register64& src2,
455 const Register64& dest) {
456 Mul(ARMRegister(dest.reg, 64), ARMRegister(src1.reg, 64),
457 ARMRegister(src2.reg, 64));
458 }
459
mul64(Imm64 src1,const Register64 & src2,const Register64 & dest)460 void MacroAssembler::mul64(Imm64 src1, const Register64& src2,
461 const Register64& dest) {
462 vixl::UseScratchRegisterScope temps(this);
463 const ARMRegister scratch64 = temps.AcquireX();
464 MOZ_ASSERT(dest.reg != scratch64.asUnsized());
465 mov(ImmWord(src1.value), scratch64.asUnsized());
466 Mul(ARMRegister(dest.reg, 64), ARMRegister(src2.reg, 64), scratch64);
467 }
468
mulBy3(Register src,Register dest)469 void MacroAssembler::mulBy3(Register src, Register dest) {
470 ARMRegister xdest(dest, 64);
471 ARMRegister xsrc(src, 64);
472 Add(xdest, xsrc, Operand(xsrc, vixl::LSL, 1));
473 }
474
mulFloat32(FloatRegister src,FloatRegister dest)475 void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
476 fmul(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
477 ARMFPRegister(src, 32));
478 }
479
mulDouble(FloatRegister src,FloatRegister dest)480 void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
481 fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
482 ARMFPRegister(src, 64));
483 }
484
mulDoublePtr(ImmPtr imm,Register temp,FloatRegister dest)485 void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
486 FloatRegister dest) {
487 vixl::UseScratchRegisterScope temps(this);
488 const Register scratch = temps.AcquireX().asUnsized();
489 MOZ_ASSERT(temp != scratch);
490 movePtr(imm, scratch);
491 const ARMFPRegister scratchDouble = temps.AcquireD();
492 Ldr(scratchDouble, MemOperand(Address(scratch, 0)));
493 fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), scratchDouble);
494 }
495
quotient32(Register rhs,Register srcDest,bool isUnsigned)496 void MacroAssembler::quotient32(Register rhs, Register srcDest,
497 bool isUnsigned) {
498 if (isUnsigned) {
499 Udiv(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32),
500 ARMRegister(rhs, 32));
501 } else {
502 Sdiv(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32),
503 ARMRegister(rhs, 32));
504 }
505 }
506
507 // This does not deal with x % 0 or INT_MIN % -1, the caller needs to filter
508 // those cases when they may occur.
509
remainder32(Register rhs,Register srcDest,bool isUnsigned)510 void MacroAssembler::remainder32(Register rhs, Register srcDest,
511 bool isUnsigned) {
512 vixl::UseScratchRegisterScope temps(this);
513 ARMRegister scratch = temps.AcquireW();
514 if (isUnsigned) {
515 Udiv(scratch, ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
516 } else {
517 Sdiv(scratch, ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
518 }
519 Mul(scratch, scratch, ARMRegister(rhs, 32));
520 Sub(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32), scratch);
521 }
522
divFloat32(FloatRegister src,FloatRegister dest)523 void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
524 fdiv(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
525 ARMFPRegister(src, 32));
526 }
527
divDouble(FloatRegister src,FloatRegister dest)528 void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
529 fdiv(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
530 ARMFPRegister(src, 64));
531 }
532
inc64(AbsoluteAddress dest)533 void MacroAssembler::inc64(AbsoluteAddress dest) {
534 vixl::UseScratchRegisterScope temps(this);
535 const ARMRegister scratchAddr64 = temps.AcquireX();
536 const ARMRegister scratch64 = temps.AcquireX();
537
538 Mov(scratchAddr64, uint64_t(dest.addr));
539 Ldr(scratch64, MemOperand(scratchAddr64, 0));
540 Add(scratch64, scratch64, Operand(1));
541 Str(scratch64, MemOperand(scratchAddr64, 0));
542 }
543
neg32(Register reg)544 void MacroAssembler::neg32(Register reg) {
545 Negs(ARMRegister(reg, 32), Operand(ARMRegister(reg, 32)));
546 }
547
neg64(Register64 reg)548 void MacroAssembler::neg64(Register64 reg) { negPtr(reg.reg); }
549
negPtr(Register reg)550 void MacroAssembler::negPtr(Register reg) {
551 Negs(ARMRegister(reg, 64), Operand(ARMRegister(reg, 64)));
552 }
553
negateFloat(FloatRegister reg)554 void MacroAssembler::negateFloat(FloatRegister reg) {
555 fneg(ARMFPRegister(reg, 32), ARMFPRegister(reg, 32));
556 }
557
negateDouble(FloatRegister reg)558 void MacroAssembler::negateDouble(FloatRegister reg) {
559 fneg(ARMFPRegister(reg, 64), ARMFPRegister(reg, 64));
560 }
561
abs32(Register src,Register dest)562 void MacroAssembler::abs32(Register src, Register dest) {
563 Cmp(ARMRegister(src, 32), wzr);
564 Cneg(ARMRegister(dest, 32), ARMRegister(src, 32), Assembler::LessThan);
565 }
566
absFloat32(FloatRegister src,FloatRegister dest)567 void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
568 fabs(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
569 }
570
absDouble(FloatRegister src,FloatRegister dest)571 void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
572 fabs(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
573 }
574
sqrtFloat32(FloatRegister src,FloatRegister dest)575 void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
576 fsqrt(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
577 }
578
sqrtDouble(FloatRegister src,FloatRegister dest)579 void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
580 fsqrt(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
581 }
582
minFloat32(FloatRegister other,FloatRegister srcDest,bool handleNaN)583 void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
584 bool handleNaN) {
585 MOZ_ASSERT(handleNaN); // Always true for wasm
586 fmin(ARMFPRegister(srcDest, 32), ARMFPRegister(srcDest, 32),
587 ARMFPRegister(other, 32));
588 }
589
minDouble(FloatRegister other,FloatRegister srcDest,bool handleNaN)590 void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
591 bool handleNaN) {
592 MOZ_ASSERT(handleNaN); // Always true for wasm
593 fmin(ARMFPRegister(srcDest, 64), ARMFPRegister(srcDest, 64),
594 ARMFPRegister(other, 64));
595 }
596
maxFloat32(FloatRegister other,FloatRegister srcDest,bool handleNaN)597 void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
598 bool handleNaN) {
599 MOZ_ASSERT(handleNaN); // Always true for wasm
600 fmax(ARMFPRegister(srcDest, 32), ARMFPRegister(srcDest, 32),
601 ARMFPRegister(other, 32));
602 }
603
maxDouble(FloatRegister other,FloatRegister srcDest,bool handleNaN)604 void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
605 bool handleNaN) {
606 MOZ_ASSERT(handleNaN); // Always true for wasm
607 fmax(ARMFPRegister(srcDest, 64), ARMFPRegister(srcDest, 64),
608 ARMFPRegister(other, 64));
609 }
610
611 // ===============================================================
612 // Shift functions
613
lshiftPtr(Imm32 imm,Register dest)614 void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
615 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
616 Lsl(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
617 }
618
lshiftPtr(Register shift,Register dest)619 void MacroAssembler::lshiftPtr(Register shift, Register dest) {
620 Lsl(ARMRegister(dest, 64), ARMRegister(dest, 64), ARMRegister(shift, 64));
621 }
622
lshift64(Imm32 imm,Register64 dest)623 void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
624 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
625 lshiftPtr(imm, dest.reg);
626 }
627
lshift64(Register shift,Register64 srcDest)628 void MacroAssembler::lshift64(Register shift, Register64 srcDest) {
629 Lsl(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
630 ARMRegister(shift, 64));
631 }
632
lshift32(Register shift,Register dest)633 void MacroAssembler::lshift32(Register shift, Register dest) {
634 Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
635 }
636
flexibleLshift32(Register src,Register dest)637 void MacroAssembler::flexibleLshift32(Register src, Register dest) {
638 lshift32(src, dest);
639 }
640
lshift32(Imm32 imm,Register dest)641 void MacroAssembler::lshift32(Imm32 imm, Register dest) {
642 MOZ_ASSERT(0 <= imm.value && imm.value < 32);
643 Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
644 }
645
rshiftPtr(Imm32 imm,Register dest)646 void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
647 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
648 Lsr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
649 }
650
rshiftPtr(Imm32 imm,Register src,Register dest)651 void MacroAssembler::rshiftPtr(Imm32 imm, Register src, Register dest) {
652 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
653 Lsr(ARMRegister(dest, 64), ARMRegister(src, 64), imm.value);
654 }
655
rshiftPtr(Register shift,Register dest)656 void MacroAssembler::rshiftPtr(Register shift, Register dest) {
657 Lsr(ARMRegister(dest, 64), ARMRegister(dest, 64), ARMRegister(shift, 64));
658 }
659
rshift32(Register shift,Register dest)660 void MacroAssembler::rshift32(Register shift, Register dest) {
661 Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
662 }
663
flexibleRshift32(Register src,Register dest)664 void MacroAssembler::flexibleRshift32(Register src, Register dest) {
665 rshift32(src, dest);
666 }
667
rshift32(Imm32 imm,Register dest)668 void MacroAssembler::rshift32(Imm32 imm, Register dest) {
669 MOZ_ASSERT(0 <= imm.value && imm.value < 32);
670 Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
671 }
672
rshiftPtrArithmetic(Imm32 imm,Register dest)673 void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
674 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
675 Asr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
676 }
677
rshift32Arithmetic(Register shift,Register dest)678 void MacroAssembler::rshift32Arithmetic(Register shift, Register dest) {
679 Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
680 }
681
rshift32Arithmetic(Imm32 imm,Register dest)682 void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
683 MOZ_ASSERT(0 <= imm.value && imm.value < 32);
684 Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
685 }
686
flexibleRshift32Arithmetic(Register src,Register dest)687 void MacroAssembler::flexibleRshift32Arithmetic(Register src, Register dest) {
688 rshift32Arithmetic(src, dest);
689 }
690
rshift64(Imm32 imm,Register64 dest)691 void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
692 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
693 rshiftPtr(imm, dest.reg);
694 }
695
rshift64(Register shift,Register64 srcDest)696 void MacroAssembler::rshift64(Register shift, Register64 srcDest) {
697 Lsr(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
698 ARMRegister(shift, 64));
699 }
700
rshift64Arithmetic(Imm32 imm,Register64 dest)701 void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
702 Asr(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), imm.value);
703 }
704
rshift64Arithmetic(Register shift,Register64 srcDest)705 void MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest) {
706 Asr(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
707 ARMRegister(shift, 64));
708 }
709
710 // ===============================================================
711 // Condition functions
712
713 template <typename T1, typename T2>
cmp32Set(Condition cond,T1 lhs,T2 rhs,Register dest)714 void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
715 cmp32(lhs, rhs);
716 emitSet(cond, dest);
717 }
718
719 template <typename T1, typename T2>
cmpPtrSet(Condition cond,T1 lhs,T2 rhs,Register dest)720 void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
721 cmpPtr(lhs, rhs);
722 emitSet(cond, dest);
723 }
724
725 // ===============================================================
726 // Rotation functions
727
// ARM64 has no rotate-left instruction, so a constant rotate-left is
// emitted as a rotate-right by (32 - count), masked into the range 0..31.
void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
  Ror(ARMRegister(dest, 32), ARMRegister(input, 32), (32 - count.value) & 31);
}

// Variable rotate-left: Ror only consumes the count modulo 32, so the
// negated count is equivalent to (32 - count).
void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch = temps.AcquireW();
  // Really 32 - count, but the upper bits of the result are ignored.
  Neg(scratch, ARMRegister(count, 32));
  Ror(ARMRegister(dest, 32), ARMRegister(input, 32), scratch);
}

// Constant rotate-right, count masked into the range 0..31.
void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
  Ror(ARMRegister(dest, 32), ARMRegister(input, 32), count.value & 31);
}

// Variable rotate-right; Ror uses the count modulo 32.
void MacroAssembler::rotateRight(Register count, Register input,
                                 Register dest) {
  Ror(ARMRegister(dest, 32), ARMRegister(input, 32), ARMRegister(count, 32));
}
748
// Variable 64-bit rotate-left: emitted as a rotate-right by the negated
// count (Ror uses the count modulo 64, so -count == 64 - count). |temp| is
// not needed on arm64 and must be Invalid.
void MacroAssembler::rotateLeft64(Register count, Register64 input,
                                  Register64 dest, Register temp) {
  MOZ_ASSERT(temp == Register::Invalid());

  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch = temps.AcquireX();
  // Really 64 - count, but the upper bits of the result are ignored.
  Neg(scratch, ARMRegister(count, 64));
  Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), scratch);
}

// Constant 64-bit rotate-left, emitted as rotate-right by (64 - count).
void MacroAssembler::rotateLeft64(Imm32 count, Register64 input,
                                  Register64 dest, Register temp) {
  MOZ_ASSERT(temp == Register::Invalid());

  Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64),
      (64 - count.value) & 63);
}

// Variable 64-bit rotate-right; |temp| unused on arm64.
void MacroAssembler::rotateRight64(Register count, Register64 input,
                                   Register64 dest, Register temp) {
  MOZ_ASSERT(temp == Register::Invalid());

  Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64),
      ARMRegister(count, 64));
}

// Constant 64-bit rotate-right, count masked into the range 0..63.
void MacroAssembler::rotateRight64(Imm32 count, Register64 input,
                                   Register64 dest, Register temp) {
  MOZ_ASSERT(temp == Register::Invalid());

  Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), count.value & 63);
}
782
783 // ===============================================================
784 // Bit counting functions
785
// Count leading zero bits of a 32-bit value. |knownNotZero| is unused:
// ARM64's CLZ is well-defined for a zero input.
void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
  Clz(ARMRegister(dest, 32), ARMRegister(src, 32));
}

// Count trailing zeros: reverse the bit order, then count leading zeros.
// |knownNotZero| is likewise unused on this platform.
void MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) {
  Rbit(ARMRegister(dest, 32), ARMRegister(src, 32));
  Clz(ARMRegister(dest, 32), ARMRegister(dest, 32));
}

// 64-bit count of leading zero bits.
void MacroAssembler::clz64(Register64 src, Register dest) {
  Clz(ARMRegister(dest, 64), ARMRegister(src.reg, 64));
}

// 64-bit count of trailing zeros via bit-reverse + CLZ.
void MacroAssembler::ctz64(Register64 src, Register dest) {
  Rbit(ARMRegister(dest, 64), ARMRegister(src.reg, 64));
  Clz(ARMRegister(dest, 64), ARMRegister(dest, 64));
}
803
// Branch-free 32-bit population count (SWAR bit-twiddling): sum bits within
// 2-bit, then 4-bit groups, fold the per-byte counts into the top byte, and
// shift the total down. Requires a valid temporary register.
void MacroAssembler::popcnt32(Register src_, Register dest_, Register tmp_) {
  MOZ_ASSERT(tmp_ != Register::Invalid());

  // Equivalent to mozilla::CountPopulation32().

  ARMRegister src(src_, 32);
  ARMRegister dest(dest_, 32);
  ARMRegister tmp(tmp_, 32);

  // |tmp| keeps the original value; |dest| is scrambled in place.
  Mov(tmp, src);
  if (src_ != dest_) {
    Mov(dest, src);
  }
  // dest = src - ((src >> 1) & 0x55555555): per-2-bit counts.
  Lsr(dest, dest, 1);
  And(dest, dest, 0x55555555);
  Sub(dest, tmp, dest);
  // Per-4-bit counts: (dest & 0x33333333) + ((dest >> 2) & 0x33333333).
  Lsr(tmp, dest, 2);
  And(tmp, tmp, 0x33333333);
  And(dest, dest, 0x33333333);
  Add(dest, tmp, dest);
  // Per-byte counts.
  Add(dest, dest, Operand(dest, vixl::LSR, 4));
  And(dest, dest, 0x0F0F0F0F);
  // Accumulate all byte counts into the top byte, then extract it.
  Add(dest, dest, Operand(dest, vixl::LSL, 8));
  Add(dest, dest, Operand(dest, vixl::LSL, 16));
  Lsr(dest, dest, 24);
}
830
// Branch-free 64-bit population count; same SWAR scheme as popcnt32 with
// 64-bit masks and one extra folding step. Requires a valid temporary.
void MacroAssembler::popcnt64(Register64 src_, Register64 dest_,
                              Register tmp_) {
  MOZ_ASSERT(tmp_ != Register::Invalid());

  // Equivalent to mozilla::CountPopulation64(), though likely more efficient.

  ARMRegister src(src_.reg, 64);
  ARMRegister dest(dest_.reg, 64);
  ARMRegister tmp(tmp_, 64);

  // |tmp| keeps the original value; |dest| is scrambled in place.
  Mov(tmp, src);
  if (src_ != dest_) {
    Mov(dest, src);
  }
  // Per-2-bit counts.
  Lsr(dest, dest, 1);
  And(dest, dest, 0x5555555555555555);
  Sub(dest, tmp, dest);
  // Per-4-bit counts.
  Lsr(tmp, dest, 2);
  And(tmp, tmp, 0x3333333333333333);
  And(dest, dest, 0x3333333333333333);
  Add(dest, tmp, dest);
  // Per-byte counts.
  Add(dest, dest, Operand(dest, vixl::LSR, 4));
  And(dest, dest, 0x0F0F0F0F0F0F0F0F);
  // Accumulate all byte counts into the top byte, then extract it.
  Add(dest, dest, Operand(dest, vixl::LSL, 8));
  Add(dest, dest, Operand(dest, vixl::LSL, 16));
  Add(dest, dest, Operand(dest, vixl::LSL, 32));
  Lsr(dest, dest, 56);
}
859
860 // ===============================================================
861 // Branch functions
862
// Compare two 32-bit registers and branch to |label| if |cond| holds.
template <class L>
void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
                              L label) {
  cmp32(lhs, rhs);
  B(label, cond);
}
869
870 template <class L>
branch32(Condition cond,Register lhs,Imm32 imm,L label)871 void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm,
872 L label) {
873 if (imm.value == 0 && cond == Assembler::Equal) {
874 Cbz(ARMRegister(lhs, 32), label);
875 } else if (imm.value == 0 && cond == Assembler::NotEqual) {
876 Cbnz(ARMRegister(lhs, 32), label);
877 } else {
878 cmp32(lhs, imm);
879 B(label, cond);
880 }
881 }
882
// Register vs. memory compare-and-branch: load the 32-bit memory operand
// into a scratch register, then defer to the register/register form.
void MacroAssembler::branch32(Condition cond, Register lhs, const Address& rhs,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs);
  MOZ_ASSERT(scratch != rhs.base);
  load32(rhs, scratch);
  branch32(cond, lhs, scratch, label);
}

// Memory vs. register form.
void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != rhs);
  load32(lhs, scratch);
  branch32(cond, scratch, rhs, label);
}

// Memory vs. immediate form.
void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 imm,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  load32(lhs, scratch);
  branch32(cond, scratch, imm, label);
}
911
// Absolute-address vs. register: materialize the address into a scratch
// register and compare through it as a plain Address.
void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
                              Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  movePtr(ImmPtr(lhs.addr), scratch);
  branch32(cond, Address(scratch, 0), rhs, label);
}

// Absolute-address vs. immediate: load the 32-bit value directly.
void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
                              Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  load32(lhs, scratch);
  branch32(cond, scratch, rhs, label);
}

// Base+index form: load with a 32-bit load through doBaseIndex.
void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
  MOZ_ASSERT(scratch32.asUnsized() != lhs.index);
  doBaseIndex(scratch32, lhs, vixl::LDR_w);
  branch32(cond, scratch32.asUnsized(), rhs, label);
}

// Wasm symbolic-address form: resolve the symbol into a scratch register,
// then compare through it.
void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
                              Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  movePtr(lhs, scratch);
  branch32(cond, Address(scratch, 0), rhs, label);
}
945
// Two-target 64-bit branch: jump to |success| if |cond| holds, else to
// |fail| (a null |fail| falls through). Zero (Not)Equal tests use the
// compare-and-branch forms.
void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
                              Label* success, Label* fail) {
  if (val.value == 0 && cond == Assembler::Equal) {
    Cbz(ARMRegister(lhs.reg, 64), success);
  } else if (val.value == 0 && cond == Assembler::NotEqual) {
    Cbnz(ARMRegister(lhs.reg, 64), success);
  } else {
    Cmp(ARMRegister(lhs.reg, 64), val.value);
    B(success, cond);
  }
  if (fail) {
    B(fail);
  }
}

// Register/register form of the two-target 64-bit branch.
void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
                              Label* success, Label* fail) {
  Cmp(ARMRegister(lhs.reg, 64), ARMRegister(rhs.reg, 64));
  B(success, cond);
  if (fail) {
    B(fail);
  }
}

// Memory vs. 64-bit immediate; only NotEqual is supported (see assert).
void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
                              Label* label) {
  MOZ_ASSERT(cond == Assembler::NotEqual,
             "other condition codes not supported");

  branchPtr(cond, lhs, ImmWord(val.value), label);
}

// Memory vs. memory: load |rhs| into |scratch|, then compare against |lhs|.
// Only NotEqual is supported (see assert).
void MacroAssembler::branch64(Condition cond, const Address& lhs,
                              const Address& rhs, Register scratch,
                              Label* label) {
  MOZ_ASSERT(cond == Assembler::NotEqual,
             "other condition codes not supported");
  MOZ_ASSERT(lhs.base != scratch);
  MOZ_ASSERT(rhs.base != scratch);

  loadPtr(rhs, scratch);
  branchPtr(cond, lhs, scratch, label);
}
989
// Compare two pointer-width registers and branch to |label| if |cond| holds.
template <class L>
void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
                               L label) {
  Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
  B(label, cond);
}
996
branchPtr(Condition cond,Register lhs,Imm32 rhs,Label * label)997 void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
998 Label* label) {
999 if (rhs.value == 0 && cond == Assembler::Equal) {
1000 Cbz(ARMRegister(lhs, 64), label);
1001 } else if (rhs.value == 0 && cond == Assembler::NotEqual) {
1002 Cbnz(ARMRegister(lhs, 64), label);
1003 } else {
1004 cmpPtr(lhs, rhs);
1005 B(label, cond);
1006 }
1007 }
1008
branchPtr(Condition cond,Register lhs,ImmPtr rhs,Label * label)1009 void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
1010 Label* label) {
1011 if (rhs.value == 0 && cond == Assembler::Equal) {
1012 Cbz(ARMRegister(lhs, 64), label);
1013 } else if (rhs.value == 0 && cond == Assembler::NotEqual) {
1014 Cbnz(ARMRegister(lhs, 64), label);
1015 } else {
1016 cmpPtr(lhs, rhs);
1017 B(label, cond);
1018 }
1019 }
1020
branchPtr(Condition cond,Register lhs,ImmGCPtr rhs,Label * label)1021 void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
1022 Label* label) {
1023 vixl::UseScratchRegisterScope temps(this);
1024 const Register scratch = temps.AcquireX().asUnsized();
1025 MOZ_ASSERT(scratch != lhs);
1026 movePtr(rhs, scratch);
1027 branchPtr(cond, lhs, scratch, label);
1028 }
1029
branchPtr(Condition cond,Register lhs,ImmWord rhs,Label * label)1030 void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
1031 Label* label) {
1032 if (rhs.value == 0 && cond == Assembler::Equal) {
1033 Cbz(ARMRegister(lhs, 64), label);
1034 } else if (rhs.value == 0 && cond == Assembler::NotEqual) {
1035 Cbnz(ARMRegister(lhs, 64), label);
1036 } else {
1037 cmpPtr(lhs, rhs);
1038 B(label, cond);
1039 }
1040 }
1041
// Memory vs. register pointer compare: load the pointer into a scratch
// register first.
template <class L>
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
                               L label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != rhs);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}

// Memory vs. pointer-immediate form.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}

// Memory vs. GC-pointer form: needs two scratch registers, one for the
// materialized GC pointer and one for the loaded value.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch1_64 = temps.AcquireX();
  const ARMRegister scratch2_64 = temps.AcquireX();
  MOZ_ASSERT(scratch1_64.asUnsized() != lhs.base);
  MOZ_ASSERT(scratch2_64.asUnsized() != lhs.base);

  movePtr(rhs, scratch1_64.asUnsized());
  loadPtr(lhs, scratch2_64.asUnsized());
  branchPtr(cond, scratch2_64.asUnsized(), scratch1_64.asUnsized(), label);
}

// Memory vs. word-immediate form.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
1083
// Absolute-address vs. register pointer compare.
void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
                               Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != rhs);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}

// Absolute-address vs. word-immediate form.
void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
                               ImmWord rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}

// Wasm symbolic-address vs. register form.
void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
                               Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != rhs);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}

// Base+index vs. word-immediate form.
void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
                               ImmWord rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != lhs.index);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}

// Base+index vs. register form.
void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
                               Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != lhs.index);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
1129
// On arm64 private pointers are stored as plain pointers, so this is just
// branchPtr.
void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
                                      Register rhs, Label* label) {
  branchPtr(cond, lhs, rhs, label);
}
1134
// Branch on a float32 comparison. The compare leaves the V (Overflow) flag
// set for unordered operands (NaN), so the two conditions that mix
// (un)orderedness with (in)equality are synthesized from two branches.
void MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs,
                                 FloatRegister rhs, Label* label) {
  compareFloat(cond, lhs, rhs);
  switch (cond) {
    case DoubleNotEqual: {
      Label unordered;
      // not equal *and* ordered
      branch(Overflow, &unordered);
      branch(NotEqual, label);
      bind(&unordered);
      break;
    }
    case DoubleEqualOrUnordered:
      // Either unordered (NaN operand) or equal takes the branch.
      branch(Overflow, label);
      branch(Equal, label);
      break;
    default:
      branch(Condition(cond), label);
  }
}
1155
// Truncate a float32 to an integer, branching to |fail| when the value is
// out of range or NaN; the successful result may be the input modulo 2^32.
void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
                                                         Register dest,
                                                         Label* fail) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();

  ARMFPRegister src32(src, 32);
  ARMRegister dest64(dest, 64);

  MOZ_ASSERT(!scratch64.Is(dest64));

  // Convert with truncation toward zero; out-of-range and NaN inputs
  // saturate to INT64_MIN/INT64_MAX (or produce 0 for NaN).
  Fcvtzs(dest64, src32);
  // dest + INT64_MAX wraps to 0xFFFF...FFFE for INT64_MAX and 0xFFFF...FFFF
  // for INT64_MIN, so an unsigned compare against -3 (Cmn ... 3, branch
  // Above) catches exactly the saturated cases.
  Add(scratch64, dest64, Operand(0x7fffffffffffffff));
  Cmn(scratch64, 3);
  B(fail, Assembler::Above);
  // Keep only the low 32 bits, i.e. the value modulo 2^32.
  And(dest64, dest64, Operand(0xffffffff));
}
1173
// Exact float32 -> int32 truncation; branches to |fail| when the value is
// not exactly representable (negative-zero check disabled).
void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
                                                  Register dest, Label* fail) {
  convertFloat32ToInt32(src, dest, fail, false);
}
1178
// Branch on a double comparison; mirrors branchFloat. The compare leaves
// the V (Overflow) flag set for unordered operands (NaN).
void MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs,
                                  FloatRegister rhs, Label* label) {
  compareDouble(cond, lhs, rhs);
  switch (cond) {
    case DoubleNotEqual: {
      Label unordered;
      // not equal *and* ordered
      branch(Overflow, &unordered);
      branch(NotEqual, label);
      bind(&unordered);
      break;
    }
    case DoubleEqualOrUnordered:
      // Either unordered (NaN operand) or equal takes the branch.
      branch(Overflow, label);
      branch(Equal, label);
      break;
    default:
      branch(Condition(cond), label);
  }
}
1199
// Truncate a double to an integer, branching to |fail| when the value is
// out of range or NaN; the successful result may be the input modulo 2^32.
// Same saturation-detection trick as the float32 variant above.
void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
                                                        Register dest,
                                                        Label* fail) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();

  // An out of range integer will be saturated to the destination size.
  ARMFPRegister src64(src, 64);
  ARMRegister dest64(dest, 64);

  MOZ_ASSERT(!scratch64.Is(dest64));

  Fcvtzs(dest64, src64);
  // dest + INT64_MAX wraps to 0xFFFF...FFFE/0xFFFF...FFFF for the two
  // saturated values, so an unsigned compare against -3 detects them.
  Add(scratch64, dest64, Operand(0x7fffffffffffffff));
  Cmn(scratch64, 3);
  B(fail, Assembler::Above);
  // Keep only the low 32 bits, i.e. the value modulo 2^32.
  And(dest64, dest64, Operand(0xffffffff));
}
1218
// Exact double -> int32 truncation; branches to |fail| when the value is
// not exactly representable (negative-zero check disabled).
void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
                                                 Register dest, Label* fail) {
  convertDoubleToInt32(src, dest, fail, false);
}
1223
// Flag-setting 32-bit add, then branch on |cond|.
template <typename T>
void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
                                 Label* label) {
  adds32(src, dest);
  B(label, cond);
}

// Flag-setting 32-bit subtract, then branch on |cond|.
template <typename T>
void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
                                 Label* label) {
  subs32(src, dest);
  branch(cond, label);
}
1237
// Multiply |dest| by |src|, branching to |label| on overflow (the only
// condition supported; the check is delegated to mul32).
template <typename T>
void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
                                 Label* label) {
  MOZ_ASSERT(cond == Assembler::Overflow);
  // NOTE(review): |temps| never acquires a register here — presumably it
  // exists to scope scratch-register use inside mul32; confirm whether it
  // can be removed.
  vixl::UseScratchRegisterScope temps(this);
  mul32(src, dest, dest, label);
}
1245
// Shift right, then branch on whether the result is zero/non-zero.
template <typename T>
void MacroAssembler::branchRshift32(Condition cond, T src, Register dest,
                                    Label* label) {
  MOZ_ASSERT(cond == Zero || cond == NonZero);
  rshift32(src, dest);
  branch32(cond == Zero ? Equal : NotEqual, dest, Imm32(0), label);
}

// Negate |reg| in place and branch on overflow (only INT32_MIN overflows
// two's-complement negation).
void MacroAssembler::branchNeg32(Condition cond, Register reg, Label* label) {
  MOZ_ASSERT(cond == Overflow);
  neg32(reg);
  B(label, cond);
}
1259
// Flag-setting pointer-width add, then branch on |cond|.
template <typename T>
void MacroAssembler::branchAddPtr(Condition cond, T src, Register dest,
                                  Label* label) {
  adds64(src, dest);
  B(label, cond);
}

// Flag-setting pointer-width subtract, then branch on |cond|.
template <typename T>
void MacroAssembler::branchSubPtr(Condition cond, T src, Register dest,
                                  Label* label) {
  subs64(src, dest);
  B(label, cond);
}
1273
// Pointer-width multiply with overflow detection: the product overflows
// 64 bits iff the high half (Smulh) differs from the sign-extension of the
// low half, in which case we branch.
void MacroAssembler::branchMulPtr(Condition cond, Register src, Register dest,
                                  Label* label) {
  MOZ_ASSERT(cond == Assembler::Overflow);

  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();
  const ARMRegister src64(src, 64);
  const ARMRegister dest64(dest, 64);

  Smulh(scratch64, dest64, src64);
  Mul(dest64, dest64, src64);
  // Compare the high 64 bits against the low result shifted down to its
  // sign bits; inequality means overflow.
  Cmp(scratch64, Operand(dest64, vixl::ASR, 63));
  B(label, NotEqual);
}
1288
// Subtract |rhs| from |lhs| in place (flag-setting), then branch on |cond|.
void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
                                  Label* label) {
  Subs(ARMRegister(lhs, 64), ARMRegister(lhs, 64), Operand(rhs.value));
  // NOTE(review): argument order is (cond, label), unlike the B(label, cond)
  // form used throughout this file — relies on an overload taking the
  // condition first; confirm this is intentional.
  B(cond, label);
}
1294
1295 template <class L>
branchTest32(Condition cond,Register lhs,Register rhs,L label)1296 void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
1297 L label) {
1298 MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
1299 cond == NotSigned);
1300 // The x86-biased front end prefers |test foo, foo| to |cmp foo, #0|. We look
1301 // for the former pattern and expand as Cbz/Cbnz when possible.
1302 if (lhs == rhs && cond == Zero) {
1303 Cbz(ARMRegister(lhs, 32), label);
1304 } else if (lhs == rhs && cond == NonZero) {
1305 Cbnz(ARMRegister(lhs, 32), label);
1306 } else {
1307 test32(lhs, rhs);
1308 B(label, cond);
1309 }
1310 }
1311
// Test |lhs| against an immediate mask and branch on |cond|.
template <class L>
void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
                                  L label) {
  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
             cond == NotSigned);
  test32(lhs, rhs);
  B(label, cond);
}
1320
// Memory form: load the 32-bit value, then test against the mask.
void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
                                  Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  load32(lhs, scratch);
  branchTest32(cond, scratch, rhs, label);
}

// Absolute-address form.
void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
                                  Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  load32(lhs, scratch);
  branchTest32(cond, scratch, rhs, label);
}
1337
1338 template <class L>
branchTestPtr(Condition cond,Register lhs,Register rhs,L label)1339 void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
1340 L label) {
1341 // See branchTest32.
1342 MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
1343 cond == NotSigned);
1344 if (lhs == rhs && cond == Zero) {
1345 Cbz(ARMRegister(lhs, 64), label);
1346 } else if (lhs == rhs && cond == NonZero) {
1347 Cbnz(ARMRegister(lhs, 64), label);
1348 } else {
1349 Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64)));
1350 B(label, cond);
1351 }
1352 }
1353
// Pointer-width immediate-mask test.
void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
                                   Label* label) {
  Tst(ARMRegister(lhs, 64), Operand(rhs.value));
  B(label, cond);
}

// Memory form: load the pointer, then test against the immediate mask.
void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
                                   Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  loadPtr(lhs, scratch);
  branchTestPtr(cond, scratch, rhs, label);
}
1368
// 64-bit test-and-branch; |temp| is not needed on arm64 and is ignored.
template <class L>
void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
                                  Register64 rhs, Register temp, L label) {
  branchTestPtr(cond, lhs.reg, rhs.reg, label);
}
1374
// branchTestUndefined: branch when the operand's type is (or is not,
// per |cond|) Undefined. Each overload forwards to the shared template
// implementation below.
void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
                                         Label* label) {
  branchTestUndefinedImpl(cond, tag, label);
}

void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
                                         Label* label) {
  branchTestUndefinedImpl(cond, address, label);
}

void MacroAssembler::branchTestUndefined(Condition cond,
                                         const BaseIndex& address,
                                         Label* label) {
  branchTestUndefinedImpl(cond, address, label);
}

void MacroAssembler::branchTestUndefined(Condition cond,
                                         const ValueOperand& value,
                                         Label* label) {
  branchTestUndefinedImpl(cond, value, label);
}

// Shared implementation: testUndefined computes the machine condition to
// branch on.
template <typename T>
void MacroAssembler::branchTestUndefinedImpl(Condition cond, const T& t,
                                             Label* label) {
  Condition c = testUndefined(cond, t);
  B(label, c);
}
1403
// branchTestInt32: branch when the operand's type is (or is not, per
// |cond|) Int32. Overloads forward to the shared template implementation.
void MacroAssembler::branchTestInt32(Condition cond, Register tag,
                                     Label* label) {
  branchTestInt32Impl(cond, tag, label);
}

void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
                                     Label* label) {
  branchTestInt32Impl(cond, address, label);
}

void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
                                     Label* label) {
  branchTestInt32Impl(cond, address, label);
}

void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
                                     Label* label) {
  branchTestInt32Impl(cond, value, label);
}

// Shared implementation: testInt32 computes the machine condition.
template <typename T>
void MacroAssembler::branchTestInt32Impl(Condition cond, const T& t,
                                         Label* label) {
  Condition c = testInt32(cond, t);
  B(label, c);
}

// Branch on the truthiness (non-zero-ness) of an Int32 value.
void MacroAssembler::branchTestInt32Truthy(bool truthy,
                                           const ValueOperand& value,
                                           Label* label) {
  Condition c = testInt32Truthy(truthy, value);
  B(label, c);
}
1437
// branchTestDouble: branch when the operand's type is (or is not, per
// |cond|) Double. Overloads forward to the shared template implementation.
void MacroAssembler::branchTestDouble(Condition cond, Register tag,
                                      Label* label) {
  branchTestDoubleImpl(cond, tag, label);
}

void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
                                      Label* label) {
  branchTestDoubleImpl(cond, address, label);
}

void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestDoubleImpl(cond, address, label);
}

void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestDoubleImpl(cond, value, label);
}

// Shared implementation: testDouble computes the machine condition.
template <typename T>
void MacroAssembler::branchTestDoubleImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testDouble(cond, t);
  B(label, c);
}
1464
// Branch on the truthiness of a double. Fcmp against 0.0 sets Z for zero
// and V (Overflow) for an unordered (NaN) operand; falsy is zero or NaN.
void MacroAssembler::branchTestDoubleTruthy(bool truthy, FloatRegister reg,
                                            Label* label) {
  Fcmp(ARMFPRegister(reg, 64), 0.0);
  if (!truthy) {
    // falsy values are zero, and NaN.
    branch(Zero, label);
    branch(Overflow, label);
  } else {
    // truthy values are non-zero and not NaN: route zero and NaN around the
    // unconditional branch to |label|.
    Label onFalse;
    branch(Zero, &onFalse);
    branch(Overflow, &onFalse);
    B(label);
    bind(&onFalse);
  }
}
1482
// branchTestNumber: branch when the operand is (or is not, per |cond|) a
// number (Int32 or Double). Overloads forward to the shared implementation.
void MacroAssembler::branchTestNumber(Condition cond, Register tag,
                                      Label* label) {
  branchTestNumberImpl(cond, tag, label);
}

void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestNumberImpl(cond, value, label);
}

// Shared implementation: testNumber computes the machine condition.
template <typename T>
void MacroAssembler::branchTestNumberImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testNumber(cond, t);
  B(label, c);
}
1499
// branchTestBoolean: branch when the operand's type is (or is not, per
// |cond|) Boolean. Overloads forward to the shared template implementation.
void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
                                       Label* label) {
  branchTestBooleanImpl(cond, tag, label);
}

void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
                                       Label* label) {
  branchTestBooleanImpl(cond, address, label);
}

void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
                                       Label* label) {
  branchTestBooleanImpl(cond, address, label);
}

void MacroAssembler::branchTestBoolean(Condition cond,
                                       const ValueOperand& value,
                                       Label* label) {
  branchTestBooleanImpl(cond, value, label);
}

// Shared implementation: testBoolean computes the machine condition.
template <typename T>
void MacroAssembler::branchTestBooleanImpl(Condition cond, const T& tag,
                                           Label* label) {
  Condition c = testBoolean(cond, tag);
  B(label, c);
}

// Branch on the truthiness of a boolean value.
void MacroAssembler::branchTestBooleanTruthy(bool truthy,
                                             const ValueOperand& value,
                                             Label* label) {
  Condition c = testBooleanTruthy(truthy, value);
  B(label, c);
}
1534
// branchTestString: branch when the operand's type is (or is not, per
// |cond|) String. Overloads forward to the shared template implementation.
void MacroAssembler::branchTestString(Condition cond, Register tag,
                                      Label* label) {
  branchTestStringImpl(cond, tag, label);
}

void MacroAssembler::branchTestString(Condition cond, const Address& address,
                                      Label* label) {
  branchTestStringImpl(cond, address, label);
}

void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestStringImpl(cond, address, label);
}

void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestStringImpl(cond, value, label);
}

// Shared implementation: testString computes the machine condition.
template <typename T>
void MacroAssembler::branchTestStringImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testString(cond, t);
  B(label, c);
}

// Branch on the truthiness of a string value.
void MacroAssembler::branchTestStringTruthy(bool truthy,
                                            const ValueOperand& value,
                                            Label* label) {
  Condition c = testStringTruthy(truthy, value);
  B(label, c);
}
1568
// Branch to |label| depending (per |cond|) on whether the operand's type tag
// is Symbol. Register overload: |tag| already holds an extracted type tag.
void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
                                      Label* label) {
  branchTestSymbolImpl(cond, tag, label);
}

// As above, testing a Value stored at |address|.
void MacroAssembler::branchTestSymbol(Condition cond, const Address& address,
                                      Label* label) {
  branchTestSymbolImpl(cond, address, label);
}

// As above, testing a Value stored at a base+index address.
void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestSymbolImpl(cond, address, label);
}

// As above, testing a Value held in a register.
void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestSymbolImpl(cond, value, label);
}

// Shared implementation: testSymbol() emits the comparison and returns the
// condition code to branch on.
template <typename T>
void MacroAssembler::branchTestSymbolImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testSymbol(cond, t);
  B(label, c);
}
1595
// Branch to |label| depending (per |cond|) on whether the operand's type tag
// is BigInt. Register overload: |tag| already holds an extracted type tag.
void MacroAssembler::branchTestBigInt(Condition cond, Register tag,
                                      Label* label) {
  branchTestBigIntImpl(cond, tag, label);
}

// As above, testing a Value stored at |address|.
void MacroAssembler::branchTestBigInt(Condition cond, const Address& address,
                                      Label* label) {
  branchTestBigIntImpl(cond, address, label);
}

// As above, testing a Value stored at a base+index address.
void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestBigIntImpl(cond, address, label);
}

// As above, testing a Value held in a register.
void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestBigIntImpl(cond, value, label);
}

// Shared implementation: testBigInt() emits the comparison and returns the
// condition code to branch on.
template <typename T>
void MacroAssembler::branchTestBigIntImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testBigInt(cond, t);
  B(label, c);
}
1622
branchTestBigIntTruthy(bool truthy,const ValueOperand & value,Label * label)1623 void MacroAssembler::branchTestBigIntTruthy(bool truthy,
1624 const ValueOperand& value,
1625 Label* label) {
1626 Condition c = testBigIntTruthy(truthy, value);
1627 B(label, c);
1628 }
1629
// Branch to |label| depending (per |cond|) on whether the operand's type tag
// is Null. Register overload: |tag| already holds an extracted type tag.
void MacroAssembler::branchTestNull(Condition cond, Register tag,
                                    Label* label) {
  branchTestNullImpl(cond, tag, label);
}

// As above, testing a Value stored at |address|.
void MacroAssembler::branchTestNull(Condition cond, const Address& address,
                                    Label* label) {
  branchTestNullImpl(cond, address, label);
}

// As above, testing a Value stored at a base+index address.
void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
                                    Label* label) {
  branchTestNullImpl(cond, address, label);
}

// As above, testing a Value held in a register.
void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
                                    Label* label) {
  branchTestNullImpl(cond, value, label);
}

// Shared implementation: testNull() emits the comparison and returns the
// condition code to branch on.
template <typename T>
void MacroAssembler::branchTestNullImpl(Condition cond, const T& t,
                                        Label* label) {
  Condition c = testNull(cond, t);
  B(label, c);
}
1656
// Branch to |label| depending (per |cond|) on whether the operand's type tag
// is Object. Register overload: |tag| already holds an extracted type tag.
void MacroAssembler::branchTestObject(Condition cond, Register tag,
                                      Label* label) {
  branchTestObjectImpl(cond, tag, label);
}

// As above, testing a Value stored at |address|.
void MacroAssembler::branchTestObject(Condition cond, const Address& address,
                                      Label* label) {
  branchTestObjectImpl(cond, address, label);
}

// As above, testing a Value stored at a base+index address.
void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestObjectImpl(cond, address, label);
}

// As above, testing a Value held in a register.
void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestObjectImpl(cond, value, label);
}

// Shared implementation: testObject() emits the comparison and returns the
// condition code to branch on.
template <typename T>
void MacroAssembler::branchTestObjectImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testObject(cond, t);
  B(label, c);
}
1683
// Branch to |label| depending (per |cond|) on whether the Value at |address|
// holds any GC-managed thing. Note: no Register-tag overload exists for this
// predicate.
void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
                                       Label* label) {
  branchTestGCThingImpl(cond, address, label);
}

// As above, testing a Value stored at a base+index address.
void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
                                       Label* label) {
  branchTestGCThingImpl(cond, address, label);
}

// As above, testing a Value held in a register.
void MacroAssembler::branchTestGCThing(Condition cond,
                                       const ValueOperand& value,
                                       Label* label) {
  branchTestGCThingImpl(cond, value, label);
}

// Shared implementation: testGCThing() emits the comparison and returns the
// condition code to branch on.
template <typename T>
void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& src,
                                           Label* label) {
  Condition c = testGCThing(cond, src);
  B(label, c);
}
1706
// Branch to |label| depending (per |cond|) on whether the operand is a
// primitive (i.e. not an object). Register overload: |tag| already holds an
// extracted type tag.
void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
                                         Label* label) {
  branchTestPrimitiveImpl(cond, tag, label);
}

// As above, testing a Value held in a register.
void MacroAssembler::branchTestPrimitive(Condition cond,
                                         const ValueOperand& value,
                                         Label* label) {
  branchTestPrimitiveImpl(cond, value, label);
}

// Shared implementation: testPrimitive() emits the comparison and returns the
// condition code to branch on.
template <typename T>
void MacroAssembler::branchTestPrimitiveImpl(Condition cond, const T& t,
                                             Label* label) {
  Condition c = testPrimitive(cond, t);
  B(label, c);
}
1724
// Branch to |label| depending (per |cond|) on whether the operand's type tag
// is Magic. Register overload: |tag| already holds an extracted type tag.
void MacroAssembler::branchTestMagic(Condition cond, Register tag,
                                     Label* label) {
  branchTestMagicImpl(cond, tag, label);
}

// As above, testing a Value stored at |address|.
void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
                                     Label* label) {
  branchTestMagicImpl(cond, address, label);
}

// As above, testing a Value stored at a base+index address.
void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
                                     Label* label) {
  branchTestMagicImpl(cond, address, label);
}

// As above, for a Value in a register. The label type is templated so both
// Label* and wasm out-of-line labels can be used.
template <class L>
void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
                                     L label) {
  branchTestMagicImpl(cond, value, label);
}

// Shared implementation: testMagic() emits the comparison and returns the
// condition code to branch on.
template <typename T, class L>
void MacroAssembler::branchTestMagicImpl(Condition cond, const T& t, L label) {
  Condition c = testMagic(cond, t);
  B(label, c);
}
1751
branchTestMagic(Condition cond,const Address & valaddr,JSWhyMagic why,Label * label)1752 void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
1753 JSWhyMagic why, Label* label) {
1754 uint64_t magic = MagicValue(why).asRawBits();
1755 cmpPtr(valaddr, ImmWord(magic));
1756 B(label, cond);
1757 }
1758
// Branch to |label| if the boxed Value at |lhs| is (per |cond|) bit-identical
// to the Value in |rhs|.
void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
                                     const ValueOperand& rhs, Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
  // On ARM64 a boxed Value fits in one register (rhs.valueReg()), so a single
  // pointer-width compare suffices.
  branchPtr(cond, lhs, rhs.valueReg(), label);
}
1764
branchToComputedAddress(const BaseIndex & addr)1765 void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
1766 vixl::UseScratchRegisterScope temps(&this->asVIXL());
1767 const ARMRegister scratch64 = temps.AcquireX();
1768 loadPtr(addr, scratch64.asUnsized());
1769 Br(scratch64);
1770 }
1771
// Compare |lhs| and |rhs| (32-bit) and, if |cond| holds, move |src| into
// |dest|; otherwise |dest| keeps its old value (CSel selects between them).
void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
                                 Register src, Register dest) {
  cmp32(lhs, rhs);
  Csel(ARMRegister(dest, 32), ARMRegister(src, 32), ARMRegister(dest, 32),
       cond);
}

// Memory-operand variant; not yet implemented for ARM64.
void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
                                 const Address& rhs, Register src,
                                 Register dest) {
  MOZ_CRASH("NYI");
}
1784
// Compare |lhs| and |rhs| (pointer-width) and, if |cond| holds, move |src|
// into |dest|; otherwise |dest| keeps its old value.
void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
                                   Register src, Register dest) {
  cmpPtr(lhs, rhs);
  Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
       cond);
}

// Memory-operand variant; not yet implemented for ARM64.
void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
                                   const Address& rhs, Register src,
                                   Register dest) {
  MOZ_CRASH("NYI");
}
1797
// Conditional 32-bit load after a 32-bit compare; not yet implemented for
// ARM64 (both overloads).
void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
                                 const Address& rhs, const Address& src,
                                 Register dest) {
  MOZ_CRASH("NYI");
}

void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Register rhs,
                                 const Address& src, Register dest) {
  MOZ_CRASH("NYI");
}
1808
// Compare |lhs| against immediate |rhs| (32-bit) and, if |cond| holds, move
// the 64-bit |src| into |dest|; otherwise |dest| keeps its old value.
void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
                                  Register src, Register dest) {
  cmp32(lhs, rhs);
  Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
       cond);
}
1815
cmp32LoadPtr(Condition cond,const Address & lhs,Imm32 rhs,const Address & src,Register dest)1816 void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
1817 const Address& src, Register dest) {
1818 // ARM64 does not support conditional loads, so we use a branch with a CSel
1819 // (to prevent Spectre attacks).
1820 vixl::UseScratchRegisterScope temps(this);
1821 const ARMRegister scratch64 = temps.AcquireX();
1822
1823 // Can't use branch32() here, because it may select Cbz/Cbnz which don't
1824 // affect condition flags.
1825 Label done;
1826 cmp32(lhs, rhs);
1827 B(&done, Assembler::InvertCondition(cond));
1828
1829 loadPtr(src, scratch64.asUnsized());
1830 Csel(ARMRegister(dest, 64), scratch64, ARMRegister(dest, 64), cond);
1831 bind(&done);
1832 }
1833
test32LoadPtr(Condition cond,const Address & addr,Imm32 mask,const Address & src,Register dest)1834 void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
1835 Imm32 mask, const Address& src,
1836 Register dest) {
1837 MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
1838
1839 // ARM64 does not support conditional loads, so we use a branch with a CSel
1840 // (to prevent Spectre attacks).
1841 vixl::UseScratchRegisterScope temps(this);
1842 const ARMRegister scratch64 = temps.AcquireX();
1843 Label done;
1844 branchTest32(Assembler::InvertCondition(cond), addr, mask, &done);
1845 loadPtr(src, scratch64.asUnsized());
1846 Csel(ARMRegister(dest, 64), scratch64, ARMRegister(dest, 64), cond);
1847 bind(&done);
1848 }
1849
// Test the 32-bit value at |addr| against |mask|; if |cond| (Zero/NonZero)
// holds, move the 64-bit |src| into |dest|, else leave |dest| unchanged.
void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
                                   Imm32 mask, Register src, Register dest) {
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
  test32(addr, mask);
  Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
       cond);
}
1857
// Conditionally move |src| into |dest| based on the *already-set* condition
// flags (no compare is emitted here); |dest| is unchanged when |cond| fails.
void MacroAssembler::spectreMovePtr(Condition cond, Register src,
                                    Register dest) {
  Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
       cond);
}

// Conditionally zero |dest| based on the already-set flags: |dest| is kept
// when the inverted condition holds and replaced with xzr (zero) otherwise.
// The unnamed Register parameter is a scratch required by other backends and
// unused here (CSel needs no temporary).
void MacroAssembler::spectreZeroRegister(Condition cond, Register,
                                         Register dest) {
  Csel(ARMRegister(dest, 64), ARMRegister(dest, 64), vixl::xzr,
       Assembler::InvertCondition(cond));
}
1869
// Unsigned 32-bit bounds check: branch to |failure| when |index| >= |length|
// (branch32 with BelowOrEqual on (length, index)). On the fall-through path,
// when spectre-index-masking is enabled, CSel zeroes |index| unless the
// Above condition (length > index) from the same compare holds, so a
// misspeculated out-of-bounds index collapses to 0. |maybeScratch| is needed
// by other backends and only asserted against here.
void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
                                          Register maybeScratch,
                                          Label* failure) {
  MOZ_ASSERT(length != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  branch32(Assembler::BelowOrEqual, length, index, failure);

  if (JitOptions.spectreIndexMasking) {
    Csel(ARMRegister(index, 32), ARMRegister(index, 32), vixl::wzr,
         Assembler::Above);
  }
}

// As above, with the length loaded from memory.
void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
                                          Register maybeScratch,
                                          Label* failure) {
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  branch32(Assembler::BelowOrEqual, length, index, failure);

  if (JitOptions.spectreIndexMasking) {
    Csel(ARMRegister(index, 32), ARMRegister(index, 32), vixl::wzr,
         Assembler::Above);
  }
}
1898
// Pointer-width (64-bit) variant of spectreBoundsCheck32: branch to |failure|
// when |index| >= |length| unsigned, and with spectre-index-masking enabled,
// zero |index| on the fall-through path unless length > index.
void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
                                           Register maybeScratch,
                                           Label* failure) {
  MOZ_ASSERT(length != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  branchPtr(Assembler::BelowOrEqual, length, index, failure);

  if (JitOptions.spectreIndexMasking) {
    Csel(ARMRegister(index, 64), ARMRegister(index, 64), vixl::xzr,
         Assembler::Above);
  }
}

// As above, with the length loaded from memory.
void MacroAssembler::spectreBoundsCheckPtr(Register index,
                                           const Address& length,
                                           Register maybeScratch,
                                           Label* failure) {
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  branchPtr(Assembler::BelowOrEqual, length, index, failure);

  if (JitOptions.spectreIndexMasking) {
    Csel(ARMRegister(index, 64), ARMRegister(index, 64), vixl::xzr,
         Assembler::Above);
  }
}
1928
1929 // ========================================================================
1930 // Memory access primitives.
// Store a double to memory bit-exactly (no NaN canonicalization).
void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
                                                const Address& dest) {
  Str(ARMFPRegister(src, 64), toMemOperand(dest));
}
// As above, for a base+index destination.
void MacroAssembler::storeUncanonicalizedDouble(FloatRegister src,
                                                const BaseIndex& dest) {
  doBaseIndex(ARMFPRegister(src, 64), dest, vixl::STR_d);
}

// Store a float32 to memory bit-exactly (no NaN canonicalization).
void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
                                                 const Address& addr) {
  Str(ARMFPRegister(src, 32), toMemOperand(addr));
}
// As above, for a base+index destination.
void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src,
                                                 const BaseIndex& addr) {
  doBaseIndex(ARMFPRegister(src, 32), addr, vixl::STR_s);
}
1948
// Emit a memory barrier for |barrier|, or nothing when no barrier bits are
// set. Always uses a full DMB over the inner-shareable domain.
void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
  // Bug 1715494: Discriminating barriers such as StoreStore are hard to reason
  // about. Execute the full barrier for everything that requires a barrier.
  if (barrier) {
    Dmb(vixl::InnerShareable, vixl::BarrierAll);
  }
}
1956
1957 // ===============================================================
1958 // Clamping functions.
1959
// Clamp the signed 32-bit integer in |reg| to [0, 255], in place.
void MacroAssembler::clampIntToUint8(Register reg) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  const ARMRegister reg32(reg, 32);
  MOZ_ASSERT(!scratch32.Is(reg32));

  // Compare reg against its own low byte (zero-extended): equal when reg is
  // already in [0, 255]; reg compares signed-less-than when negative and
  // signed-greater-than when above 255. Both CSels consume these same flags.
  Cmp(reg32, Operand(reg32, vixl::UXTB));
  // Negative values -> 0.
  Csel(reg32, reg32, vixl::wzr, Assembler::GreaterThanOrEqual);
  // Values above 255 -> 255.
  Mov(scratch32, Operand(0xff));
  Csel(reg32, reg32, scratch32, Assembler::LessThanOrEqual);
}
1971
// Unbox a pointer-typed Value (object/string/symbol/bigint) from |src| into
// |dest|, branching to |fail| when the tag does not match |type|.
void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
                                      JSValueType type, Label* fail) {
  MOZ_ASSERT(type == JSVAL_TYPE_OBJECT || type == JSVAL_TYPE_STRING ||
             type == JSVAL_TYPE_SYMBOL || type == JSVAL_TYPE_BIGINT);
  // dest := src XOR mask
  // fail if dest >> JSVAL_TAG_SHIFT != 0
  const ARMRegister src64(src.valueReg(), 64);
  const ARMRegister dest64(dest, 64);
  // XOR-ing with the expected shifted tag clears the tag bits iff they match;
  // any bits remaining above JSVAL_TAG_SHIFT mean a tag mismatch.
  Eor(dest64, src64, Operand(JSVAL_TYPE_TO_SHIFTED_TAG(type)));
  Cmp(vixl::xzr, Operand(dest64, vixl::LSR, JSVAL_TAG_SHIFT));
  j(Assembler::NotEqual, fail);
}

// As above, first loading the boxed Value at |src| into |dest|.
void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
                                      JSValueType type, Label* fail) {
  loadValue(src, ValueOperand(dest));
  fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
}

// As above, for a base+index source.
void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
                                      JSValueType type, Label* fail) {
  loadValue(src, ValueOperand(dest));
  fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
}
1996
1997 //}}} check_macroassembler_style
1998
1999 // Wasm SIMD
2000
// Helpers mapping a FloatRegister that holds a SIMD128 value onto the vixl
// register views for the various lane arrangements.
static inline ARMFPRegister SimdReg(FloatRegister r) {
  MOZ_ASSERT(r.isSimd128());
  return ARMFPRegister(r, 128);
}

// 16 x 8-bit lanes (full width).
static inline ARMFPRegister Simd16B(FloatRegister r) {
  return SimdReg(r).V16B();
}

// 8 x 8-bit lanes (low half).
static inline ARMFPRegister Simd8B(FloatRegister r) { return SimdReg(r).V8B(); }

// 8 x 16-bit lanes.
static inline ARMFPRegister Simd8H(FloatRegister r) { return SimdReg(r).V8H(); }

// 4 x 16-bit lanes (low half).
static inline ARMFPRegister Simd4H(FloatRegister r) { return SimdReg(r).V4H(); }

// 4 x 32-bit lanes.
static inline ARMFPRegister Simd4S(FloatRegister r) { return SimdReg(r).V4S(); }

// 2 x 32-bit lanes (low half).
static inline ARMFPRegister Simd2S(FloatRegister r) { return SimdReg(r).V2S(); }

// 2 x 64-bit lanes.
static inline ARMFPRegister Simd2D(FloatRegister r) { return SimdReg(r).V2D(); }

// 1 x 64-bit lane.
static inline ARMFPRegister Simd1D(FloatRegister r) { return SimdReg(r).V1D(); }

// Whole register as a scalar Q (128-bit) view.
static inline ARMFPRegister SimdQ(FloatRegister r) { return SimdReg(r).Q(); }
2025
2026 //{{{ check_macroassembler_style
2027
2028 // Moves
2029
// SIMD register-to-register move; elided when src and dest are the same.
void MacroAssembler::moveSimd128(FloatRegister src, FloatRegister dest) {
  if (src != dest) {
    Mov(SimdReg(dest), SimdReg(src));
  }
}

// Zero all 128 bits of |dest|.
void MacroAssembler::zeroSimd128(FloatRegister dest) {
  // Unclear what the best code is here, xor is just what we do on x86.
  // Alternatives would be `FMOV dest.4s, #0` and `FMOV dest, xzr`.
  Eor(Simd16B(dest), Simd16B(dest), Simd16B(dest));
}

// Materialize the 128-bit constant |v| in |dest| via Movi, passing it as two
// 64-bit halves.
void MacroAssembler::loadConstantSimd128(const SimdConstant& v,
                                         FloatRegister dest) {
  // Movi does not yet generate good code for many cases, bug 1664397.
  SimdConstant c = SimdConstant::CreateX2((const int64_t*)v.bytes());
  Movi(SimdReg(dest), c.asInt64x2()[1], c.asInt64x2()[0]);
}
2048
2049 // Splat
2050
// Splat (broadcast) operations: Dup replicates a scalar register or a single
// source lane into every lane of |dest|.

// Broadcast the low byte of |src| to all 16 byte lanes.
void MacroAssembler::splatX16(Register src, FloatRegister dest) {
  Dup(Simd16B(dest), ARMRegister(src, 32));
}

// Broadcast byte lane |srcLane| of |src| to all 16 byte lanes.
void MacroAssembler::splatX16(uint32_t srcLane, FloatRegister src,
                              FloatRegister dest) {
  Dup(Simd16B(dest), Simd16B(src), srcLane);
}

// Broadcast the low 16 bits of |src| to all 8 halfword lanes.
void MacroAssembler::splatX8(Register src, FloatRegister dest) {
  Dup(Simd8H(dest), ARMRegister(src, 32));
}

// Broadcast halfword lane |srcLane| of |src| to all 8 halfword lanes.
void MacroAssembler::splatX8(uint32_t srcLane, FloatRegister src,
                             FloatRegister dest) {
  Dup(Simd8H(dest), Simd8H(src), srcLane);
}

// Broadcast the 32-bit |src| to all 4 word lanes.
void MacroAssembler::splatX4(Register src, FloatRegister dest) {
  Dup(Simd4S(dest), ARMRegister(src, 32));
}

// Broadcast lane 0 of the float32 |src| to all 4 word lanes.
void MacroAssembler::splatX4(FloatRegister src, FloatRegister dest) {
  Dup(Simd4S(dest), ARMFPRegister(src), 0);
}

// Broadcast the 64-bit |src| to both doubleword lanes.
void MacroAssembler::splatX2(Register64 src, FloatRegister dest) {
  Dup(Simd2D(dest), ARMRegister(src.reg, 64));
}

// Broadcast lane 0 of the double |src| to both doubleword lanes.
void MacroAssembler::splatX2(FloatRegister src, FloatRegister dest) {
  Dup(Simd2D(dest), ARMFPRegister(src), 0);
}
2084
2085 // Extract lane as scalar. Float extraction does not canonicalize the value.
2086
// Extract byte lane |lane| of |src| into |dest_|, sign-extended: Umov pulls
// out the 32-bit lane containing the byte, then Sbfx sign-extends the byte
// within it.
void MacroAssembler::extractLaneInt8x16(uint32_t lane, FloatRegister src,
                                        Register dest_) {
  MOZ_ASSERT(lane < 16);
  ARMRegister dest(dest_, 32);
  Umov(dest, Simd4S(src), lane / 4);
  Sbfx(dest, dest, (lane % 4) * 8, 8);
}

// As above, zero-extended (Ubfx instead of Sbfx).
void MacroAssembler::unsignedExtractLaneInt8x16(uint32_t lane,
                                                FloatRegister src,
                                                Register dest_) {
  MOZ_ASSERT(lane < 16);
  ARMRegister dest(dest_, 32);
  Umov(dest, Simd4S(src), lane / 4);
  Ubfx(dest, dest, (lane % 4) * 8, 8);
}

// Extract halfword lane |lane| of |src| into |dest_|, sign-extended via the
// containing 32-bit lane.
void MacroAssembler::extractLaneInt16x8(uint32_t lane, FloatRegister src,
                                        Register dest_) {
  MOZ_ASSERT(lane < 8);
  ARMRegister dest(dest_, 32);
  Umov(dest, Simd4S(src), lane / 2);
  Sbfx(dest, dest, (lane % 2) * 16, 16);
}

// As above, zero-extended.
void MacroAssembler::unsignedExtractLaneInt16x8(uint32_t lane,
                                                FloatRegister src,
                                                Register dest_) {
  MOZ_ASSERT(lane < 8);
  ARMRegister dest(dest_, 32);
  Umov(dest, Simd4S(src), lane / 2);
  Ubfx(dest, dest, (lane % 2) * 16, 16);
}

// Extract 32-bit lane |lane| of |src| into |dest_|.
void MacroAssembler::extractLaneInt32x4(uint32_t lane, FloatRegister src,
                                        Register dest_) {
  MOZ_ASSERT(lane < 4);
  ARMRegister dest(dest_, 32);
  Umov(dest, Simd4S(src), lane);
}

// Extract 64-bit lane |lane| of |src| into |dest_|.
void MacroAssembler::extractLaneInt64x2(uint32_t lane, FloatRegister src,
                                        Register64 dest_) {
  MOZ_ASSERT(lane < 2);
  ARMRegister dest(dest_.reg, 64);
  Umov(dest, Simd2D(src), lane);
}

// Extract float32 lane |lane| of |src| into lane 0 of |dest| (element-to-
// element move; the value is not canonicalized).
void MacroAssembler::extractLaneFloat32x4(uint32_t lane, FloatRegister src,
                                          FloatRegister dest) {
  MOZ_ASSERT(lane < 4);
  Mov(ARMFPRegister(dest).V4S(), 0, Simd4S(src), lane);
}

// Extract double lane |lane| of |src| into lane 0 of |dest|.
void MacroAssembler::extractLaneFloat64x2(uint32_t lane, FloatRegister src,
                                          FloatRegister dest) {
  MOZ_ASSERT(lane < 2);
  Mov(ARMFPRegister(dest).V2D(), 0, Simd2D(src), lane);
}
2146
2147 // Replace lane value
2148
// Replace-lane operations: Mov inserts the new value into lane |lane| of
// |lhsDest|, leaving all other lanes unchanged.

// Replace byte lane |lane| with the low byte of |rhs|.
void MacroAssembler::replaceLaneInt8x16(unsigned lane, Register rhs,
                                        FloatRegister lhsDest) {
  MOZ_ASSERT(lane < 16);
  Mov(Simd16B(lhsDest), lane, ARMRegister(rhs, 32));
}

// Replace halfword lane |lane| with the low 16 bits of |rhs|.
void MacroAssembler::replaceLaneInt16x8(unsigned lane, Register rhs,
                                        FloatRegister lhsDest) {
  MOZ_ASSERT(lane < 8);
  Mov(Simd8H(lhsDest), lane, ARMRegister(rhs, 32));
}

// Replace 32-bit lane |lane| with |rhs|.
void MacroAssembler::replaceLaneInt32x4(unsigned lane, Register rhs,
                                        FloatRegister lhsDest) {
  MOZ_ASSERT(lane < 4);
  Mov(Simd4S(lhsDest), lane, ARMRegister(rhs, 32));
}

// Replace 64-bit lane |lane| with |rhs|.
void MacroAssembler::replaceLaneInt64x2(unsigned lane, Register64 rhs,
                                        FloatRegister lhsDest) {
  MOZ_ASSERT(lane < 2);
  Mov(Simd2D(lhsDest), lane, ARMRegister(rhs.reg, 64));
}

// Replace float32 lane |lane| with lane 0 of |rhs|.
void MacroAssembler::replaceLaneFloat32x4(unsigned lane, FloatRegister rhs,
                                          FloatRegister lhsDest) {
  MOZ_ASSERT(lane < 4);
  Mov(Simd4S(lhsDest), lane, ARMFPRegister(rhs).V4S(), 0);
}

// Replace double lane |lane| with lane 0 of |rhs|.
void MacroAssembler::replaceLaneFloat64x2(unsigned lane, FloatRegister rhs,
                                          FloatRegister lhsDest) {
  MOZ_ASSERT(lane < 2);
  Mov(Simd2D(lhsDest), lane, ARMFPRegister(rhs).V2D(), 0);
}
2184
2185 // Shuffle - blend and permute with immediate indices, and its many
2186 // specializations. Lane values other than those mentioned are illegal.
2187
2188 // lane values 0..31
// Byte shuffle with immediate lane indices 0..31: index < 16 selects from
// |lhs|, index >= 16 selects lane (index - 16) from |rhs|.
void MacroAssembler::shuffleInt8x16(const uint8_t lanes[16], FloatRegister lhs,
                                    FloatRegister rhs, FloatRegister dest) {
  // The general solution generates ho-hum code. Realistic programs will use
  // patterns that can be specialized, and this will be much better. That will
  // be handled by bug 1656834, so don't worry about it here.

  // Set scratch to the lanevalue when it selects from lhs or ~lanevalue when it
  // selects from rhs.
  ScratchSimd128Scope scratch(*this);
  int8_t idx[16];

  if (lhs == rhs) {
    // Both inputs are the same register: one Tbl pass with all indices
    // reduced to 0..15 suffices.
    for (unsigned i = 0; i < 16; i++) {
      idx[i] = lanes[i] < 16 ? lanes[i] : (lanes[i] - 16);
    }
    loadConstantSimd128(SimdConstant::CreateX16(idx), scratch);
    Tbl(Simd16B(dest), Simd16B(lhs), Simd16B(scratch));
    return;
  }

  // Two passes: Tbl writes the lhs-selected lanes (out-of-range ~index
  // entries produce 0), then the index table is inverted and Tbx fills in the
  // rhs-selected lanes, leaving the already-written lanes untouched for
  // out-of-range indices. If rhs is also dest, swap the roles of the inputs
  // (and the index encoding) so rhs is read last.
  if (rhs != dest) {
    for (unsigned i = 0; i < 16; i++) {
      idx[i] = lanes[i] < 16 ? lanes[i] : ~(lanes[i] - 16);
    }
  } else {
    MOZ_ASSERT(lhs != dest);
    for (unsigned i = 0; i < 16; i++) {
      idx[i] = lanes[i] < 16 ? ~lanes[i] : (lanes[i] - 16);
    }
    std::swap(lhs, rhs);
  }
  loadConstantSimd128(SimdConstant::CreateX16(idx), scratch);
  Tbl(Simd16B(dest), Simd16B(lhs), Simd16B(scratch));
  Not(Simd16B(scratch), Simd16B(scratch));
  Tbx(Simd16B(dest), Simd16B(rhs), Simd16B(scratch));
}
2225
// Two-operand form: |lhsDest| is both the left input and the destination.
void MacroAssembler::shuffleInt8x16(const uint8_t lanes[16], FloatRegister rhs,
                                    FloatRegister lhsDest) {
  shuffleInt8x16(lanes, lhsDest, rhs, lhsDest);
}
2230
// Per-byte blend: lane i of |dest| comes from |lhs| when lanes[i] == 0 and
// from |rhs| otherwise.
void MacroAssembler::blendInt8x16(const uint8_t lanes[16], FloatRegister lhs,
                                  FloatRegister rhs, FloatRegister dest) {
  ScratchSimd128Scope scratch(*this);
  int8_t lanes_[16];

  if (rhs == dest) {
    // rhs already occupies dest: Tbx overwrites only the lhs-selected lanes.
    // Indices >= 16 are out of range for the one-register table, so Tbx
    // leaves those (rhs) lanes untouched.
    for (unsigned i = 0; i < 16; i++) {
      lanes_[i] = lanes[i] == 0 ? i : 16 + i;
    }
    loadConstantSimd128(SimdConstant::CreateX16(lanes_), scratch);
    Tbx(Simd16B(dest), Simd16B(lhs), Simd16B(scratch));
    return;
  }

  // Otherwise start from lhs and pull the rhs-selected lanes in with Tbx.
  moveSimd128(lhs, dest);
  for (unsigned i = 0; i < 16; i++) {
    lanes_[i] = lanes[i] != 0 ? i : 16 + i;
  }
  loadConstantSimd128(SimdConstant::CreateX16(lanes_), scratch);
  Tbx(Simd16B(dest), Simd16B(rhs), Simd16B(scratch));
}

// 16-bit lane blend, reinterpreting the lane selectors as bytes.
// NOTE(review): this assumes each uint16_t selector's two bytes agree
// (zero/non-zero per byte) so the byte-wise blend matches the halfword-wise
// intent — verify against callers.
void MacroAssembler::blendInt16x8(const uint16_t lanes[8], FloatRegister lhs,
                                  FloatRegister rhs, FloatRegister dest) {
  static_assert(sizeof(const uint16_t /*lanes*/[8]) == sizeof(uint8_t[16]));
  blendInt8x16(reinterpret_cast<const uint8_t*>(lanes), lhs, rhs, dest);
}
2258
// Interleave (zip) operations: Zip2 interleaves lanes from the high halves of
// the two inputs, Zip1 from the low halves.

void MacroAssembler::interleaveHighInt16x8(FloatRegister lhs, FloatRegister rhs,
                                           FloatRegister dest) {
  Zip2(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}

void MacroAssembler::interleaveHighInt32x4(FloatRegister lhs, FloatRegister rhs,
                                           FloatRegister dest) {
  Zip2(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::interleaveHighInt64x2(FloatRegister lhs, FloatRegister rhs,
                                           FloatRegister dest) {
  Zip2(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
}

void MacroAssembler::interleaveHighInt8x16(FloatRegister lhs, FloatRegister rhs,
                                           FloatRegister dest) {
  Zip2(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::interleaveLowInt16x8(FloatRegister lhs, FloatRegister rhs,
                                          FloatRegister dest) {
  Zip1(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}

void MacroAssembler::interleaveLowInt32x4(FloatRegister lhs, FloatRegister rhs,
                                          FloatRegister dest) {
  Zip1(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::interleaveLowInt64x2(FloatRegister lhs, FloatRegister rhs,
                                          FloatRegister dest) {
  Zip1(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
}

void MacroAssembler::interleaveLowInt8x16(FloatRegister lhs, FloatRegister rhs,
                                          FloatRegister dest) {
  Zip1(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}
2298
// Permute with constant lane indices, implemented as a TBL byte-table lookup
// through a constant index vector materialized in the SIMD scratch register.

void MacroAssembler::permuteInt8x16(const uint8_t lanes[16], FloatRegister src,
                                    FloatRegister dest) {
  ScratchSimd128Scope scratch(*this);
  // The byte lane indices are already in TBL form; load them directly.
  loadConstantSimd128(SimdConstant::CreateX16((const int8_t*)lanes), scratch);
  Tbl(Simd16B(dest), Simd16B(src), Simd16B(scratch));
}
2305
void MacroAssembler::permuteInt16x8(const uint16_t lanes[8], FloatRegister src,
                                    FloatRegister dest) {
  // Lane indices must select within the single 8-lane source vector.
  MOZ_ASSERT(lanes[0] < 8 && lanes[1] < 8 && lanes[2] < 8 && lanes[3] < 8 &&
             lanes[4] < 8 && lanes[5] < 8 && lanes[6] < 8 && lanes[7] < 8);
  // Expand each 16-bit lane index into its two constituent byte indices
  // (2*lane, 2*lane+1) so the permutation can be done with byte-wise TBL.
  const int8_t lanes_[16] = {
      (int8_t)(lanes[0] << 1), (int8_t)((lanes[0] << 1) + 1),
      (int8_t)(lanes[1] << 1), (int8_t)((lanes[1] << 1) + 1),
      (int8_t)(lanes[2] << 1), (int8_t)((lanes[2] << 1) + 1),
      (int8_t)(lanes[3] << 1), (int8_t)((lanes[3] << 1) + 1),
      (int8_t)(lanes[4] << 1), (int8_t)((lanes[4] << 1) + 1),
      (int8_t)(lanes[5] << 1), (int8_t)((lanes[5] << 1) + 1),
      (int8_t)(lanes[6] << 1), (int8_t)((lanes[6] << 1) + 1),
      (int8_t)(lanes[7] << 1), (int8_t)((lanes[7] << 1) + 1),
  };
  ScratchSimd128Scope scratch(*this);
  loadConstantSimd128(SimdConstant::CreateX16(lanes_), scratch);
  Tbl(Simd16B(dest), Simd16B(src), Simd16B(scratch));
}
2324
permuteInt32x4(const uint32_t lanes[4],FloatRegister src,FloatRegister dest)2325 void MacroAssembler::permuteInt32x4(const uint32_t lanes[4], FloatRegister src,
2326 FloatRegister dest) {
2327 ScratchSimd128Scope scratch(*this);
2328 const int8_t lanes_[16] = {
2329 (int8_t)(lanes[0] << 2), (int8_t)((lanes[0] << 2) + 1),
2330 (int8_t)((lanes[0] << 2) + 2), (int8_t)((lanes[0] << 2) + 3),
2331 (int8_t)(lanes[1] << 2), (int8_t)((lanes[1] << 2) + 1),
2332 (int8_t)((lanes[1] << 2) + 2), (int8_t)((lanes[1] << 2) + 3),
2333 (int8_t)(lanes[2] << 2), (int8_t)((lanes[2] << 2) + 1),
2334 (int8_t)((lanes[2] << 2) + 2), (int8_t)((lanes[2] << 2) + 3),
2335 (int8_t)(lanes[3] << 2), (int8_t)((lanes[3] << 2) + 1),
2336 (int8_t)((lanes[3] << 2) + 2), (int8_t)((lanes[3] << 2) + 3),
2337 };
2338 loadConstantSimd128(SimdConstant::CreateX16(lanes_), scratch);
2339 Tbl(Simd16B(dest), Simd16B(src), Simd16B(scratch));
2340 }
2341
// Whole-vector byte rotates and shifts, all built on EXT, which extracts a
// 16-byte window from the concatenation of two vectors starting at a byte
// offset.

void MacroAssembler::rotateRightSimd128(FloatRegister src, FloatRegister dest,
                                        uint32_t shift) {
  // EXT over (src:src) at offset `shift` is a byte-wise rotate right.
  Ext(Simd16B(dest), Simd16B(src), Simd16B(src), shift);
}

void MacroAssembler::leftShiftSimd128(Imm32 count, FloatRegister src,
                                      FloatRegister dest) {
  MOZ_ASSERT(count.value < 16);
  // Shift in zero bytes from a zeroed scratch: take the window that ends
  // `count` bytes into src.  NOTE(review): EXT's immediate must be 0..15,
  // so this assumes count.value >= 1 as well — verify against callers.
  ScratchSimd128Scope scratch(*this);
  Movi(Simd16B(scratch), 0);
  Ext(Simd16B(dest), Simd16B(scratch), Simd16B(src), 16 - count.value);
}

void MacroAssembler::rightShiftSimd128(Imm32 count, FloatRegister src,
                                       FloatRegister dest) {
  MOZ_ASSERT(count.value < 16);
  // Shift in zero bytes from a zeroed scratch on the high side.
  ScratchSimd128Scope scratch(*this);
  Movi(Simd16B(scratch), 0);
  Ext(Simd16B(dest), Simd16B(src), Simd16B(scratch), count.value);
}

void MacroAssembler::concatAndRightShiftSimd128(FloatRegister lhs,
                                                FloatRegister rhs,
                                                FloatRegister dest,
                                                uint32_t shift) {
  MOZ_ASSERT(shift < 16);
  // Extract 16 bytes from (lhs:rhs) starting `shift` bytes into rhs.
  Ext(Simd16B(dest), Simd16B(rhs), Simd16B(lhs), shift);
}
2370
2371 // Swizzle - permute with variable indices. `rhs` holds the lanes parameter.
2372
// Swizzle: TBL selects bytes from the source using runtime indices in `rhs`;
// out-of-range indices yield zero bytes, matching wasm swizzle semantics —
// TODO confirm callers rely on TBL's zeroing for indices >= 16.

void MacroAssembler::swizzleInt8x16(FloatRegister rhs, FloatRegister lhsDest) {
  Tbl(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::swizzleInt8x16(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest) {
  Tbl(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}
2381
2382 // Integer Add
2383
// Lane-wise integer add (wrapping).  Each width has a two-operand
// (lhsDest op= rhs) and a three-operand (dest = lhs op rhs) form.

void MacroAssembler::addInt8x16(FloatRegister rhs, FloatRegister lhsDest) {
  Add(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::addInt8x16(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Add(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::addInt16x8(FloatRegister rhs, FloatRegister lhsDest) {
  Add(Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::addInt16x8(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Add(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}

void MacroAssembler::addInt32x4(FloatRegister rhs, FloatRegister lhsDest) {
  Add(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::addInt32x4(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Add(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::addInt64x2(FloatRegister rhs, FloatRegister lhsDest) {
  Add(Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
}

void MacroAssembler::addInt64x2(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Add(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
}
2419
2420 // Integer Subtract
2421
// Lane-wise integer subtract (wrapping).

void MacroAssembler::subInt8x16(FloatRegister rhs, FloatRegister lhsDest) {
  Sub(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::subInt8x16(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Sub(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::subInt16x8(FloatRegister rhs, FloatRegister lhsDest) {
  Sub(Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::subInt16x8(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Sub(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}

void MacroAssembler::subInt32x4(FloatRegister rhs, FloatRegister lhsDest) {
  Sub(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::subInt32x4(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Sub(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::subInt64x2(FloatRegister rhs, FloatRegister lhsDest) {
  Sub(Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
}

void MacroAssembler::subInt64x2(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Sub(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
}
2457
2458 // Integer Multiply
2459
// Lane-wise integer multiply (low half of the product is kept).

void MacroAssembler::mulInt16x8(FloatRegister rhs, FloatRegister lhsDest) {
  Mul(Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::mulInt16x8(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Mul(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}

void MacroAssembler::mulInt32x4(FloatRegister rhs, FloatRegister lhsDest) {
  Mul(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::mulInt32x4(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Mul(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}
2477
// 64x64->64 lane-wise multiply.  There is no 2D MUL on NEON, so the product
// is synthesized from 32-bit partial products; temp1, temp2 and the SIMD
// scratch hold intermediates.  The exact instruction order matters: each
// step consumes results of the previous ones, and dest may alias lhs/rhs
// only after their values have been consumed — TODO confirm aliasing
// constraints against callers.
void MacroAssembler::mulInt64x2(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest, FloatRegister temp1,
                                FloatRegister temp2) {
  // As documented at https://chromium-review.googlesource.com/c/v8/v8/+/1781696
  // lhs = <D C> <B A>
  // rhs = <H G> <F E>
  // result = <(DG+CH)_low+CG_high CG_low> <(BE+AF)_low+AE_high AE_low>
  ScratchSimd128Scope scratch(*this);
  Rev64(Simd4S(temp2), Simd4S(lhs));                 // temp2 = <C D> <A B>
  Mul(Simd4S(temp2), Simd4S(temp2), Simd4S(rhs));    // temp2 = <CH DG> <AF BE>
  Xtn(Simd2S(temp1), Simd2D(rhs));                   // temp1 = <0 0> <G E>
  Addp(Simd4S(temp2), Simd4S(temp2), Simd4S(temp2)); // temp2 = <CH+DG AF+BE>..
  Xtn(Simd2S(scratch), Simd2D(lhs));                 // scratch = <0 0> <C A>
  Shll(Simd2D(dest), Simd2S(temp2), 32);             // dest = <(DG+CH)_low 0>
                                                     //        <(BE+AF)_low 0>
  // Accumulate the unsigned widening product of the low 32-bit halves.
  Umlal(Simd2D(dest), Simd2S(scratch), Simd2S(temp1));
}
2495
// Extended (widening) multiply, 8x16 -> 16x8.  SMULL/UMULL widen the low
// eight byte lanes; SMULL2/UMULL2 widen the high eight.

void MacroAssembler::extMulLowInt8x16(FloatRegister rhs,
                                      FloatRegister lhsDest) {
  Smull(Simd8H(lhsDest), Simd8B(lhsDest), Simd8B(rhs));
}

void MacroAssembler::extMulLowInt8x16(FloatRegister lhs, FloatRegister rhs,
                                      FloatRegister dest) {
  Smull(Simd8H(dest), Simd8B(lhs), Simd8B(rhs));
}

void MacroAssembler::extMulHighInt8x16(FloatRegister rhs,
                                       FloatRegister lhsDest) {
  Smull2(Simd8H(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::extMulHighInt8x16(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest) {
  Smull2(Simd8H(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::unsignedExtMulLowInt8x16(FloatRegister rhs,
                                              FloatRegister lhsDest) {
  Umull(Simd8H(lhsDest), Simd8B(lhsDest), Simd8B(rhs));
}

void MacroAssembler::unsignedExtMulLowInt8x16(FloatRegister lhs,
                                              FloatRegister rhs,
                                              FloatRegister dest) {
  Umull(Simd8H(dest), Simd8B(lhs), Simd8B(rhs));
}

void MacroAssembler::unsignedExtMulHighInt8x16(FloatRegister rhs,
                                               FloatRegister lhsDest) {
  Umull2(Simd8H(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::unsignedExtMulHighInt8x16(FloatRegister lhs,
                                               FloatRegister rhs,
                                               FloatRegister dest) {
  Umull2(Simd8H(dest), Simd16B(lhs), Simd16B(rhs));
}
2537
// Extended (widening) multiply, 16x8 -> 32x4.

void MacroAssembler::extMulLowInt16x8(FloatRegister rhs,
                                      FloatRegister lhsDest) {
  Smull(Simd4S(lhsDest), Simd4H(lhsDest), Simd4H(rhs));
}

void MacroAssembler::extMulLowInt16x8(FloatRegister lhs, FloatRegister rhs,
                                      FloatRegister dest) {
  Smull(Simd4S(dest), Simd4H(lhs), Simd4H(rhs));
}

void MacroAssembler::extMulHighInt16x8(FloatRegister rhs,
                                       FloatRegister lhsDest) {
  Smull2(Simd4S(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::extMulHighInt16x8(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest) {
  Smull2(Simd4S(dest), Simd8H(lhs), Simd8H(rhs));
}

void MacroAssembler::unsignedExtMulLowInt16x8(FloatRegister rhs,
                                              FloatRegister lhsDest) {
  Umull(Simd4S(lhsDest), Simd4H(lhsDest), Simd4H(rhs));
}

void MacroAssembler::unsignedExtMulLowInt16x8(FloatRegister lhs,
                                              FloatRegister rhs,
                                              FloatRegister dest) {
  Umull(Simd4S(dest), Simd4H(lhs), Simd4H(rhs));
}

void MacroAssembler::unsignedExtMulHighInt16x8(FloatRegister rhs,
                                               FloatRegister lhsDest) {
  Umull2(Simd4S(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::unsignedExtMulHighInt16x8(FloatRegister lhs,
                                               FloatRegister rhs,
                                               FloatRegister dest) {
  Umull2(Simd4S(dest), Simd8H(lhs), Simd8H(rhs));
}
2579
// Extended (widening) multiply, 32x4 -> 64x2.

void MacroAssembler::extMulLowInt32x4(FloatRegister rhs,
                                      FloatRegister lhsDest) {
  Smull(Simd2D(lhsDest), Simd2S(lhsDest), Simd2S(rhs));
}

void MacroAssembler::extMulLowInt32x4(FloatRegister lhs, FloatRegister rhs,
                                      FloatRegister dest) {
  Smull(Simd2D(dest), Simd2S(lhs), Simd2S(rhs));
}

void MacroAssembler::extMulHighInt32x4(FloatRegister rhs,
                                       FloatRegister lhsDest) {
  Smull2(Simd2D(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::extMulHighInt32x4(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest) {
  Smull2(Simd2D(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::unsignedExtMulLowInt32x4(FloatRegister rhs,
                                              FloatRegister lhsDest) {
  Umull(Simd2D(lhsDest), Simd2S(lhsDest), Simd2S(rhs));
}

void MacroAssembler::unsignedExtMulLowInt32x4(FloatRegister lhs,
                                              FloatRegister rhs,
                                              FloatRegister dest) {
  Umull(Simd2D(dest), Simd2S(lhs), Simd2S(rhs));
}

void MacroAssembler::unsignedExtMulHighInt32x4(FloatRegister rhs,
                                               FloatRegister lhsDest) {
  Umull2(Simd2D(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::unsignedExtMulHighInt32x4(FloatRegister lhs,
                                               FloatRegister rhs,
                                               FloatRegister dest) {
  Umull2(Simd2D(dest), Simd4S(lhs), Simd4S(rhs));
}
2621
// Q15 rounding saturating multiply, implemented directly by SQRDMLH-family
// instruction SQRDMULH.

void MacroAssembler::q15MulrSatInt16x8(FloatRegister rhs,
                                       FloatRegister lhsDest) {
  Sqrdmulh(Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::q15MulrSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest) {
  Sqrdmulh(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}
2631
2632 // Integer Negate
2633
// Lane-wise integer negate.

void MacroAssembler::negInt8x16(FloatRegister src, FloatRegister dest) {
  Neg(Simd16B(dest), Simd16B(src));
}

void MacroAssembler::negInt16x8(FloatRegister src, FloatRegister dest) {
  Neg(Simd8H(dest), Simd8H(src));
}

void MacroAssembler::negInt32x4(FloatRegister src, FloatRegister dest) {
  Neg(Simd4S(dest), Simd4S(src));
}

void MacroAssembler::negInt64x2(FloatRegister src, FloatRegister dest) {
  Neg(Simd2D(dest), Simd2D(src));
}
2649
2650 // Saturating integer add
2651
// Saturating integer add: SQADD (signed) / UQADD (unsigned).

void MacroAssembler::addSatInt8x16(FloatRegister rhs, FloatRegister lhsDest) {
  Sqadd(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::addSatInt8x16(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest) {
  Sqadd(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::unsignedAddSatInt8x16(FloatRegister rhs,
                                           FloatRegister lhsDest) {
  Uqadd(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::unsignedAddSatInt8x16(FloatRegister lhs, FloatRegister rhs,
                                           FloatRegister dest) {
  Uqadd(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::addSatInt16x8(FloatRegister rhs, FloatRegister lhsDest) {
  Sqadd(Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::addSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest) {
  Sqadd(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}

void MacroAssembler::unsignedAddSatInt16x8(FloatRegister rhs,
                                           FloatRegister lhsDest) {
  Uqadd(Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::unsignedAddSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                                           FloatRegister dest) {
  Uqadd(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}
2689
2690 // Saturating integer subtract
2691
// Saturating integer subtract: SQSUB (signed) / UQSUB (unsigned).

void MacroAssembler::subSatInt8x16(FloatRegister rhs, FloatRegister lhsDest) {
  Sqsub(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::subSatInt8x16(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest) {
  Sqsub(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::unsignedSubSatInt8x16(FloatRegister rhs,
                                           FloatRegister lhsDest) {
  Uqsub(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::unsignedSubSatInt8x16(FloatRegister lhs, FloatRegister rhs,
                                           FloatRegister dest) {
  Uqsub(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::subSatInt16x8(FloatRegister rhs, FloatRegister lhsDest) {
  Sqsub(Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::subSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest) {
  Sqsub(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}

void MacroAssembler::unsignedSubSatInt16x8(FloatRegister rhs,
                                           FloatRegister lhsDest) {
  Uqsub(Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::unsignedSubSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                                           FloatRegister dest) {
  Uqsub(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}
2729
2730 // Lane-wise integer minimum
2731
// Lane-wise integer minimum: SMIN (signed) / UMIN (unsigned).

void MacroAssembler::minInt8x16(FloatRegister rhs, FloatRegister lhsDest) {
  Smin(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::minInt8x16(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Smin(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::unsignedMinInt8x16(FloatRegister rhs,
                                        FloatRegister lhsDest) {
  Umin(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::unsignedMinInt8x16(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest) {
  Umin(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::minInt16x8(FloatRegister rhs, FloatRegister lhsDest) {
  Smin(Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::minInt16x8(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Smin(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}

void MacroAssembler::unsignedMinInt16x8(FloatRegister rhs,
                                        FloatRegister lhsDest) {
  Umin(Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::unsignedMinInt16x8(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest) {
  Umin(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}

void MacroAssembler::minInt32x4(FloatRegister rhs, FloatRegister lhsDest) {
  Smin(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::minInt32x4(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Smin(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::unsignedMinInt32x4(FloatRegister rhs,
                                        FloatRegister lhsDest) {
  Umin(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::unsignedMinInt32x4(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest) {
  Umin(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}
2788
2789 // Lane-wise integer maximum
2790
// Lane-wise integer maximum: SMAX (signed) / UMAX (unsigned).

void MacroAssembler::maxInt8x16(FloatRegister rhs, FloatRegister lhsDest) {
  Smax(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::maxInt8x16(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Smax(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::unsignedMaxInt8x16(FloatRegister rhs,
                                        FloatRegister lhsDest) {
  Umax(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::unsignedMaxInt8x16(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest) {
  Umax(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::maxInt16x8(FloatRegister rhs, FloatRegister lhsDest) {
  Smax(Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::maxInt16x8(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Smax(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}

void MacroAssembler::unsignedMaxInt16x8(FloatRegister rhs,
                                        FloatRegister lhsDest) {
  Umax(Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::unsignedMaxInt16x8(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest) {
  Umax(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}

void MacroAssembler::maxInt32x4(FloatRegister rhs, FloatRegister lhsDest) {
  Smax(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::maxInt32x4(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest) {
  Smax(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::unsignedMaxInt32x4(FloatRegister rhs,
                                        FloatRegister lhsDest) {
  Umax(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::unsignedMaxInt32x4(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest) {
  Umax(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}
2847
2848 // Lane-wise integer rounding average
2849
// Lane-wise unsigned rounding average, via URHADD.

void MacroAssembler::unsignedAverageInt8x16(FloatRegister rhs,
                                            FloatRegister lhsDest) {
  Urhadd(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::unsignedAverageInt8x16(FloatRegister lhs,
                                            FloatRegister rhs,
                                            FloatRegister dest) {
  Urhadd(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::unsignedAverageInt16x8(FloatRegister rhs,
                                            FloatRegister lhsDest) {
  Urhadd(Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::unsignedAverageInt16x8(FloatRegister lhs,
                                            FloatRegister rhs,
                                            FloatRegister dest) {
  Urhadd(Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}
2871
2872 // Lane-wise integer absolute value
2873
// Lane-wise integer absolute value.

void MacroAssembler::absInt8x16(FloatRegister src, FloatRegister dest) {
  Abs(Simd16B(dest), Simd16B(src));
}

void MacroAssembler::absInt16x8(FloatRegister src, FloatRegister dest) {
  Abs(Simd8H(dest), Simd8H(src));
}

void MacroAssembler::absInt32x4(FloatRegister src, FloatRegister dest) {
  Abs(Simd4S(dest), Simd4S(src));
}

void MacroAssembler::absInt64x2(FloatRegister src, FloatRegister dest) {
  Abs(Simd2D(dest), Simd2D(src));
}
2889
2890 // Left shift by variable scalar
2891
// Left shift by scalar.  The variable-count forms mask the count to
// (lane-bits - 1) and broadcast it, then use SSHL (shift-by-register); the
// immediate forms emit SHL directly, relying on the caller to pass an
// in-range count.

void MacroAssembler::leftShiftInt8x16(FloatRegister lhs, Register rhs,
                                      FloatRegister dest) {
  vixl::UseScratchRegisterScope temps(this);
  ARMRegister scratch = temps.AcquireW();
  // Mask the count to 0..7.
  And(scratch, ARMRegister(rhs, 32), 7);
  ScratchSimd128Scope vscratch(*this);
  Dup(Simd16B(vscratch), scratch);
  Sshl(Simd16B(dest), Simd16B(lhs), Simd16B(vscratch));
}

void MacroAssembler::leftShiftInt8x16(Imm32 count, FloatRegister src,
                                      FloatRegister dest) {
  Shl(Simd16B(dest), Simd16B(src), count.value);
}

void MacroAssembler::leftShiftInt16x8(FloatRegister lhs, Register rhs,
                                      FloatRegister dest) {
  vixl::UseScratchRegisterScope temps(this);
  ARMRegister scratch = temps.AcquireW();
  // Mask the count to 0..15.
  And(scratch, ARMRegister(rhs, 32), 15);
  ScratchSimd128Scope vscratch(*this);
  Dup(Simd8H(vscratch), scratch);
  Sshl(Simd8H(dest), Simd8H(lhs), Simd8H(vscratch));
}

void MacroAssembler::leftShiftInt16x8(Imm32 count, FloatRegister src,
                                      FloatRegister dest) {
  Shl(Simd8H(dest), Simd8H(src), count.value);
}

void MacroAssembler::leftShiftInt32x4(FloatRegister lhs, Register rhs,
                                      FloatRegister dest) {
  vixl::UseScratchRegisterScope temps(this);
  ARMRegister scratch = temps.AcquireW();
  // Mask the count to 0..31.
  And(scratch, ARMRegister(rhs, 32), 31);
  ScratchSimd128Scope vscratch(*this);
  Dup(Simd4S(vscratch), scratch);
  Sshl(Simd4S(dest), Simd4S(lhs), Simd4S(vscratch));
}

void MacroAssembler::leftShiftInt32x4(Imm32 count, FloatRegister src,
                                      FloatRegister dest) {
  Shl(Simd4S(dest), Simd4S(src), count.value);
}

void MacroAssembler::leftShiftInt64x2(FloatRegister lhs, Register rhs,
                                      FloatRegister dest) {
  vixl::UseScratchRegisterScope temps(this);
  ARMRegister scratch = temps.AcquireX();
  // Mask the count to 0..63; a 64-bit scratch is needed for the 2D dup.
  And(scratch, ARMRegister(rhs, 64), 63);
  ScratchSimd128Scope vscratch(*this);
  Dup(Simd2D(vscratch), scratch);
  Sshl(Simd2D(dest), Simd2D(lhs), Simd2D(vscratch));
}

void MacroAssembler::leftShiftInt64x2(Imm32 count, FloatRegister src,
                                      FloatRegister dest) {
  Shl(Simd2D(dest), Simd2D(src), count.value);
}
2951
2952 // Right shift by variable scalar
2953
// Right shift by scalar.  Variable-count forms delegate to the shared
// MacroAssemblerCompat helper, parameterized on signedness; immediate forms
// emit SSHR (arithmetic) or USHR (logical) directly.

void MacroAssembler::rightShiftInt8x16(FloatRegister lhs, Register rhs,
                                       FloatRegister dest) {
  MacroAssemblerCompat::rightShiftInt8x16(lhs, rhs, dest,
                                          /* isUnsigned */ false);
}

void MacroAssembler::rightShiftInt8x16(Imm32 count, FloatRegister src,
                                       FloatRegister dest) {
  Sshr(Simd16B(dest), Simd16B(src), count.value);
}

void MacroAssembler::unsignedRightShiftInt8x16(FloatRegister lhs, Register rhs,
                                               FloatRegister dest) {
  MacroAssemblerCompat::rightShiftInt8x16(lhs, rhs, dest,
                                          /* isUnsigned */ true);
}

void MacroAssembler::unsignedRightShiftInt8x16(Imm32 count, FloatRegister src,
                                               FloatRegister dest) {
  Ushr(Simd16B(dest), Simd16B(src), count.value);
}

void MacroAssembler::rightShiftInt16x8(FloatRegister lhs, Register rhs,
                                       FloatRegister dest) {
  MacroAssemblerCompat::rightShiftInt16x8(lhs, rhs, dest,
                                          /* isUnsigned */ false);
}

void MacroAssembler::rightShiftInt16x8(Imm32 count, FloatRegister src,
                                       FloatRegister dest) {
  Sshr(Simd8H(dest), Simd8H(src), count.value);
}

void MacroAssembler::unsignedRightShiftInt16x8(FloatRegister lhs, Register rhs,
                                               FloatRegister dest) {
  MacroAssemblerCompat::rightShiftInt16x8(lhs, rhs, dest,
                                          /* isUnsigned */ true);
}

void MacroAssembler::unsignedRightShiftInt16x8(Imm32 count, FloatRegister src,
                                               FloatRegister dest) {
  Ushr(Simd8H(dest), Simd8H(src), count.value);
}

void MacroAssembler::rightShiftInt32x4(FloatRegister lhs, Register rhs,
                                       FloatRegister dest) {
  MacroAssemblerCompat::rightShiftInt32x4(lhs, rhs, dest,
                                          /* isUnsigned */ false);
}
3003
rightShiftInt32x4(Imm32 count,FloatRegister src,FloatRegister dest)3004 void MacroAssembler::rightShiftInt32x4(Imm32 count, FloatRegister src,
3005 FloatRegister dest) {
3006 Sshr(Simd4S(dest), Simd4S(src), count.value);
3007 }
3008
unsignedRightShiftInt32x4(FloatRegister lhs,Register rhs,FloatRegister dest)3009 void MacroAssembler::unsignedRightShiftInt32x4(FloatRegister lhs, Register rhs,
3010 FloatRegister dest) {
3011 MacroAssemblerCompat::rightShiftInt32x4(lhs, rhs, dest,
3012 /* isUnsigned */ true);
3013 }
3014
unsignedRightShiftInt32x4(Imm32 count,FloatRegister src,FloatRegister dest)3015 void MacroAssembler::unsignedRightShiftInt32x4(Imm32 count, FloatRegister src,
3016 FloatRegister dest) {
3017 Ushr(Simd4S(dest), Simd4S(src), count.value);
3018 }
3019
rightShiftInt64x2(FloatRegister lhs,Register rhs,FloatRegister dest)3020 void MacroAssembler::rightShiftInt64x2(FloatRegister lhs, Register rhs,
3021 FloatRegister dest) {
3022 MacroAssemblerCompat::rightShiftInt64x2(lhs, rhs, dest,
3023 /* isUnsigned */ false);
3024 }
3025
rightShiftInt64x2(Imm32 count,FloatRegister src,FloatRegister dest)3026 void MacroAssembler::rightShiftInt64x2(Imm32 count, FloatRegister src,
3027 FloatRegister dest) {
3028 Sshr(Simd2D(dest), Simd2D(src), count.value);
3029 }
3030
unsignedRightShiftInt64x2(FloatRegister lhs,Register rhs,FloatRegister dest)3031 void MacroAssembler::unsignedRightShiftInt64x2(FloatRegister lhs, Register rhs,
3032 FloatRegister dest) {
3033 MacroAssemblerCompat::rightShiftInt64x2(lhs, rhs, dest,
3034 /* isUnsigned */ true);
3035 }
3036
unsignedRightShiftInt64x2(Imm32 count,FloatRegister src,FloatRegister dest)3037 void MacroAssembler::unsignedRightShiftInt64x2(Imm32 count, FloatRegister src,
3038 FloatRegister dest) {
3039 Ushr(Simd2D(dest), Simd2D(src), count.value);
3040 }
3041
// Bitwise and, or, xor, not
//
// Lane shape is irrelevant for bitwise operations, so 16B is used throughout.

// lhsDest = lhsDest & rhs.
void MacroAssembler::bitwiseAndSimd128(FloatRegister rhs,
                                       FloatRegister lhsDest) {
  And(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

// dest = lhs & rhs.
void MacroAssembler::bitwiseAndSimd128(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest) {
  And(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

// lhsDest = lhsDest | rhs.
void MacroAssembler::bitwiseOrSimd128(FloatRegister rhs,
                                      FloatRegister lhsDest) {
  Orr(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

// dest = lhs | rhs.
void MacroAssembler::bitwiseOrSimd128(FloatRegister lhs, FloatRegister rhs,
                                      FloatRegister dest) {
  Orr(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

// lhsDest = lhsDest ^ rhs.
void MacroAssembler::bitwiseXorSimd128(FloatRegister rhs,
                                       FloatRegister lhsDest) {
  Eor(Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

// dest = lhs ^ rhs.
void MacroAssembler::bitwiseXorSimd128(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest) {
  Eor(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

// dest = ~src.
void MacroAssembler::bitwiseNotSimd128(FloatRegister src, FloatRegister dest) {
  Not(Simd16B(dest), Simd16B(src));
}

// dest = lhs & ~rhs (BIC maps directly onto this).
void MacroAssembler::bitwiseAndNotSimd128(FloatRegister lhs, FloatRegister rhs,
                                          FloatRegister dest) {
  Bic(Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

// Bitwise AND with complement: dest = ~lhs & rhs, note this is not what Wasm
// wants but what the x86 hardware offers. Hence the name. Since arm64 has
// dest = lhs & ~rhs we just swap operands.

void MacroAssembler::bitwiseNotAndSimd128(FloatRegister rhs,
                                          FloatRegister lhsDest) {
  Bic(Simd16B(lhsDest), Simd16B(rhs), Simd16B(lhsDest));
}
3091
// Bitwise select

// For each bit: maskDest = maskDest ? onTrue : onFalse.  BSL uses the
// destination register as the selection mask, so the mask is consumed.
void MacroAssembler::bitwiseSelectSimd128(FloatRegister onTrue,
                                          FloatRegister onFalse,
                                          FloatRegister maskDest) {
  Bsl(Simd16B(maskDest), Simd16B(onTrue), Simd16B(onFalse));
}

// Population count

// Per-byte popcount: dest.b[i] = number of set bits in src.b[i].
void MacroAssembler::popcntInt8x16(FloatRegister src, FloatRegister dest) {
  Cnt(Simd16B(dest), Simd16B(src));
}
3105
3106 // Any lane true, ie, any bit set
3107
anyTrueSimd128(FloatRegister src,Register dest_)3108 void MacroAssembler::anyTrueSimd128(FloatRegister src, Register dest_) {
3109 ScratchSimd128Scope scratch_(*this);
3110 ARMFPRegister scratch(Simd1D(scratch_));
3111 ARMRegister dest(dest_, 64);
3112 Addp(scratch, Simd2D(src));
3113 Umov(dest, scratch, 0);
3114 Cmp(dest, Operand(0));
3115 Cset(dest, Assembler::NonZero);
3116 }
3117
// All lanes true
//
// The pattern, at each lane width: CMEQ-with-zero sets a lane to all-ones iff
// the source lane was zero; ADDP folds the two 64-bit halves of the mask; the
// folded value is zero iff no source lane was zero.  (The ADDP fold is safe
// on these mask values: two nonzero 0x00/0xFF byte patterns cannot wrap
// around and sum to zero.)  CSET with the Zero condition then produces the
// all-true flag.

void MacroAssembler::allTrueInt8x16(FloatRegister src, Register dest_) {
  ScratchSimd128Scope scratch(*this);
  ARMRegister dest(dest_, 64);
  Cmeq(Simd16B(scratch), Simd16B(src), 0);
  Addp(Simd1D(scratch), Simd2D(scratch));
  Umov(dest, Simd1D(scratch), 0);
  Cmp(dest, Operand(0));
  Cset(dest, Assembler::Zero);
}

void MacroAssembler::allTrueInt16x8(FloatRegister src, Register dest_) {
  ScratchSimd128Scope scratch(*this);
  ARMRegister dest(dest_, 64);
  Cmeq(Simd8H(scratch), Simd8H(src), 0);
  Addp(Simd1D(scratch), Simd2D(scratch));
  Umov(dest, Simd1D(scratch), 0);
  Cmp(dest, Operand(0));
  Cset(dest, Assembler::Zero);
}

void MacroAssembler::allTrueInt32x4(FloatRegister src, Register dest_) {
  ScratchSimd128Scope scratch(*this);
  ARMRegister dest(dest_, 64);
  Cmeq(Simd4S(scratch), Simd4S(src), 0);
  Addp(Simd1D(scratch), Simd2D(scratch));
  Umov(dest, Simd1D(scratch), 0);
  Cmp(dest, Operand(0));
  Cset(dest, Assembler::Zero);
}

void MacroAssembler::allTrueInt64x2(FloatRegister src, Register dest_) {
  ScratchSimd128Scope scratch(*this);
  ARMRegister dest(dest_, 64);
  Cmeq(Simd2D(scratch), Simd2D(src), 0);
  Addp(Simd1D(scratch), Simd2D(scratch));
  Umov(dest, Simd1D(scratch), 0);
  Cmp(dest, Operand(0));
  Cset(dest, Assembler::Zero);
}
3159
// Bitmask, ie extract and compress high bits of all lanes
//
// There's no direct support for this on the chip. These implementations come
// from the writeup that added the instruction to the SIMD instruction set.
// Generally, shifting and masking is used to isolate the sign bit of each
// element in the right position, then a horizontal add creates the result. For
// 8-bit elements an intermediate step is needed to assemble the bits of the
// upper and lower 8 bytes into 8 halfwords.

void MacroAssembler::bitmaskInt8x16(FloatRegister src, Register dest,
                                    FloatRegister temp) {
  ScratchSimd128Scope scratch(*this);
  // Power-of-two masks: byte i of each 8-byte half contributes bit i of that
  // half's result byte.
  int8_t values[] = {1, 2, 4, 8, 16, 32, 64, -128,
                     1, 2, 4, 8, 16, 32, 64, -128};
  loadConstantSimd128(SimdConstant::CreateX16(values), temp);
  // Smear each byte's sign bit through the byte, then keep exactly one mask
  // bit per byte.
  Sshr(Simd16B(scratch), Simd16B(src), 7);
  And(Simd16B(scratch), Simd16B(scratch), Simd16B(temp));
  // Rotate so the halves swap, then zip so each halfword pairs the low-half
  // byte (low 8 result bits) with the high-half byte (high 8 result bits).
  Ext(Simd16B(temp), Simd16B(scratch), Simd16B(scratch), 8);
  Zip1(Simd16B(temp), Simd16B(scratch), Simd16B(temp));
  // Horizontal add of the 8 halfwords yields the 16-bit bitmask.
  Addv(ARMFPRegister(temp, 16), Simd8H(temp));
  Mov(ARMRegister(dest, 32), Simd8H(temp), 0);
}

void MacroAssembler::bitmaskInt16x8(FloatRegister src, Register dest,
                                    FloatRegister temp) {
  ScratchSimd128Scope scratch(*this);
  // Lane i contributes bit i of the result.
  int16_t values[] = {1, 2, 4, 8, 16, 32, 64, 128};
  loadConstantSimd128(SimdConstant::CreateX8(values), temp);
  // Smear sign bits, mask to one bit per lane, then horizontal add.
  Sshr(Simd8H(scratch), Simd8H(src), 15);
  And(Simd16B(scratch), Simd16B(scratch), Simd16B(temp));
  Addv(ARMFPRegister(scratch, 16), Simd8H(scratch));
  Mov(ARMRegister(dest, 32), Simd8H(scratch), 0);
}

void MacroAssembler::bitmaskInt32x4(FloatRegister src, Register dest,
                                    FloatRegister temp) {
  ScratchSimd128Scope scratch(*this);
  // Lane i contributes bit i of the result.
  int32_t values[] = {1, 2, 4, 8};
  loadConstantSimd128(SimdConstant::CreateX4(values), temp);
  Sshr(Simd4S(scratch), Simd4S(src), 31);
  And(Simd16B(scratch), Simd16B(scratch), Simd16B(temp));
  Addv(ARMFPRegister(scratch, 32), Simd4S(scratch));
  Mov(ARMRegister(dest, 32), Simd4S(scratch), 0);
}

void MacroAssembler::bitmaskInt64x2(FloatRegister src, Register dest,
                                    FloatRegister temp) {
  // Saturating-narrow the two 64-bit lanes to 32 bits; saturation preserves
  // each lane's sign bit.
  Sqxtn(Simd2S(temp), Simd2D(src));
  // Isolate each sign bit as bit 0 of its 32-bit lane, so bits 0 and 32 of
  // the doubleword hold the two lane signs.
  Ushr(Simd2S(temp), Simd2S(temp), 31);
  // 64-bit shift-right-accumulate folds bit 32 down into bit 1.
  Usra(ARMFPRegister(temp, 64), ARMFPRegister(temp, 64), 31);
  Fmov(ARMRegister(dest, 32), ARMFPRegister(temp, 32));
}
3212
// Comparisons (integer and floating-point)
//
// All of these delegate to the compareSimd128Int/compareSimd128Float helpers,
// which select the instruction sequence for |cond|.  Each lane of the result
// is all-ones where the comparison holds and all-zeroes where it does not.

void MacroAssembler::compareInt8x16(Assembler::Condition cond,
                                    FloatRegister rhs, FloatRegister lhsDest) {
  compareSimd128Int(cond, Simd16B(lhsDest), Simd16B(lhsDest), Simd16B(rhs));
}

void MacroAssembler::compareInt8x16(Assembler::Condition cond,
                                    FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest) {
  compareSimd128Int(cond, Simd16B(dest), Simd16B(lhs), Simd16B(rhs));
}

void MacroAssembler::compareInt16x8(Assembler::Condition cond,
                                    FloatRegister rhs, FloatRegister lhsDest) {
  compareSimd128Int(cond, Simd8H(lhsDest), Simd8H(lhsDest), Simd8H(rhs));
}

void MacroAssembler::compareInt16x8(Assembler::Condition cond,
                                    FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest) {
  compareSimd128Int(cond, Simd8H(dest), Simd8H(lhs), Simd8H(rhs));
}

void MacroAssembler::compareInt32x4(Assembler::Condition cond,
                                    FloatRegister rhs, FloatRegister lhsDest) {
  compareSimd128Int(cond, Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::compareInt32x4(Assembler::Condition cond,
                                    FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest) {
  compareSimd128Int(cond, Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::compareInt64x2(Assembler::Condition cond,
                                    FloatRegister rhs, FloatRegister lhsDest) {
  compareSimd128Int(cond, Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
}

void MacroAssembler::compareInt64x2(Assembler::Condition cond,
                                    FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest) {
  compareSimd128Int(cond, Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
}

void MacroAssembler::compareFloat32x4(Assembler::Condition cond,
                                      FloatRegister rhs,
                                      FloatRegister lhsDest) {
  compareSimd128Float(cond, Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::compareFloat32x4(Assembler::Condition cond,
                                      FloatRegister lhs, FloatRegister rhs,
                                      FloatRegister dest) {
  compareSimd128Float(cond, Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::compareFloat64x2(Assembler::Condition cond,
                                      FloatRegister rhs,
                                      FloatRegister lhsDest) {
  compareSimd128Float(cond, Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
}

void MacroAssembler::compareFloat64x2(Assembler::Condition cond,
                                      FloatRegister lhs, FloatRegister rhs,
                                      FloatRegister dest) {
  compareSimd128Float(cond, Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
}
3282
// Load
//
// Plain 128-bit LDR/STR are used: ordinary AArch64 loads and stores support
// unaligned addresses, so no special handling is needed.

void MacroAssembler::loadUnalignedSimd128(const Address& src,
                                          FloatRegister dest) {
  Ldr(ARMFPRegister(dest, 128), toMemOperand(src));
}

void MacroAssembler::loadUnalignedSimd128(const BaseIndex& address,
                                          FloatRegister dest) {
  // Base+index addressing is synthesized by the doBaseIndex helper.
  doBaseIndex(ARMFPRegister(dest, 128), address, vixl::LDR_q);
}

// Store

void MacroAssembler::storeUnalignedSimd128(FloatRegister src,
                                           const Address& dest) {
  Str(ARMFPRegister(src, 128), toMemOperand(dest));
}

void MacroAssembler::storeUnalignedSimd128(FloatRegister src,
                                           const BaseIndex& dest) {
  doBaseIndex(ARMFPRegister(src, 128), dest, vixl::STR_q);
}
3306
// Floating point negation

// Negate each 32-bit float lane.
void MacroAssembler::negFloat32x4(FloatRegister src, FloatRegister dest) {
  Fneg(Simd4S(dest), Simd4S(src));
}

// Negate each 64-bit float lane.
void MacroAssembler::negFloat64x2(FloatRegister src, FloatRegister dest) {
  Fneg(Simd2D(dest), Simd2D(src));
}

// Floating point absolute value

// Absolute value of each 32-bit float lane.
void MacroAssembler::absFloat32x4(FloatRegister src, FloatRegister dest) {
  Fabs(Simd4S(dest), Simd4S(src));
}

// Absolute value of each 64-bit float lane.
void MacroAssembler::absFloat64x2(FloatRegister src, FloatRegister dest) {
  Fabs(Simd2D(dest), Simd2D(src));
}
3326
// NaN-propagating minimum
//
// FMIN/FMAX implement the IEEE semantics directly: a NaN in either lane
// propagates to the result lane.

void MacroAssembler::minFloat32x4(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest) {
  Fmin(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::minFloat32x4(FloatRegister rhs, FloatRegister lhsDest) {
  Fmin(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::minFloat64x2(FloatRegister rhs, FloatRegister lhsDest) {
  Fmin(Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
}

void MacroAssembler::minFloat64x2(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest) {
  Fmin(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
}

// NaN-propagating maximum

void MacroAssembler::maxFloat32x4(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest) {
  Fmax(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::maxFloat32x4(FloatRegister rhs, FloatRegister lhsDest) {
  Fmax(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::maxFloat64x2(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest) {
  Fmax(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
}

void MacroAssembler::maxFloat64x2(FloatRegister rhs, FloatRegister lhsDest) {
  Fmax(Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
}
3366
// Floating add
//
// Lane-wise IEEE arithmetic; each operation comes in an in-place (rhs,
// lhsDest) form and a three-operand (lhs, rhs, dest) form.

void MacroAssembler::addFloat32x4(FloatRegister rhs, FloatRegister lhsDest) {
  Fadd(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::addFloat32x4(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest) {
  Fadd(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::addFloat64x2(FloatRegister rhs, FloatRegister lhsDest) {
  Fadd(Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
}

void MacroAssembler::addFloat64x2(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest) {
  Fadd(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
}

// Floating subtract

void MacroAssembler::subFloat32x4(FloatRegister rhs, FloatRegister lhsDest) {
  Fsub(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::subFloat32x4(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest) {
  Fsub(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::subFloat64x2(FloatRegister rhs, FloatRegister lhsDest) {
  Fsub(Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
}

void MacroAssembler::subFloat64x2(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest) {
  Fsub(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
}

// Floating division

void MacroAssembler::divFloat32x4(FloatRegister rhs, FloatRegister lhsDest) {
  Fdiv(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::divFloat32x4(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest) {
  Fdiv(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::divFloat64x2(FloatRegister rhs, FloatRegister lhsDest) {
  Fdiv(Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
}

void MacroAssembler::divFloat64x2(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest) {
  Fdiv(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
}

// Floating Multiply

void MacroAssembler::mulFloat32x4(FloatRegister rhs, FloatRegister lhsDest) {
  Fmul(Simd4S(lhsDest), Simd4S(lhsDest), Simd4S(rhs));
}

void MacroAssembler::mulFloat32x4(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest) {
  Fmul(Simd4S(dest), Simd4S(lhs), Simd4S(rhs));
}

void MacroAssembler::mulFloat64x2(FloatRegister rhs, FloatRegister lhsDest) {
  Fmul(Simd2D(lhsDest), Simd2D(lhsDest), Simd2D(rhs));
}

void MacroAssembler::mulFloat64x2(FloatRegister lhs, FloatRegister rhs,
                                  FloatRegister dest) {
  Fmul(Simd2D(dest), Simd2D(lhs), Simd2D(rhs));
}
3446
// Pairwise add
//
// SADDLP/UADDLP add adjacent lane pairs and widen, halving the lane count and
// doubling the lane width.

// dest.h[i] = sext(src.b[2i]) + sext(src.b[2i+1]).
void MacroAssembler::extAddPairwiseInt8x16(FloatRegister src,
                                           FloatRegister dest) {
  Saddlp(Simd8H(dest), Simd16B(src));
}

// As above with zero-extension.
void MacroAssembler::unsignedExtAddPairwiseInt8x16(FloatRegister src,
                                                   FloatRegister dest) {
  Uaddlp(Simd8H(dest), Simd16B(src));
}

// dest.s[i] = sext(src.h[2i]) + sext(src.h[2i+1]).
void MacroAssembler::extAddPairwiseInt16x8(FloatRegister src,
                                           FloatRegister dest) {
  Saddlp(Simd4S(dest), Simd8H(src));
}

// As above with zero-extension.
void MacroAssembler::unsignedExtAddPairwiseInt16x8(FloatRegister src,
                                                   FloatRegister dest) {
  Uaddlp(Simd4S(dest), Simd8H(src));
}
3468
// Floating square root

// Lane-wise square root, 32-bit lanes.
void MacroAssembler::sqrtFloat32x4(FloatRegister src, FloatRegister dest) {
  Fsqrt(Simd4S(dest), Simd4S(src));
}

// Lane-wise square root, 64-bit lanes.
void MacroAssembler::sqrtFloat64x2(FloatRegister src, FloatRegister dest) {
  Fsqrt(Simd2D(dest), Simd2D(src));
}
3478
// Integer to floating point with rounding

// Convert each signed 32-bit lane to a 32-bit float.
void MacroAssembler::convertInt32x4ToFloat32x4(FloatRegister src,
                                               FloatRegister dest) {
  Scvtf(Simd4S(dest), Simd4S(src));
}

// Convert each unsigned 32-bit lane to a 32-bit float.
void MacroAssembler::unsignedConvertInt32x4ToFloat32x4(FloatRegister src,
                                                       FloatRegister dest) {
  Ucvtf(Simd4S(dest), Simd4S(src));
}

// Convert the low two signed 32-bit lanes to 64-bit floats: sign-extend with
// SSHLL #0, then convert in place.
void MacroAssembler::convertInt32x4ToFloat64x2(FloatRegister src,
                                               FloatRegister dest) {
  Sshll(Simd2D(dest), Simd2S(src), 0);
  Scvtf(Simd2D(dest), Simd2D(dest));
}

// As above for unsigned lanes, using zero-extension (USHLL #0).
void MacroAssembler::unsignedConvertInt32x4ToFloat64x2(FloatRegister src,
                                                       FloatRegister dest) {
  Ushll(Simd2D(dest), Simd2S(src), 0);
  Ucvtf(Simd2D(dest), Simd2D(dest));
}
3502
// Floating point to integer with saturation
//
// FCVTZS/FCVTZU truncate toward zero and saturate out-of-range values (NaN
// becomes zero).

void MacroAssembler::truncSatFloat32x4ToInt32x4(FloatRegister src,
                                                FloatRegister dest) {
  Fcvtzs(Simd4S(dest), Simd4S(src));
}

void MacroAssembler::unsignedTruncSatFloat32x4ToInt32x4(FloatRegister src,
                                                        FloatRegister dest) {
  Fcvtzu(Simd4S(dest), Simd4S(src));
}

// Convert the two 64-bit lanes to 64-bit ints, then saturating-narrow to the
// low two 32-bit lanes; writing the D-sized result zeroes the high lanes.
// |temp| is unused on this platform.
void MacroAssembler::truncSatFloat64x2ToInt32x4(FloatRegister src,
                                                FloatRegister dest,
                                                FloatRegister temp) {
  Fcvtzs(Simd2D(dest), Simd2D(src));
  Sqxtn(Simd2S(dest), Simd2D(dest));
}

// Unsigned variant of the above.  |temp| is unused on this platform.
void MacroAssembler::unsignedTruncSatFloat64x2ToInt32x4(FloatRegister src,
                                                        FloatRegister dest,
                                                        FloatRegister temp) {
  Fcvtzu(Simd2D(dest), Simd2D(src));
  Uqxtn(Simd2S(dest), Simd2D(dest));
}
3528
// Floating point narrowing

// Round each 64-bit lane to a 32-bit float in the low two lanes; writing the
// D-sized result zeroes the upper lanes.
void MacroAssembler::convertFloat64x2ToFloat32x4(FloatRegister src,
                                                 FloatRegister dest) {
  Fcvtn(Simd2S(dest), Simd2D(src));
}

// Floating point widening

// Convert the low two 32-bit float lanes to 64-bit floats (exact).
void MacroAssembler::convertFloat32x4ToFloat64x2(FloatRegister src,
                                                 FloatRegister dest) {
  Fcvtl(Simd2D(dest), Simd2S(src));
}
3542
// Integer to integer narrowing
//
// SQXTN (signed-saturating) / SQXTUN (signed-to-unsigned-saturating) narrow
// the first operand into the low half of the destination; the "2" variants
// fill the high half.  Because the destination's low half is written first,
// an |rhs| that aliases the destination must be copied aside.

void MacroAssembler::narrowInt16x8(FloatRegister rhs, FloatRegister lhsDest) {
  ScratchSimd128Scope scratch(*this);
  if (rhs == lhsDest) {
    // rhs would be clobbered by the first SQXTN; preserve it.
    Mov(scratch, SimdReg(rhs));
    rhs = scratch;
  }
  Sqxtn(Simd8B(lhsDest), Simd8H(lhsDest));
  Sqxtn2(Simd16B(lhsDest), Simd8H(rhs));
}

void MacroAssembler::narrowInt16x8(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest) {
  ScratchSimd128Scope scratch(*this);
  if (rhs == dest) {
    Mov(scratch, SimdReg(rhs));
    rhs = scratch;
  }
  Sqxtn(Simd8B(dest), Simd8H(lhs));
  Sqxtn2(Simd16B(dest), Simd8H(rhs));
}

void MacroAssembler::unsignedNarrowInt16x8(FloatRegister rhs,
                                           FloatRegister lhsDest) {
  ScratchSimd128Scope scratch(*this);
  if (rhs == lhsDest) {
    Mov(scratch, SimdReg(rhs));
    rhs = scratch;
  }
  Sqxtun(Simd8B(lhsDest), Simd8H(lhsDest));
  Sqxtun2(Simd16B(lhsDest), Simd8H(rhs));
}

void MacroAssembler::unsignedNarrowInt16x8(FloatRegister lhs, FloatRegister rhs,
                                           FloatRegister dest) {
  ScratchSimd128Scope scratch(*this);
  if (rhs == dest) {
    Mov(scratch, SimdReg(rhs));
    rhs = scratch;
  }
  Sqxtun(Simd8B(dest), Simd8H(lhs));
  Sqxtun2(Simd16B(dest), Simd8H(rhs));
}

void MacroAssembler::narrowInt32x4(FloatRegister rhs, FloatRegister lhsDest) {
  ScratchSimd128Scope scratch(*this);
  if (rhs == lhsDest) {
    Mov(scratch, SimdReg(rhs));
    rhs = scratch;
  }
  Sqxtn(Simd4H(lhsDest), Simd4S(lhsDest));
  Sqxtn2(Simd8H(lhsDest), Simd4S(rhs));
}

void MacroAssembler::narrowInt32x4(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest) {
  ScratchSimd128Scope scratch(*this);
  if (rhs == dest) {
    Mov(scratch, SimdReg(rhs));
    rhs = scratch;
  }
  Sqxtn(Simd4H(dest), Simd4S(lhs));
  Sqxtn2(Simd8H(dest), Simd4S(rhs));
}

void MacroAssembler::unsignedNarrowInt32x4(FloatRegister rhs,
                                           FloatRegister lhsDest) {
  ScratchSimd128Scope scratch(*this);
  if (rhs == lhsDest) {
    Mov(scratch, SimdReg(rhs));
    rhs = scratch;
  }
  Sqxtun(Simd4H(lhsDest), Simd4S(lhsDest));
  Sqxtun2(Simd8H(lhsDest), Simd4S(rhs));
}

void MacroAssembler::unsignedNarrowInt32x4(FloatRegister lhs, FloatRegister rhs,
                                           FloatRegister dest) {
  ScratchSimd128Scope scratch(*this);
  if (rhs == dest) {
    Mov(scratch, SimdReg(rhs));
    rhs = scratch;
  }
  Sqxtun(Simd4H(dest), Simd4S(lhs));
  Sqxtun2(Simd8H(dest), Simd4S(rhs));
}
3630
// Integer to integer widening
//
// SSHLL/USHLL with a zero shift sign-/zero-extend the low half of the source
// into double-width lanes; the "2" variants extend the high half.

void MacroAssembler::widenLowInt8x16(FloatRegister src, FloatRegister dest) {
  Sshll(Simd8H(dest), Simd8B(src), 0);
}

void MacroAssembler::widenHighInt8x16(FloatRegister src, FloatRegister dest) {
  Sshll2(Simd8H(dest), Simd16B(src), 0);
}

void MacroAssembler::unsignedWidenLowInt8x16(FloatRegister src,
                                             FloatRegister dest) {
  Ushll(Simd8H(dest), Simd8B(src), 0);
}

void MacroAssembler::unsignedWidenHighInt8x16(FloatRegister src,
                                              FloatRegister dest) {
  Ushll2(Simd8H(dest), Simd16B(src), 0);
}

void MacroAssembler::widenLowInt16x8(FloatRegister src, FloatRegister dest) {
  Sshll(Simd4S(dest), Simd4H(src), 0);
}

void MacroAssembler::widenHighInt16x8(FloatRegister src, FloatRegister dest) {
  Sshll2(Simd4S(dest), Simd8H(src), 0);
}

void MacroAssembler::unsignedWidenLowInt16x8(FloatRegister src,
                                             FloatRegister dest) {
  Ushll(Simd4S(dest), Simd4H(src), 0);
}

void MacroAssembler::unsignedWidenHighInt16x8(FloatRegister src,
                                              FloatRegister dest) {
  Ushll2(Simd4S(dest), Simd8H(src), 0);
}

void MacroAssembler::widenLowInt32x4(FloatRegister src, FloatRegister dest) {
  Sshll(Simd2D(dest), Simd2S(src), 0);
}

void MacroAssembler::unsignedWidenLowInt32x4(FloatRegister src,
                                             FloatRegister dest) {
  Ushll(Simd2D(dest), Simd2S(src), 0);
}

void MacroAssembler::widenHighInt32x4(FloatRegister src, FloatRegister dest) {
  Sshll2(Simd2D(dest), Simd4S(src), 0);
}

void MacroAssembler::unsignedWidenHighInt32x4(FloatRegister src,
                                              FloatRegister dest) {
  Ushll2(Simd2D(dest), Simd4S(src), 0);
}
3686
3687 // Compare-based minimum/maximum (experimental as of August, 2020)
3688 // https://github.com/WebAssembly/simd/pull/122
3689
pseudoMinFloat32x4(FloatRegister rhsOrRhsDest,FloatRegister lhsOrLhsDest)3690 void MacroAssembler::pseudoMinFloat32x4(FloatRegister rhsOrRhsDest,
3691 FloatRegister lhsOrLhsDest) {
3692 // Shut up the linter by using the same names as in the declaration, then
3693 // aliasing here.
3694 FloatRegister rhs = rhsOrRhsDest;
3695 FloatRegister lhsDest = lhsOrLhsDest;
3696 ScratchSimd128Scope scratch(*this);
3697 Fcmgt(Simd4S(scratch), Simd4S(lhsDest), Simd4S(rhs));
3698 Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhsDest));
3699 Mov(SimdReg(lhsDest), scratch);
3700 }
3701
void MacroAssembler::pseudoMinFloat32x4(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest) {
  // Wasm pseudo-min: dest = rhs < lhs ? rhs : lhs, selected lane-wise.
  // scratch gets all-ones in lanes where lhs > rhs; BSL then picks rhs in
  // those lanes and lhs elsewhere.  Going through the scratch register
  // keeps dest free to alias lhs or rhs.
  ScratchSimd128Scope scratch(*this);
  Fcmgt(Simd4S(scratch), Simd4S(lhs), Simd4S(rhs));
  Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhs));
  Mov(SimdReg(dest), scratch);
}
3709
pseudoMinFloat64x2(FloatRegister rhsOrRhsDest,FloatRegister lhsOrLhsDest)3710 void MacroAssembler::pseudoMinFloat64x2(FloatRegister rhsOrRhsDest,
3711 FloatRegister lhsOrLhsDest) {
3712 FloatRegister rhs = rhsOrRhsDest;
3713 FloatRegister lhsDest = lhsOrLhsDest;
3714 ScratchSimd128Scope scratch(*this);
3715 Fcmgt(Simd2D(scratch), Simd2D(lhsDest), Simd2D(rhs));
3716 Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhsDest));
3717 Mov(SimdReg(lhsDest), scratch);
3718 }
3719
void MacroAssembler::pseudoMinFloat64x2(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest) {
  // Wasm pseudo-min: dest = rhs < lhs ? rhs : lhs, selected lane-wise.
  // scratch is the per-lane mask from the compare; BSL picks rhs where the
  // mask is set and lhs elsewhere.
  ScratchSimd128Scope scratch(*this);
  Fcmgt(Simd2D(scratch), Simd2D(lhs), Simd2D(rhs));
  Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhs));
  Mov(SimdReg(dest), scratch);
}
3727
pseudoMaxFloat32x4(FloatRegister rhsOrRhsDest,FloatRegister lhsOrLhsDest)3728 void MacroAssembler::pseudoMaxFloat32x4(FloatRegister rhsOrRhsDest,
3729 FloatRegister lhsOrLhsDest) {
3730 FloatRegister rhs = rhsOrRhsDest;
3731 FloatRegister lhsDest = lhsOrLhsDest;
3732 ScratchSimd128Scope scratch(*this);
3733 Fcmgt(Simd4S(scratch), Simd4S(rhs), Simd4S(lhsDest));
3734 Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhsDest));
3735 Mov(SimdReg(lhsDest), scratch);
3736 }
3737
void MacroAssembler::pseudoMaxFloat32x4(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest) {
  // Wasm pseudo-max: dest = lhs < rhs ? rhs : lhs, selected lane-wise.
  // Note the compare operands are swapped relative to pseudo-min: the mask
  // is set where rhs > lhs, and BSL then picks rhs in those lanes.
  ScratchSimd128Scope scratch(*this);
  Fcmgt(Simd4S(scratch), Simd4S(rhs), Simd4S(lhs));
  Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhs));
  Mov(SimdReg(dest), scratch);
}
3745
pseudoMaxFloat64x2(FloatRegister rhsOrRhsDest,FloatRegister lhsOrLhsDest)3746 void MacroAssembler::pseudoMaxFloat64x2(FloatRegister rhsOrRhsDest,
3747 FloatRegister lhsOrLhsDest) {
3748 FloatRegister rhs = rhsOrRhsDest;
3749 FloatRegister lhsDest = lhsOrLhsDest;
3750 ScratchSimd128Scope scratch(*this);
3751 Fcmgt(Simd2D(scratch), Simd2D(rhs), Simd2D(lhsDest));
3752 Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhsDest));
3753 Mov(SimdReg(lhsDest), scratch);
3754 }
3755
void MacroAssembler::pseudoMaxFloat64x2(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest) {
  // Wasm pseudo-max: dest = lhs < rhs ? rhs : lhs, selected lane-wise.
  // The mask is set where rhs > lhs; BSL then picks rhs in those lanes and
  // lhs elsewhere.
  ScratchSimd128Scope scratch(*this);
  Fcmgt(Simd2D(scratch), Simd2D(rhs), Simd2D(lhs));
  Bsl(Simd16B(scratch), Simd16B(rhs), Simd16B(lhs));
  Mov(SimdReg(dest), scratch);
}
3763
3764 // Widening/pairwise integer dot product (experimental as of August, 2020)
3765 // https://github.com/WebAssembly/simd/pull/127
3766
void MacroAssembler::widenDotInt16x8(FloatRegister rhs, FloatRegister lhsDest) {
  // Two-operand form: lhsDest is both the lhs operand and the destination.
  widenDotInt16x8(lhsDest, rhs, lhsDest);
}
3770
void MacroAssembler::widenDotInt16x8(FloatRegister lhs, FloatRegister rhs,
                                     FloatRegister dest) {
  // i32x4 dot product of signed i16x8 operands: multiply corresponding
  // 16-bit lanes to 32-bit products, then add adjacent pairs of products.
  ScratchSimd128Scope scratch(*this);
  // Products of the low four lanes go to scratch.
  Smull(Simd4S(scratch), Simd4H(lhs), Simd4H(rhs));
  // Products of the high four lanes go to dest.  This may clobber lhs/rhs
  // (if dest aliases them), but both multiplies have consumed them by now.
  Smull2(Simd4S(dest), Simd8H(lhs), Simd8H(rhs));
  // Pairwise add across the concatenated products.
  Addp(Simd4S(dest), Simd4S(scratch), Simd4S(dest));
}
3778
3779 // Floating point rounding (experimental as of August, 2020)
3780 // https://github.com/WebAssembly/simd/pull/232
3781
void MacroAssembler::ceilFloat32x4(FloatRegister src, FloatRegister dest) {
  // Lane-wise round toward +infinity.
  Frintp(Simd4S(dest), Simd4S(src));
}
3785
void MacroAssembler::ceilFloat64x2(FloatRegister src, FloatRegister dest) {
  // Lane-wise round toward +infinity.
  Frintp(Simd2D(dest), Simd2D(src));
}
3789
void MacroAssembler::floorFloat32x4(FloatRegister src, FloatRegister dest) {
  // Lane-wise round toward -infinity.
  Frintm(Simd4S(dest), Simd4S(src));
}
3793
void MacroAssembler::floorFloat64x2(FloatRegister src, FloatRegister dest) {
  // Lane-wise round toward -infinity.
  Frintm(Simd2D(dest), Simd2D(src));
}
3797
void MacroAssembler::truncFloat32x4(FloatRegister src, FloatRegister dest) {
  // Lane-wise round toward zero.
  Frintz(Simd4S(dest), Simd4S(src));
}
3801
void MacroAssembler::truncFloat64x2(FloatRegister src, FloatRegister dest) {
  // Lane-wise round toward zero.
  Frintz(Simd2D(dest), Simd2D(src));
}
3805
void MacroAssembler::nearestFloat32x4(FloatRegister src, FloatRegister dest) {
  // Lane-wise round to nearest, ties to even.
  Frintn(Simd4S(dest), Simd4S(src));
}
3809
void MacroAssembler::nearestFloat64x2(FloatRegister src, FloatRegister dest) {
  // Lane-wise round to nearest, ties to even.
  Frintn(Simd2D(dest), Simd2D(src));
}
3813
3814 //}}} check_macroassembler_style
3815 // ===============================================================
3816
void MacroAssemblerCompat::addToStackPtr(Register src) {
  // Bump the effective stack pointer by src and keep the real SP in sync.
  Add(GetStackPointer64(), GetStackPointer64(), ARMRegister(src, 64));
  // Given that required invariant SP <= PSP, this is probably pointless,
  // since it gives PSP a larger value.
  syncStackPtr();
}
3823
void MacroAssemblerCompat::addToStackPtr(Imm32 imm) {
  // Bump the effective stack pointer by an immediate and re-sync.
  Add(GetStackPointer64(), GetStackPointer64(), Operand(imm.value));
  // As above, probably pointless.
  syncStackPtr();
}
3829
void MacroAssemblerCompat::addToStackPtr(const Address& src) {
  // Load the addend from memory, then bump the effective stack pointer.
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch = temps.AcquireX();
  Ldr(scratch, toMemOperand(src));
  Add(GetStackPointer64(), GetStackPointer64(), scratch);
  // As above, probably pointless.
  syncStackPtr();
}
3838
void MacroAssemblerCompat::addStackPtrTo(Register dest) {
  // dest += stack pointer.  The stack pointer itself is not modified, so no
  // syncStackPtr() is needed.
  Add(ARMRegister(dest, 64), ARMRegister(dest, 64), GetStackPointer64());
}
3842
void MacroAssemblerCompat::subFromStackPtr(Register src) {
  // Lower the effective stack pointer by src and keep the real SP in sync.
  Sub(GetStackPointer64(), GetStackPointer64(), ARMRegister(src, 64));
  syncStackPtr();
}
3847
void MacroAssemblerCompat::subFromStackPtr(Imm32 imm) {
  // Lower the effective stack pointer by an immediate and re-sync.
  Sub(GetStackPointer64(), GetStackPointer64(), Operand(imm.value));
  syncStackPtr();
}
3852
void MacroAssemblerCompat::subStackPtrFrom(Register dest) {
  // dest -= stack pointer.  The stack pointer itself is not modified.
  Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), GetStackPointer64());
}
3856
andToStackPtr(Imm32 imm)3857 void MacroAssemblerCompat::andToStackPtr(Imm32 imm) {
3858 if (sp.Is(GetStackPointer64())) {
3859 vixl::UseScratchRegisterScope temps(this);
3860 const ARMRegister scratch = temps.AcquireX();
3861 Mov(scratch, sp);
3862 And(sp, scratch, Operand(imm.value));
3863 // syncStackPtr() not needed since our SP is the real SP.
3864 } else {
3865 And(GetStackPointer64(), GetStackPointer64(), Operand(imm.value));
3866 syncStackPtr();
3867 }
3868 }
3869
void MacroAssemblerCompat::moveToStackPtr(Register src) {
  // Set the effective stack pointer from src and keep the real SP in sync.
  Mov(GetStackPointer64(), ARMRegister(src, 64));
  syncStackPtr();
}
3874
void MacroAssemblerCompat::moveStackPtrTo(Register dest) {
  // dest = stack pointer; a pure read, so no syncStackPtr() is needed.
  Mov(ARMRegister(dest, 64), GetStackPointer64());
}
3878
loadStackPtr(const Address & src)3879 void MacroAssemblerCompat::loadStackPtr(const Address& src) {
3880 if (sp.Is(GetStackPointer64())) {
3881 vixl::UseScratchRegisterScope temps(this);
3882 const ARMRegister scratch = temps.AcquireX();
3883 Ldr(scratch, toMemOperand(src));
3884 Mov(sp, scratch);
3885 // syncStackPtr() not needed since our SP is the real SP.
3886 } else {
3887 Ldr(GetStackPointer64(), toMemOperand(src));
3888 syncStackPtr();
3889 }
3890 }
3891
storeStackPtr(const Address & dest)3892 void MacroAssemblerCompat::storeStackPtr(const Address& dest) {
3893 if (sp.Is(GetStackPointer64())) {
3894 vixl::UseScratchRegisterScope temps(this);
3895 const ARMRegister scratch = temps.AcquireX();
3896 Mov(scratch, sp);
3897 Str(scratch, toMemOperand(dest));
3898 } else {
3899 Str(GetStackPointer64(), toMemOperand(dest));
3900 }
3901 }
3902
void MacroAssemblerCompat::branchTestStackPtr(Condition cond, Imm32 rhs,
                                              Label* label) {
  // Branch on (stackPtr & rhs) according to cond.
  if (sp.Is(GetStackPointer64())) {
    // The real SP cannot be an operand of Tst, so copy it through a
    // scratch register first.
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Mov(scratch, sp);
    Tst(scratch, Operand(rhs.value));
  } else {
    Tst(GetStackPointer64(), Operand(rhs.value));
  }
  B(label, cond);
}
3915
void MacroAssemblerCompat::branchStackPtr(Condition cond, Register rhs_,
                                          Label* label) {
  // Branch on (stackPtr cmp rhs_) according to cond.
  ARMRegister rhs(rhs_, 64);
  if (sp.Is(GetStackPointer64())) {
    // The real SP cannot be the lhs of Cmp, so copy it through a scratch
    // register first.
    vixl::UseScratchRegisterScope temps(this);
    const ARMRegister scratch = temps.AcquireX();
    Mov(scratch, sp);
    Cmp(scratch, rhs);
  } else {
    Cmp(GetStackPointer64(), rhs);
  }
  B(label, cond);
}
3929
void MacroAssemblerCompat::branchStackPtrRhs(Condition cond, Address lhs,
                                             Label* label) {
  // Branch on ([lhs] cond stackPtr), with the stack pointer as the rhs.
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch = temps.AcquireX();
  Ldr(scratch, toMemOperand(lhs));
  // Cmp disallows SP as the rhs, so flip the operands and invert the
  // condition.
  Cmp(GetStackPointer64(), scratch);
  B(label, Assembler::InvertCondition(cond));
}
3940
void MacroAssemblerCompat::branchStackPtrRhs(Condition cond,
                                             AbsoluteAddress lhs,
                                             Label* label) {
  // Branch on ([lhs] cond stackPtr), with the stack pointer as the rhs.
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch = temps.AcquireX();
  loadPtr(lhs, scratch.asUnsized());
  // Cmp disallows SP as the rhs, so flip the operands and invert the
  // condition.
  Cmp(GetStackPointer64(), scratch);
  B(label, Assembler::InvertCondition(cond));
}
3952
3953 // If source is a double, load into dest.
3954 // If source is int32, convert to double and store in dest.
3955 // Else, branch to failure.
void MacroAssemblerCompat::ensureDouble(const ValueOperand& source,
                                        FloatRegister dest, Label* failure) {
  // If source holds a double, unbox it into dest.  If it holds an int32,
  // convert it to double in dest.  Otherwise branch to failure.
  Label isDouble, done;

  {
    // Scope the tag register so it is released before the unbox/convert.
    ScratchTagScope tag(asMasm(), source);
    splitTagForTest(source, tag);
    asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
    asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
  }

  // Fall-through: the value is an int32.
  convertInt32ToDouble(source.valueReg(), dest);
  jump(&done);

  bind(&isDouble);
  unboxDouble(source, dest);

  bind(&done);
}
3975
unboxValue(const ValueOperand & src,AnyRegister dest,JSValueType type)3976 void MacroAssemblerCompat::unboxValue(const ValueOperand& src, AnyRegister dest,
3977 JSValueType type) {
3978 if (dest.isFloat()) {
3979 Label notInt32, end;
3980 asMasm().branchTestInt32(Assembler::NotEqual, src, ¬Int32);
3981 convertInt32ToDouble(src.valueReg(), dest.fpu());
3982 jump(&end);
3983 bind(¬Int32);
3984 unboxDouble(src, dest.fpu());
3985 bind(&end);
3986 } else {
3987 unboxNonDouble(src, dest.gpr(), type);
3988 }
3989 }
3990
3991 } // namespace jit
3992 } // namespace js
3993
3994 #endif /* jit_arm64_MacroAssembler_arm64_inl_h */
3995