/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_x86_MacroAssembler_x86_inl_h
#define jit_x86_MacroAssembler_x86_inl_h

#include "jit/x86/MacroAssembler-x86.h"

#include "jit/x86-shared/MacroAssembler-x86-shared-inl.h"

namespace js {
namespace jit {

//{{{ check_macroassembler_style
18
move64(Imm64 imm,Register64 dest)19 void MacroAssembler::move64(Imm64 imm, Register64 dest) {
20 move32(Imm32(imm.value & 0xFFFFFFFFL), dest.low);
21 move32(Imm32((imm.value >> 32) & 0xFFFFFFFFL), dest.high);
22 }
23
move64(Register64 src,Register64 dest)24 void MacroAssembler::move64(Register64 src, Register64 dest) {
25 movl(src.low, dest.low);
26 movl(src.high, dest.high);
27 }
28
moveDoubleToGPR64(FloatRegister src,Register64 dest)29 void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
30 ScratchDoubleScope scratch(*this);
31
32 if (Assembler::HasSSE41()) {
33 vmovd(src, dest.low);
34 vpextrd(1, src, dest.high);
35 } else {
36 vmovd(src, dest.low);
37 moveDouble(src, scratch);
38 vpsrldq(Imm32(4), scratch, scratch);
39 vmovd(scratch, dest.high);
40 }
41 }
42
moveGPR64ToDouble(Register64 src,FloatRegister dest)43 void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
44 if (Assembler::HasSSE41()) {
45 vmovd(src.low, dest);
46 vpinsrd(1, src.high, dest, dest);
47 } else {
48 ScratchDoubleScope fpscratch(*this);
49 vmovd(src.low, dest);
50 vmovd(src.high, fpscratch);
51 vunpcklps(fpscratch, dest, dest);
52 }
53 }
54
move64To32(Register64 src,Register dest)55 void MacroAssembler::move64To32(Register64 src, Register dest) {
56 if (src.low != dest) {
57 movl(src.low, dest);
58 }
59 }
60
move32To64ZeroExtend(Register src,Register64 dest)61 void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
62 if (src != dest.low) {
63 movl(src, dest.low);
64 }
65 movl(Imm32(0), dest.high);
66 }
67
move8To64SignExtend(Register src,Register64 dest)68 void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
69 move8SignExtend(src, dest.low);
70 if (dest.low == eax && dest.high == edx) {
71 masm.cdq();
72 } else {
73 movl(dest.low, dest.high);
74 sarl(Imm32(31), dest.high);
75 }
76 }
77
move16To64SignExtend(Register src,Register64 dest)78 void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
79 move16SignExtend(src, dest.low);
80 if (dest.low == eax && dest.high == edx) {
81 masm.cdq();
82 } else {
83 movl(dest.low, dest.high);
84 sarl(Imm32(31), dest.high);
85 }
86 }
87
move32To64SignExtend(Register src,Register64 dest)88 void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
89 if (src != dest.low) {
90 movl(src, dest.low);
91 }
92 if (dest.low == eax && dest.high == edx) {
93 masm.cdq();
94 } else {
95 movl(dest.low, dest.high);
96 sarl(Imm32(31), dest.high);
97 }
98 }
99
move32SignExtendToPtr(Register src,Register dest)100 void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
101 movl(src, dest);
102 }
103
move32ZeroExtendToPtr(Register src,Register dest)104 void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
105 movl(src, dest);
106 }
107
108 // ===============================================================
109 // Load instructions
110
load32SignExtendToPtr(const Address & src,Register dest)111 void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
112 load32(src, dest);
113 }
114
115 // ===============================================================
116 // Logical functions
117
notPtr(Register reg)118 void MacroAssembler::notPtr(Register reg) { notl(reg); }
119
andPtr(Register src,Register dest)120 void MacroAssembler::andPtr(Register src, Register dest) { andl(src, dest); }
121
andPtr(Imm32 imm,Register dest)122 void MacroAssembler::andPtr(Imm32 imm, Register dest) { andl(imm, dest); }
123
and64(Imm64 imm,Register64 dest)124 void MacroAssembler::and64(Imm64 imm, Register64 dest) {
125 if (imm.low().value != int32_t(0xFFFFFFFF)) {
126 andl(imm.low(), dest.low);
127 }
128 if (imm.hi().value != int32_t(0xFFFFFFFF)) {
129 andl(imm.hi(), dest.high);
130 }
131 }
132
or64(Imm64 imm,Register64 dest)133 void MacroAssembler::or64(Imm64 imm, Register64 dest) {
134 if (imm.low().value != 0) {
135 orl(imm.low(), dest.low);
136 }
137 if (imm.hi().value != 0) {
138 orl(imm.hi(), dest.high);
139 }
140 }
141
xor64(Imm64 imm,Register64 dest)142 void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
143 if (imm.low().value != 0) {
144 xorl(imm.low(), dest.low);
145 }
146 if (imm.hi().value != 0) {
147 xorl(imm.hi(), dest.high);
148 }
149 }
150
orPtr(Register src,Register dest)151 void MacroAssembler::orPtr(Register src, Register dest) { orl(src, dest); }
152
orPtr(Imm32 imm,Register dest)153 void MacroAssembler::orPtr(Imm32 imm, Register dest) { orl(imm, dest); }
154
and64(Register64 src,Register64 dest)155 void MacroAssembler::and64(Register64 src, Register64 dest) {
156 andl(src.low, dest.low);
157 andl(src.high, dest.high);
158 }
159
or64(Register64 src,Register64 dest)160 void MacroAssembler::or64(Register64 src, Register64 dest) {
161 orl(src.low, dest.low);
162 orl(src.high, dest.high);
163 }
164
xor64(Register64 src,Register64 dest)165 void MacroAssembler::xor64(Register64 src, Register64 dest) {
166 xorl(src.low, dest.low);
167 xorl(src.high, dest.high);
168 }
169
xorPtr(Register src,Register dest)170 void MacroAssembler::xorPtr(Register src, Register dest) { xorl(src, dest); }
171
xorPtr(Imm32 imm,Register dest)172 void MacroAssembler::xorPtr(Imm32 imm, Register dest) { xorl(imm, dest); }
173
174 // ===============================================================
175 // Swap instructions
176
byteSwap64(Register64 reg)177 void MacroAssembler::byteSwap64(Register64 reg) {
178 bswapl(reg.low);
179 bswapl(reg.high);
180 xchgl(reg.low, reg.high);
181 }
182
183 // ===============================================================
184 // Arithmetic functions
185
addPtr(Register src,Register dest)186 void MacroAssembler::addPtr(Register src, Register dest) { addl(src, dest); }
187
addPtr(Imm32 imm,Register dest)188 void MacroAssembler::addPtr(Imm32 imm, Register dest) { addl(imm, dest); }
189
addPtr(ImmWord imm,Register dest)190 void MacroAssembler::addPtr(ImmWord imm, Register dest) {
191 addl(Imm32(imm.value), dest);
192 }
193
addPtr(Imm32 imm,const Address & dest)194 void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
195 addl(imm, Operand(dest));
196 }
197
addPtr(Imm32 imm,const AbsoluteAddress & dest)198 void MacroAssembler::addPtr(Imm32 imm, const AbsoluteAddress& dest) {
199 addl(imm, Operand(dest));
200 }
201
addPtr(const Address & src,Register dest)202 void MacroAssembler::addPtr(const Address& src, Register dest) {
203 addl(Operand(src), dest);
204 }
205
add64(Register64 src,Register64 dest)206 void MacroAssembler::add64(Register64 src, Register64 dest) {
207 addl(src.low, dest.low);
208 adcl(src.high, dest.high);
209 }
210
add64(Imm32 imm,Register64 dest)211 void MacroAssembler::add64(Imm32 imm, Register64 dest) {
212 addl(imm, dest.low);
213 adcl(Imm32(0), dest.high);
214 }
215
add64(Imm64 imm,Register64 dest)216 void MacroAssembler::add64(Imm64 imm, Register64 dest) {
217 if (imm.low().value == 0) {
218 addl(imm.hi(), dest.high);
219 return;
220 }
221 addl(imm.low(), dest.low);
222 adcl(imm.hi(), dest.high);
223 }
224
addConstantDouble(double d,FloatRegister dest)225 void MacroAssembler::addConstantDouble(double d, FloatRegister dest) {
226 Double* dbl = getDouble(d);
227 if (!dbl) {
228 return;
229 }
230 masm.vaddsd_mr(nullptr, dest.encoding(), dest.encoding());
231 propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
232 }
233
sub32FromStackPtrWithPatch(Register dest)234 CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
235 moveStackPtrTo(dest);
236 addlWithPatch(Imm32(0), dest);
237 return CodeOffset(currentOffset());
238 }
239
patchSub32FromStackPtr(CodeOffset offset,Imm32 imm)240 void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
241 patchAddl(offset, -imm.value);
242 }
243
subPtr(Register src,Register dest)244 void MacroAssembler::subPtr(Register src, Register dest) { subl(src, dest); }
245
subPtr(Register src,const Address & dest)246 void MacroAssembler::subPtr(Register src, const Address& dest) {
247 subl(src, Operand(dest));
248 }
249
subPtr(Imm32 imm,Register dest)250 void MacroAssembler::subPtr(Imm32 imm, Register dest) { subl(imm, dest); }
251
subPtr(const Address & addr,Register dest)252 void MacroAssembler::subPtr(const Address& addr, Register dest) {
253 subl(Operand(addr), dest);
254 }
255
sub64(Register64 src,Register64 dest)256 void MacroAssembler::sub64(Register64 src, Register64 dest) {
257 subl(src.low, dest.low);
258 sbbl(src.high, dest.high);
259 }
260
sub64(Imm64 imm,Register64 dest)261 void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
262 if (imm.low().value == 0) {
263 subl(imm.hi(), dest.high);
264 return;
265 }
266 subl(imm.low(), dest.low);
267 sbbl(imm.hi(), dest.high);
268 }
269
mulPtr(Register rhs,Register srcDest)270 void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
271 imull(rhs, srcDest);
272 }
273
274 // Note: this function clobbers eax and edx.
mul64(Imm64 imm,const Register64 & dest)275 void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
276 // LOW32 = LOW(LOW(dest) * LOW(imm));
277 // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
278 // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
279 // + HIGH(LOW(dest) * LOW(imm)) [carry]
280
281 MOZ_ASSERT(dest.low != eax && dest.low != edx);
282 MOZ_ASSERT(dest.high != eax && dest.high != edx);
283
284 // HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
285 movl(Imm32(imm.value & 0xFFFFFFFFL), edx);
286 imull(edx, dest.high);
287
288 // edx:eax = LOW(dest) * LOW(imm);
289 movl(Imm32(imm.value & 0xFFFFFFFFL), edx);
290 movl(dest.low, eax);
291 mull(edx);
292
293 // HIGH(dest) += edx;
294 addl(edx, dest.high);
295
296 // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
297 if (((imm.value >> 32) & 0xFFFFFFFFL) == 5) {
298 leal(Operand(dest.low, dest.low, TimesFour), edx);
299 } else {
300 MOZ_CRASH("Unsupported imm");
301 }
302 addl(edx, dest.high);
303
304 // LOW(dest) = eax;
305 movl(eax, dest.low);
306 }
307
mul64(Imm64 imm,const Register64 & dest,const Register temp)308 void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
309 const Register temp) {
310 // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
311 // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
312 // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
313 // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
314
315 MOZ_ASSERT(dest == Register64(edx, eax));
316 MOZ_ASSERT(temp != edx && temp != eax);
317
318 movl(dest.low, temp);
319
320 // Compute mul64
321 imull(imm.low(), dest.high); // (2)
322 imull(imm.hi(), temp); // (3)
323 addl(dest.high, temp);
324 movl(imm.low(), dest.high);
325 mull(dest.high /*, dest.low*/); // (4) + (1) output in edx:eax
326 // (dest_hi:dest_lo)
327 addl(temp, dest.high);
328 }
329
mul64(const Register64 & src,const Register64 & dest,const Register temp)330 void MacroAssembler::mul64(const Register64& src, const Register64& dest,
331 const Register temp) {
332 // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
333 // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
334 // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
335 // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
336
337 MOZ_ASSERT(dest == Register64(edx, eax));
338 MOZ_ASSERT(src != Register64(edx, eax) && src != Register64(eax, edx));
339
340 // Make sure the rhs.high isn't the dest.high register anymore.
341 // This saves us from doing other register moves.
342 movl(dest.low, temp);
343
344 // Compute mul64
345 imull(src.low, dest.high); // (2)
346 imull(src.high, temp); // (3)
347 addl(dest.high, temp);
348 movl(src.low, dest.high);
349 mull(dest.high /*, dest.low*/); // (4) + (1) output in edx:eax
350 // (dest_hi:dest_lo)
351 addl(temp, dest.high);
352 }
353
mulBy3(Register src,Register dest)354 void MacroAssembler::mulBy3(Register src, Register dest) {
355 lea(Operand(src, src, TimesTwo), dest);
356 }
357
mulDoublePtr(ImmPtr imm,Register temp,FloatRegister dest)358 void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
359 FloatRegister dest) {
360 movl(imm, temp);
361 vmulsd(Operand(temp, 0), dest, dest);
362 }
363
inc64(AbsoluteAddress dest)364 void MacroAssembler::inc64(AbsoluteAddress dest) {
365 addl(Imm32(1), Operand(dest));
366 Label noOverflow;
367 j(NonZero, &noOverflow);
368 addl(Imm32(1), Operand(dest.offset(4)));
369 bind(&noOverflow);
370 }
371
neg64(Register64 reg)372 void MacroAssembler::neg64(Register64 reg) {
373 negl(reg.low);
374 adcl(Imm32(0), reg.high);
375 negl(reg.high);
376 }
377
negPtr(Register reg)378 void MacroAssembler::negPtr(Register reg) { negl(reg); }
379
380 // ===============================================================
381 // Shift functions
382
lshiftPtr(Imm32 imm,Register dest)383 void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
384 MOZ_ASSERT(0 <= imm.value && imm.value < 32);
385 shll(imm, dest);
386 }
387
lshiftPtr(Register shift,Register srcDest)388 void MacroAssembler::lshiftPtr(Register shift, Register srcDest) {
389 if (HasBMI2()) {
390 shlxl(srcDest, shift, srcDest);
391 return;
392 }
393 MOZ_ASSERT(shift == ecx);
394 shll_cl(srcDest);
395 }
396
lshift64(Imm32 imm,Register64 dest)397 void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
398 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
399 if (imm.value < 32) {
400 shldl(imm, dest.low, dest.high);
401 shll(imm, dest.low);
402 return;
403 }
404
405 mov(dest.low, dest.high);
406 shll(Imm32(imm.value & 0x1f), dest.high);
407 xorl(dest.low, dest.low);
408 }
409
lshift64(Register shift,Register64 srcDest)410 void MacroAssembler::lshift64(Register shift, Register64 srcDest) {
411 MOZ_ASSERT(shift == ecx);
412 MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
413
414 Label done;
415
416 shldl_cl(srcDest.low, srcDest.high);
417 shll_cl(srcDest.low);
418
419 testl(Imm32(0x20), ecx);
420 j(Condition::Equal, &done);
421
422 // 32 - 63 bit shift
423 movl(srcDest.low, srcDest.high);
424 xorl(srcDest.low, srcDest.low);
425
426 bind(&done);
427 }
428
rshiftPtr(Imm32 imm,Register dest)429 void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
430 MOZ_ASSERT(0 <= imm.value && imm.value < 32);
431 shrl(imm, dest);
432 }
433
rshiftPtr(Register shift,Register srcDest)434 void MacroAssembler::rshiftPtr(Register shift, Register srcDest) {
435 if (HasBMI2()) {
436 shrxl(srcDest, shift, srcDest);
437 return;
438 }
439 MOZ_ASSERT(shift == ecx);
440 shrl_cl(srcDest);
441 }
442
rshift64(Imm32 imm,Register64 dest)443 void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
444 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
445 if (imm.value < 32) {
446 shrdl(imm, dest.high, dest.low);
447 shrl(imm, dest.high);
448 return;
449 }
450
451 movl(dest.high, dest.low);
452 shrl(Imm32(imm.value & 0x1f), dest.low);
453 xorl(dest.high, dest.high);
454 }
455
rshift64(Register shift,Register64 srcDest)456 void MacroAssembler::rshift64(Register shift, Register64 srcDest) {
457 MOZ_ASSERT(shift == ecx);
458 MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
459
460 Label done;
461
462 shrdl_cl(srcDest.high, srcDest.low);
463 shrl_cl(srcDest.high);
464
465 testl(Imm32(0x20), ecx);
466 j(Condition::Equal, &done);
467
468 // 32 - 63 bit shift
469 movl(srcDest.high, srcDest.low);
470 xorl(srcDest.high, srcDest.high);
471
472 bind(&done);
473 }
474
rshiftPtrArithmetic(Imm32 imm,Register dest)475 void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
476 MOZ_ASSERT(0 <= imm.value && imm.value < 32);
477 sarl(imm, dest);
478 }
479
rshift64Arithmetic(Imm32 imm,Register64 dest)480 void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
481 MOZ_ASSERT(0 <= imm.value && imm.value < 64);
482 if (imm.value < 32) {
483 shrdl(imm, dest.high, dest.low);
484 sarl(imm, dest.high);
485 return;
486 }
487
488 movl(dest.high, dest.low);
489 sarl(Imm32(imm.value & 0x1f), dest.low);
490 sarl(Imm32(0x1f), dest.high);
491 }
492
rshift64Arithmetic(Register shift,Register64 srcDest)493 void MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest) {
494 MOZ_ASSERT(shift == ecx);
495 MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
496
497 Label done;
498
499 shrdl_cl(srcDest.high, srcDest.low);
500 sarl_cl(srcDest.high);
501
502 testl(Imm32(0x20), ecx);
503 j(Condition::Equal, &done);
504
505 // 32 - 63 bit shift
506 movl(srcDest.high, srcDest.low);
507 sarl(Imm32(0x1f), srcDest.high);
508
509 bind(&done);
510 }
511
512 // ===============================================================
513 // Rotation functions
514
rotateLeft64(Register count,Register64 src,Register64 dest,Register temp)515 void MacroAssembler::rotateLeft64(Register count, Register64 src,
516 Register64 dest, Register temp) {
517 MOZ_ASSERT(src == dest, "defineReuseInput");
518 MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
519
520 Label done;
521
522 movl(dest.high, temp);
523 shldl_cl(dest.low, dest.high);
524 shldl_cl(temp, dest.low);
525
526 testl(Imm32(0x20), count);
527 j(Condition::Equal, &done);
528 xchgl(dest.high, dest.low);
529
530 bind(&done);
531 }
532
rotateRight64(Register count,Register64 src,Register64 dest,Register temp)533 void MacroAssembler::rotateRight64(Register count, Register64 src,
534 Register64 dest, Register temp) {
535 MOZ_ASSERT(src == dest, "defineReuseInput");
536 MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
537
538 Label done;
539
540 movl(dest.high, temp);
541 shrdl_cl(dest.low, dest.high);
542 shrdl_cl(temp, dest.low);
543
544 testl(Imm32(0x20), count);
545 j(Condition::Equal, &done);
546 xchgl(dest.high, dest.low);
547
548 bind(&done);
549 }
550
rotateLeft64(Imm32 count,Register64 src,Register64 dest,Register temp)551 void MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest,
552 Register temp) {
553 MOZ_ASSERT(src == dest, "defineReuseInput");
554
555 int32_t amount = count.value & 0x3f;
556 if ((amount & 0x1f) != 0) {
557 movl(dest.high, temp);
558 shldl(Imm32(amount & 0x1f), dest.low, dest.high);
559 shldl(Imm32(amount & 0x1f), temp, dest.low);
560 }
561
562 if (!!(amount & 0x20)) {
563 xchgl(dest.high, dest.low);
564 }
565 }
566
rotateRight64(Imm32 count,Register64 src,Register64 dest,Register temp)567 void MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest,
568 Register temp) {
569 MOZ_ASSERT(src == dest, "defineReuseInput");
570
571 int32_t amount = count.value & 0x3f;
572 if ((amount & 0x1f) != 0) {
573 movl(dest.high, temp);
574 shrdl(Imm32(amount & 0x1f), dest.low, dest.high);
575 shrdl(Imm32(amount & 0x1f), temp, dest.low);
576 }
577
578 if (!!(amount & 0x20)) {
579 xchgl(dest.high, dest.low);
580 }
581 }
582
583 // ===============================================================
584 // Bit counting functions
585
clz64(Register64 src,Register dest)586 void MacroAssembler::clz64(Register64 src, Register dest) {
587 if (AssemblerX86Shared::HasLZCNT()) {
588 Label nonzero, zero;
589
590 testl(src.high, src.high);
591 j(Assembler::Zero, &zero);
592
593 lzcntl(src.high, dest);
594 jump(&nonzero);
595
596 bind(&zero);
597 lzcntl(src.low, dest);
598 addl(Imm32(32), dest);
599
600 bind(&nonzero);
601 return;
602 }
603
604 // Because |dest| may be equal to |src.low|, we rely on BSR not modifying its
605 // output when the input is zero. AMD ISA documents BSR not modifying the
606 // output and current Intel CPUs follow AMD.
607
608 Label nonzero, zero;
609
610 bsrl(src.high, dest);
611 j(Assembler::Zero, &zero);
612 orl(Imm32(32), dest);
613 jump(&nonzero);
614
615 bind(&zero);
616 bsrl(src.low, dest);
617 j(Assembler::NonZero, &nonzero);
618 movl(Imm32(0x7F), dest);
619
620 bind(&nonzero);
621 xorl(Imm32(0x3F), dest);
622 }
623
ctz64(Register64 src,Register dest)624 void MacroAssembler::ctz64(Register64 src, Register dest) {
625 if (AssemblerX86Shared::HasBMI1()) {
626 Label nonzero, zero;
627
628 testl(src.low, src.low);
629 j(Assembler::Zero, &zero);
630
631 tzcntl(src.low, dest);
632 jump(&nonzero);
633
634 bind(&zero);
635 tzcntl(src.high, dest);
636 addl(Imm32(32), dest);
637
638 bind(&nonzero);
639 return;
640 }
641
642 // Because |dest| may be equal to |src.low|, we rely on BSF not modifying its
643 // output when the input is zero. AMD ISA documents BSF not modifying the
644 // output and current Intel CPUs follow AMD.
645
646 Label done, nonzero;
647
648 bsfl(src.low, dest);
649 j(Assembler::NonZero, &done);
650 bsfl(src.high, dest);
651 j(Assembler::NonZero, &nonzero);
652 movl(Imm32(64), dest);
653 jump(&done);
654
655 bind(&nonzero);
656 orl(Imm32(32), dest);
657
658 bind(&done);
659 }
660
popcnt64(Register64 src,Register64 dest,Register tmp)661 void MacroAssembler::popcnt64(Register64 src, Register64 dest, Register tmp) {
662 // The tmp register is only needed if there is no native POPCNT.
663
664 MOZ_ASSERT(src.low != tmp && src.high != tmp);
665 MOZ_ASSERT(dest.low != tmp && dest.high != tmp);
666
667 if (dest.low != src.high) {
668 popcnt32(src.low, dest.low, tmp);
669 popcnt32(src.high, dest.high, tmp);
670 } else {
671 MOZ_ASSERT(dest.high != src.high);
672 popcnt32(src.low, dest.high, tmp);
673 popcnt32(src.high, dest.low, tmp);
674 }
675 addl(dest.high, dest.low);
676 xorl(dest.high, dest.high);
677 }
678
679 // ===============================================================
680 // Condition functions
681
cmp64Set(Condition cond,Address lhs,Imm64 rhs,Register dest)682 void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
683 Register dest) {
684 Label success, done;
685
686 branch64(cond, lhs, rhs, &success);
687 move32(Imm32(0), dest);
688 jump(&done);
689 bind(&success);
690 move32(Imm32(1), dest);
691 bind(&done);
692 }
693
694 template <typename T1, typename T2>
cmpPtrSet(Condition cond,T1 lhs,T2 rhs,Register dest)695 void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
696 cmpPtr(lhs, rhs);
697 emitSet(cond, dest);
698 }
699
700 // ===============================================================
701 // Branch functions
702
branch32(Condition cond,const AbsoluteAddress & lhs,Register rhs,Label * label)703 void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
704 Register rhs, Label* label) {
705 cmp32(Operand(lhs), rhs);
706 j(cond, label);
707 }
708
branch32(Condition cond,const AbsoluteAddress & lhs,Imm32 rhs,Label * label)709 void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
710 Imm32 rhs, Label* label) {
711 cmp32(Operand(lhs), rhs);
712 j(cond, label);
713 }
714
branch32(Condition cond,wasm::SymbolicAddress lhs,Imm32 rhs,Label * label)715 void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
716 Imm32 rhs, Label* label) {
717 cmpl(rhs, lhs);
718 j(cond, label);
719 }
720
branch64(Condition cond,Register64 lhs,Imm64 val,Label * success,Label * fail)721 void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
722 Label* success, Label* fail) {
723 bool fallthrough = false;
724 Label fallthroughLabel;
725
726 if (!fail) {
727 fail = &fallthroughLabel;
728 fallthrough = true;
729 }
730
731 switch (cond) {
732 case Assembler::Equal:
733 branch32(Assembler::NotEqual, lhs.low, val.low(), fail);
734 branch32(Assembler::Equal, lhs.high, val.hi(), success);
735 if (!fallthrough) {
736 jump(fail);
737 }
738 break;
739 case Assembler::NotEqual:
740 branch32(Assembler::NotEqual, lhs.low, val.low(), success);
741 branch32(Assembler::NotEqual, lhs.high, val.hi(), success);
742 if (!fallthrough) {
743 jump(fail);
744 }
745 break;
746 case Assembler::LessThan:
747 case Assembler::LessThanOrEqual:
748 case Assembler::GreaterThan:
749 case Assembler::GreaterThanOrEqual:
750 case Assembler::Below:
751 case Assembler::BelowOrEqual:
752 case Assembler::Above:
753 case Assembler::AboveOrEqual: {
754 Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
755 Assembler::Condition cond2 =
756 Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
757 Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
758
759 cmp32(lhs.high, val.hi());
760 j(cond1, success);
761 j(cond2, fail);
762 cmp32(lhs.low, val.low());
763 j(cond3, success);
764 if (!fallthrough) {
765 jump(fail);
766 }
767 break;
768 }
769 default:
770 MOZ_CRASH("Condition code not supported");
771 break;
772 }
773
774 if (fallthrough) {
775 bind(fail);
776 }
777 }
778
branch64(Condition cond,Register64 lhs,Register64 rhs,Label * success,Label * fail)779 void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
780 Label* success, Label* fail) {
781 bool fallthrough = false;
782 Label fallthroughLabel;
783
784 if (!fail) {
785 fail = &fallthroughLabel;
786 fallthrough = true;
787 }
788
789 switch (cond) {
790 case Assembler::Equal:
791 branch32(Assembler::NotEqual, lhs.low, rhs.low, fail);
792 branch32(Assembler::Equal, lhs.high, rhs.high, success);
793 if (!fallthrough) {
794 jump(fail);
795 }
796 break;
797 case Assembler::NotEqual:
798 branch32(Assembler::NotEqual, lhs.low, rhs.low, success);
799 branch32(Assembler::NotEqual, lhs.high, rhs.high, success);
800 if (!fallthrough) {
801 jump(fail);
802 }
803 break;
804 case Assembler::LessThan:
805 case Assembler::LessThanOrEqual:
806 case Assembler::GreaterThan:
807 case Assembler::GreaterThanOrEqual:
808 case Assembler::Below:
809 case Assembler::BelowOrEqual:
810 case Assembler::Above:
811 case Assembler::AboveOrEqual: {
812 Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
813 Assembler::Condition cond2 =
814 Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
815 Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
816
817 cmp32(lhs.high, rhs.high);
818 j(cond1, success);
819 j(cond2, fail);
820 cmp32(lhs.low, rhs.low);
821 j(cond3, success);
822 if (!fallthrough) {
823 jump(fail);
824 }
825 break;
826 }
827 default:
828 MOZ_CRASH("Condition code not supported");
829 break;
830 }
831
832 if (fallthrough) {
833 bind(fail);
834 }
835 }
836
branch64(Condition cond,const Address & lhs,Imm64 val,Label * label)837 void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
838 Label* label) {
839 MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
840 "other condition codes not supported");
841
842 Label done;
843
844 if (cond == Assembler::Equal) {
845 branch32(Assembler::NotEqual, lhs, val.firstHalf(), &done);
846 } else {
847 branch32(Assembler::NotEqual, lhs, val.firstHalf(), label);
848 }
849 branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)),
850 val.secondHalf(), label);
851
852 bind(&done);
853 }
854
branch64(Condition cond,const Address & lhs,Register64 rhs,Label * label)855 void MacroAssembler::branch64(Condition cond, const Address& lhs,
856 Register64 rhs, Label* label) {
857 MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
858 "other condition codes not supported");
859
860 Label done;
861
862 if (cond == Assembler::Equal) {
863 branch32(Assembler::NotEqual, lhs, rhs.low, &done);
864 } else {
865 branch32(Assembler::NotEqual, lhs, rhs.low, label);
866 }
867 branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), rhs.high,
868 label);
869
870 bind(&done);
871 }
872
branch64(Condition cond,const Address & lhs,const Address & rhs,Register scratch,Label * label)873 void MacroAssembler::branch64(Condition cond, const Address& lhs,
874 const Address& rhs, Register scratch,
875 Label* label) {
876 MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
877 "other condition codes not supported");
878 MOZ_ASSERT(lhs.base != scratch);
879 MOZ_ASSERT(rhs.base != scratch);
880
881 Label done;
882
883 load32(rhs, scratch);
884 if (cond == Assembler::Equal) {
885 branch32(Assembler::NotEqual, lhs, scratch, &done);
886 } else {
887 branch32(Assembler::NotEqual, lhs, scratch, label);
888 }
889
890 load32(Address(rhs.base, rhs.offset + sizeof(uint32_t)), scratch);
891 branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), scratch,
892 label);
893
894 bind(&done);
895 }
896
branchPtr(Condition cond,const AbsoluteAddress & lhs,Register rhs,Label * label)897 void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
898 Register rhs, Label* label) {
899 branchPtrImpl(cond, lhs, rhs, label);
900 }
901
branchPtr(Condition cond,const AbsoluteAddress & lhs,ImmWord rhs,Label * label)902 void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
903 ImmWord rhs, Label* label) {
904 branchPtrImpl(cond, lhs, rhs, label);
905 }
906
branchPtr(Condition cond,wasm::SymbolicAddress lhs,Register rhs,Label * label)907 void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
908 Register rhs, Label* label) {
909 cmpl(rhs, lhs);
910 j(cond, label);
911 }
912
branchPrivatePtr(Condition cond,const Address & lhs,Register rhs,Label * label)913 void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
914 Register rhs, Label* label) {
915 branchPtr(cond, lhs, rhs, label);
916 }
917
branchTruncateFloat32ToPtr(FloatRegister src,Register dest,Label * fail)918 void MacroAssembler::branchTruncateFloat32ToPtr(FloatRegister src,
919 Register dest, Label* fail) {
920 branchTruncateFloat32ToInt32(src, dest, fail);
921 }
922
branchTruncateFloat32MaybeModUint32(FloatRegister src,Register dest,Label * fail)923 void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
924 Register dest,
925 Label* fail) {
926 branchTruncateFloat32ToInt32(src, dest, fail);
927 }
928
branchTruncateFloat32ToInt32(FloatRegister src,Register dest,Label * fail)929 void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
930 Register dest, Label* fail) {
931 vcvttss2si(src, dest);
932
933 // vcvttss2si returns 0x80000000 on failure. Test for it by
934 // subtracting 1 and testing overflow (this permits the use of a
935 // smaller immediate field).
936 cmp32(dest, Imm32(1));
937 j(Assembler::Overflow, fail);
938 }
939
branchTruncateDoubleToPtr(FloatRegister src,Register dest,Label * fail)940 void MacroAssembler::branchTruncateDoubleToPtr(FloatRegister src, Register dest,
941 Label* fail) {
942 branchTruncateDoubleToInt32(src, dest, fail);
943 }
944
branchTruncateDoubleMaybeModUint32(FloatRegister src,Register dest,Label * fail)945 void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
946 Register dest,
947 Label* fail) {
948 // TODO: X64 supports supports integers up till 64bits. Here we only support
949 // 32bits, before failing. Implementing this for x86 might give a x86 kraken
950 // win.
951 branchTruncateDoubleToInt32(src, dest, fail);
952 }
953
branchTruncateDoubleToInt32(FloatRegister src,Register dest,Label * fail)954 void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
955 Register dest, Label* fail) {
956 vcvttsd2si(src, dest);
957
958 // vcvttsd2si returns 0x80000000 on failure. Test for it by
959 // subtracting 1 and testing overflow (this permits the use of a
960 // smaller immediate field).
961 cmp32(dest, Imm32(1));
962 j(Assembler::Overflow, fail);
963 }
964
branchAdd64(Condition cond,Imm64 imm,Register64 dest,Label * label)965 void MacroAssembler::branchAdd64(Condition cond, Imm64 imm, Register64 dest,
966 Label* label) {
967 add64(imm, dest);
968 j(cond, label);
969 }
970
// Bit-test the 32-bit value at absolute address |lhs| against the mask |rhs|
// and branch on |cond|.
void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
                                  Imm32 rhs, Label* label) {
  test32(Operand(lhs), rhs);
  j(cond, label);
}
976
// Branch on a test of a 64-bit register pair. Only Zero/NonZero (which
// require lhs == rhs, i.e. "test the value against itself") and
// Signed/NotSigned are supported; |temp| is clobbered on the Zero/NonZero
// path.
template <class L>
void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
                                  Register64 rhs, Register temp, L label) {
  if (cond == Assembler::Zero || cond == Assembler::NonZero) {
    MOZ_ASSERT(lhs.low == rhs.low);
    MOZ_ASSERT(lhs.high == rhs.high);
    // A 64-bit value is (non-)zero iff the OR of its halves is (non-)zero.
    movl(lhs.low, temp);
    orl(lhs.high, temp);
    branchTestPtr(cond, temp, temp, label);
  } else if (cond == Assembler::Signed || cond == Assembler::NotSigned) {
    // The sign of a 64-bit value lives in its high word.
    branchTest32(cond, lhs.high, rhs.high, label);
  } else {
    MOZ_CRASH("Unsupported condition");
  }
}
992
// Branch if the boolean payload of |value| is truthy (when |truthy| is true)
// or falsy (when |truthy| is false). The payload register holds 0 or 1, so a
// self-test suffices.
void MacroAssembler::branchTestBooleanTruthy(bool truthy,
                                             const ValueOperand& value,
                                             Label* label) {
  test32(value.payloadReg(), value.payloadReg());
  j(truthy ? NonZero : Zero, label);
}
999
// Branch if the Value at |valaddr| is (Equal) / is not (NotEqual) the magic
// value |why|. Equality requires both the type tag and the payload to match.
void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
                                     JSWhyMagic why, Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

  Label notMagic;
  if (cond == Assembler::Equal) {
    // Not a magic value at all => the Equal comparison fails locally.
    branchTestMagic(Assembler::NotEqual, valaddr, &notMagic);
  } else {
    // Not a magic value at all => the NotEqual comparison succeeds.
    branchTestMagic(Assembler::NotEqual, valaddr, label);
  }

  // Type tag is MAGIC; now decide based on the payload (the JSWhyMagic).
  branch32(cond, ToPayload(valaddr), Imm32(why), label);
  bind(&notMagic);
}
1014
// Branch if the Value at |lhs| equals (Equal) / differs from (NotEqual) the
// Value in |rhs|. Equality requires both the type tag and the payload words
// to match.
void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
                                     const ValueOperand& rhs, Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

  Label notSameValue;
  if (cond == Assembler::Equal) {
    // Differing type tags => the Equal comparison fails locally.
    branch32(Assembler::NotEqual, ToType(lhs), rhs.typeReg(), &notSameValue);
  } else {
    // Differing type tags => the NotEqual comparison succeeds.
    branch32(Assembler::NotEqual, ToType(lhs), rhs.typeReg(), label);
  }

  // Type tags match; decide based on the payload words.
  branch32(cond, ToPayload(lhs), rhs.payloadReg(), label);
  bind(&notSameValue);
}
1029
// Indirect jump through a code pointer loaded from |addr|.
void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
  jmp(Operand(addr));
}
1033
// Compare |lhs| with |rhs| and move |src| into |dest| if |cond| holds.
// Pointers are 32 bits on x86, so a 32-bit cmov implements the pointer move.
void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
                                  Register src, Register dest) {
  cmp32(lhs, rhs);
  cmovCCl(cond, Operand(src), dest);
}
1039
// Compare the 32-bit value at |lhs| with |rhs| and conditionally load the
// pointer at |src| into |dest| if |cond| holds (cmov from memory).
void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
                                  const Address& src, Register dest) {
  cmp32(lhs, rhs);
  cmovCCl(cond, Operand(src), dest);
}
1045
// Pointer-sized compare-and-conditional-move. Pointers are 32 bits on x86,
// so this delegates to the 32-bit variant.
void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
                                   Register src, Register dest) {
  cmp32Move32(cond, lhs, rhs, src, dest);
}
1050
// Pointer-sized compare-and-conditional-move against a memory operand.
// Pointers are 32 bits on x86, so this delegates to the 32-bit variant.
void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
                                   const Address& rhs, Register src,
                                   Register dest) {
  cmp32Move32(cond, lhs, rhs, src, dest);
}
1056
// Bit-test the 32-bit value at |addr| against |mask| and, if |cond| holds,
// load the pointer at |src| into |dest|. Only Zero/NonZero make sense for a
// bit-test, hence the assert.
void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
                                   Imm32 mask, const Address& src,
                                   Register dest) {
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
  test32(addr, mask);
  cmovCCl(cond, Operand(src), dest);
}
1064
// Bit-test the 32-bit value at |addr| against |mask| and, if |cond| holds,
// move |src| into |dest|. Only Zero/NonZero make sense for a bit-test,
// hence the assert.
void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
                                   Imm32 mask, Register src, Register dest) {
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
  test32(addr, mask);
  cmovCCl(cond, Operand(src), dest);
}
1071
// Conditionally move |src| into |dest| based on flags set by the caller.
// Pointers are 32 bits on x86, so cmovl suffices.
void MacroAssembler::spectreMovePtr(Condition cond, Register src,
                                    Register dest) {
  cmovCCl(cond, Operand(src), dest);
}
1076
// Compare |index| against |length| and branch to |failure| when the index is
// out of bounds. When Spectre index masking is enabled, the in-bounds path
// additionally cmovs zero into |index| under the same (mispredictable)
// condition, so a speculatively-taken in-bounds path cannot use an
// out-of-range index. |maybeScratch| may be InvalidReg, in which case the
// zero is kept on the stack instead.
void MacroAssembler::spectreBoundsCheck32(Register index, const Operand& length,
                                          Register maybeScratch,
                                          Label* failure) {
  Label failurePopValue;
  bool pushedValue = false;
  if (JitOptions.spectreIndexMasking) {
    // Materialize a zero for the cmov below: in |maybeScratch| if we have
    // one, otherwise on the stack.
    if (maybeScratch == InvalidReg) {
      push(Imm32(0));
      pushedValue = true;
    } else {
      move32(Imm32(0), maybeScratch);
    }
  }

  cmp32(index, length);
  // If we pushed a zero above, route failures through a stub that pops it
  // first so the stack stays balanced.
  j(Assembler::AboveOrEqual, pushedValue ? &failurePopValue : failure);

  if (JitOptions.spectreIndexMasking) {
    if (maybeScratch == InvalidReg) {
      Label done;
      // Architecturally not-taken (we just branched on AboveOrEqual), but
      // executed under misspeculation: zero out |index|.
      cmovCCl(Assembler::AboveOrEqual, Operand(StackPointer, 0), index);
      // Discard the pushed zero; lea does not modify the flags.
      lea(Operand(StackPointer, sizeof(void*)), StackPointer);
      jump(&done);

      bind(&failurePopValue);
      // Failure path: discard the pushed zero before branching to |failure|.
      lea(Operand(StackPointer, sizeof(void*)), StackPointer);
      jump(failure);

      bind(&done);
    } else {
      cmovCCl(Assembler::AboveOrEqual, maybeScratch, index);
    }
  }
}
1111
// Spectre-hardened bounds check with the length in a register. The scratch
// must not alias the index or the length (it may be zeroed and cmoved into
// the index).
void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
                                          Register maybeScratch,
                                          Label* failure) {
  MOZ_ASSERT(length != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  spectreBoundsCheck32(index, Operand(length), maybeScratch, failure);
}
1120
// Spectre-hardened bounds check with the length in memory. The index and
// scratch must not alias the length's base register or each other.
void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
                                          Register maybeScratch,
                                          Label* failure) {
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  spectreBoundsCheck32(index, Operand(length), maybeScratch, failure);
}
1130
// Pointer-sized Spectre-hardened bounds check. Pointers are 32 bits on x86,
// so this is the 32-bit check.
void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
                                           Register maybeScratch,
                                           Label* failure) {
  spectreBoundsCheck32(index, length, maybeScratch, failure);
}
1136
// Pointer-sized Spectre-hardened bounds check with the length in memory.
// Pointers are 32 bits on x86, so this is the 32-bit check.
void MacroAssembler::spectreBoundsCheckPtr(Register index,
                                           const Address& length,
                                           Register maybeScratch,
                                           Label* failure) {
  spectreBoundsCheck32(index, length, maybeScratch, failure);
}
1143
1144 // ========================================================================
1145 // SIMD
1146
// Set |dest| to 1 if any bit of the 128-bit vector |src| is set, else 0.
// Requires SSE4.1 for vptest.
void MacroAssembler::anyTrueSimd128(FloatRegister src, Register dest) {
  Label done;
  // Optimistically load 1; movl does not disturb the flags consumed below.
  movl(Imm32(1), dest);
  vptest(src, src);  // SSE4.1
  j(NonZero, &done);
  movl(Imm32(0), dest);
  bind(&done);
}
1155
// Extract 64-bit lane |lane| of |src| into the register pair |dest|, as two
// 32-bit extracts (low word at dword index 2*lane, high at 2*lane+1).
void MacroAssembler::extractLaneInt64x2(uint32_t lane, FloatRegister src,
                                        Register64 dest) {
  if (lane == 0) {
    // Dword 0 can be read with plain vmovd instead of vpextrd.
    vmovd(src, dest.low);
  } else {
    vpextrd(2 * lane, src, dest.low);
  }
  vpextrd(2 * lane + 1, src, dest.high);
}
1165
// Replace 64-bit lane |lane| of |lhsDest| in place with the register pair
// |rhs|, as two 32-bit inserts (SSE4.1 vpinsrd).
void MacroAssembler::replaceLaneInt64x2(unsigned lane, Register64 rhs,
                                        FloatRegister lhsDest) {
  vpinsrd(2 * lane, rhs.low, lhsDest, lhsDest);
  vpinsrd(2 * lane + 1, rhs.high, lhsDest, lhsDest);
}
1171
// Copy |lhs| into |dest| with 64-bit lane |lane| replaced by the register
// pair |rhs|. The first insert also performs the lhs -> dest copy.
void MacroAssembler::replaceLaneInt64x2(unsigned lane, FloatRegister lhs,
                                        Register64 rhs, FloatRegister dest) {
  vpinsrd(2 * lane, rhs.low, lhs, dest);
  vpinsrd(2 * lane + 1, rhs.high, dest, dest);
}
1177
// Broadcast the 64-bit register pair |src| into both lanes of |dest|.
void MacroAssembler::splatX2(Register64 src, FloatRegister dest) {
  replaceLaneInt64x2(0, src, dest);
  replaceLaneInt64x2(1, src, dest);
}
1182
1183 // ========================================================================
1184 // Truncate floating point.
1185
truncateFloat32ToUInt64(Address src,Address dest,Register temp,FloatRegister floatTemp)1186 void MacroAssembler::truncateFloat32ToUInt64(Address src, Address dest,
1187 Register temp,
1188 FloatRegister floatTemp) {
1189 Label done;
1190
1191 loadFloat32(src, floatTemp);
1192
1193 truncateFloat32ToInt64(src, dest, temp);
1194
1195 // For unsigned conversion the case of [INT64, UINT64] needs to get handle
1196 // seperately.
1197 load32(HighWord(dest), temp);
1198 branch32(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
1199
1200 // Move the value inside INT64 range.
1201 storeFloat32(floatTemp, dest);
1202 loadConstantFloat32(double(int64_t(0x8000000000000000)), floatTemp);
1203 vaddss(Operand(dest), floatTemp, floatTemp);
1204 storeFloat32(floatTemp, dest);
1205 truncateFloat32ToInt64(dest, dest, temp);
1206
1207 load32(HighWord(dest), temp);
1208 orl(Imm32(0x80000000), temp);
1209 store32(temp, HighWord(dest));
1210
1211 bind(&done);
1212 }
1213
// Truncate the double at |src| to an unsigned 64-bit integer at |dest|.
// |temp| is a GPR scratch; |floatTemp| keeps a copy of the input across the
// first (signed) truncation so the out-of-int64-range case can be redone.
void MacroAssembler::truncateDoubleToUInt64(Address src, Address dest,
                                            Register temp,
                                            FloatRegister floatTemp) {
  Label done;

  loadDouble(src, floatTemp);

  // First attempt a signed truncation; it is sufficient for inputs that fit
  // in the int64 range.
  truncateDoubleToInt64(src, dest, temp);

  // For unsigned conversion the case of (INT64_MAX, UINT64_MAX] needs to be
  // handled separately: detect it via the sign bit of the high result word.
  load32(HighWord(dest), temp);
  branch32(Assembler::Condition::NotSigned, temp, Imm32(0), &done);

  // Move the value into INT64 range by adding -2^63 (exactly representable
  // as a double), truncate again, then add 2^63 back by setting the sign
  // bit of the high word.
  storeDouble(floatTemp, dest);
  loadConstantDouble(double(int64_t(0x8000000000000000)), floatTemp);
  vaddsd(Operand(dest), floatTemp, floatTemp);
  storeDouble(floatTemp, dest);
  truncateDoubleToInt64(dest, dest, temp);

  load32(HighWord(dest), temp);
  orl(Imm32(0x80000000), temp);
  store32(temp, HighWord(dest));

  bind(&done);
}
1241
1242 template <typename T>
fallibleUnboxPtrImpl(const T & src,Register dest,JSValueType type,Label * fail)1243 void MacroAssemblerX86::fallibleUnboxPtrImpl(const T& src, Register dest,
1244 JSValueType type, Label* fail) {
1245 switch (type) {
1246 case JSVAL_TYPE_OBJECT:
1247 asMasm().branchTestObject(Assembler::NotEqual, src, fail);
1248 break;
1249 case JSVAL_TYPE_STRING:
1250 asMasm().branchTestString(Assembler::NotEqual, src, fail);
1251 break;
1252 case JSVAL_TYPE_SYMBOL:
1253 asMasm().branchTestSymbol(Assembler::NotEqual, src, fail);
1254 break;
1255 case JSVAL_TYPE_BIGINT:
1256 asMasm().branchTestBigInt(Assembler::NotEqual, src, fail);
1257 break;
1258 default:
1259 MOZ_CRASH("Unexpected type");
1260 }
1261 unboxNonDouble(src, dest, type);
1262 }
1263
// Fallible unbox of a pointer-valued Value held in registers.
void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
                                      JSValueType type, Label* fail) {
  fallibleUnboxPtrImpl(src, dest, type, fail);
}
1268
// Fallible unbox of a pointer-valued Value in memory (base + offset).
void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
                                      JSValueType type, Label* fail) {
  fallibleUnboxPtrImpl(src, dest, type, fail);
}
1273
// Fallible unbox of a pointer-valued Value in memory (base + index*scale).
void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
                                      JSValueType type, Label* fail) {
  fallibleUnboxPtrImpl(src, dest, type, fail);
}
1278
1279 //}}} check_macroassembler_style
1280 // ===============================================================
1281
1282 // Note: this function clobbers the source register.
// Convert the unsigned 32-bit integer in |src| to a double in |dest|.
// Clobbers |src| (see note above): x86 only has a signed int->double
// conversion, so we bias into signed range and correct afterwards.
void MacroAssemblerX86::convertUInt32ToDouble(Register src,
                                              FloatRegister dest) {
  // src is [0, 2^32-1]
  subl(Imm32(0x80000000), src);

  // Now src is [-2^31, 2^31-1] - int range, but not the same value.
  convertInt32ToDouble(src, dest);

  // dest is now a double with the int range.
  // correct the double value by adding 0x80000000 (2^31 as a double).
  asMasm().addConstantDouble(2147483648.0, dest);
}
1295
1296 // Note: this function clobbers the source register.
// Convert the unsigned 32-bit integer in |src| to a float32 in |dest|.
// Clobbers |src| (see note above). Converting via double is exact: every
// uint32 is representable as a double, and the final rounding happens once
// in the double->float step.
void MacroAssemblerX86::convertUInt32ToFloat32(Register src,
                                               FloatRegister dest) {
  convertUInt32ToDouble(src, dest);
  convertDoubleToFloat32(dest, dest);
}
1302
unboxValue(const ValueOperand & src,AnyRegister dest,JSValueType)1303 void MacroAssemblerX86::unboxValue(const ValueOperand& src, AnyRegister dest,
1304 JSValueType) {
1305 if (dest.isFloat()) {
1306 Label notInt32, end;
1307 asMasm().branchTestInt32(Assembler::NotEqual, src, ¬Int32);
1308 convertInt32ToDouble(src.payloadReg(), dest.fpu());
1309 jump(&end);
1310 bind(¬Int32);
1311 unboxDouble(src, dest.fpu());
1312 bind(&end);
1313 } else {
1314 if (src.payloadReg() != dest.gpr()) {
1315 movl(src.payloadReg(), dest.gpr());
1316 }
1317 }
1318 }
1319
// Load the Value at |src| into the float register |dest|: an int32 payload
// is converted to double, a double payload is loaded directly. Assumes the
// value is either int32 or double — no guard for other tags.
template <typename T>
void MacroAssemblerX86::loadInt32OrDouble(const T& src, FloatRegister dest) {
  Label notInt32, end;
  asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
  convertInt32ToDouble(ToPayload(src), dest);
  jump(&end);
  bind(&notInt32);
  loadDouble(src, dest);
  bind(&end);
}
1330
// Load an unboxed value from |src| into |dest|: float destinations get the
// int32-or-double load, GPR destinations get the raw payload word. The
// MIRType parameter is unused here (kept for cross-platform signature
// parity — note other back-ends may consult it).
template <typename T>
void MacroAssemblerX86::loadUnboxedValue(const T& src, MIRType type,
                                         AnyRegister dest) {
  if (dest.isFloat()) {
    loadInt32OrDouble(src, dest.fpu());
  } else {
    movl(Operand(src), dest.gpr());
  }
}
1340
1341 // If source is a double, load it into dest. If source is int32,
1342 // convert it to double. Else, branch to failure.
// If source is a double, load it into dest. If source is int32,
// convert it to double. Else, branch to failure.
void MacroAssemblerX86::ensureDouble(const ValueOperand& source,
                                     FloatRegister dest, Label* failure) {
  Label isDouble, done;
  asMasm().branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
  // Not a double: the only other acceptable tag is int32.
  asMasm().branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);

  convertInt32ToDouble(source.payloadReg(), dest);
  jump(&done);

  bind(&isDouble);
  unboxDouble(source, dest);

  bind(&done);
}
1357
1358 } // namespace jit
1359 } // namespace js
1360
1361 #endif /* jit_x86_MacroAssembler_x86_inl_h */
1362