/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x64/CodeGenerator-x64.h"

#include "mozilla/MathAlgorithms.h"

#include "jit/IonCaches.h"
#include "jit/MIR.h"

#include "jsscriptinlines.h"

#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;

CodeGeneratorX64::CodeGeneratorX64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
  : CodeGeneratorX86Shared(gen, graph, masm)
{
}

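// On x64 a JS::Value fits in a single 64-bit general-purpose register, so
// these helpers simply wrap the one register the allocator assigned (unlike
// x86, where a Value is a type/payload register pair).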
ValueOperand
CodeGeneratorX64::ToValue(LInstruction* ins, size_t pos)
{
    return ValueOperand(ToRegister(ins->getOperand(pos)));
}

ValueOperand
CodeGeneratorX64::ToOutValue(LInstruction* ins)
{
    return ValueOperand(ToRegister(ins->getDef(0)));
}

ValueOperand
CodeGeneratorX64::ToTempValue(LInstruction* ins, size_t pos)
{
    return ValueOperand(ToRegister(ins->getTemp(pos)));
}

Operand
CodeGeneratorX64::ToOperand64(const LInt64Allocation& a64)
{
    const LAllocation& a = a64.value();
    MOZ_ASSERT(!a.isFloatReg());
    if (a.isGeneralReg())
        return Operand(a.toGeneralReg()->reg());
    return Operand(masm.getStackPointer(), ToStackOffset(a));
}

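// Frame size classes are unused on x64: frames are always sized exactly, so
// FromDepth returns None and frameSize() should never be reached.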
FrameSizeClass
FrameSizeClass::FromDepth(uint32_t frameDepth)
{
    return FrameSizeClass::None();
}

FrameSizeClass
FrameSizeClass::ClassLimit()
{
    return FrameSizeClass(0);
}

uint32_t
FrameSizeClass::frameSize() const
{
    MOZ_CRASH("x64 does not use frame size classes");
}

void
CodeGeneratorX64::visitValue(LValue* value)
{
    LDefinition* reg = value->getDef(0);
    masm.moveValue(value->value(), ToRegister(reg));
}

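// x64 Values are NaN-boxed in a single 64-bit word: a double is represented
// directly by its bits, so boxing a double is just a bit-move (vmovq) into
// the result register, while other types get a type tag folded into the
// upper bits by boxValue. Float32 is widened to double first, since there
// is no boxed float32 representation.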
void
CodeGeneratorX64::visitBox(LBox* box)
{
    const LAllocation* in = box->getOperand(0);
    const LDefinition* result = box->getDef(0);

    if (IsFloatingPointType(box->type())) {
        ScratchDoubleScope scratch(masm);
        FloatRegister reg = ToFloatRegister(in);
        if (box->type() == MIRType::Float32) {
            masm.convertFloat32ToDouble(reg, scratch);
            reg = scratch;
        }
        masm.vmovq(reg, ToRegister(result));
    } else {
        masm.boxValue(ValueTypeFromMIRType(box->type()), ToRegister(in), ToRegister(result));
    }
}

void
CodeGeneratorX64::visitUnbox(LUnbox* unbox)
{
    MUnbox* mir = unbox->mir();

    if (mir->fallible()) {
        const ValueOperand value = ToValue(unbox, LUnbox::Input);
        Assembler::Condition cond;
        switch (mir->type()) {
          case MIRType::Int32:
            cond = masm.testInt32(Assembler::NotEqual, value);
            break;
          case MIRType::Boolean:
            cond = masm.testBoolean(Assembler::NotEqual, value);
            break;
          case MIRType::Object:
            cond = masm.testObject(Assembler::NotEqual, value);
            break;
          case MIRType::String:
            cond = masm.testString(Assembler::NotEqual, value);
            break;
          case MIRType::Symbol:
            cond = masm.testSymbol(Assembler::NotEqual, value);
            break;
          default:
            MOZ_CRASH("Given MIRType cannot be unboxed.");
        }
        bailoutIf(cond, unbox->snapshot());
    }

    Operand input = ToOperand(unbox->getOperand(LUnbox::Input));
    Register result = ToRegister(unbox->output());
    switch (mir->type()) {
      case MIRType::Int32:
        masm.unboxInt32(input, result);
        break;
      case MIRType::Boolean:
        masm.unboxBoolean(input, result);
        break;
      case MIRType::Object:
        masm.unboxObject(input, result);
        break;
      case MIRType::String:
        masm.unboxString(input, result);
        break;
      case MIRType::Symbol:
        masm.unboxSymbol(input, result);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
}

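// Strict (in)equality against a known boolean: box the boolean rhs and
// compare the two 64-bit value words directly. This is only valid for
// JSOP_STRICTEQ/JSOP_STRICTNE, where no coercion may happen.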
void
CodeGeneratorX64::visitCompareB(LCompareB* lir)
{
    MCompare* mir = lir->mir();

    const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
    const LAllocation* rhs = lir->rhs();
    const Register output = ToRegister(lir->output());

    MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    // Load boxed boolean in ScratchReg.
    ScratchRegisterScope scratch(masm);
    if (rhs->isConstant())
        masm.moveValue(rhs->toConstant()->toJSValue(), scratch);
    else
        masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);

    // Perform the comparison.
    masm.cmpPtr(lhs.valueReg(), scratch);
    masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
}

void
CodeGeneratorX64::visitCompareBAndBranch(LCompareBAndBranch* lir)
{
    MCompare* mir = lir->cmpMir();

    const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
    const LAllocation* rhs = lir->rhs();

    MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    // Load boxed boolean in ScratchReg.
    ScratchRegisterScope scratch(masm);
    if (rhs->isConstant())
        masm.moveValue(rhs->toConstant()->toJSValue(), scratch);
    else
        masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);

    // Perform the comparison.
    masm.cmpPtr(lhs.valueReg(), scratch);
    emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
}

void
CodeGeneratorX64::visitCompareBitwise(LCompareBitwise* lir)
{
    MCompare* mir = lir->mir();
    const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
    const Register output = ToRegister(lir->output());

    MOZ_ASSERT(IsEqualityOp(mir->jsop()));

    masm.cmpPtr(lhs.valueReg(), rhs.valueReg());
    masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
}

void
CodeGeneratorX64::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
{
    MCompare* mir = lir->cmpMir();

    const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);

    MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
               mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

    masm.cmpPtr(lhs.valueReg(), rhs.valueReg());
    emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
}

void
CodeGeneratorX64::visitCompareI64(LCompareI64* lir)
{
    MCompare* mir = lir->mir();
    MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
               mir->compareType() == MCompare::Compare_UInt64);

    const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
    const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
    Register lhsReg = ToRegister64(lhs).reg;
    Register output = ToRegister(lir->output());

    if (IsConstant(rhs))
        masm.cmpPtr(lhsReg, ImmWord(ToInt64(rhs)));
    else
        masm.cmpPtr(lhsReg, ToOperand64(rhs));

    bool isSigned = mir->compareType() == MCompare::Compare_Int64;
    masm.emitSet(JSOpToCondition(lir->jsop(), isSigned), output);
}

void
CodeGeneratorX64::visitCompareI64AndBranch(LCompareI64AndBranch* lir)
{
    MCompare* mir = lir->cmpMir();
    MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
               mir->compareType() == MCompare::Compare_UInt64);

    LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
    LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
    Register lhsReg = ToRegister64(lhs).reg;

    if (IsConstant(rhs))
        masm.cmpPtr(lhsReg, ImmWord(ToInt64(rhs)));
    else
        masm.cmpPtr(lhsReg, ToOperand64(rhs));

    bool isSigned = mir->compareType() == MCompare::Compare_Int64;
    emitBranch(JSOpToCondition(lir->jsop(), isSigned), lir->ifTrue(), lir->ifFalse());
}

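// 64-bit wasm div/mod. Division by zero and INT64_MIN / -1 must trap (and
// INT64_MIN % -1 must produce 0), so both cases are checked explicitly
// before the hardware divide, which would otherwise fault (#DE). The
// emitted sequence is roughly: mov lhs, rax; cqo; idivq rhs.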
void
CodeGeneratorX64::visitDivOrModI64(LDivOrModI64* lir)
{
    Register lhs = ToRegister(lir->lhs());
    Register rhs = ToRegister(lir->rhs());
    Register output = ToRegister(lir->output());

    MOZ_ASSERT_IF(lhs != rhs, rhs != rax);
    MOZ_ASSERT(rhs != rdx);
    MOZ_ASSERT_IF(output == rax, ToRegister(lir->remainder()) == rdx);
    MOZ_ASSERT_IF(output == rdx, ToRegister(lir->remainder()) == rax);

    Label done;

    // Put the lhs in rax.
    if (lhs != rax)
        masm.mov(lhs, rax);

    // Handle divide by zero.
    if (lir->canBeDivideByZero()) {
        masm.branchTestPtr(Assembler::Zero, rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero));
    }

    // Handle an integer overflow exception from INT64_MIN / -1.
    if (lir->canBeNegativeOverflow()) {
        Label notmin;
        masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notmin);
        masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notmin);
        if (lir->mir()->isMod())
            masm.xorl(output, output);
        else
            masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
        masm.jump(&done);
        masm.bind(&notmin);
    }

    // Sign extend the lhs into rdx to make rdx:rax.
    masm.cqo();
    masm.idivq(rhs);
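    // idivq computes rdx:rax / rhs, leaving the quotient in rax and the
    // remainder in rdx; the output register was pinned to match above.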

    masm.bind(&done);
}

void
CodeGeneratorX64::visitUDivOrModI64(LUDivOrModI64* lir)
{
    Register lhs = ToRegister(lir->lhs());
    Register rhs = ToRegister(lir->rhs());

    DebugOnly<Register> output = ToRegister(lir->output());
    MOZ_ASSERT_IF(lhs != rhs, rhs != rax);
    MOZ_ASSERT(rhs != rdx);
    MOZ_ASSERT_IF(output.value == rax, ToRegister(lir->remainder()) == rdx);
    MOZ_ASSERT_IF(output.value == rdx, ToRegister(lir->remainder()) == rax);

    // Put the lhs in rax.
    if (lhs != rax)
        masm.mov(lhs, rax);

    Label done;

    // Prevent divide by zero.
    if (lir->canBeDivideByZero())
        masm.branchTestPtr(Assembler::Zero, rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero));

    // Zero extend the lhs into rdx to make (rdx:rax).
    masm.xorl(rdx, rdx);
    masm.udivq(rhs);
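    // udivq computes rdx:rax / rhs unsigned, leaving the quotient in rax
    // and the remainder in rdx.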

    masm.bind(&done);
}

void
CodeGeneratorX64::visitWasmSelectI64(LWasmSelectI64* lir)
{
    MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);

    Register cond = ToRegister(lir->condExpr());

    Operand falseExpr = ToOperandOrRegister64(lir->falseExpr());

    Register64 out = ToOutRegister64(lir);
    MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out, "true expr is reused for input");

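    // The output already holds trueExpr; a conditional move replaces it
    // with falseExpr when cond is zero, avoiding a branch.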
    masm.test32(cond, cond);
    masm.cmovzq(falseExpr, out.reg);
}

void
CodeGeneratorX64::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir)
{
    MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
    MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
    masm.vmovq(ToRegister(lir->input()), ToFloatRegister(lir->output()));
}

void
CodeGeneratorX64::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir)
{
    MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
    MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
    masm.vmovq(ToFloatRegister(lir->input()), ToRegister(lir->output()));
}

void
CodeGeneratorX64::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir)
{
    masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
}

void
CodeGeneratorX64::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir)
{
    masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
}

void
CodeGeneratorX64::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
{
    MOZ_CRASH("NYI");
}

void
CodeGeneratorX64::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins)
{
    MOZ_CRASH("NYI");
}

void
CodeGeneratorX64::visitWasmCall(LWasmCall* ins)
{
    emitWasmCallBase(ins);
}

void
CodeGeneratorX64::visitWasmCallI64(LWasmCallI64* ins)
{
    emitWasmCallBase(ins);
}

void
CodeGeneratorX64::wasmStore(const wasm::MemoryAccessDesc& access, const LAllocation* value,
                            Operand dstAddr)
{
    if (value->isConstant()) {
        MOZ_ASSERT(!access.isSimd());

        masm.memoryBarrier(access.barrierBefore());

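        // An Int64 constant is only expected here for a narrowing store
        // (i64.store8/16/32), so truncating it to an Imm32 is fine; x86 has
        // no store of a full 64-bit immediate, so other Int64 stores go
        // through a register (note Scalar::Int64 crashes below).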
        const MConstant* mir = value->toConstant();
        Imm32 cst = Imm32(mir->type() == MIRType::Int32 ? mir->toInt32() : mir->toInt64());

        size_t storeOffset = masm.size();
        switch (access.type()) {
          case Scalar::Int8:
          case Scalar::Uint8:
            masm.movb(cst, dstAddr);
            break;
          case Scalar::Int16:
          case Scalar::Uint16:
            masm.movw(cst, dstAddr);
            break;
          case Scalar::Int32:
          case Scalar::Uint32:
            masm.movl(cst, dstAddr);
            break;
          case Scalar::Int64:
          case Scalar::Float32:
          case Scalar::Float64:
          case Scalar::Float32x4:
          case Scalar::Int8x16:
          case Scalar::Int16x8:
          case Scalar::Int32x4:
          case Scalar::Uint8Clamped:
          case Scalar::MaxTypedArrayViewType:
            MOZ_CRASH("unexpected array type");
        }
        masm.append(access, storeOffset, masm.framePushed());

        masm.memoryBarrier(access.barrierAfter());
    } else {
        masm.wasmStore(access, ToAnyRegister(value), dstAddr);
    }
}

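// Wasm heap accesses on x64 rely on the huge-memory reservation: offsets
// below wasm::OffsetGuardLimit cannot escape the guard pages, so no
// explicit bounds check is emitted. A bogus pointer allocation means the
// index was a constant that has been folded into the offset.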
template <typename T>
void
CodeGeneratorX64::emitWasmLoad(T* ins)
{
    const MWasmLoad* mir = ins->mir();

    uint32_t offset = mir->access().offset();
    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

    const LAllocation* ptr = ins->ptr();
    Operand srcAddr = ptr->isBogus()
                      ? Operand(HeapReg, offset)
                      : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);

    if (mir->type() == MIRType::Int64)
        masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
    else
        masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
}

void
CodeGeneratorX64::visitWasmLoad(LWasmLoad* ins)
{
    emitWasmLoad(ins);
}

void
CodeGeneratorX64::visitWasmLoadI64(LWasmLoadI64* ins)
{
    emitWasmLoad(ins);
}

template <typename T>
void
CodeGeneratorX64::emitWasmStore(T* ins)
{
    const MWasmStore* mir = ins->mir();

    uint32_t offset = mir->access().offset();
    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

    const LAllocation* value = ins->getOperand(ins->ValueIndex);
    const LAllocation* ptr = ins->ptr();
    Operand dstAddr = ptr->isBogus()
                      ? Operand(HeapReg, offset)
                      : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);

    wasmStore(mir->access(), value, dstAddr);
}

void
CodeGeneratorX64::visitWasmStore(LWasmStore* ins)
{
    emitWasmStore(ins);
}

void
CodeGeneratorX64::visitWasmStoreI64(LWasmStoreI64* ins)
{
    emitWasmStore(ins);
}

void
CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
{
    const MAsmJSLoadHeap* mir = ins->mir();
    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);

    const LAllocation* ptr = ins->ptr();
    const LDefinition* out = ins->output();

    Scalar::Type accessType = mir->access().type();
    MOZ_ASSERT(!Scalar::isSimdType(accessType));

    Operand srcAddr = ptr->isBogus()
                      ? Operand(HeapReg, mir->offset())
                      : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());

    uint32_t before = masm.size();
    masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(out));
    uint32_t after = masm.size();
    verifyLoadDisassembly(before, after, accessType, srcAddr, *out->output());
}

void
CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
{
    const MAsmJSStoreHeap* mir = ins->mir();
    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);

    const LAllocation* ptr = ins->ptr();
    const LAllocation* value = ins->value();

    Scalar::Type accessType = mir->access().type();
    MOZ_ASSERT(!Scalar::isSimdType(accessType));

    canonicalizeIfDeterministic(accessType, value);

    Operand dstAddr = ptr->isBogus()
                      ? Operand(HeapReg, mir->offset())
                      : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());

    uint32_t before = masm.size();
    wasmStore(mir->access(), value, dstAddr);
    uint32_t after = masm.size();
    verifyStoreDisassembly(before, after, accessType, dstAddr, *value);
}

void
CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
{
    MAsmJSCompareExchangeHeap* mir = ins->mir();
    MOZ_ASSERT(mir->access().offset() == 0);

    Register ptr = ToRegister(ins->ptr());
    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    Scalar::Type accessType = mir->access().type();
    BaseIndex srcAddr(HeapReg, ptr, TimesOne);

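    // A Uint32 access is performed as Int32: the atomic operation itself is
    // bit-identical, and asm.js coerces the result back to int32 anyway.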
    masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        srcAddr,
                                        oldval,
                                        newval,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
}

void
CodeGeneratorX64::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
{
    MAsmJSAtomicExchangeHeap* mir = ins->mir();
    MOZ_ASSERT(mir->access().offset() == 0);

    Register ptr = ToRegister(ins->ptr());
    Register value = ToRegister(ins->value());
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    Scalar::Type accessType = mir->access().type();
    MOZ_ASSERT(accessType <= Scalar::Uint32);

    BaseIndex srcAddr(HeapReg, ptr, TimesOne);

    masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                       srcAddr,
                                       value,
                                       InvalidReg,
                                       ToAnyRegister(ins->output()));
}

void
CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
{
    MAsmJSAtomicBinopHeap* mir = ins->mir();
    MOZ_ASSERT(mir->access().offset() == 0);
    MOZ_ASSERT(mir->hasUses());

    Register ptr = ToRegister(ins->ptr());
    const LAllocation* value = ins->value();
    Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
    AnyRegister output = ToAnyRegister(ins->output());
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    Scalar::Type accessType = mir->access().type();
    if (accessType == Scalar::Uint32)
        accessType = Scalar::Int32;

    AtomicOp op = mir->operation();
    BaseIndex srcAddr(HeapReg, ptr, TimesOne);

    if (value->isConstant()) {
        atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr, temp, InvalidReg,
                                   output);
    } else {
        atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr, temp, InvalidReg,
                                   output);
    }
}

void
CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
{
    MAsmJSAtomicBinopHeap* mir = ins->mir();
    MOZ_ASSERT(mir->access().offset() == 0);
    MOZ_ASSERT(!mir->hasUses());

    Register ptr = ToRegister(ins->ptr());
    const LAllocation* value = ins->value();
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    Scalar::Type accessType = mir->access().type();
    AtomicOp op = mir->operation();

    BaseIndex srcAddr(HeapReg, ptr, TimesOne);

    if (value->isConstant())
        atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);
    else
        atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr);
}

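// Wasm globals are addressed rip-relative: the masm records a CodeOffset
// for each access, and the GlobalAccess entry appended below is patched at
// link time to point into the module's global data section.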
void
CodeGeneratorX64::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
{
    MWasmLoadGlobalVar* mir = ins->mir();

    MIRType type = mir->type();
    MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));

    CodeOffset label;
    switch (type) {
      case MIRType::Int32:
        label = masm.loadRipRelativeInt32(ToRegister(ins->output()));
        break;
      case MIRType::Float32:
        label = masm.loadRipRelativeFloat32(ToFloatRegister(ins->output()));
        break;
      case MIRType::Double:
        label = masm.loadRipRelativeDouble(ToFloatRegister(ins->output()));
        break;
      // Aligned access: code is aligned on PageSize + there is padding
      // before the global data section.
      case MIRType::Int8x16:
      case MIRType::Int16x8:
      case MIRType::Int32x4:
      case MIRType::Bool8x16:
      case MIRType::Bool16x8:
      case MIRType::Bool32x4:
        label = masm.loadRipRelativeInt32x4(ToFloatRegister(ins->output()));
        break;
      case MIRType::Float32x4:
        label = masm.loadRipRelativeFloat32x4(ToFloatRegister(ins->output()));
        break;
      default:
        MOZ_CRASH("unexpected type in visitWasmLoadGlobalVar");
    }

    masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
}

void
CodeGeneratorX64::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
{
    MWasmLoadGlobalVar* mir = ins->mir();
    MOZ_ASSERT(mir->type() == MIRType::Int64);
    CodeOffset label = masm.loadRipRelativeInt64(ToRegister(ins->output()));
    masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
}

void
CodeGeneratorX64::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
{
    MWasmStoreGlobalVar* mir = ins->mir();

    MIRType type = mir->value()->type();
    MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));

    CodeOffset label;
    switch (type) {
      case MIRType::Int32:
        label = masm.storeRipRelativeInt32(ToRegister(ins->value()));
        break;
      case MIRType::Float32:
        label = masm.storeRipRelativeFloat32(ToFloatRegister(ins->value()));
        break;
      case MIRType::Double:
        label = masm.storeRipRelativeDouble(ToFloatRegister(ins->value()));
        break;
      // Aligned access: code is aligned on PageSize + there is padding
      // before the global data section.
      case MIRType::Int32x4:
      case MIRType::Bool32x4:
        label = masm.storeRipRelativeInt32x4(ToFloatRegister(ins->value()));
        break;
      case MIRType::Float32x4:
        label = masm.storeRipRelativeFloat32x4(ToFloatRegister(ins->value()));
        break;
      default:
        MOZ_CRASH("unexpected type in visitWasmStoreGlobalVar");
    }

    masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
}

void
CodeGeneratorX64::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
{
    MWasmStoreGlobalVar* mir = ins->mir();
    MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
    Register value = ToRegister(ins->getOperand(LWasmStoreGlobalVarI64::InputIndex));
    CodeOffset label = masm.storeRipRelativeInt64(value);
    masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
}

void
CodeGeneratorX64::visitTruncateDToInt32(LTruncateDToInt32* ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    Register output = ToRegister(ins->output());

    // On x64, branchTruncateDouble uses vcvttsd2sq. Unlike the x86
    // implementation, this should handle most doubles and we can just
    // call a stub if it fails.
    emitTruncateDouble(input, output, ins->mir());
}

void
CodeGeneratorX64::visitTruncateFToInt32(LTruncateFToInt32* ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    Register output = ToRegister(ins->output());

    // On x64, branchTruncateFloat32 uses vcvttss2sq. Unlike the x86
    // implementation, this should handle most floats and we can just
    // call a stub if it fails.
    emitTruncateFloat32(input, output, ins->mir());
}

void
CodeGeneratorX64::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir)
{
    const LAllocation* input = lir->getOperand(0);
    Register output = ToRegister(lir->output());

    if (lir->mir()->bottomHalf())
        masm.movl(ToOperand(input), output);
    else
        MOZ_CRASH("Not implemented.");
}

void
CodeGeneratorX64::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir)
{
    const LAllocation* input = lir->getOperand(0);
    Register output = ToRegister(lir->output());

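    // A 32-bit mov implicitly zeroes the upper 32 bits of its destination
    // on x64, so movl implements the unsigned extension; movslq sign
    // extends for the signed case.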
    if (lir->mir()->isUnsigned())
        masm.movl(ToOperand(input), output);
    else
        masm.movslq(ToOperand(input), output);
}

void
CodeGeneratorX64::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    Register64 output = ToOutRegister64(lir);

    MWasmTruncateToInt64* mir = lir->mir();
    MIRType inputType = mir->input()->type();

    MOZ_ASSERT(inputType == MIRType::Double || inputType == MIRType::Float32);

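    // The out-of-line path is taken when the conversion fails, i.e. for NaN
    // or inputs outside the int64/uint64 range; it reports a wasm trap.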
    auto* ool = new(alloc()) OutOfLineWasmTruncateCheck(mir, input);
    addOutOfLineCode(ool, mir);

    FloatRegister temp = mir->isUnsigned() ? ToFloatRegister(lir->temp()) : InvalidFloatReg;

    Label* oolEntry = ool->entry();
    Label* oolRejoin = ool->rejoin();
    if (inputType == MIRType::Double) {
        if (mir->isUnsigned())
            masm.wasmTruncateDoubleToUInt64(input, output, oolEntry, oolRejoin, temp);
        else
            masm.wasmTruncateDoubleToInt64(input, output, oolEntry, oolRejoin, temp);
    } else {
        if (mir->isUnsigned())
            masm.wasmTruncateFloat32ToUInt64(input, output, oolEntry, oolRejoin, temp);
        else
            masm.wasmTruncateFloat32ToInt64(input, output, oolEntry, oolRejoin, temp);
    }
}

void
CodeGeneratorX64::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir)
{
    Register64 input = ToRegister64(lir->getInt64Operand(0));
    FloatRegister output = ToFloatRegister(lir->output());

    MIRType outputType = lir->mir()->type();
    MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);

    if (outputType == MIRType::Double) {
        if (lir->mir()->isUnsigned())
            masm.convertUInt64ToDouble(input, output, Register::Invalid());
        else
            masm.convertInt64ToDouble(input, output);
    } else {
        if (lir->mir()->isUnsigned())
            masm.convertUInt64ToFloat32(input, output, Register::Invalid());
        else
            masm.convertInt64ToFloat32(input, output);
    }
}

void
CodeGeneratorX64::visitNotI64(LNotI64* lir)
{
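    // A 64-bit value is "false" exactly when all of its bits are zero.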
    masm.cmpq(Imm32(0), ToRegister(lir->input()));
    masm.emitSet(Assembler::Equal, ToRegister(lir->output()));
}

void
CodeGeneratorX64::visitClzI64(LClzI64* lir)
{
    Register64 input = ToRegister64(lir->getInt64Operand(0));
    Register64 output = ToOutRegister64(lir);
    masm.clz64(input, output.reg);
}

void
CodeGeneratorX64::visitCtzI64(LCtzI64* lir)
{
    Register64 input = ToRegister64(lir->getInt64Operand(0));
    Register64 output = ToOutRegister64(lir);
    masm.ctz64(input, output.reg);
}

void
CodeGeneratorX64::visitTestI64AndBranch(LTestI64AndBranch* lir)
{
    Register input = ToRegister(lir->input());
    masm.testq(input, input);
    emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
}