/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x64/Lowering-x64.h"

#include "jit/Lowering.h"
#include "jit/MIR.h"
#include "jit/x64/Assembler-x64.h"

#include "jit/shared/Lowering-shared-inl.h"

using namespace js;
using namespace js::jit;

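// On x64, JS Values use the single-word (punboxed) representation: tag and
// payload share one 64-bit register. Only reg1 is therefore needed here; the
// unnamed second Register parameter exists to match the two-register
// interface used on 32-bit platforms.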
LBoxAllocation LIRGeneratorX64::useBoxFixed(MDefinition* mir, Register reg1,
                                            Register, bool useAtStart) {
  MOZ_ASSERT(mir->type() == MIRType::Value);

  ensureDefined(mir);
  return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
}

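// Every general-purpose register on x64 has a byte-addressable form (via REX
// prefixes), so unlike x86 there is no special byte-register class and these
// helpers simply defer to the generic allocation functions.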
LAllocation LIRGeneratorX64::useByteOpRegister(MDefinition* mir) {
  return useRegister(mir);
}

LAllocation LIRGeneratorX64::useByteOpRegisterAtStart(MDefinition* mir) {
  return useRegisterAtStart(mir);
}

LAllocation LIRGeneratorX64::useByteOpRegisterOrNonDoubleConstant(
    MDefinition* mir) {
  return useRegisterOrNonDoubleConstant(mir);
}

LDefinition LIRGeneratorX64::tempByteOpRegister() { return temp(); }

LDefinition LIRGeneratorX64::tempToUnbox() { return temp(); }

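// x64 ALU instructions are two-operand, read-modify-write: the left operand
// register is overwritten with the result, so the output is defined to reuse
// input 0. When lhs and rhs are known to be distinct LIR nodes, rhs is given
// a plain (not at-start) use so that the register allocator won't place it
// in the register that the reused output overwrites.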
void LIRGeneratorX64::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
                                         ? useInt64OrConstant(rhs)
                                         : useInt64OrConstantAtStart(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}

void LIRGeneratorX64::lowerForMulInt64(LMulI64* ins, MMul* mir,
                                       MDefinition* lhs, MDefinition* rhs) {
  // X64 doesn't need a temp for 64-bit multiplication.
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
                                         ? useInt64OrConstant(rhs)
                                         : useInt64OrConstantAtStart(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}

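// Boxing on x64 combines the type tag (in the upper bits) and the payload
// into a single 64-bit word, so a boxed Value is defined as one BOX-typed
// register rather than a type/payload pair.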
void LIRGenerator::visitBox(MBox* box) {
  MDefinition* opd = box->getOperand(0);

  // If the operand is a constant, emit near its uses.
  if (opd->isConstant() && box->canEmitAtUses()) {
    emitAtUses(box);
    return;
  }

  if (opd->isConstant()) {
    define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
           LDefinition(LDefinition::BOX));
  } else {
    LBox* ins = new (alloc()) LBox(useRegister(opd), opd->type());
    define(ins, box, LDefinition(LDefinition::BOX));
  }
}

void LIRGenerator::visitUnbox(MUnbox* unbox) {
  MDefinition* box = unbox->getOperand(0);
  MOZ_ASSERT(box->type() == MIRType::Value);

  LUnboxBase* lir;
  if (IsFloatingPointType(unbox->type())) {
    lir = new (alloc())
        LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
  } else if (unbox->fallible()) {
    // If the unbox is fallible, load the Value in a register first to
    // avoid multiple loads.
    lir = new (alloc()) LUnbox(useRegisterAtStart(box));
  } else {
    lir = new (alloc()) LUnbox(useAtStart(box));
  }

  if (unbox->fallible()) {
    assignSnapshot(lir, unbox->bailoutKind());
  }

  define(lir, unbox);
}

void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
  MOZ_ASSERT(opd->type() == MIRType::Value);

  LReturn* ins = new (alloc()) LReturn(isGenerator);
  ins->setOperand(0, useFixed(opd, JSReturnReg));
  add(ins);
}

void LIRGeneratorX64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
                                           LBlock* block, size_t lirIndex) {
  lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
}

void LIRGeneratorX64::defineInt64Phi(MPhi* phi, size_t lirIndex) {
  defineTypedPhi(phi, lirIndex);
}

void LIRGeneratorX64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
                                         LBlock* block, size_t lirIndex) {
  lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
}

void LIRGenerator::visitCompareExchangeTypedArrayElement(
    MCompareExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LUse oldval = useRegister(ins->oldval());
    LUse newval = useRegister(ins->newval());
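    // LOCK CMPXCHG takes its compare value in, and returns the old memory
    // value through, rax; hence the first temp is pinned there.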
    LInt64Definition temp1 = tempInt64Fixed(Register64(rax));
    LInt64Definition temp2 = tempInt64();

    auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
        elements, index, oldval, newval, temp1, temp2);
    define(lir, ins);
    assignSafepoint(lir, ins);
    return;
  }

  lowerCompareExchangeTypedArrayElement(ins,
                                        /* useI386ByteRegisters = */ false);
}

void LIRGenerator::visitAtomicExchangeTypedArrayElement(
    MAtomicExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LAllocation value = useRegister(ins->value());
    LInt64Definition temp1 = tempInt64();
    LDefinition temp2 = temp();

    auto* lir = new (alloc()) LAtomicExchangeTypedArrayElement64(
        elements, index, value, temp1, temp2);
    define(lir, ins);
    assignSafepoint(lir, ins);
    return;
  }

  lowerAtomicExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ false);
}

void LIRGenerator::visitAtomicTypedArrayElementBinop(
    MAtomicTypedArrayElementBinop* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LAllocation value = useRegister(ins->value());

    // Case 1: the result of the operation is not used.
    //
    // We can omit allocating the result BigInt.

    if (ins->isForEffect()) {
      LInt64Definition temp = tempInt64();

      auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
          elements, index, value, temp);
      add(lir, ins);
      return;
    }

    // Case 2: the result of the operation is used.
    //
    // For ADD and SUB we'll use XADD.
    //
    // For AND/OR/XOR we need to use a CMPXCHG loop with rax as a temp register.

    bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
                   ins->operation() == AtomicFetchSubOp);

    LInt64Definition temp1 = tempInt64();
    LInt64Definition temp2;
    if (bitOp) {
      temp2 = tempInt64Fixed(Register64(rax));
    } else {
      temp2 = tempInt64();
    }

    auto* lir = new (alloc())
        LAtomicTypedArrayElementBinop64(elements, index, value, temp1, temp2);
    define(lir, ins);
    assignSafepoint(lir, ins);
    return;
  }

  lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ false);
}

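// On x64, aligned 64-bit loads and stores are already atomic with a plain
// MOV, so no LOCK-prefixed sequence is needed for the memory access itself;
// the temps below support materializing the result as a BigInt.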
void LIRGeneratorX64::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->storageType());

  auto* lir = new (alloc()) LAtomicLoad64(elements, index, temp(), tempInt64());
  define(lir, ins);
  assignSafepoint(lir, ins);
}

void LIRGeneratorX64::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
  LUse elements = useRegister(ins->elements());
  LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->writeType());
  LAllocation value = useRegister(ins->value());

  add(new (alloc()) LAtomicStore64(elements, index, value, tempInt64()), ins);
}

void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
  LWasmUint32ToDouble* lir =
      new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
  define(lir, ins);
}

void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
  LWasmUint32ToFloat32* lir =
      new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
  define(lir, ins);
}

void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
  auto* lir = new (alloc()) LWasmHeapBase(LAllocation());
  define(lir, ins);
}

void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  if (ins->type() != MIRType::Int64) {
    auto* lir = new (alloc()) LWasmLoad(useRegisterOrZeroAtStart(base));
    define(lir, ins);
    return;
  }

  auto* lir = new (alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base));
  defineInt64(lir, ins);
}

void LIRGenerator::visitWasmStore(MWasmStore* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* value = ins->value();
  LAllocation valueAlloc;
  switch (ins->access().type()) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      valueAlloc = useRegisterOrConstantAtStart(value);
      break;
    case Scalar::Int64:
      // No way to encode an int64-to-memory move on x64.
      if (value->isConstant() && value->type() != MIRType::Int64) {
        valueAlloc = useOrConstantAtStart(value);
      } else {
        valueAlloc = useRegisterAtStart(value);
      }
      break;
    case Scalar::Float32:
    case Scalar::Float64:
      valueAlloc = useRegisterAtStart(value);
      break;
    case Scalar::Simd128:
#ifdef ENABLE_WASM_SIMD
      valueAlloc = useRegisterAtStart(value);
      break;
#else
      MOZ_CRASH("unexpected array type");
#endif
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    case Scalar::Uint8Clamped:
    case Scalar::MaxTypedArrayViewType:
      MOZ_CRASH("unexpected array type");
  }

  LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
  auto* lir = new (alloc()) LWasmStore(baseAlloc, valueAlloc);
  add(lir, ins);
}

void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  // The output may not be used but will be clobbered regardless, so
  // pin the output to eax.
  //
  // The input values must both be in registers.
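  //
  // A sketch of the expected code (32-bit access shown):
  //
  //    movl          oldval, eax
  //    lock cmpxchgl newval, mem  ; compares eax with *mem; the old value
  //                               ; of *mem is left in eax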

  const LAllocation oldval = useRegister(ins->oldValue());
  const LAllocation newval = useRegister(ins->newValue());

  LWasmCompareExchangeHeap* lir =
      new (alloc()) LWasmCompareExchangeHeap(useRegister(base), oldval, newval);

  defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
}

void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
  MOZ_ASSERT(ins->base()->type() == MIRType::Int32);

  const LAllocation base = useRegister(ins->base());
  const LAllocation value = useRegister(ins->value());

  // The output may not be used but will be clobbered regardless,
  // so ignore the case where we're not using the value and just
  // use the output register as a temp.

  LWasmAtomicExchangeHeap* lir =
      new (alloc()) LWasmAtomicExchangeHeap(base, value);
  define(lir, ins);
}

void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  // No support for 64-bit operations with constants at the masm level.

  bool canTakeConstant = ins->access().type() != Scalar::Int64;

  // Case 1: the result of the operation is not used.
  //
  // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
  // LOCK OR, or LOCK XOR.

  if (!ins->hasUses()) {
    LAllocation value = canTakeConstant ? useRegisterOrConstant(ins->value())
                                        : useRegister(ins->value());
    LWasmAtomicBinopHeapForEffect* lir =
        new (alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base), value);
    add(lir, ins);
    return;
  }

  // Case 2: the result of the operation is used.
  //
  // For ADD and SUB we'll use XADD with word and byte ops as
  // appropriate.  Any output register can be used and if value is a
  // register it's best if it's the same as output:
  //
  //    movl       value, output  ; if value != output
  //    lock xaddl output, mem
  //
  // For AND/OR/XOR we need to use a CMPXCHG loop, and the output is
  // always in rax:
  //
  //    movl          *mem, rax
  // L: mov           rax, temp
  //    andl          value, temp
  //    lock cmpxchg  temp, mem  ; reads rax also
  //    jnz           L
  //    ; result in rax
  //
  // Note the placement of L: cmpxchg will update rax with *mem if
  // *mem does not have the expected value, so reloading it at the
  // top of the loop would be redundant.

  bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
                 ins->operation() == AtomicFetchSubOp);
  bool reuseInput = false;
  LAllocation value;

  if (bitOp || ins->value()->isConstant()) {
    value = canTakeConstant ? useRegisterOrConstant(ins->value())
                            : useRegister(ins->value());
  } else {
    reuseInput = true;
    value = useRegisterAtStart(ins->value());
  }

  auto* lir = new (alloc()) LWasmAtomicBinopHeap(
      useRegister(base), value, bitOp ? temp() : LDefinition::BogusTemp());

  if (reuseInput) {
    defineReuseInput(lir, ins, LWasmAtomicBinopHeap::valueOp);
  } else if (bitOp) {
    defineFixed(lir, ins, LAllocation(AnyRegister(rax)));
  } else {
    define(lir, ins);
  }
}

void LIRGenerator::visitSubstr(MSubstr* ins) {
  LSubstr* lir = new (alloc())
      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
              useRegister(ins->length()), temp(), temp(), tempByteOpRegister());
  define(lir, ins);
  assignSafepoint(lir, ins);
}

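// Signed 64-bit division on x64 uses the rdx:rax pair, roughly:
//
//    movq  lhs, rax
//    cqo              ; sign-extend rax into rdx
//    idivq rhs        ; quotient -> rax, remainder -> rdx
//
// which is why rdx is taken as a fixed temp and the quotient is defined in
// rax below (and the other way around in lowerModI64).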
void LIRGeneratorX64::lowerDivI64(MDiv* div) {
  if (div->isUnsigned()) {
    lowerUDivI64(div);
    return;
  }

  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
      useRegister(div->lhs()), useRegister(div->rhs()), tempFixed(rdx));
  defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
}

void LIRGeneratorX64::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
  MOZ_CRASH("We don't use runtime div for this architecture");
}

void LIRGeneratorX64::lowerModI64(MMod* mod) {
  if (mod->isUnsigned()) {
    lowerUModI64(mod);
    return;
  }

  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
      useRegister(mod->lhs()), useRegister(mod->rhs()), tempFixed(rax));
  defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
}

void LIRGeneratorX64::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
  MOZ_CRASH("We don't use runtime mod for this architecture");
}

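// The unsigned variants clear rdx (xor rdx, rdx) instead of sign-extending
// before dividing, but clobber the same rax/rdx pair, so the fixed
// allocations mirror the signed cases above.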
void LIRGeneratorX64::lowerUDivI64(MDiv* div) {
  LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
      useRegister(div->lhs()), useRegister(div->rhs()), tempFixed(rdx));
  defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
}

void LIRGeneratorX64::lowerUModI64(MMod* mod) {
  LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
      useRegister(mod->lhs()), useRegister(mod->rhs()), tempFixed(rax));
  defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
}

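// BigInt division funnels through the same rax/rdx machinery: pinning the
// temp to rax and the output to rdx keeps both registers that the division
// instruction clobbers away from other live values.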
void LIRGeneratorX64::lowerBigIntDiv(MBigIntDiv* ins) {
  auto* lir = new (alloc()) LBigIntDiv(
      useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(rax), temp());
  defineFixed(lir, ins, LAllocation(AnyRegister(rdx)));
  assignSafepoint(lir, ins);
}

void LIRGeneratorX64::lowerBigIntMod(MBigIntMod* ins) {
  auto* lir = new (alloc()) LBigIntMod(
      useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(rax), temp());
  defineFixed(lir, ins, LAllocation(AnyRegister(rdx)));
  assignSafepoint(lir, ins);
}

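// x64 only has signed float-to-int conversions (cvttsd2si and friends), so
// the unsigned case needs a scratch double: inputs at or above 2^63 are
// typically rebased by 2^63 before converting and compensated afterwards.
// The signed case needs no temp.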
void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);

  LDefinition maybeTemp =
      ins->isUnsigned() ? tempDouble() : LDefinition::BogusTemp();
  defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd), maybeTemp),
              ins);
}

void LIRGeneratorX64::lowerWasmBuiltinTruncateToInt64(
    MWasmBuiltinTruncateToInt64* ins) {
  MOZ_CRASH("We don't use runtime truncation for this architecture");
}

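// Likewise there is no unsigned int64-to-float instruction; a common
// sequence converts the value as signed and, when the top bit is set, halves
// the input (folding in the low bit for correct rounding) and doubles the
// result, which is what the extra temp supports.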
void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Int64);
  MOZ_ASSERT(IsFloatingPointType(ins->type()));

  LDefinition maybeTemp = ins->isUnsigned() ? temp() : LDefinition::BogusTemp();
  define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd), maybeTemp),
         ins);
}

void LIRGeneratorX64::lowerBuiltinInt64ToFloatingPoint(
    MBuiltinInt64ToFloatingPoint* ins) {
  MOZ_CRASH("We don't use runtime conversion for this architecture");
}

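// 32-bit operations on x64 implicitly zero the upper 32 bits of their
// destination register, so a zero-extension is just a movl and a sign
// extension a movslq; either way the input can be used at-start.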
void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
  defineInt64(new (alloc()) LExtendInt32ToInt64(useAtStart(ins->input())), ins);
}

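// Sign extension from 8, 16, or 32 bits within a 64-bit register maps
// directly onto movsbq/movswq/movslq.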
void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
  defineInt64(new (alloc())
                  LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
              ins);
}