/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/x64/Lowering-x64.h"
8
9 #include "jit/Lowering.h"
10 #include "jit/MIR.h"
11 #include "jit/x64/Assembler-x64.h"
12
13 #include "jit/shared/Lowering-shared-inl.h"
14
15 using namespace js;
16 using namespace js::jit;
17
// Allocate a fixed-register use for a boxed Value. On x64 a Value fits in a
// single 64-bit GPR, so only `reg1` is used; the second Register parameter
// exists for interface parity with 32-bit platforms and is ignored.
LBoxAllocation LIRGeneratorX64::useBoxFixed(MDefinition* mir, Register reg1,
                                            Register, bool useAtStart) {
  MOZ_ASSERT(mir->type() == MIRType::Value);

  ensureDefined(mir);
  return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
}
25
// Operand for a byte-sized operation. On x64 every GPR has a byte-addressable
// subregister, so no restriction beyond an ordinary register use is needed.
LAllocation LIRGeneratorX64::useByteOpRegister(MDefinition* mir) {
  return useRegister(mir);
}
29
// As useByteOpRegister, but the use is at-start (the operand may share a
// register with the output).
LAllocation LIRGeneratorX64::useByteOpRegisterAtStart(MDefinition* mir) {
  return useRegisterAtStart(mir);
}
33
// Byte-op operand that may also be encoded as a non-double constant; the
// x64 byte-register constraint is a no-op (see useByteOpRegister).
LAllocation LIRGeneratorX64::useByteOpRegisterOrNonDoubleConstant(
    MDefinition* mir) {
  return useRegisterOrNonDoubleConstant(mir);
}
38
// Temp usable in byte operations: any temp qualifies on x64 (unlike i386,
// which restricts byte ops to a register subset).
LDefinition LIRGeneratorX64::tempByteOpRegister() { return temp(); }
40
// Temp used while unboxing a Value; any GPR works on x64.
LDefinition LIRGeneratorX64::tempToUnbox() { return temp(); }
42
// Lower a unary 64-bit ALU op. x64 two-operand encodings overwrite their
// input, so the input is used at-start and the output reuses operand 0.
void LIRGeneratorX64::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
    MDefinition* input) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(input));
  defineInt64ReuseInput(ins, mir, 0);
}
49
// Lower a binary 64-bit ALU op. The lhs is reused as the output (two-operand
// x64 encoding). If lhs and rhs may end up as the same LIR node, the rhs must
// also be an at-start use so the allocator keeps the shared register coherent.
void LIRGeneratorX64::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
                                         ? useInt64OrConstant(rhs)
                                         : useInt64OrConstantAtStart(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}
59
// Lower a 64-bit multiply. Same operand policy as the binary ALU case: lhs is
// reused as the output, and rhs is at-start when it may share a LIR node with
// lhs.
void LIRGeneratorX64::lowerForMulInt64(LMulI64* ins, MMul* mir,
                                       MDefinition* lhs, MDefinition* rhs) {
  // X64 doesn't need a temp for 64bit multiplication.
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
                                         ? useInt64OrConstant(rhs)
                                         : useInt64OrConstantAtStart(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}
69
visitBox(MBox * box)70 void LIRGenerator::visitBox(MBox* box) {
71 MDefinition* opd = box->getOperand(0);
72
73 // If the operand is a constant, emit near its uses.
74 if (opd->isConstant() && box->canEmitAtUses()) {
75 emitAtUses(box);
76 return;
77 }
78
79 if (opd->isConstant()) {
80 define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
81 LDefinition(LDefinition::BOX));
82 } else {
83 LBox* ins = new (alloc()) LBox(useRegister(opd), opd->type());
84 define(ins, box, LDefinition(LDefinition::BOX));
85 }
86 }
87
visitUnbox(MUnbox * unbox)88 void LIRGenerator::visitUnbox(MUnbox* unbox) {
89 MDefinition* box = unbox->getOperand(0);
90 MOZ_ASSERT(box->type() == MIRType::Value);
91
92 LUnboxBase* lir;
93 if (IsFloatingPointType(unbox->type())) {
94 lir = new (alloc())
95 LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
96 } else if (unbox->fallible()) {
97 // If the unbox is fallible, load the Value in a register first to
98 // avoid multiple loads.
99 lir = new (alloc()) LUnbox(useRegisterAtStart(box));
100 } else {
101 lir = new (alloc()) LUnbox(useAtStart(box));
102 }
103
104 if (unbox->fallible()) {
105 assignSnapshot(lir, unbox->bailoutKind());
106 }
107
108 define(lir, unbox);
109 }
110
// Lower a return of a boxed Value; the result is passed in JSReturnReg.
void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
  MOZ_ASSERT(opd->type() == MIRType::Value);

  LReturn* ins = new (alloc()) LReturn(isGenerator);
  ins->setOperand(0, useFixed(opd, JSReturnReg));
  add(ins);
}
118
// On x64 a Value phi occupies a single LIR piece, so untyped phi inputs lower
// exactly like typed ones.
void LIRGeneratorX64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
                                           LBlock* block, size_t lirIndex) {
  lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
}
123
// An Int64 fits in one register on x64, so an Int64 phi is defined like any
// typed phi.
void LIRGeneratorX64::defineInt64Phi(MPhi* phi, size_t lirIndex) {
  defineTypedPhi(phi, lirIndex);
}
127
// Int64 phi inputs lower like typed phi inputs (single 64-bit piece on x64).
void LIRGeneratorX64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
                                         LBlock* block, size_t lirIndex) {
  lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
}
132
// Lower a typed-array compareExchange. BigInt64/BigUint64 elements take the
// dedicated 64-bit path; all other element types share the x86/x64 lowering
// (with no i386-style byte-register restriction on x64).
void LIRGenerator::visitCompareExchangeTypedArrayElement(
    MCompareExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LUse oldval = useRegister(ins->oldval());
    LUse newval = useRegister(ins->newval());
    // CMPXCHG implicitly reads/writes the expected value through rax, so the
    // first temp is pinned to it.
    LInt64Definition temp1 = tempInt64Fixed(Register64(rax));
    LInt64Definition temp2 = tempInt64();

    auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
        elements, index, oldval, newval, temp1, temp2);
    define(lir, ins);
    // NOTE(review): safepoint presumably covers allocation of the BigInt
    // result — confirm against the LIR op's codegen.
    assignSafepoint(lir, ins);
    return;
  }

  lowerCompareExchangeTypedArrayElement(ins,
                                        /* useI386ByteRegisters = */ false);
}
157
// Lower a typed-array atomic exchange. BigInt64/BigUint64 elements take the
// 64-bit path, which needs only ordinary temps (no fixed registers); other
// element types share the x86/x64 lowering.
void LIRGenerator::visitAtomicExchangeTypedArrayElement(
    MAtomicExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LAllocation value = useRegister(ins->value());
    LInt64Definition temp1 = tempInt64();
    LDefinition temp2 = temp();

    auto* lir = new (alloc()) LAtomicExchangeTypedArrayElement64(
        elements, index, value, temp1, temp2);
    define(lir, ins);
    // NOTE(review): safepoint presumably covers allocation of the BigInt
    // result — confirm.
    assignSafepoint(lir, ins);
    return;
  }

  lowerAtomicExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ false);
}
180
// Lower a typed-array atomic read-modify-write (add/sub/and/or/xor).
// BigInt64/BigUint64 elements take the 64-bit path below; all other element
// types share the x86/x64 lowering.
void LIRGenerator::visitAtomicTypedArrayElementBinop(
    MAtomicTypedArrayElementBinop* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LAllocation value = useRegister(ins->value());

    // Case 1: the result of the operation is not used.
    //
    // We can omit allocating the result BigInt.

    if (ins->isForEffect()) {
      LInt64Definition temp = tempInt64();

      auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
          elements, index, value, temp);
      add(lir, ins);
      return;
    }

    // Case 2: the result of the operation is used.
    //
    // For ADD and SUB we'll use XADD.
    //
    // For AND/OR/XOR we need to use a CMPXCHG loop with rax as a temp register.

    bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
                   ins->operation() == AtomicFetchSubOp);

    LInt64Definition temp1 = tempInt64();
    LInt64Definition temp2;
    if (bitOp) {
      // CMPXCHG implicitly uses rax, so pin the second temp to it.
      temp2 = tempInt64Fixed(Register64(rax));
    } else {
      temp2 = tempInt64();
    }

    auto* lir = new (alloc())
        LAtomicTypedArrayElementBinop64(elements, index, value, temp1, temp2);
    define(lir, ins);
    assignSafepoint(lir, ins);
    return;
  }

  lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ false);
}
231
// Lower a 64-bit atomic load from a typed-array element.
void LIRGeneratorX64::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->storageType());

  auto* lir = new (alloc()) LAtomicLoad64(elements, index, temp(), tempInt64());
  define(lir, ins);
  // NOTE(review): safepoint presumably covers allocation of the BigInt
  // result — confirm against LAtomicLoad64's codegen.
  assignSafepoint(lir, ins);
}
241
// Lower a 64-bit atomic store to a typed-array element. No output is defined;
// the instruction is added for effect only.
void LIRGeneratorX64::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
  LUse elements = useRegister(ins->elements());
  LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->writeType());
  LAllocation value = useRegister(ins->value());

  add(new (alloc()) LAtomicStore64(elements, index, value, tempInt64()), ins);
}
250
visitWasmUnsignedToDouble(MWasmUnsignedToDouble * ins)251 void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
252 MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
253 LWasmUint32ToDouble* lir =
254 new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
255 define(lir, ins);
256 }
257
visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32 * ins)258 void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
259 MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
260 LWasmUint32ToFloat32* lir =
261 new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
262 define(lir, ins);
263 }
264
// Lower a read of the wasm heap base. The LIR op takes a bogus (empty)
// allocation on x64.
void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
  auto* lir = new (alloc()) LWasmHeapBase(LAllocation());
  define(lir, ins);
}
269
visitWasmLoad(MWasmLoad * ins)270 void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
271 MDefinition* base = ins->base();
272 // 'base' is a GPR but may be of either type. If it is 32-bit it is
273 // zero-extended and can act as 64-bit.
274 MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
275
276 if (ins->type() != MIRType::Int64) {
277 auto* lir = new (alloc()) LWasmLoad(useRegisterOrZeroAtStart(base));
278 define(lir, ins);
279 return;
280 }
281
282 auto* lir = new (alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base));
283 defineInt64(lir, ins);
284 }
285
// Lower a wasm store. The allocation policy for the stored value depends on
// the access type; see the per-case comments.
void LIRGenerator::visitWasmStore(MWasmStore* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  MDefinition* value = ins->value();
  LAllocation valueAlloc;
  switch (ins->access().type()) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      // 32-bit-or-narrower integer stores may encode the value as an
      // immediate.
      valueAlloc = useRegisterOrConstantAtStart(value);
      break;
    case Scalar::Int64:
      // No way to encode an int64-to-memory move on x64.
      if (value->isConstant() && value->type() != MIRType::Int64) {
        valueAlloc = useOrConstantAtStart(value);
      } else {
        valueAlloc = useRegisterAtStart(value);
      }
      break;
    case Scalar::Float32:
    case Scalar::Float64:
      // Floating-point values must be in a register.
      valueAlloc = useRegisterAtStart(value);
      break;
    case Scalar::Simd128:
#ifdef ENABLE_WASM_SIMD
      valueAlloc = useRegisterAtStart(value);
      break;
#else
      MOZ_CRASH("unexpected array type");
#endif
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    case Scalar::Uint8Clamped:
    case Scalar::MaxTypedArrayViewType:
      MOZ_CRASH("unexpected array type");
  }

  LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
  auto* lir = new (alloc()) LWasmStore(baseAlloc, valueAlloc);
  add(lir, ins);
}
332
// Lower a wasm heap compareExchange. CMPXCHG implicitly uses eax/rax for the
// expected value and the result, so the output is pinned to eax.
void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  // The output may not be used but will be clobbered regardless, so
  // pin the output to eax.
  //
  // The input values must both be in registers.

  const LAllocation oldval = useRegister(ins->oldValue());
  const LAllocation newval = useRegister(ins->newValue());

  LWasmCompareExchangeHeap* lir =
      new (alloc()) LWasmCompareExchangeHeap(useRegister(base), oldval, newval);

  defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
}
351
// Lower a wasm heap atomic exchange (XCHG-style). No fixed output register is
// required.
void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(ins->base()->type() == MIRType::Int32 ||
             ins->base()->type() == MIRType::Int64);

  const LAllocation base = useRegister(ins->base());
  const LAllocation value = useRegister(ins->value());

  // The output may not be used but will be clobbered regardless,
  // so ignore the case where we're not using the value and just
  // use the output register as a temp.

  LWasmAtomicExchangeHeap* lir =
      new (alloc()) LWasmAtomicExchangeHeap(base, value);
  define(lir, ins);
}
368
// Lower a wasm heap atomic read-modify-write (add/sub/and/or/xor). The
// lowering depends on whether the result is used and on the operation (XADD
// vs. CMPXCHG loop); see the case comments below.
void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  // No support for 64-bit operations with constants at the masm level.

  bool canTakeConstant = ins->access().type() != Scalar::Int64;

  // Case 1: the result of the operation is not used.
  //
  // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
  // LOCK OR, or LOCK XOR.

  if (!ins->hasUses()) {
    LAllocation value = canTakeConstant ? useRegisterOrConstant(ins->value())
                                        : useRegister(ins->value());
    LWasmAtomicBinopHeapForEffect* lir =
        new (alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base), value);
    add(lir, ins);
    return;
  }

  // Case 2: the result of the operation is used.
  //
  // For ADD and SUB we'll use XADD with word and byte ops as
  // appropriate. Any output register can be used and if value is a
  // register it's best if it's the same as output:
  //
  // movl value, output ; if value != output
  // lock xaddl output, mem
  //
  // For AND/OR/XOR we need to use a CMPXCHG loop, and the output is
  // always in rax:
  //
  // movl *mem, rax
  // L: mov rax, temp
  // andl value, temp
  // lock cmpxchg temp, mem ; reads rax also
  // jnz L
  // ; result in rax
  //
  // Note the placement of L, cmpxchg will update rax with *mem if
  // *mem does not have the expected value, so reloading it at the
  // top of the loop would be redundant.

  bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
                 ins->operation() == AtomicFetchSubOp);
  bool reuseInput = false;
  LAllocation value;

  if (bitOp || ins->value()->isConstant()) {
    value = canTakeConstant ? useRegisterOrConstant(ins->value())
                            : useRegister(ins->value());
  } else {
    // XADD path with a register value: reuse it as the output (see the
    // xadd sketch above).
    reuseInput = true;
    value = useRegisterAtStart(ins->value());
  }

  auto* lir = new (alloc()) LWasmAtomicBinopHeap(
      useRegister(base), value, bitOp ? temp() : LDefinition::BogusTemp());

  if (reuseInput) {
    defineReuseInput(lir, ins, LWasmAtomicBinopHeap::valueOp);
  } else if (bitOp) {
    // CMPXCHG loop leaves the result in rax.
    defineFixed(lir, ins, LAllocation(AnyRegister(rax)));
  } else {
    define(lir, ins);
  }
}
439
// Lower a substring operation. Needs three temps, one of which must be usable
// in byte ops (a no-op constraint on x64; see tempByteOpRegister above).
// NOTE(review): the safepoint presumably covers allocation of the result
// string — confirm.
void LIRGenerator::visitSubstr(MSubstr* ins) {
  LSubstr* lir = new (alloc())
      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
              useRegister(ins->length()), temp(), temp(), tempByteOpRegister());
  define(lir, ins);
  assignSafepoint(lir, ins);
}
447
// Lower signed 64-bit division. x86-64 IDIV takes its dividend in rdx:rax and
// produces the quotient in rax with the remainder in rdx — hence the fixed
// rax output and the rdx temp.
void LIRGeneratorX64::lowerDivI64(MDiv* div) {
  if (div->isUnsigned()) {
    lowerUDivI64(div);
    return;
  }

  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
      useRegister(div->lhs()), useRegister(div->rhs()), tempFixed(rdx));
  defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
}
458
// x64 lowers 64-bit division inline (see lowerDivI64); there is no runtime
// (builtin) division path on this architecture.
void LIRGeneratorX64::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
  MOZ_CRASH("We don't use runtime div for this architecture");
}
462
// Lower signed 64-bit modulus. IDIV leaves the remainder in rdx, so the
// output is fixed to rdx and rax is reserved as a temp (it holds the
// quotient/dividend).
void LIRGeneratorX64::lowerModI64(MMod* mod) {
  if (mod->isUnsigned()) {
    lowerUModI64(mod);
    return;
  }

  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
      useRegister(mod->lhs()), useRegister(mod->rhs()), tempFixed(rax));
  defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
}
473
// x64 lowers 64-bit modulus inline (see lowerModI64); there is no runtime
// (builtin) modulus path on this architecture.
void LIRGeneratorX64::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
  MOZ_CRASH("We don't use runtime mod for this architecture");
}
477
// Lower unsigned 64-bit division: same register pinning as the signed case
// (quotient in rax, rdx clobbered).
void LIRGeneratorX64::lowerUDivI64(MDiv* div) {
  LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
      useRegister(div->lhs()), useRegister(div->rhs()), tempFixed(rdx));
  defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
}
483
// Lower unsigned 64-bit modulus: remainder in rdx, rax reserved as a temp.
void LIRGeneratorX64::lowerUModI64(MMod* mod) {
  LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
      useRegister(mod->lhs()), useRegister(mod->rhs()), tempFixed(rax));
  defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
}
489
// Lower BigInt division. The register pinning mirrors the machine division
// instruction (rax temp, result fixed in rdx). NOTE(review): the safepoint
// presumably covers allocation of the result BigInt — confirm.
void LIRGeneratorX64::lowerBigIntDiv(MBigIntDiv* ins) {
  auto* lir = new (alloc()) LBigIntDiv(
      useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(rax), temp());
  defineFixed(lir, ins, LAllocation(AnyRegister(rdx)));
  assignSafepoint(lir, ins);
}
496
// Lower BigInt modulus; same register pinning as lowerBigIntDiv (rax temp,
// result fixed in rdx).
void LIRGeneratorX64::lowerBigIntMod(MBigIntMod* ins) {
  auto* lir = new (alloc()) LBigIntMod(
      useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(rax), temp());
  defineFixed(lir, ins, LAllocation(AnyRegister(rdx)));
  assignSafepoint(lir, ins);
}
503
// Lower a wasm float/double -> int64 truncation. Only the unsigned variant
// needs a double temp; the signed variant truncates directly.
void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);

  LDefinition maybeTemp =
      ins->isUnsigned() ? tempDouble() : LDefinition::BogusTemp();
  defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd), maybeTemp),
              ins);
}
513
// x64 truncates to int64 inline (see visitWasmTruncateToInt64); no builtin
// call is used on this architecture.
void LIRGeneratorX64::lowerWasmBuiltinTruncateToInt64(
    MWasmBuiltinTruncateToInt64* ins) {
  MOZ_CRASH("We don't use it for this architecture");
}
518
// Lower an int64 -> float/double conversion. Only the unsigned variant needs
// a GPR temp; the signed variant converts directly.
void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Int64);
  MOZ_ASSERT(IsFloatingPointType(ins->type()));

  LDefinition maybeTemp = ins->isUnsigned() ? temp() : LDefinition::BogusTemp();
  define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd), maybeTemp),
         ins);
}
528
// x64 converts int64 to floating point inline (see visitInt64ToFloatingPoint);
// no builtin call is used on this architecture.
void LIRGeneratorX64::lowerBuiltinInt64ToFloatingPoint(
    MBuiltinInt64ToFloatingPoint* ins) {
  MOZ_CRASH("We don't use it for this architecture");
}
533
// Lower an int32 -> int64 extension; the input is used at-start.
void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
  defineInt64(new (alloc()) LExtendInt32ToInt64(useAtStart(ins->input())), ins);
}
537
// Lower an in-place int64 sign extension (e.g. wasm i64.extend8_s family);
// the input is an at-start int64 register use.
void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
  defineInt64(new (alloc())
                  LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
              ins);
}
543
544 // On x64 we specialize the cases: compare is {U,}Int{32,64}, and select is
545 // {U,}Int{32,64}, independently.
canSpecializeWasmCompareAndSelect(MCompare::CompareType compTy,MIRType insTy)546 bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
547 MCompare::CompareType compTy, MIRType insTy) {
548 return (insTy == MIRType::Int32 || insTy == MIRType::Int64) &&
549 (compTy == MCompare::Compare_Int32 ||
550 compTy == MCompare::Compare_UInt32 ||
551 compTy == MCompare::Compare_Int64 ||
552 compTy == MCompare::Compare_UInt64);
553 }
554
// Lower a fused compare-and-select. The true expression is used at-start and
// the output reuses its register (defineReuseInput), so the select can
// conditionally overwrite it in place.
void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
                                                   MDefinition* lhs,
                                                   MDefinition* rhs,
                                                   MCompare::CompareType compTy,
                                                   JSOp jsop) {
  MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
  auto* lir = new (alloc()) LWasmCompareAndSelect(
      useRegister(lhs), useAny(rhs), compTy, jsop,
      useRegisterAtStart(ins->trueExpr()), useAny(ins->falseExpr()));
  defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
}
566