1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/x64/CodeGenerator-x64.h"
8
9 #include "mozilla/MathAlgorithms.h"
10
11 #include "jit/MIR.h"
12
13 #include "jit/MacroAssembler-inl.h"
14 #include "jit/shared/CodeGenerator-shared-inl.h"
15 #include "vm/JSScript-inl.h"
16
17 using namespace js;
18 using namespace js::jit;
19
20 using mozilla::DebugOnly;
21
// Constructor: all real state lives in the shared x86/x64 base class.
CodeGeneratorX64::CodeGeneratorX64(MIRGenerator* gen, LIRGraph* graph,
                                   MacroAssembler* masm)
    : CodeGeneratorX86Shared(gen, graph, masm) {}
25
ToValue(LInstruction * ins,size_t pos)26 ValueOperand CodeGeneratorX64::ToValue(LInstruction* ins, size_t pos) {
27 return ValueOperand(ToRegister(ins->getOperand(pos)));
28 }
29
ToTempValue(LInstruction * ins,size_t pos)30 ValueOperand CodeGeneratorX64::ToTempValue(LInstruction* ins, size_t pos) {
31 return ValueOperand(ToRegister(ins->getTemp(pos)));
32 }
33
// Convert an int64 allocation into an assembler Operand: either the general
// register it lives in, or its stack slot relative to the stack pointer.
// Float registers are not valid here.
Operand CodeGeneratorX64::ToOperand64(const LInt64Allocation& a64) {
  const LAllocation& a = a64.value();
  MOZ_ASSERT(!a.isFloatReg());
  if (a.isGeneralReg()) return Operand(a.toGeneralReg()->reg());
  // Not a register: must be a stack location.
  return Operand(masm.getStackPointer(), ToStackOffset(a));
}
40
// x64 does not bucket frames into size classes; every depth maps to None.
FrameSizeClass FrameSizeClass::FromDepth(uint32_t frameDepth) {
  return FrameSizeClass::None();
}
44
ClassLimit()45 FrameSizeClass FrameSizeClass::ClassLimit() { return FrameSizeClass(0); }
46
// Unreachable on this platform; see FromDepth/ClassLimit above.
uint32_t FrameSizeClass::frameSize() const {
  MOZ_CRASH("x64 does not use frame size classes");
}
50
visitValue(LValue * value)51 void CodeGeneratorX64::visitValue(LValue* value) {
52 ValueOperand result = ToOutValue(value);
53 masm.moveValue(value->value(), result);
54 }
55
// Box a typed payload (int32, object, double, ...) into a tagged Value.
// On x64 this is a single moveValue that applies the tag for box->type().
void CodeGeneratorX64::visitBox(LBox* box) {
  const LAllocation* in = box->getOperand(0);
  ValueOperand result = ToOutValue(box);

  masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
}
62
// Unbox a Value into a typed register.
//
// Fallible unboxes first test the Value's tag and bail out to a snapshot on
// mismatch. Infallible unboxes skip the runtime check but, in debug builds,
// assert the tag is what the MIR type promised. In both cases the payload
// extraction at the end is the same tag-specific masm.unbox* operation.
void CodeGeneratorX64::visitUnbox(LUnbox* unbox) {
  MUnbox* mir = unbox->mir();

  if (mir->fallible()) {
    const ValueOperand value = ToValue(unbox, LUnbox::Input);
    Assembler::Condition cond;
    // Compute the "tag does not match" condition for the expected type.
    switch (mir->type()) {
      case MIRType::Int32:
        cond = masm.testInt32(Assembler::NotEqual, value);
        break;
      case MIRType::Boolean:
        cond = masm.testBoolean(Assembler::NotEqual, value);
        break;
      case MIRType::Object:
        cond = masm.testObject(Assembler::NotEqual, value);
        break;
      case MIRType::String:
        cond = masm.testString(Assembler::NotEqual, value);
        break;
      case MIRType::Symbol:
        cond = masm.testSymbol(Assembler::NotEqual, value);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
    bailoutIf(cond, unbox->snapshot());
  } else {
#ifdef DEBUG
    // Infallible path: verify the tag anyway in debug builds so that a wrong
    // type-inference result crashes loudly instead of corrupting data.
    Operand input = ToOperand(unbox->getOperand(LUnbox::Input));
    JSValueTag tag = MIRTypeToTag(mir->type());
    Label ok;
    masm.splitTag(input, ScratchReg);
    masm.branch32(Assembler::Equal, ScratchReg, Imm32(tag), &ok);
    masm.assumeUnreachable("Infallible unbox type mismatch");
    masm.bind(&ok);
#endif
  }

  // Extract the payload now that the tag is known (or has been checked).
  Operand input = ToOperand(unbox->getOperand(LUnbox::Input));
  Register result = ToRegister(unbox->output());
  switch (mir->type()) {
    case MIRType::Int32:
      masm.unboxInt32(input, result);
      break;
    case MIRType::Boolean:
      masm.unboxBoolean(input, result);
      break;
    case MIRType::Object:
      masm.unboxObject(input, result);
      break;
    case MIRType::String:
      masm.unboxString(input, result);
      break;
    case MIRType::Symbol:
      masm.unboxSymbol(input, result);
      break;
    default:
      MOZ_CRASH("Given MIRType cannot be unboxed.");
  }
}
123
// Strict equality of a Value (lhs) against a boolean (rhs): box the boolean
// into the scratch register, then compare whole 64-bit Values and set the
// output to 0/1.
void CodeGeneratorX64::visitCompareB(LCompareB* lir) {
  MCompare* mir = lir->mir();

  const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
  const LAllocation* rhs = lir->rhs();
  const Register output = ToRegister(lir->output());

  // Only strict (un)equality is sound as a raw bit comparison here.
  MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

  // Load boxed boolean in ScratchReg.
  ScratchRegisterScope scratch(masm);
  if (rhs->isConstant())
    masm.moveValue(rhs->toConstant()->toJSValue(), ValueOperand(scratch));
  else
    masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);

  // Perform the comparison.
  masm.cmpPtr(lhs.valueReg(), scratch);
  masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
}
144
// Branch flavor of visitCompareB: same boxed-boolean comparison, but the
// condition feeds a two-way branch instead of a 0/1 output register.
void CodeGeneratorX64::visitCompareBAndBranch(LCompareBAndBranch* lir) {
  MCompare* mir = lir->cmpMir();

  const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
  const LAllocation* rhs = lir->rhs();

  // Only strict (un)equality is sound as a raw bit comparison here.
  MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

  // Load boxed boolean in ScratchReg.
  ScratchRegisterScope scratch(masm);
  if (rhs->isConstant())
    masm.moveValue(rhs->toConstant()->toJSValue(), ValueOperand(scratch));
  else
    masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);

  // Perform the comparison.
  masm.cmpPtr(lhs.valueReg(), scratch);
  emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(),
             lir->ifFalse());
}
165
// Compare two Values bit-for-bit. Valid only for equality-style ops, where
// identical bits imply the Values compare equal. On x64 a boxed Value fits
// in one register, so one cmpPtr suffices.
void CodeGeneratorX64::visitCompareBitwise(LCompareBitwise* lir) {
  MCompare* mir = lir->mir();
  const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
  const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
  const Register output = ToRegister(lir->output());

  MOZ_ASSERT(IsEqualityOp(mir->jsop()));

  masm.cmpPtr(lhs.valueReg(), rhs.valueReg());
  masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
}
177
visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch * lir)178 void CodeGeneratorX64::visitCompareBitwiseAndBranch(
179 LCompareBitwiseAndBranch* lir) {
180 MCompare* mir = lir->cmpMir();
181
182 const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
183 const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);
184
185 MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
186 mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
187
188 masm.cmpPtr(lhs.valueReg(), rhs.valueReg());
189 emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(),
190 lir->ifFalse());
191 }
192
// 64-bit integer comparison producing a 0/1 output. The rhs may be an
// immediate, a register, or a stack slot; signedness of the condition comes
// from the MIR compare type.
void CodeGeneratorX64::visitCompareI64(LCompareI64* lir) {
  MCompare* mir = lir->mir();
  MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
             mir->compareType() == MCompare::Compare_UInt64);

  const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
  Register lhsReg = ToRegister64(lhs).reg;
  Register output = ToRegister(lir->output());

  if (IsConstant(rhs))
    masm.cmpPtr(lhsReg, ImmWord(ToInt64(rhs)));
  else
    masm.cmpPtr(lhsReg, ToOperand64(rhs));

  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
  masm.emitSet(JSOpToCondition(lir->jsop(), isSigned), output);
}
211
// Branch flavor of visitCompareI64: same compare, condition feeds a branch.
// NOTE(review): operand indices are taken from LCompareI64, not
// LCompareI64AndBranch — presumably the two LIR nodes share the same layout;
// confirm against the LIR definitions.
void CodeGeneratorX64::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
             mir->compareType() == MCompare::Compare_UInt64);

  LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
  LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
  Register lhsReg = ToRegister64(lhs).reg;

  if (IsConstant(rhs))
    masm.cmpPtr(lhsReg, ImmWord(ToInt64(rhs)));
  else
    masm.cmpPtr(lhsReg, ToOperand64(rhs));

  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
  emitBranch(JSOpToCondition(lir->jsop(), isSigned), lir->ifTrue(),
             lir->ifFalse());
}
230
// Signed 64-bit division/modulus via idivq, which requires the dividend in
// rdx:rax and leaves quotient in rax, remainder in rdx. Guards for the two
// cases where idivq would fault: divide-by-zero (wasm trap) and
// INT64_MIN / -1 (trap for div; result 0 for mod, since the remainder of
// that division is 0).
void CodeGeneratorX64::visitDivOrModI64(LDivOrModI64* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());
  Register output = ToRegister(lir->output());

  // idivq clobbers rax and rdx, so rhs must live elsewhere.
  MOZ_ASSERT_IF(lhs != rhs, rhs != rax);
  MOZ_ASSERT(rhs != rdx);
  MOZ_ASSERT_IF(output == rax, ToRegister(lir->remainder()) == rdx);
  MOZ_ASSERT_IF(output == rdx, ToRegister(lir->remainder()) == rax);

  Label done;

  // Put the lhs in rax.
  if (lhs != rax) masm.mov(lhs, rax);

  // Handle divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    masm.branchTestPtr(Assembler::NonZero, rhs, rhs, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

  // Handle an integer overflow exception from INT64_MIN / -1.
  if (lir->canBeNegativeOverflow()) {
    Label notOverflow;
    masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notOverflow);
    masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notOverflow);
    if (lir->mir()->isMod())
      masm.xorl(output, output);  // INT64_MIN % -1 == 0.
    else
      masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
    masm.jump(&done);
    masm.bind(&notOverflow);
  }

  // Sign extend the lhs into rdx to make rdx:rax.
  masm.cqo();
  masm.idivq(rhs);

  masm.bind(&done);
}
273
// Unsigned 64-bit division/modulus via udivq (div): dividend in rdx:rax,
// quotient in rax, remainder in rdx. Only divide-by-zero needs a guard —
// unsigned division cannot overflow.
void CodeGeneratorX64::visitUDivOrModI64(LUDivOrModI64* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());

  DebugOnly<Register> output = ToRegister(lir->output());
  // div clobbers rax and rdx, so rhs must live elsewhere.
  MOZ_ASSERT_IF(lhs != rhs, rhs != rax);
  MOZ_ASSERT(rhs != rdx);
  MOZ_ASSERT_IF(output.value == rax, ToRegister(lir->remainder()) == rdx);
  MOZ_ASSERT_IF(output.value == rdx, ToRegister(lir->remainder()) == rax);

  // Put the lhs in rax.
  if (lhs != rax) masm.mov(lhs, rax);

  Label done;

  // Prevent divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    masm.branchTestPtr(Assembler::NonZero, rhs, rhs, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

  // Zero extend the lhs into rdx to make (rdx:rax).
  masm.xorl(rdx, rdx);
  masm.udivq(rhs);

  masm.bind(&done);
}
303
// wasm select on int64: output is pre-loaded with the true value (the LIR
// reuses that register), and a conditional move overwrites it with the false
// value when the condition is zero.
void CodeGeneratorX64::visitWasmSelectI64(LWasmSelectI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);

  Register cond = ToRegister(lir->condExpr());

  Operand falseExpr = ToOperandOrRegister64(lir->falseExpr());

  Register64 out = ToOutRegister64(lir);
  MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
             "true expr is reused for input");

  masm.test32(cond, cond);
  masm.cmovzq(falseExpr, out.reg);  // cond == 0 -> take the false branch.
}
318
// Bit-cast int64 -> double: a single vmovq between GPR and XMM register.
void CodeGeneratorX64::visitWasmReinterpretFromI64(
    LWasmReinterpretFromI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
  masm.vmovq(ToRegister(lir->input()), ToFloatRegister(lir->output()));
}
325
// Bit-cast double -> int64: a single vmovq between XMM register and GPR.
void CodeGeneratorX64::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
  masm.vmovq(ToFloatRegister(lir->input()), ToRegister(lir->output()));
}
331
visitWasmUint32ToDouble(LWasmUint32ToDouble * lir)332 void CodeGeneratorX64::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
333 masm.convertUInt32ToDouble(ToRegister(lir->input()),
334 ToFloatRegister(lir->output()));
335 }
336
visitWasmUint32ToFloat32(LWasmUint32ToFloat32 * lir)337 void CodeGeneratorX64::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
338 masm.convertUInt32ToFloat32(ToRegister(lir->input()),
339 ToFloatRegister(lir->output()));
340 }
341
// Emit a wasm store of |value| to |dstAddr|. Constant values are stored with
// an immediate-operand mov of the access width; everything else goes through
// the generic MacroAssembler path. The store offset is recorded via
// masm.append so the trap machinery can identify this access.
void CodeGeneratorX64::wasmStore(const wasm::MemoryAccessDesc& access,
                                 const LAllocation* value, Operand dstAddr) {
  if (value->isConstant()) {
    MOZ_ASSERT(!access.isSimd());

    masm.memoryBarrierBefore(access.sync());

    const MConstant* mir = value->toConstant();
    // NOTE(review): an Int64 constant is truncated to 32 bits here —
    // presumably only reachable for sub-word access types (the switch below
    // rejects Scalar::Int64); confirm against the LIR lowering.
    Imm32 cst =
        Imm32(mir->type() == MIRType::Int32 ? mir->toInt32() : mir->toInt64());

    size_t storeOffset = masm.size();
    switch (access.type()) {
      case Scalar::Int8:
      case Scalar::Uint8:
        masm.movb(cst, dstAddr);
        break;
      case Scalar::Int16:
      case Scalar::Uint16:
        masm.movw(cst, dstAddr);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
        masm.movl(cst, dstAddr);
        break;
      case Scalar::Int64:
      case Scalar::Float32:
      case Scalar::Float64:
      case Scalar::Float32x4:
      case Scalar::Int8x16:
      case Scalar::Int16x8:
      case Scalar::Int32x4:
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
        MOZ_CRASH("unexpected array type");
    }
    masm.append(access, storeOffset, masm.framePushed());

    masm.memoryBarrierAfter(access.sync());
  } else {
    // Non-constant value: the MacroAssembler handles barriers and metadata.
    masm.wasmStore(access, ToAnyRegister(value), dstAddr);
  }
}
385
// Shared body for LWasmLoad/LWasmLoadI64: compute the heap address (HeapReg
// plus optional index register plus constant offset) and dispatch on the
// result type.
template <typename T>
void CodeGeneratorX64::emitWasmLoad(T* ins) {
  const MWasmLoad* mir = ins->mir();

  uint32_t offset = mir->access().offset();
  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

  // A "bogus" pointer means the index folded away to a constant address.
  const LAllocation* ptr = ins->ptr();
  Operand srcAddr = ptr->isBogus()
                        ? Operand(HeapReg, offset)
                        : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);

  if (mir->type() == MIRType::Int64)
    masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
  else
    masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
}
403
visitWasmLoad(LWasmLoad * ins)404 void CodeGeneratorX64::visitWasmLoad(LWasmLoad* ins) { emitWasmLoad(ins); }
405
// Thin dispatch to the shared template above (int64 result flavor).
void CodeGeneratorX64::visitWasmLoadI64(LWasmLoadI64* ins) {
  emitWasmLoad(ins);
}
409
// Shared body for LWasmStore/LWasmStoreI64: compute the heap address and
// forward to wasmStore(), which handles constant vs register values.
template <typename T>
void CodeGeneratorX64::emitWasmStore(T* ins) {
  const MWasmStore* mir = ins->mir();
  const wasm::MemoryAccessDesc& access = mir->access();

  uint32_t offset = access.offset();
  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

  const LAllocation* value = ins->getOperand(ins->ValueIndex);
  // A "bogus" pointer means the index folded away to a constant address.
  const LAllocation* ptr = ins->ptr();
  Operand dstAddr = ptr->isBogus()
                        ? Operand(HeapReg, offset)
                        : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);

  wasmStore(access, value, dstAddr);
}
426
visitWasmStore(LWasmStore * ins)427 void CodeGeneratorX64::visitWasmStore(LWasmStore* ins) { emitWasmStore(ins); }
428
// Thin dispatch to the shared template above (int64 value flavor).
void CodeGeneratorX64::visitWasmStoreI64(LWasmStoreI64* ins) {
  emitWasmStore(ins);
}
432
// asm.js heap load: like emitWasmLoad but records the emitted byte range and
// runs it through verifyLoadDisassembly as a debugging cross-check.
void CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  const MAsmJSLoadHeap* mir = ins->mir();
  MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);

  const LAllocation* ptr = ins->ptr();
  const LDefinition* out = ins->output();

  Scalar::Type accessType = mir->access().type();
  MOZ_ASSERT(!Scalar::isSimdType(accessType));

  Operand srcAddr = ptr->isBogus() ? Operand(HeapReg, mir->offset())
                                   : Operand(HeapReg, ToRegister(ptr), TimesOne,
                                             mir->offset());

  uint32_t before = masm.size();
  masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(out));
  uint32_t after = masm.size();
  verifyLoadDisassembly(before, after, accessType, srcAddr, *out->output());
}
452
// asm.js heap store: canonicalizes NaNs when deterministic mode requires it,
// emits the store, and cross-checks the emitted bytes via
// verifyStoreDisassembly.
void CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
  const MAsmJSStoreHeap* mir = ins->mir();
  MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);

  const LAllocation* ptr = ins->ptr();
  const LAllocation* value = ins->value();

  Scalar::Type accessType = mir->access().type();
  MOZ_ASSERT(!Scalar::isSimdType(accessType));

  canonicalizeIfDeterministic(accessType, value);

  Operand dstAddr = ptr->isBogus() ? Operand(HeapReg, mir->offset())
                                   : Operand(HeapReg, ToRegister(ptr), TimesOne,
                                             mir->offset());

  uint32_t before = masm.size();
  wasmStore(mir->access(), value, dstAddr);
  uint32_t after = masm.size();
  verifyStoreDisassembly(before, after, accessType, dstAddr, *value);
}
474
// wasm compare-exchange on the heap: full-barrier CAS, with a dedicated
// 64-bit path for Int64 accesses.
void CodeGeneratorX64::visitWasmCompareExchangeHeap(
    LWasmCompareExchangeHeap* ins) {
  MWasmCompareExchangeHeap* mir = ins->mir();

  Register ptr = ToRegister(ins->ptr());
  Register oldval = ToRegister(ins->oldValue());
  Register newval = ToRegister(ins->newValue());
  // x64 addresses the heap directly through HeapReg; no address temp needed.
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  Scalar::Type accessType = mir->access().type();
  BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->access().offset());

  if (accessType == Scalar::Int64) {
    MOZ_ASSERT(!mir->access().isPlainAsmJS());
    masm.compareExchange64(Synchronization::Full(), srcAddr, Register64(oldval),
                           Register64(newval), ToOutRegister64(ins));
  } else {
    masm.compareExchange(accessType, Synchronization::Full(), srcAddr, oldval,
                         newval, ToRegister(ins->output()));
  }
}
496
// wasm atomic exchange on the heap: full-barrier swap, with a dedicated
// 64-bit path for Int64 accesses.
void CodeGeneratorX64::visitWasmAtomicExchangeHeap(
    LWasmAtomicExchangeHeap* ins) {
  MWasmAtomicExchangeHeap* mir = ins->mir();

  Register ptr = ToRegister(ins->ptr());
  Register value = ToRegister(ins->value());
  // x64 addresses the heap directly through HeapReg; no address temp needed.
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  Scalar::Type accessType = mir->access().type();

  BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->access().offset());

  if (accessType == Scalar::Int64) {
    MOZ_ASSERT(!mir->access().isPlainAsmJS());
    masm.atomicExchange64(Synchronization::Full(), srcAddr, Register64(value),
                          ToOutRegister64(ins));
  } else {
    masm.atomicExchange(accessType, Synchronization::Full(), srcAddr, value,
                        ToRegister(ins->output()));
  }
}
518
// wasm atomic read-modify-write (add/and/or/xor/...) whose old value IS used.
// Uint32 is normalized to Int32 since the bit pattern fetched is identical.
void CodeGeneratorX64::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
  MWasmAtomicBinopHeap* mir = ins->mir();
  MOZ_ASSERT(mir->hasUses());

  Register ptr = ToRegister(ins->ptr());
  const LAllocation* value = ins->value();
  Register temp =
      ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
  Register output = ToRegister(ins->output());
  // x64 addresses the heap directly through HeapReg; no address temp needed.
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  Scalar::Type accessType = mir->access().type();
  if (accessType == Scalar::Uint32) accessType = Scalar::Int32;

  AtomicOp op = mir->operation();
  BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->access().offset());

  if (accessType == Scalar::Int64) {
    Register64 val = Register64(ToRegister(value));
    Register64 out = Register64(output);
    Register64 tmp = Register64(temp);
    masm.atomicFetchOp64(Synchronization::Full(), op, val, srcAddr, tmp, out);
  } else if (value->isConstant()) {
    masm.atomicFetchOp(accessType, Synchronization::Full(), op,
                       Imm32(ToInt32(value)), srcAddr, temp, output);
  } else {
    masm.atomicFetchOp(accessType, Synchronization::Full(), op,
                       ToRegister(value), srcAddr, temp, output);
  }
}
549
// wasm atomic read-modify-write whose old value is NOT used — the cheaper
// effect-only form (no output register).
void CodeGeneratorX64::visitWasmAtomicBinopHeapForEffect(
    LWasmAtomicBinopHeapForEffect* ins) {
  MWasmAtomicBinopHeap* mir = ins->mir();
  MOZ_ASSERT(!mir->hasUses());

  Register ptr = ToRegister(ins->ptr());
  const LAllocation* value = ins->value();
  // x64 addresses the heap directly through HeapReg; no address temp needed.
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  Scalar::Type accessType = mir->access().type();
  AtomicOp op = mir->operation();

  BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->access().offset());

  if (accessType == Scalar::Int64) {
    Register64 val = Register64(ToRegister(value));
    masm.atomicEffectOp64(Synchronization::Full(), op, val, srcAddr);
  } else if (value->isConstant()) {
    // NOTE(review): Int64 constants are truncated to Imm32 on this path —
    // presumably only reachable for sub-word access types where the low
    // 32 bits are all that matter; confirm against the LIR lowering.
    Imm32 c(0);
    if (value->toConstant()->type() == MIRType::Int64)
      c = Imm32(ToInt64(value));
    else
      c = Imm32(ToInt32(value));
    masm.atomicEffectOp(accessType, Synchronization::Full(), op, c, srcAddr,
                        InvalidReg);
  } else {
    masm.atomicEffectOp(accessType, Synchronization::Full(), op,
                        ToRegister(value), srcAddr, InvalidReg);
  }
}
580
// Truncate a double to int32.
void CodeGeneratorX64::visitTruncateDToInt32(LTruncateDToInt32* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  // On x64, branchTruncateDouble uses vcvttsd2sq. Unlike the x86
  // implementation, this should handle most doubles and we can just
  // call a stub if it fails.
  emitTruncateDouble(input, output, ins->mir());
}
590
// Truncate a float32 to int32.
void CodeGeneratorX64::visitTruncateFToInt32(LTruncateFToInt32* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  // On x64, branchTruncateFloat32 uses vcvttss2sq. Unlike the x86
  // implementation, this should handle most floats and we can just
  // call a stub if it fails.
  emitTruncateFloat32(input, output, ins->mir());
}
600
// Wrap int64 -> int32 by taking the low half: a 32-bit mov implicitly
// discards the upper 32 bits. Only the bottom-half form is supported.
void CodeGeneratorX64::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
  const LAllocation* input = lir->getOperand(0);
  Register output = ToRegister(lir->output());

  if (lir->mir()->bottomHalf())
    masm.movl(ToOperand(input), output);
  else
    MOZ_CRASH("Not implemented.");
}
610
// Extend int32 -> int64. Unsigned extension relies on x64's implicit
// zeroing of the upper 32 bits on a 32-bit mov; signed uses movslq.
void CodeGeneratorX64::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
  const LAllocation* input = lir->getOperand(0);
  Register output = ToRegister(lir->output());

  if (lir->mir()->isUnsigned())
    masm.movl(ToOperand(input), output);
  else
    masm.movslq(ToOperand(input), output);
}
620
// Sign-extend the low 8/16/32 bits of an int64 to the full 64 bits,
// selecting the matching movs[b|w|l]q instruction.
void CodeGeneratorX64::visitSignExtendInt64(LSignExtendInt64* ins) {
  Register64 input = ToRegister64(ins->getInt64Operand(0));
  Register64 output = ToOutRegister64(ins);
  switch (ins->mode()) {
    case MSignExtendInt64::Byte:
      masm.movsbq(Operand(input.reg), output.reg);
      break;
    case MSignExtendInt64::Half:
      masm.movswq(Operand(input.reg), output.reg);
      break;
    case MSignExtendInt64::Word:
      masm.movslq(Operand(input.reg), output.reg);
      break;
  }
}
636
// wasm float/double -> int64 truncation. Out-of-range inputs are handled by
// an out-of-line check (trap for non-saturating, clamp for saturating);
// unsigned conversions additionally need a float temp register.
void CodeGeneratorX64::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  MWasmTruncateToInt64* mir = lir->mir();
  MIRType inputType = mir->input()->type();

  MOZ_ASSERT(inputType == MIRType::Double || inputType == MIRType::Float32);

  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
  addOutOfLineCode(ool, mir);

  // The temp is only allocated (and only needed) for unsigned conversions.
  FloatRegister temp =
      mir->isUnsigned() ? ToFloatRegister(lir->temp()) : InvalidFloatReg;

  Label* oolEntry = ool->entry();
  Label* oolRejoin = ool->rejoin();
  bool isSaturating = mir->isSaturating();
  // Dispatch over the four (input type) x (signedness) combinations.
  if (inputType == MIRType::Double) {
    if (mir->isUnsigned())
      masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
                                      oolRejoin, temp);
    else
      masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
                                     oolRejoin, temp);
  } else {
    if (mir->isUnsigned())
      masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
                                       oolRejoin, temp);
    else
      masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
                                      oolRejoin, temp);
  }
}
671
// Convert int64 -> double/float32. Unsigned conversions require a GPR temp
// (asserted by the temp/isUnsigned agreement check below); signed ones map
// directly onto a single conversion instruction.
void CodeGeneratorX64::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  FloatRegister output = ToFloatRegister(lir->output());

  MInt64ToFloatingPoint* mir = lir->mir();
  bool isUnsigned = mir->isUnsigned();

  MIRType outputType = mir->type();
  MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
  // The temp must be present exactly when the conversion is unsigned.
  MOZ_ASSERT(isUnsigned == !lir->getTemp(0)->isBogusTemp());

  if (outputType == MIRType::Double) {
    if (isUnsigned)
      masm.convertUInt64ToDouble(input, output, ToRegister(lir->getTemp(0)));
    else
      masm.convertInt64ToDouble(input, output);
  } else {
    if (isUnsigned)
      masm.convertUInt64ToFloat32(input, output, ToRegister(lir->getTemp(0)));
    else
      masm.convertInt64ToFloat32(input, output);
  }
}
695
visitNotI64(LNotI64 * lir)696 void CodeGeneratorX64::visitNotI64(LNotI64* lir) {
697 masm.cmpq(Imm32(0), ToRegister(lir->input()));
698 masm.emitSet(Assembler::Equal, ToRegister(lir->output()));
699 }
700
visitClzI64(LClzI64 * lir)701 void CodeGeneratorX64::visitClzI64(LClzI64* lir) {
702 Register64 input = ToRegister64(lir->getInt64Operand(0));
703 Register64 output = ToOutRegister64(lir);
704 masm.clz64(input, output.reg);
705 }
706
visitCtzI64(LCtzI64 * lir)707 void CodeGeneratorX64::visitCtzI64(LCtzI64* lir) {
708 Register64 input = ToRegister64(lir->getInt64Operand(0));
709 Register64 output = ToOutRegister64(lir);
710 masm.ctz64(input, output.reg);
711 }
712
// Branch on an int64's truthiness: non-zero takes the true successor.
void CodeGeneratorX64::visitTestI64AndBranch(LTestI64AndBranch* lir) {
  Register input = ToRegister(lir->input());
  masm.testq(input, input);
  emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
}
718