/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x86/CodeGenerator-x86.h"

#include "mozilla/ArrayUtils.h"
#include "mozilla/Casting.h"
#include "mozilla/DebugOnly.h"

#include "jsnum.h"

#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "vm/Shape.h"

#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
#include "vm/JSScript-inl.h"

using namespace js;
using namespace js::jit;

using JS::GenericNaN;
using mozilla::BitwiseCast;
using mozilla::DebugOnly;
using mozilla::FloatingPoint;

CodeGeneratorX86::CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph,
                                   MacroAssembler* masm)
    : CodeGeneratorX86Shared(gen, graph, masm) {}

static const uint32_t FrameSizes[] = {128, 256, 512, 1024};

FrameSizeClass FrameSizeClass::FromDepth(uint32_t frameDepth) {
  for (uint32_t i = 0; i < mozilla::ArrayLength(FrameSizes); i++) {
    if (frameDepth < FrameSizes[i]) return FrameSizeClass(i);
  }

  return FrameSizeClass::None();
}

FrameSizeClass FrameSizeClass::ClassLimit() {
  return FrameSizeClass(mozilla::ArrayLength(FrameSizes));
}

uint32_t FrameSizeClass::frameSize() const {
  MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
  MOZ_ASSERT(class_ < mozilla::ArrayLength(FrameSizes));

  return FrameSizes[class_];
}

ValueOperand CodeGeneratorX86::ToValue(LInstruction* ins, size_t pos) {
  Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
  Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
  return ValueOperand(typeReg, payloadReg);
}

ValueOperand CodeGeneratorX86::ToTempValue(LInstruction* ins, size_t pos) {
  Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
  Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
  return ValueOperand(typeReg, payloadReg);
}

void CodeGeneratorX86::visitValue(LValue* value) {
  const ValueOperand out = ToOutValue(value);
  masm.moveValue(value->value(), out);
}

void CodeGeneratorX86::visitBox(LBox* box) {
  const LDefinition* type = box->getDef(TYPE_INDEX);

  DebugOnly<const LAllocation*> a = box->getOperand(0);
  MOZ_ASSERT(!a->isConstant());

  // On x86, the input operand and the output payload have the same
  // virtual register. All that needs to be written is the type tag for
  // the type definition.
  masm.mov(ImmWord(MIRTypeToTag(box->type())), ToRegister(type));
}

void CodeGeneratorX86::visitBoxFloatingPoint(LBoxFloatingPoint* box) {
  const AnyRegister in = ToAnyRegister(box->getOperand(0));
  const ValueOperand out = ToOutValue(box);

  masm.moveValue(TypedOrValueRegister(box->type(), in), out);
}

void CodeGeneratorX86::visitUnbox(LUnbox* unbox) {
  // Note that for unbox, the type and payload indexes are switched on the
  // inputs.
  Operand type = ToOperand(unbox->type());
  Operand payload = ToOperand(unbox->payload());
  Register output = ToRegister(unbox->output());
  MUnbox* mir = unbox->mir();

  JSValueTag tag = MIRTypeToTag(mir->type());
  if (mir->fallible()) {
    masm.cmp32(type, Imm32(tag));
    bailoutIf(Assembler::NotEqual, unbox->snapshot());
  } else {
#ifdef DEBUG
    Label ok;
    masm.branch32(Assembler::Equal, type, Imm32(tag), &ok);
    masm.assumeUnreachable("Infallible unbox type mismatch");
    masm.bind(&ok);
#endif
  }

  // Note: If spectreValueMasking is disabled, then this instruction will
  // default to a no-op as long as the lowering allocates the same register
  // for the output and the payload.
  masm.unboxNonDouble(type, payload, output, ValueTypeFromMIRType(mir->type()));
}

void CodeGeneratorX86::visitCompareB(LCompareB* lir) {
  MCompare* mir = lir->mir();

  const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
  const LAllocation* rhs = lir->rhs();
  const Register output = ToRegister(lir->output());

  MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

  Label notBoolean, done;
  masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
  {
    if (rhs->isConstant())
      masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
    else
      masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
    masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
    masm.jump(&done);
  }
  masm.bind(&notBoolean);
  { masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output); }

  masm.bind(&done);
}

void CodeGeneratorX86::visitCompareBAndBranch(LCompareBAndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
  const LAllocation* rhs = lir->rhs();

  MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

  Assembler::Condition cond = masm.testBoolean(Assembler::NotEqual, lhs);
  jumpToBlock((mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue(),
              cond);

  if (rhs->isConstant())
    masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
  else
    masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
  emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(),
             lir->ifFalse());
}

void CodeGeneratorX86::visitCompareBitwise(LCompareBitwise* lir) {
  MCompare* mir = lir->mir();
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
  const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
  const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
  const Register output = ToRegister(lir->output());

  MOZ_ASSERT(IsEqualityOp(mir->jsop()));

  Label notEqual, done;
  masm.cmp32(lhs.typeReg(), rhs.typeReg());
  masm.j(Assembler::NotEqual, &notEqual);
  {
    masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
    masm.emitSet(cond, output);
    masm.jump(&done);
  }
  masm.bind(&notEqual);
  { masm.move32(Imm32(cond == Assembler::NotEqual), output); }

  masm.bind(&done);
}

void CodeGeneratorX86::visitCompareBitwiseAndBranch(
    LCompareBitwiseAndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
  const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
  const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);

  MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
             mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

  MBasicBlock* notEqual =
      (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();

  masm.cmp32(lhs.typeReg(), rhs.typeReg());
  jumpToBlock(notEqual, Assembler::NotEqual);
  masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
  emitBranch(cond, lir->ifTrue(), lir->ifFalse());
}

void CodeGeneratorX86::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  Register input = ToRegister(lir->input());
  Register temp = ToRegister(lir->temp());

  if (input != temp) masm.mov(input, temp);

  // Beware: convertUInt32ToDouble clobbers input.
  masm.convertUInt32ToDouble(temp, ToFloatRegister(lir->output()));
}

void CodeGeneratorX86::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
  Register input = ToRegister(lir->input());
  Register temp = ToRegister(lir->temp());
  FloatRegister output = ToFloatRegister(lir->output());

  if (input != temp) masm.mov(input, temp);

  // Beware: convertUInt32ToFloat32 clobbers input.
  masm.convertUInt32ToFloat32(temp, output);
}

template <typename T>
void CodeGeneratorX86::emitWasmLoad(T* ins) {
  const MWasmLoad* mir = ins->mir();

  uint32_t offset = mir->access().offset();
  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

  const LAllocation* ptr = ins->ptr();
  const LAllocation* memoryBase = ins->memoryBase();

  // Lowering has set things up so that we can use a BaseIndex form if the
  // pointer is constant and the offset is zero, or if the pointer is zero.

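  // A "bogus" pointer means lowering folded the access to a constant
  // address: the displacement is the offset when non-zero, otherwise the
  // constant base itself. A live pointer uses the memoryBase+ptr form.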
  Operand srcAddr =
      ptr->isBogus()
          ? Operand(ToRegister(memoryBase),
                    offset ? offset : mir->base()->toConstant()->toInt32())
          : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  if (mir->type() == MIRType::Int64) {
    MOZ_ASSERT_IF(mir->access().isAtomic(),
                  mir->access().type() != Scalar::Int64);
    masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
  } else {
    masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
  }
}

void CodeGeneratorX86::visitWasmLoad(LWasmLoad* ins) { emitWasmLoad(ins); }

void CodeGeneratorX86::visitWasmLoadI64(LWasmLoadI64* ins) {
  emitWasmLoad(ins);
}

template <typename T>
void CodeGeneratorX86::emitWasmStore(T* ins) {
  const MWasmStore* mir = ins->mir();

  uint32_t offset = mir->access().offset();
  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

  const LAllocation* ptr = ins->ptr();
  const LAllocation* memoryBase = ins->memoryBase();

  // Lowering has set things up so that we can use a BaseIndex form if the
  // pointer is constant and the offset is zero, or if the pointer is zero.

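  // Address selection mirrors emitWasmLoad above: a bogus pointer selects
  // the absolute base+displacement form.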
  Operand dstAddr =
      ptr->isBogus()
          ? Operand(ToRegister(memoryBase),
                    offset ? offset : mir->base()->toConstant()->toInt32())
          : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  if (mir->access().type() == Scalar::Int64) {
    Register64 value =
        ToRegister64(ins->getInt64Operand(LWasmStoreI64::ValueIndex));
    masm.wasmStoreI64(mir->access(), value, dstAddr);
  } else {
    AnyRegister value = ToAnyRegister(ins->getOperand(LWasmStore::ValueIndex));
    masm.wasmStore(mir->access(), value, dstAddr);
  }
}

void CodeGeneratorX86::visitWasmStore(LWasmStore* ins) { emitWasmStore(ins); }

void CodeGeneratorX86::visitWasmStoreI64(LWasmStoreI64* ins) {
  emitWasmStore(ins);
}

void CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  const MAsmJSLoadHeap* mir = ins->mir();
  MOZ_ASSERT(mir->access().offset() == 0);

  const LAllocation* ptr = ins->ptr();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
  const LAllocation* memoryBase = ins->memoryBase();
  AnyRegister out = ToAnyRegister(ins->output());

  Scalar::Type accessType = mir->accessType();
  MOZ_ASSERT(!Scalar::isSimdType(accessType));

  OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
  if (mir->needsBoundsCheck()) {
    ool = new (alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
    addOutOfLineCode(ool, mir);

    masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr),
                         ToRegister(boundsCheckLimit), ool->entry());
  }

  Operand srcAddr = ptr->isBogus() ? Operand(ToRegister(memoryBase), 0)
                                   : Operand(ToRegister(memoryBase),
                                             ToRegister(ptr), TimesOne);

  masm.wasmLoad(mir->access(), srcAddr, out);

  if (ool) masm.bind(ool->rejoin());
}

void CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
  const MAsmJSStoreHeap* mir = ins->mir();
  MOZ_ASSERT(mir->offset() == 0);

  const LAllocation* ptr = ins->ptr();
  const LAllocation* value = ins->value();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
  const LAllocation* memoryBase = ins->memoryBase();

  Scalar::Type accessType = mir->accessType();
  MOZ_ASSERT(!Scalar::isSimdType(accessType));
  canonicalizeIfDeterministic(accessType, value);

  Operand dstAddr = ptr->isBogus() ? Operand(ToRegister(memoryBase), 0)
                                   : Operand(ToRegister(memoryBase),
                                             ToRegister(ptr), TimesOne);

  Label rejoin;
  if (mir->needsBoundsCheck()) {
    masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr),
                         ToRegister(boundsCheckLimit), &rejoin);
  }

  masm.wasmStore(mir->access(), ToAnyRegister(value), dstAddr);

  if (rejoin.used()) masm.bind(&rejoin);
}

void CodeGeneratorX86::visitWasmCompareExchangeHeap(
    LWasmCompareExchangeHeap* ins) {
  MWasmCompareExchangeHeap* mir = ins->mir();

  Scalar::Type accessType = mir->access().type();
  Register ptrReg = ToRegister(ins->ptr());
  Register oldval = ToRegister(ins->oldValue());
  Register newval = ToRegister(ins->newValue());
  Register addrTemp = ToRegister(ins->addrTemp());
  Register memoryBase = ToRegister(ins->memoryBase());
  Register output = ToRegister(ins->output());

  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  masm.compareExchange(accessType, Synchronization::Full(), memAddr, oldval,
                       newval, output);
}

void CodeGeneratorX86::visitWasmAtomicExchangeHeap(
    LWasmAtomicExchangeHeap* ins) {
  MWasmAtomicExchangeHeap* mir = ins->mir();

  Scalar::Type accessType = mir->access().type();
  Register ptrReg = ToRegister(ins->ptr());
  Register value = ToRegister(ins->value());
  Register addrTemp = ToRegister(ins->addrTemp());
  Register memoryBase = ToRegister(ins->memoryBase());
  Register output = ToRegister(ins->output());

  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  masm.atomicExchange(accessType, Synchronization::Full(), memAddr, value,
                      output);
}

void CodeGeneratorX86::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
  MWasmAtomicBinopHeap* mir = ins->mir();

  Scalar::Type accessType = mir->access().type();
  Register ptrReg = ToRegister(ins->ptr());
  Register temp =
      ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
  Register addrTemp = ToRegister(ins->addrTemp());
  Register out = ToRegister(ins->output());
  const LAllocation* value = ins->value();
  AtomicOp op = mir->operation();
  Register memoryBase = ToRegister(ins->memoryBase());

  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  if (value->isConstant()) {
    masm.atomicFetchOp(accessType, Synchronization::Full(), op,
                       Imm32(ToInt32(value)), memAddr, temp, out);
  } else {
    masm.atomicFetchOp(accessType, Synchronization::Full(), op,
                       ToRegister(value), memAddr, temp, out);
  }
}

void CodeGeneratorX86::visitWasmAtomicBinopHeapForEffect(
    LWasmAtomicBinopHeapForEffect* ins) {
  MWasmAtomicBinopHeap* mir = ins->mir();
  MOZ_ASSERT(!mir->hasUses());

  Scalar::Type accessType = mir->access().type();
  Register ptrReg = ToRegister(ins->ptr());
  Register addrTemp = ToRegister(ins->addrTemp());
  const LAllocation* value = ins->value();
  AtomicOp op = mir->operation();
  Register memoryBase = ToRegister(ins->memoryBase());

  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  if (value->isConstant()) {
    masm.atomicEffectOp(accessType, Synchronization::Full(), op,
                        Imm32(ToInt32(value)), memAddr, InvalidReg);
  } else {
    masm.atomicEffectOp(accessType, Synchronization::Full(), op,
                        ToRegister(value), memAddr, InvalidReg);
  }
}

void CodeGeneratorX86::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* ins) {
  uint32_t offset = ins->mir()->access().offset();
  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();
  Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  MOZ_ASSERT(ToRegister(ins->t1()) == ecx);
  MOZ_ASSERT(ToRegister(ins->t2()) == ebx);
  MOZ_ASSERT(ToOutRegister64(ins).high == edx);
  MOZ_ASSERT(ToOutRegister64(ins).low == eax);

  // We must have the same values in ecx:ebx and edx:eax, but we don't care
  // what they are. It's safe to clobber ecx:ebx because they are fixed temp
  // registers.
  masm.movl(eax, ebx);
  masm.movl(edx, ecx);
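  // cmpxchg8b compares edx:eax with the memory operand: on mismatch it loads
  // the memory value into edx:eax; on match it stores ecx:ebx, which holds
  // the same value. Either way edx:eax ends up with the current 64-bit
  // contents of memory, read atomically, and memory is unchanged.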
  masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
}

void CodeGeneratorX86::visitWasmCompareExchangeI64(
    LWasmCompareExchangeI64* ins) {
  uint32_t offset = ins->mir()->access().offset();
  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();
  Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  MOZ_ASSERT(ToRegister64(ins->expected()).low == eax);
  MOZ_ASSERT(ToRegister64(ins->expected()).high == edx);
  MOZ_ASSERT(ToRegister64(ins->replacement()).low == ebx);
  MOZ_ASSERT(ToRegister64(ins->replacement()).high == ecx);
  MOZ_ASSERT(ToOutRegister64(ins).low == eax);
  MOZ_ASSERT(ToOutRegister64(ins).high == edx);

  masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
}

template <typename T>
void CodeGeneratorX86::emitWasmStoreOrExchangeAtomicI64(T* ins,
                                                        uint32_t offset) {
  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();
  Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  DebugOnly<const LInt64Allocation> value = ins->value();
  MOZ_ASSERT(ToRegister64(value).low == ebx);
  MOZ_ASSERT(ToRegister64(value).high == ecx);

  // edx and eax will be overwritten every time through the loop but
  // memoryBase and ptr must remain live for a possible second iteration.

  MOZ_ASSERT(ToRegister(memoryBase) != edx && ToRegister(memoryBase) != eax);
  MOZ_ASSERT(ToRegister(ptr) != edx && ToRegister(ptr) != eax);

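  // edx:eax starts out holding an arbitrary guess at the old value; if the
  // guess is wrong the first cmpxchg8b fails but refreshes edx:eax with the
  // actual memory contents, so the retry succeeds unless another thread
  // intervenes.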
  Label again;
  masm.bind(&again);
  masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
  masm.j(Assembler::Condition::NonZero, &again);
}

void CodeGeneratorX86::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* ins) {
  MOZ_ASSERT(ToRegister(ins->t1()) == edx);
  MOZ_ASSERT(ToRegister(ins->t2()) == eax);

  emitWasmStoreOrExchangeAtomicI64(ins, ins->mir()->access().offset());
}

void CodeGeneratorX86::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* ins) {
  MOZ_ASSERT(ToOutRegister64(ins).high == edx);
  MOZ_ASSERT(ToOutRegister64(ins).low == eax);

  emitWasmStoreOrExchangeAtomicI64(ins, ins->access().offset());
}

void CodeGeneratorX86::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* ins) {
  uint32_t offset = ins->access().offset();
  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();

  BaseIndex srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  MOZ_ASSERT(ToRegister(memoryBase) == esi || ToRegister(memoryBase) == edi);
  MOZ_ASSERT(ToRegister(ptr) == esi || ToRegister(ptr) == edi);

  Register64 value = ToRegister64(ins->value());

  MOZ_ASSERT(value.low == ebx);
  MOZ_ASSERT(value.high == ecx);

  Register64 output = ToOutRegister64(ins);

  MOZ_ASSERT(output.low == eax);
  MOZ_ASSERT(output.high == edx);

  masm.Push(ecx);
  masm.Push(ebx);

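  // The operand is spilled to the stack because ebx:ecx double as the
  // cmpxchg8b input inside atomicFetchOp64; the helper presumably reloads
  // the original value from valueAddr on each iteration of its retry loop.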
  Address valueAddr(esp, 0);

  // Here the `value` register acts as a temp, we'll restore it below.
  masm.atomicFetchOp64(Synchronization::Full(), ins->operation(), valueAddr,
                       srcAddr, value, output);

  masm.Pop(ebx);
  masm.Pop(ecx);
}

namespace js {
namespace jit {

class OutOfLineTruncate : public OutOfLineCodeBase<CodeGeneratorX86> {
  LTruncateDToInt32* ins_;

 public:
  explicit OutOfLineTruncate(LTruncateDToInt32* ins) : ins_(ins) {}

  void accept(CodeGeneratorX86* codegen) override {
    codegen->visitOutOfLineTruncate(this);
  }
  LTruncateDToInt32* ins() const { return ins_; }
};

class OutOfLineTruncateFloat32 : public OutOfLineCodeBase<CodeGeneratorX86> {
  LTruncateFToInt32* ins_;

 public:
  explicit OutOfLineTruncateFloat32(LTruncateFToInt32* ins) : ins_(ins) {}

  void accept(CodeGeneratorX86* codegen) override {
    codegen->visitOutOfLineTruncateFloat32(this);
  }
  LTruncateFToInt32* ins() const { return ins_; }
};

}  // namespace jit
}  // namespace js

void CodeGeneratorX86::visitTruncateDToInt32(LTruncateDToInt32* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  OutOfLineTruncate* ool = new (alloc()) OutOfLineTruncate(ins);
  addOutOfLineCode(ool, ins->mir());

  masm.branchTruncateDoubleMaybeModUint32(input, output, ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGeneratorX86::visitTruncateFToInt32(LTruncateFToInt32* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  OutOfLineTruncateFloat32* ool = new (alloc()) OutOfLineTruncateFloat32(ins);
  addOutOfLineCode(ool, ins->mir());

  masm.branchTruncateFloat32MaybeModUint32(input, output, ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGeneratorX86::visitOutOfLineTruncate(OutOfLineTruncate* ool) {
  LTruncateDToInt32* ins = ool->ins();
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  Label fail;

  if (Assembler::HasSSE3()) {
    Label failPopDouble;
    // Push double.
    masm.subl(Imm32(sizeof(double)), esp);
    masm.storeDouble(input, Operand(esp, 0));

    // Check exponent to avoid fp exceptions.
    masm.branchDoubleNotInInt64Range(Address(esp, 0), output, &failPopDouble);

    // Load double, perform 64-bit truncation.
    masm.truncateDoubleToInt64(Address(esp, 0), Address(esp, 0), output);

    // Load low word, pop double and jump back.
    masm.load32(Address(esp, 0), output);
    masm.addl(Imm32(sizeof(double)), esp);
    masm.jump(ool->rejoin());

    masm.bind(&failPopDouble);
    masm.addl(Imm32(sizeof(double)), esp);
    masm.jump(&fail);
  } else {
    FloatRegister temp = ToFloatRegister(ins->tempFloat());

    // Try to convert doubles representing integers within 2^32 of a signed
    // integer, by adding/subtracting 2^32 and then trying to convert to int32.
    // This has to be an exact conversion, as otherwise the truncation works
    // incorrectly on the modified value.
    masm.zeroDouble(ScratchDoubleReg);
    masm.vucomisd(ScratchDoubleReg, input);
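    // Parity is set when vucomisd's operands are unordered, i.e. when the
    // input is NaN; that case must take the slow path.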
    masm.j(Assembler::Parity, &fail);

    {
      Label positive;
      masm.j(Assembler::Above, &positive);

      masm.loadConstantDouble(4294967296.0, temp);
      Label skip;
      masm.jmp(&skip);

      masm.bind(&positive);
      masm.loadConstantDouble(-4294967296.0, temp);
      masm.bind(&skip);
    }

    masm.addDouble(input, temp);
    masm.vcvttsd2si(temp, output);
    masm.vcvtsi2sd(output, ScratchDoubleReg, ScratchDoubleReg);

    masm.vucomisd(ScratchDoubleReg, temp);
    masm.j(Assembler::Parity, &fail);
    masm.j(Assembler::Equal, ool->rejoin());
  }

  masm.bind(&fail);
  {
    saveVolatile(output);

    if (gen->compilingWasm()) {
      masm.setupWasmABICall();
      masm.passABIArg(input, MoveOp::DOUBLE);
      masm.callWithABI(ins->mir()->bytecodeOffset(),
                       wasm::SymbolicAddress::ToInt32);
    } else {
      masm.setupUnalignedABICall(output);
      masm.passABIArg(input, MoveOp::DOUBLE);
      masm.callWithABI(BitwiseCast<void*, int32_t (*)(double)>(JS::ToInt32),
                       MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
    }
    masm.storeCallInt32Result(output);

    restoreVolatile(output);
  }

  masm.jump(ool->rejoin());
}

void CodeGeneratorX86::visitOutOfLineTruncateFloat32(
    OutOfLineTruncateFloat32* ool) {
  LTruncateFToInt32* ins = ool->ins();
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  Label fail;

  if (Assembler::HasSSE3()) {
    Label failPopFloat;

    // Push float32, but subtract 64 bits so that the value popped by fisttp
    // fits.
    masm.subl(Imm32(sizeof(uint64_t)), esp);
    masm.storeFloat32(input, Operand(esp, 0));

    // Check exponent to avoid fp exceptions.
    masm.branchDoubleNotInInt64Range(Address(esp, 0), output, &failPopFloat);

    // Load float, perform 64-bit truncation.
    masm.truncateFloat32ToInt64(Address(esp, 0), Address(esp, 0), output);

    // Load low word, pop 64 bits and jump back.
    masm.load32(Address(esp, 0), output);
    masm.addl(Imm32(sizeof(uint64_t)), esp);
    masm.jump(ool->rejoin());

    masm.bind(&failPopFloat);
    masm.addl(Imm32(sizeof(uint64_t)), esp);
    masm.jump(&fail);
  } else {
    FloatRegister temp = ToFloatRegister(ins->tempFloat());

    // Try to convert float32 representing integers within 2^32 of a signed
    // integer, by adding/subtracting 2^32 and then trying to convert to int32.
    // This has to be an exact conversion, as otherwise the truncation works
    // incorrectly on the modified value.
    masm.zeroFloat32(ScratchFloat32Reg);
    masm.vucomiss(ScratchFloat32Reg, input);
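    // As above: parity means the compare was unordered, i.e. the input is
    // NaN.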
    masm.j(Assembler::Parity, &fail);

    {
      Label positive;
      masm.j(Assembler::Above, &positive);

      masm.loadConstantFloat32(4294967296.f, temp);
      Label skip;
      masm.jmp(&skip);

      masm.bind(&positive);
      masm.loadConstantFloat32(-4294967296.f, temp);
      masm.bind(&skip);
    }

    masm.addFloat32(input, temp);
    masm.vcvttss2si(temp, output);
    masm.vcvtsi2ss(output, ScratchFloat32Reg, ScratchFloat32Reg);

    masm.vucomiss(ScratchFloat32Reg, temp);
    masm.j(Assembler::Parity, &fail);
    masm.j(Assembler::Equal, ool->rejoin());
  }

  masm.bind(&fail);
  {
    saveVolatile(output);

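    // The input register is converted to double in place below, so preserve
    // its float32 value across the call.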
    masm.Push(input);

    if (gen->compilingWasm())
      masm.setupWasmABICall();
    else
      masm.setupUnalignedABICall(output);

    masm.vcvtss2sd(input, input, input);
    masm.passABIArg(input.asDouble(), MoveOp::DOUBLE);

    if (gen->compilingWasm()) {
      masm.callWithABI(ins->mir()->bytecodeOffset(),
                       wasm::SymbolicAddress::ToInt32);
    } else {
      masm.callWithABI(BitwiseCast<void*, int32_t (*)(double)>(JS::ToInt32),
                       MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
    }

    masm.storeCallInt32Result(output);
    masm.Pop(input);

    restoreVolatile(output);
  }

  masm.jump(ool->rejoin());
}

void CodeGeneratorX86::visitCompareI64(LCompareI64* lir) {
  MCompare* mir = lir->mir();
  MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
             mir->compareType() == MCompare::Compare_UInt64);

  const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
  Register64 lhsRegs = ToRegister64(lhs);
  Register output = ToRegister(lir->output());

  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
  Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
  Label done;

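  // Optimistically set the output to 1; the branch below skips the clearing
  // xor when the condition holds.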
  masm.move32(Imm32(1), output);

  if (IsConstant(rhs)) {
    Imm64 imm = Imm64(ToInt64(rhs));
    masm.branch64(condition, lhsRegs, imm, &done);
  } else {
    Register64 rhsRegs = ToRegister64(rhs);
    masm.branch64(condition, lhsRegs, rhsRegs, &done);
  }

  masm.xorl(output, output);
  masm.bind(&done);
}

void CodeGeneratorX86::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
             mir->compareType() == MCompare::Compare_UInt64);

  const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
  Register64 lhsRegs = ToRegister64(lhs);

  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
  Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);

  Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
  Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());

  if (isNextBlock(lir->ifFalse()->lir())) {
    falseLabel = nullptr;
  } else if (isNextBlock(lir->ifTrue()->lir())) {
    condition = Assembler::InvertCondition(condition);
    trueLabel = falseLabel;
    falseLabel = nullptr;
  }

  if (IsConstant(rhs)) {
    Imm64 imm = Imm64(ToInt64(rhs));
    masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
  } else {
    Register64 rhsRegs = ToRegister64(rhs);
    masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
  }
}

void CodeGeneratorX86::visitDivOrModI64(LDivOrModI64* lir) {
  Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
  Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
  Register temp = ToRegister(lir->temp());
  Register64 output = ToOutRegister64(lir);

  MOZ_ASSERT(output == ReturnReg64);

  Label done;

  // Handle divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    masm.branchTest64(Assembler::NonZero, rhs, rhs, temp, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

  MDefinition* mir = lir->mir();

  // Handle an integer overflow exception from INT64_MIN / -1.
  if (lir->canBeNegativeOverflow()) {
    Label notOverflow;
    masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notOverflow);
    masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notOverflow);
    if (mir->isMod())
      masm.xor64(output, output);
    else
      masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
    masm.jump(&done);
    masm.bind(&notOverflow);
  }

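  // There is no single 64-bit divide instruction on x86-32, so call out to a
  // C++ builtin. Each Int64 operand is passed as two 32-bit ABI arguments,
  // high word first, matching how the builtin unpacks them.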
  masm.setupWasmABICall();
  masm.passABIArg(lhs.high);
  masm.passABIArg(lhs.low);
  masm.passABIArg(rhs.high);
  masm.passABIArg(rhs.low);

  MOZ_ASSERT(gen->compilingWasm());
  if (mir->isMod())
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::ModI64);
  else
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::DivI64);

  // output in edx:eax, move to output register.
  masm.movl(edx, output.high);
  MOZ_ASSERT(eax == output.low);

  masm.bind(&done);
}

void CodeGeneratorX86::visitUDivOrModI64(LUDivOrModI64* lir) {
  Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
  Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
  Register temp = ToRegister(lir->temp());
  Register64 output = ToOutRegister64(lir);

  MOZ_ASSERT(output == ReturnReg64);

  // Prevent divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    masm.branchTest64(Assembler::NonZero, rhs, rhs, temp, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

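  // As in visitDivOrModI64: pass the operands to the builtin as 32-bit
  // halves, high word first.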
  masm.setupWasmABICall();
  masm.passABIArg(lhs.high);
  masm.passABIArg(lhs.low);
  masm.passABIArg(rhs.high);
  masm.passABIArg(rhs.low);

  MOZ_ASSERT(gen->compilingWasm());
  MDefinition* mir = lir->mir();
  if (mir->isMod())
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UModI64);
  else
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UDivI64);

  // output in edx:eax, move to output register.
  masm.movl(edx, output.high);
  MOZ_ASSERT(eax == output.low);
}

void CodeGeneratorX86::visitWasmSelectI64(LWasmSelectI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);

  Register cond = ToRegister(lir->condExpr());
  Register64 falseExpr = ToRegister64(lir->falseExpr());
  Register64 out = ToOutRegister64(lir);

  MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
             "true expr is reused for input");

  Label done;
  masm.branchTest32(Assembler::NonZero, cond, cond, &done);
  masm.movl(falseExpr.low, out.low);
  masm.movl(falseExpr.high, out.high);
  masm.bind(&done);
}

void CodeGeneratorX86::visitWasmReinterpretFromI64(
    LWasmReinterpretFromI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
  Register64 input = ToRegister64(lir->getInt64Operand(0));

  masm.Push(input.high);
  masm.Push(input.low);
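  // Pushing high then low leaves the low word at [esp] and the high word at
  // [esp + 4], i.e. the value is laid out little-endian in memory for the
  // 64-bit vmovq load below.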
  masm.vmovq(Operand(esp, 0), ToFloatRegister(lir->output()));
  masm.freeStack(sizeof(uint64_t));
}

void CodeGeneratorX86::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
  Register64 output = ToOutRegister64(lir);

  masm.reserveStack(sizeof(uint64_t));
  masm.vmovq(ToFloatRegister(lir->input()), Operand(esp, 0));
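  // vmovq stored the low word at [esp], so pop low before high.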
  masm.Pop(output.low);
  masm.Pop(output.high);
}

void CodeGeneratorX86::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
  Register64 output = ToOutRegister64(lir);
  Register input = ToRegister(lir->input());

  if (lir->mir()->isUnsigned()) {
    if (output.low != input) masm.movl(input, output.low);
    masm.xorl(output.high, output.high);
  } else {
    MOZ_ASSERT(output.low == input);
    MOZ_ASSERT(output.low == eax);
    MOZ_ASSERT(output.high == edx);
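    // cdq sign-extends eax into edx, producing the 64-bit result in edx:eax.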
    masm.cdq();
  }
}

void CodeGeneratorX86::visitSignExtendInt64(LSignExtendInt64* lir) {
#ifdef DEBUG
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register64 output = ToOutRegister64(lir);
  MOZ_ASSERT(input.low == eax);
  MOZ_ASSERT(output.low == eax);
  MOZ_ASSERT(input.high == edx);
  MOZ_ASSERT(output.high == edx);
#endif
  switch (lir->mode()) {
    case MSignExtendInt64::Byte:
      masm.move8SignExtend(eax, eax);
      break;
    case MSignExtendInt64::Half:
      masm.move16SignExtend(eax, eax);
      break;
    case MSignExtendInt64::Word:
      break;
  }
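  // eax now holds the sign-extended 32-bit value; cdq extends it into edx.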
  masm.cdq();
}

void CodeGeneratorX86::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
  const LInt64Allocation& input = lir->getInt64Operand(0);
  Register output = ToRegister(lir->output());

  if (lir->mir()->bottomHalf())
    masm.movl(ToRegister(input.low()), output);
  else
    masm.movl(ToRegister(input.high()), output);
}

void CodeGeneratorX86::visitClzI64(LClzI64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register64 output = ToOutRegister64(lir);

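  // The count is at most 64, so it fits in the low word and the high word is
  // always zero. The same holds for ctz64 below.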
  masm.clz64(input, output.low);
  masm.xorl(output.high, output.high);
}

void CodeGeneratorX86::visitCtzI64(LCtzI64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register64 output = ToOutRegister64(lir);

  masm.ctz64(input, output.low);
  masm.xorl(output.high, output.high);
}

void CodeGeneratorX86::visitNotI64(LNotI64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register output = ToRegister(lir->output());

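  // OR the two halves together; the result is zero iff the full 64-bit value
  // is zero. Reuse the output register when it aliases one of the halves.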
  if (input.high == output) {
    masm.orl(input.low, output);
  } else if (input.low == output) {
    masm.orl(input.high, output);
  } else {
    masm.movl(input.high, output);
    masm.orl(input.low, output);
  }

  masm.cmpl(Imm32(0), output);
  masm.emitSet(Assembler::Equal, output);
}

void CodeGeneratorX86::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  MWasmTruncateToInt64* mir = lir->mir();
  FloatRegister floatTemp = ToFloatRegister(lir->temp());

  Label fail, convert;

  MOZ_ASSERT(mir->input()->type() == MIRType::Double ||
             mir->input()->type() == MIRType::Float32);

  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
  addOutOfLineCode(ool, mir);

  bool isSaturating = mir->isSaturating();
  if (mir->input()->type() == MIRType::Float32) {
    if (mir->isUnsigned())
      masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating,
                                       ool->entry(), ool->rejoin(), floatTemp);
    else
      masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, ool->entry(),
                                      ool->rejoin(), floatTemp);
  } else {
    if (mir->isUnsigned())
      masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, ool->entry(),
                                      ool->rejoin(), floatTemp);
    else
      masm.wasmTruncateDoubleToInt64(input, output, isSaturating, ool->entry(),
                                     ool->rejoin(), floatTemp);
  }
}

void CodeGeneratorX86::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  FloatRegister output = ToFloatRegister(lir->output());
  Register temp =
      lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());

  MIRType outputType = lir->mir()->type();
  MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);

  if (outputType == MIRType::Double) {
    if (lir->mir()->isUnsigned())
      masm.convertUInt64ToDouble(input, output, temp);
    else
      masm.convertInt64ToDouble(input, output);
  } else {
    if (lir->mir()->isUnsigned())
      masm.convertUInt64ToFloat32(input, output, temp);
    else
      masm.convertInt64ToFloat32(input, output);
  }
}

void CodeGeneratorX86::visitTestI64AndBranch(LTestI64AndBranch* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));

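  // A 64-bit value is truthy if either half is non-zero: branch to ifTrue on
  // a non-zero high word, otherwise fall through and test the low word.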
  masm.testl(input.high, input.high);
  jumpToBlock(lir->ifTrue(), Assembler::NonZero);
  masm.testl(input.low, input.low);
  emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
}