1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/x86/CodeGenerator-x86.h"
8
9 #include "mozilla/ArrayUtils.h"
10 #include "mozilla/Casting.h"
11 #include "mozilla/DebugOnly.h"
12
13 #include "jsnum.h"
14
15 #include "jit/CodeGenerator.h"
16 #include "jit/MIR.h"
17 #include "jit/MIRGraph.h"
18 #include "js/Conversions.h"
19 #include "vm/Shape.h"
20
21 #include "jit/MacroAssembler-inl.h"
22 #include "jit/shared/CodeGenerator-shared-inl.h"
23 #include "vm/JSScript-inl.h"
24
25 using namespace js;
26 using namespace js::jit;
27
28 using JS::GenericNaN;
29 using mozilla::BitwiseCast;
30 using mozilla::DebugOnly;
31 using mozilla::FloatingPoint;
32
// Construct the x86 code generator; all shared state lives in the
// CodeGeneratorX86Shared base class.
CodeGeneratorX86::CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph,
                                   MacroAssembler* masm)
    : CodeGeneratorX86Shared(gen, graph, masm) {}
36
// Frame-size buckets: FrameSizeClass i covers frame depths < FrameSizes[i].
static const uint32_t FrameSizes[] = {128, 256, 512, 1024};
38
FromDepth(uint32_t frameDepth)39 FrameSizeClass FrameSizeClass::FromDepth(uint32_t frameDepth) {
40 for (uint32_t i = 0; i < mozilla::ArrayLength(FrameSizes); i++) {
41 if (frameDepth < FrameSizes[i]) {
42 return FrameSizeClass(i);
43 }
44 }
45
46 return FrameSizeClass::None();
47 }
48
// One-past-the-last valid frame size class (used as an iteration bound).
FrameSizeClass FrameSizeClass::ClassLimit() {
  return FrameSizeClass(mozilla::ArrayLength(FrameSizes));
}
52
// Return the frame size in bytes for this class. Must not be called on the
// None() class or the ClassLimit() sentinel.
uint32_t FrameSizeClass::frameSize() const {
  MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
  MOZ_ASSERT(class_ < mozilla::ArrayLength(FrameSizes));

  return FrameSizes[class_];
}
59
// Build a ValueOperand from the type/payload operand pair starting at |pos|.
// On x86 a JS Value occupies two 32-bit registers (nunbox32 layout).
ValueOperand CodeGeneratorX86::ToValue(LInstruction* ins, size_t pos) {
  Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
  Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
  return ValueOperand(typeReg, payloadReg);
}
65
// Same as ToValue, but reads the type/payload pair from the instruction's
// temps rather than its operands.
ValueOperand CodeGeneratorX86::ToTempValue(LInstruction* ins, size_t pos) {
  Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
  Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
  return ValueOperand(typeReg, payloadReg);
}
71
// Materialize a constant JS Value into the output type/payload registers.
void CodeGenerator::visitValue(LValue* value) {
  const ValueOperand out = ToOutValue(value);
  masm.moveValue(value->value(), out);
}
76
// Box a non-floating-point payload into a Value by writing only the type tag.
void CodeGenerator::visitBox(LBox* box) {
  const LDefinition* type = box->getDef(TYPE_INDEX);

  DebugOnly<const LAllocation*> a = box->getOperand(0);
  MOZ_ASSERT(!a->isConstant());

  // On x86, the input operand and the output payload have the same
  // virtual register. All that needs to be written is the type tag for
  // the type definition.
  masm.mov(ImmWord(MIRTypeToTag(box->type())), ToRegister(type));
}
88
// Box a floating-point register into a Value. With Spectre value masking
// enabled, clamp the type tag so speculative reads can't see an invalid tag.
void CodeGenerator::visitBoxFloatingPoint(LBoxFloatingPoint* box) {
  const AnyRegister in = ToAnyRegister(box->getOperand(0));
  const ValueOperand out = ToOutValue(box);

  masm.moveValue(TypedOrValueRegister(box->type(), in), out);

  if (JitOptions.spectreValueMasking) {
    // If the produced tag is below JSVAL_TAG_CLEAR, replace it with
    // JSVAL_TAG_CLEAR via a conditional move (no branches).
    Register scratch = ToRegister(box->spectreTemp());
    masm.move32(Imm32(JSVAL_TAG_CLEAR), scratch);
    masm.cmp32Move32(Assembler::Below, scratch, out.typeReg(), scratch,
                     out.typeReg());
  }
}
102
// Unbox a Value into a payload register. A fallible unbox bails out on a tag
// mismatch; an infallible one only asserts the tag in debug builds.
void CodeGenerator::visitUnbox(LUnbox* unbox) {
  // Note that for unbox, the type and payload indexes are switched on the
  // inputs.
  Operand type = ToOperand(unbox->type());
  Operand payload = ToOperand(unbox->payload());
  Register output = ToRegister(unbox->output());
  MUnbox* mir = unbox->mir();

  JSValueTag tag = MIRTypeToTag(mir->type());
  if (mir->fallible()) {
    // Tag mismatch => bailout to the snapshot.
    masm.cmp32(type, Imm32(tag));
    bailoutIf(Assembler::NotEqual, unbox->snapshot());
  } else {
#ifdef DEBUG
    // Infallible: the tag is guaranteed to match; check it in debug builds.
    Label ok;
    masm.branch32(Assembler::Equal, type, Imm32(tag), &ok);
    masm.assumeUnreachable("Infallible unbox type mismatch");
    masm.bind(&ok);
#endif
  }

  // Note: If spectreValueMasking is disabled, then this instruction will
  // default to a no-op as long as the lowering allocate the same register for
  // the output and the payload.
  masm.unboxNonDouble(type, payload, output, ValueTypeFromMIRType(mir->type()));
}
129
visitCompareB(LCompareB * lir)130 void CodeGenerator::visitCompareB(LCompareB* lir) {
131 MCompare* mir = lir->mir();
132
133 const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
134 const LAllocation* rhs = lir->rhs();
135 const Register output = ToRegister(lir->output());
136
137 MOZ_ASSERT(mir->jsop() == JSOp::StrictEq || mir->jsop() == JSOp::StrictNe);
138
139 Label notBoolean, done;
140 masm.branchTestBoolean(Assembler::NotEqual, lhs, ¬Boolean);
141 {
142 if (rhs->isConstant()) {
143 masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
144 } else {
145 masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
146 }
147 masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
148 masm.jump(&done);
149 }
150 masm.bind(¬Boolean);
151 { masm.move32(Imm32(mir->jsop() == JSOp::StrictNe), output); }
152
153 masm.bind(&done);
154 }
155
// Branching variant of visitCompareB: strictly compare a Value against a
// boolean and jump to the true/false successor blocks.
void CodeGenerator::visitCompareBAndBranch(LCompareBAndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
  const LAllocation* rhs = lir->rhs();

  MOZ_ASSERT(mir->jsop() == JSOp::StrictEq || mir->jsop() == JSOp::StrictNe);

  // If lhs is not a boolean the strict comparison is decided immediately:
  // StrictEq fails (go to ifFalse), StrictNe succeeds (go to ifTrue).
  Assembler::Condition cond = masm.testBoolean(Assembler::NotEqual, lhs);
  jumpToBlock((mir->jsop() == JSOp::StrictEq) ? lir->ifFalse() : lir->ifTrue(),
              cond);

  // Both are booleans: compare the payloads and branch on the result.
  if (rhs->isConstant()) {
    masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
  } else {
    masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
  }
  emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(),
             lir->ifFalse());
}
175
visitCompareBitwise(LCompareBitwise * lir)176 void CodeGenerator::visitCompareBitwise(LCompareBitwise* lir) {
177 MCompare* mir = lir->mir();
178 Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
179 const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
180 const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
181 const Register output = ToRegister(lir->output());
182
183 MOZ_ASSERT(IsEqualityOp(mir->jsop()));
184
185 Label notEqual, done;
186 masm.cmp32(lhs.typeReg(), rhs.typeReg());
187 masm.j(Assembler::NotEqual, ¬Equal);
188 {
189 masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
190 masm.emitSet(cond, output);
191 masm.jump(&done);
192 }
193 masm.bind(¬Equal);
194 { masm.move32(Imm32(cond == Assembler::NotEqual), output); }
195
196 masm.bind(&done);
197 }
198
// Branching variant of visitCompareBitwise: compare two Values word-for-word
// and jump to the appropriate successor block.
void CodeGenerator::visitCompareBitwiseAndBranch(
    LCompareBitwiseAndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
  const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
  const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);

  MOZ_ASSERT(mir->jsop() == JSOp::Eq || mir->jsop() == JSOp::StrictEq ||
             mir->jsop() == JSOp::Ne || mir->jsop() == JSOp::StrictNe);

  // If the tags differ, an equality comparison is decided false; an
  // inequality comparison is decided true.
  MBasicBlock* notEqual =
      (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();

  masm.cmp32(lhs.typeReg(), rhs.typeReg());
  jumpToBlock(notEqual, Assembler::NotEqual);
  // Tags match: branch on the payload comparison.
  masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
  emitBranch(cond, lir->ifTrue(), lir->ifFalse());
}
217
// See ../CodeGenerator.cpp for more information.
// Intentionally empty: the result already lives in the designated register.
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {}
220
visitWasmUint32ToDouble(LWasmUint32ToDouble * lir)221 void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
222 Register input = ToRegister(lir->input());
223 Register temp = ToRegister(lir->temp());
224
225 if (input != temp) {
226 masm.mov(input, temp);
227 }
228
229 // Beware: convertUInt32ToDouble clobbers input.
230 masm.convertUInt32ToDouble(temp, ToFloatRegister(lir->output()));
231 }
232
visitWasmUint32ToFloat32(LWasmUint32ToFloat32 * lir)233 void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
234 Register input = ToRegister(lir->input());
235 Register temp = ToRegister(lir->temp());
236 FloatRegister output = ToFloatRegister(lir->output());
237
238 if (input != temp) {
239 masm.mov(input, temp);
240 }
241
242 // Beware: convertUInt32ToFloat32 clobbers input.
243 masm.convertUInt32ToFloat32(temp, output);
244 }
245
// Load the wasm linear-memory base pointer out of the TLS data.
void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) {
  masm.loadPtr(
      Address(ToRegister(ins->tlsPtr()), offsetof(wasm::TlsData, memoryBase)),
      ToRegister(ins->output()));
}
251
// Emit a wasm load from linear memory. T is LWasmLoad (32-bit and float
// results) or LWasmLoadI64 (64-bit results).
template <typename T>
void CodeGeneratorX86::emitWasmLoad(T* ins) {
  const MWasmLoad* mir = ins->mir();

  uint32_t offset = mir->access().offset();
  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);

  const LAllocation* ptr = ins->ptr();
  const LAllocation* memoryBase = ins->memoryBase();

  // Lowering has set things up so that we can use a BaseIndex form if the
  // pointer is constant and the offset is zero, or if the pointer is zero.

  Operand srcAddr =
      ptr->isBogus()
          ? Operand(ToRegister(memoryBase),
                    offset ? offset : mir->base()->toConstant()->toInt32())
          : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  if (mir->type() == MIRType::Int64) {
    // Atomic 64-bit loads go through visitWasmAtomicLoadI64, not here.
    MOZ_ASSERT_IF(mir->access().isAtomic(),
                  mir->access().type() != Scalar::Int64);
    masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
  } else {
    masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
  }
}
279
// Non-i64 wasm load: delegate to the shared template.
void CodeGenerator::visitWasmLoad(LWasmLoad* ins) { emitWasmLoad(ins); }
281
// 64-bit wasm load: delegate to the shared template.
void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* ins) { emitWasmLoad(ins); }
283
// Emit a wasm store to linear memory. T is LWasmStore (32-bit and float
// values) or LWasmStoreI64 (64-bit values).
template <typename T>
void CodeGeneratorX86::emitWasmStore(T* ins) {
  const MWasmStore* mir = ins->mir();

  uint32_t offset = mir->access().offset();
  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);

  const LAllocation* ptr = ins->ptr();
  const LAllocation* memoryBase = ins->memoryBase();

  // Lowering has set things up so that we can use a BaseIndex form if the
  // pointer is constant and the offset is zero, or if the pointer is zero.

  Operand dstAddr =
      ptr->isBogus()
          ? Operand(ToRegister(memoryBase),
                    offset ? offset : mir->base()->toConstant()->toInt32())
          : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  if (mir->access().type() == Scalar::Int64) {
    Register64 value =
        ToRegister64(ins->getInt64Operand(LWasmStoreI64::ValueIndex));
    masm.wasmStoreI64(mir->access(), value, dstAddr);
  } else {
    AnyRegister value = ToAnyRegister(ins->getOperand(LWasmStore::ValueIndex));
    masm.wasmStore(mir->access(), value, dstAddr);
  }
}
312
// Non-i64 wasm store: delegate to the shared template.
void CodeGenerator::visitWasmStore(LWasmStore* ins) { emitWasmStore(ins); }
314
// 64-bit wasm store: delegate to the shared template.
void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* ins) {
  emitWasmStore(ins);
}
318
// 8/16/32-bit wasm compare-exchange on linear memory. The effective address
// is materialized into addrTemp first so the atomic op sees a simple Address.
void CodeGenerator::visitWasmCompareExchangeHeap(
    LWasmCompareExchangeHeap* ins) {
  MWasmCompareExchangeHeap* mir = ins->mir();

  Register ptrReg = ToRegister(ins->ptr());
  Register oldval = ToRegister(ins->oldValue());
  Register newval = ToRegister(ins->newValue());
  Register addrTemp = ToRegister(ins->addrTemp());
  Register memoryBase = ToRegister(ins->memoryBase());
  Register output = ToRegister(ins->output());

  // addrTemp := memoryBase + ptr + offset.
  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  masm.wasmCompareExchange(mir->access(), memAddr, oldval, newval, output);
}
336
// 8/16/32-bit wasm atomic exchange on linear memory.
void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
  MWasmAtomicExchangeHeap* mir = ins->mir();

  Register ptrReg = ToRegister(ins->ptr());
  Register value = ToRegister(ins->value());
  Register addrTemp = ToRegister(ins->addrTemp());
  Register memoryBase = ToRegister(ins->memoryBase());
  Register output = ToRegister(ins->output());

  // addrTemp := memoryBase + ptr + offset.
  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  masm.wasmAtomicExchange(mir->access(), memAddr, value, output);
}
352
// 8/16/32-bit wasm atomic read-modify-write (fetch-op) whose old value is
// used.
void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
  MWasmAtomicBinopHeap* mir = ins->mir();

  Register ptrReg = ToRegister(ins->ptr());
  // The temp is optional; InvalidReg signals "no temp" to the masm helper.
  Register temp =
      ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
  Register addrTemp = ToRegister(ins->addrTemp());
  Register out = ToRegister(ins->output());
  const LAllocation* value = ins->value();
  AtomicOp op = mir->operation();
  Register memoryBase = ToRegister(ins->memoryBase());

  // addrTemp := memoryBase + ptr + offset.
  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  if (value->isConstant()) {
    masm.wasmAtomicFetchOp(mir->access(), op, Imm32(ToInt32(value)), memAddr,
                           temp, out);
  } else {
    masm.wasmAtomicFetchOp(mir->access(), op, ToRegister(value), memAddr, temp,
                           out);
  }
}
377
// 8/16/32-bit wasm atomic read-modify-write whose old value is unused, so a
// cheaper effect-only sequence can be emitted.
void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
    LWasmAtomicBinopHeapForEffect* ins) {
  MWasmAtomicBinopHeap* mir = ins->mir();
  MOZ_ASSERT(!mir->hasUses());

  Register ptrReg = ToRegister(ins->ptr());
  Register addrTemp = ToRegister(ins->addrTemp());
  const LAllocation* value = ins->value();
  AtomicOp op = mir->operation();
  Register memoryBase = ToRegister(ins->memoryBase());

  // addrTemp := memoryBase + ptr + offset.
  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  if (value->isConstant()) {
    masm.wasmAtomicEffectOp(mir->access(), op, Imm32(ToInt32(value)), memAddr,
                            InvalidReg);
  } else {
    masm.wasmAtomicEffectOp(mir->access(), op, ToRegister(value), memAddr,
                            InvalidReg);
  }
}
401
// 64-bit atomic load. On x86-32 this requires cmpxchg8b-style register
// pinning: lowering has fixed t1/t2 to ecx/ebx and the output to edx:eax.
void CodeGenerator::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* ins) {
  uint32_t offset = ins->mir()->access().offset();
  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();
  BaseIndex srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  // Verify the register assignments lowering promised.
  MOZ_ASSERT(ToRegister(ins->t1()) == ecx);
  MOZ_ASSERT(ToRegister(ins->t2()) == ebx);
  MOZ_ASSERT(ToOutRegister64(ins).high == edx);
  MOZ_ASSERT(ToOutRegister64(ins).low == eax);

  masm.wasmAtomicLoad64(ins->mir()->access(), srcAddr, Register64(ecx, ebx),
                        Register64(edx, eax));
}
418
// 64-bit wasm compare-exchange via LOCK CMPXCHG8B, which pins its operands:
// expected in edx:eax, replacement in ecx:ebx, result in edx:eax.
void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* ins) {
  uint32_t offset = ins->mir()->access().offset();
  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();
  Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  // Verify the register assignments lowering promised for cmpxchg8b.
  MOZ_ASSERT(ToRegister64(ins->expected()).low == eax);
  MOZ_ASSERT(ToRegister64(ins->expected()).high == edx);
  MOZ_ASSERT(ToRegister64(ins->replacement()).low == ebx);
  MOZ_ASSERT(ToRegister64(ins->replacement()).high == ecx);
  MOZ_ASSERT(ToOutRegister64(ins).low == eax);
  MOZ_ASSERT(ToOutRegister64(ins).high == edx);

  // Record the access so a fault here maps back to this wasm instruction.
  masm.append(ins->mir()->access(), masm.size());
  masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
}
437
// Shared emitter for 64-bit atomic store and atomic exchange: loop on
// LOCK CMPXCHG8B until the value in memory matches what edx:eax last read.
// T is LWasmAtomicStoreI64 or LWasmAtomicExchangeI64.
template <typename T>
void CodeGeneratorX86::emitWasmStoreOrExchangeAtomicI64(
    T* ins, const wasm::MemoryAccessDesc& access) {
  MOZ_ASSERT(access.offset() < wasm::MaxOffsetGuardLimit);

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();
  Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne,
                  access.offset());

  DebugOnly<const LInt64Allocation> value = ins->value();
  MOZ_ASSERT(ToRegister64(value).low == ebx);
  MOZ_ASSERT(ToRegister64(value).high == ecx);

  // eax and ebx will be overwritten every time through the loop but
  // memoryBase and ptr must remain live for a possible second iteration.

  MOZ_ASSERT(ToRegister(memoryBase) != edx && ToRegister(memoryBase) != eax);
  MOZ_ASSERT(ToRegister(ptr) != edx && ToRegister(ptr) != eax);

  Label again;
  masm.bind(&again);
  masm.append(access, masm.size());
  masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
  // ZF clear => another thread changed the location; retry.
  masm.j(Assembler::Condition::NonZero, &again);
}
464
// 64-bit atomic store: delegates to the cmpxchg8b retry loop. t1/t2 are
// pinned to edx/eax, which the loop clobbers.
void CodeGenerator::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* ins) {
  MOZ_ASSERT(ToRegister(ins->t1()) == edx);
  MOZ_ASSERT(ToRegister(ins->t2()) == eax);

  emitWasmStoreOrExchangeAtomicI64(ins, ins->mir()->access());
}
471
// 64-bit atomic exchange: same cmpxchg8b loop; the previous memory value is
// left in the edx:eax output pair.
void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* ins) {
  MOZ_ASSERT(ToOutRegister64(ins).high == edx);
  MOZ_ASSERT(ToOutRegister64(ins).low == eax);

  emitWasmStoreOrExchangeAtomicI64(ins, ins->access());
}
478
// 64-bit atomic read-modify-write. Register pressure is extreme on x86-32:
// the operand value (ecx:ebx) is spilled to the stack so its registers can
// double as temps for the cmpxchg8b loop, and is restored afterwards.
void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* ins) {
  uint32_t offset = ins->access().offset();
  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();

  BaseIndex srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  // Only esi/edi are left for the address after eax/ebx/ecx/edx are pinned.
  MOZ_ASSERT(ToRegister(memoryBase) == esi || ToRegister(memoryBase) == edi);
  MOZ_ASSERT(ToRegister(ptr) == esi || ToRegister(ptr) == edi);

  Register64 value = ToRegister64(ins->value());

  MOZ_ASSERT(value.low == ebx);
  MOZ_ASSERT(value.high == ecx);

  Register64 output = ToOutRegister64(ins);

  MOZ_ASSERT(output.low == eax);
  MOZ_ASSERT(output.high == edx);

  // Spill the operand value; valueAddr points at it on the stack.
  masm.Push(ecx);
  masm.Push(ebx);

  Address valueAddr(esp, 0);

  // Here the `value` register acts as a temp, we'll restore it below.
  masm.wasmAtomicFetchOp64(ins->access(), ins->operation(), valueAddr, srcAddr,
                           value, output);

  masm.Pop(ebx);
  masm.Pop(ecx);
}
513
514 namespace js {
515 namespace jit {
516
// Out-of-line path for LTruncateDToInt32, entered when the inline
// branchTruncateDoubleMaybeModUint32 fast path fails.
class OutOfLineTruncate : public OutOfLineCodeBase<CodeGeneratorX86> {
  LTruncateDToInt32* ins_;  // The instruction whose slow path this is.

 public:
  explicit OutOfLineTruncate(LTruncateDToInt32* ins) : ins_(ins) {}

  // Dispatch to the codegen's slow-path emitter.
  void accept(CodeGeneratorX86* codegen) override {
    codegen->visitOutOfLineTruncate(this);
  }
  LTruncateDToInt32* ins() const { return ins_; }
};
528
// Out-of-line path for LTruncateFToInt32, entered when the inline
// branchTruncateFloat32MaybeModUint32 fast path fails.
class OutOfLineTruncateFloat32 : public OutOfLineCodeBase<CodeGeneratorX86> {
  LTruncateFToInt32* ins_;  // The instruction whose slow path this is.

 public:
  explicit OutOfLineTruncateFloat32(LTruncateFToInt32* ins) : ins_(ins) {}

  // Dispatch to the codegen's slow-path emitter.
  void accept(CodeGeneratorX86* codegen) override {
    codegen->visitOutOfLineTruncateFloat32(this);
  }
  LTruncateFToInt32* ins() const { return ins_; }
};
540
541 } // namespace jit
542 } // namespace js
543
// Truncate a double to int32 (ToInt32 semantics): fast inline attempt with an
// out-of-line fallback for values the inline sequence cannot handle.
void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  OutOfLineTruncate* ool = new (alloc()) OutOfLineTruncate(ins);
  addOutOfLineCode(ool, ins->mir());

  masm.branchTruncateDoubleMaybeModUint32(input, output, ool->entry());
  masm.bind(ool->rejoin());
}
554
// Truncate a float32 to int32 (ToInt32 semantics): fast inline attempt with
// an out-of-line fallback for values the inline sequence cannot handle.
void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  OutOfLineTruncateFloat32* ool = new (alloc()) OutOfLineTruncateFloat32(ins);
  addOutOfLineCode(ool, ins->mir());

  masm.branchTruncateFloat32MaybeModUint32(input, output, ool->entry());
  masm.bind(ool->rejoin());
}
565
// Slow path for double -> int32 truncation. Tries progressively harder:
// (1) with SSE3, a 64-bit fisttp-style truncation through the stack;
// (2) otherwise, bias the value by +/-2^32 and retry an exact cvttsd2si;
// (3) finally, call out to the runtime/builtin ToInt32.
void CodeGeneratorX86::visitOutOfLineTruncate(OutOfLineTruncate* ool) {
  LTruncateDToInt32* ins = ool->ins();
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  Label fail;

  if (Assembler::HasSSE3()) {
    Label failPopDouble;
    // Push double.
    masm.subl(Imm32(sizeof(double)), esp);
    masm.storeDouble(input, Operand(esp, 0));

    // Check exponent to avoid fp exceptions.
    masm.branchDoubleNotInInt64Range(Address(esp, 0), output, &failPopDouble);

    // Load double, perform 64-bit truncation.
    masm.truncateDoubleToInt64(Address(esp, 0), Address(esp, 0), output);

    // Load low word, pop double and jump back.
    masm.load32(Address(esp, 0), output);
    masm.addl(Imm32(sizeof(double)), esp);
    masm.jump(ool->rejoin());

    // Out of int64 range: undo the stack adjustment before the ABI call.
    masm.bind(&failPopDouble);
    masm.addl(Imm32(sizeof(double)), esp);
    masm.jump(&fail);
  } else {
    FloatRegister temp = ToFloatRegister(ins->tempFloat());

    // Try to convert doubles representing integers within 2^32 of a signed
    // integer, by adding/subtracting 2^32 and then trying to convert to int32.
    // This has to be an exact conversion, as otherwise the truncation works
    // incorrectly on the modified value.
    {
      // NaN (unordered compare sets PF) goes straight to the ABI call.
      ScratchDoubleScope fpscratch(masm);
      masm.zeroDouble(fpscratch);
      masm.vucomisd(fpscratch, input);
      masm.j(Assembler::Parity, &fail);
    }

    {
      // Pick the bias: +2^32 for negative inputs, -2^32 for positive ones.
      Label positive;
      masm.j(Assembler::Above, &positive);

      masm.loadConstantDouble(4294967296.0, temp);
      Label skip;
      masm.jmp(&skip);

      masm.bind(&positive);
      masm.loadConstantDouble(-4294967296.0, temp);
      masm.bind(&skip);
    }

    // Convert the biased value, then convert back and compare to verify the
    // conversion was exact; inexact or out-of-range falls through to `fail`.
    masm.addDouble(input, temp);
    masm.vcvttsd2si(temp, output);
    ScratchDoubleScope fpscratch(masm);
    masm.vcvtsi2sd(output, fpscratch, fpscratch);

    masm.vucomisd(fpscratch, temp);
    masm.j(Assembler::Parity, &fail);
    masm.j(Assembler::Equal, ool->rejoin());
  }

  // Last resort: call the ToInt32 builtin via the ABI.
  masm.bind(&fail);
  {
    saveVolatile(output);

    if (gen->compilingWasm()) {
      masm.setupWasmABICall();
      masm.passABIArg(input, MoveOp::DOUBLE);
      masm.callWithABI(ins->mir()->bytecodeOffset(),
                       wasm::SymbolicAddress::ToInt32);
    } else {
      masm.setupUnalignedABICall(output);
      masm.passABIArg(input, MoveOp::DOUBLE);
      masm.callWithABI(BitwiseCast<void*, int32_t (*)(double)>(JS::ToInt32),
                       MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
    }
    masm.storeCallInt32Result(output);

    restoreVolatile(output);
  }

  masm.jump(ool->rejoin());
}
652
// Slow path for float32 -> int32 truncation; mirrors visitOutOfLineTruncate
// but for float32 inputs (the ABI fallback widens to double first).
void CodeGeneratorX86::visitOutOfLineTruncateFloat32(
    OutOfLineTruncateFloat32* ool) {
  LTruncateFToInt32* ins = ool->ins();
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  Label fail;

  if (Assembler::HasSSE3()) {
    Label failPopFloat;

    // Push float32, but subtracts 64 bits so that the value popped by fisttp
    // fits
    masm.subl(Imm32(sizeof(uint64_t)), esp);
    masm.storeFloat32(input, Operand(esp, 0));

    // Check exponent to avoid fp exceptions.
    masm.branchDoubleNotInInt64Range(Address(esp, 0), output, &failPopFloat);

    // Load float, perform 64-bit truncation.
    masm.truncateFloat32ToInt64(Address(esp, 0), Address(esp, 0), output);

    // Load low word, pop 64bits and jump back.
    masm.load32(Address(esp, 0), output);
    masm.addl(Imm32(sizeof(uint64_t)), esp);
    masm.jump(ool->rejoin());

    // Out of int64 range: undo the stack adjustment before the ABI call.
    masm.bind(&failPopFloat);
    masm.addl(Imm32(sizeof(uint64_t)), esp);
    masm.jump(&fail);
  } else {
    FloatRegister temp = ToFloatRegister(ins->tempFloat());

    // Try to convert float32 representing integers within 2^32 of a signed
    // integer, by adding/subtracting 2^32 and then trying to convert to int32.
    // This has to be an exact conversion, as otherwise the truncation works
    // incorrectly on the modified value.
    {
      // NaN (unordered compare sets PF) goes straight to the ABI call.
      ScratchFloat32Scope fpscratch(masm);
      masm.zeroFloat32(fpscratch);
      masm.vucomiss(fpscratch, input);
      masm.j(Assembler::Parity, &fail);
    }

    {
      // Pick the bias: +2^32 for negative inputs, -2^32 for positive ones.
      Label positive;
      masm.j(Assembler::Above, &positive);

      masm.loadConstantFloat32(4294967296.f, temp);
      Label skip;
      masm.jmp(&skip);

      masm.bind(&positive);
      masm.loadConstantFloat32(-4294967296.f, temp);
      masm.bind(&skip);
    }

    // Convert the biased value, then round-trip to verify exactness; inexact
    // or out-of-range falls through to `fail`.
    masm.addFloat32(input, temp);
    masm.vcvttss2si(temp, output);
    ScratchFloat32Scope fpscratch(masm);
    masm.vcvtsi2ss(output, fpscratch, fpscratch);

    masm.vucomiss(fpscratch, temp);
    masm.j(Assembler::Parity, &fail);
    masm.j(Assembler::Equal, ool->rejoin());
  }

  // Last resort: widen to double and call the ToInt32 builtin via the ABI.
  masm.bind(&fail);
  {
    saveVolatile(output);

    // Preserve the input register across the (clobbering) conversion below.
    masm.Push(input);

    if (gen->compilingWasm()) {
      masm.setupWasmABICall();
    } else {
      masm.setupUnalignedABICall(output);
    }

    masm.vcvtss2sd(input, input, input);
    masm.passABIArg(input.asDouble(), MoveOp::DOUBLE);

    if (gen->compilingWasm()) {
      masm.callWithABI(ins->mir()->bytecodeOffset(),
                       wasm::SymbolicAddress::ToInt32);
    } else {
      masm.callWithABI(BitwiseCast<void*, int32_t (*)(double)>(JS::ToInt32),
                       MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
    }

    masm.storeCallInt32Result(output);
    masm.Pop(input);

    restoreVolatile(output);
  }

  masm.jump(ool->rejoin());
}
751
// Compare two 64-bit integers (each a register pair on x86-32) and produce
// 0/1: optimistically set 1, then clear it if the branch-on-condition is not
// taken.
void CodeGenerator::visitCompareI64(LCompareI64* lir) {
  MCompare* mir = lir->mir();
  MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
             mir->compareType() == MCompare::Compare_UInt64);

  const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
  Register64 lhsRegs = ToRegister64(lhs);
  Register output = ToRegister(lir->output());

  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
  Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
  Label done;

  // Assume true; fall through to clear if the comparison fails.
  masm.move32(Imm32(1), output);

  if (IsConstant(rhs)) {
    Imm64 imm = Imm64(ToInt64(rhs));
    masm.branch64(condition, lhsRegs, imm, &done);
  } else {
    Register64 rhsRegs = ToRegister64(rhs);
    masm.branch64(condition, lhsRegs, rhsRegs, &done);
  }

  masm.xorl(output, output);
  masm.bind(&done);
}
779
// Compare two 64-bit integers and branch. Falls through to an adjacent block
// where possible by dropping (or inverting onto) the redundant jump.
void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
             mir->compareType() == MCompare::Compare_UInt64);

  const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
  Register64 lhsRegs = ToRegister64(lhs);

  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
  Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);

  Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
  Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());

  if (isNextBlock(lir->ifFalse()->lir())) {
    // False successor is next: no jump needed for the false case.
    falseLabel = nullptr;
  } else if (isNextBlock(lir->ifTrue()->lir())) {
    // True successor is next: invert and jump only for the false case.
    condition = Assembler::InvertCondition(condition);
    trueLabel = falseLabel;
    falseLabel = nullptr;
  }

  if (IsConstant(rhs)) {
    Imm64 imm = Imm64(ToInt64(rhs));
    masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
  } else {
    Register64 rhsRegs = ToRegister64(rhs);
    masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
  }
}
811
visitDivOrModI64(LDivOrModI64 * lir)812 void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
813 Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
814 Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
815 Register temp = ToRegister(lir->temp());
816 Register64 output = ToOutRegister64(lir);
817
818 MOZ_ASSERT(output == ReturnReg64);
819
820 Label done;
821
822 // Handle divide by zero.
823 if (lir->canBeDivideByZero()) {
824 Label nonZero;
825 masm.branchTest64(Assembler::NonZero, rhs, rhs, temp, &nonZero);
826 masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
827 masm.bind(&nonZero);
828 }
829
830 MDefinition* mir = lir->mir();
831
832 // Handle an integer overflow exception from INT64_MIN / -1.
833 if (lir->canBeNegativeOverflow()) {
834 Label notOverflow;
835 masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), ¬Overflow);
836 masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), ¬Overflow);
837 if (mir->isMod()) {
838 masm.xor64(output, output);
839 } else {
840 masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
841 }
842 masm.jump(&done);
843 masm.bind(¬Overflow);
844 }
845
846 masm.setupWasmABICall();
847 masm.passABIArg(lhs.high);
848 masm.passABIArg(lhs.low);
849 masm.passABIArg(rhs.high);
850 masm.passABIArg(rhs.low);
851
852 MOZ_ASSERT(gen->compilingWasm());
853 if (mir->isMod()) {
854 masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::ModI64);
855 } else {
856 masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::DivI64);
857 }
858
859 // output in edx:eax, move to output register.
860 masm.movl(edx, output.high);
861 MOZ_ASSERT(eax == output.low);
862
863 masm.bind(&done);
864 }
865
// Unsigned 64-bit division/modulus for wasm: trap on divide-by-zero, then
// call the UDivI64/UModI64 builtins via the wasm ABI (no overflow case for
// unsigned operands). Result is returned in edx:eax.
void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
  // NOTE(review): operand indices are borrowed from LDivOrModI64 — assumes
  // LUDivOrModI64 uses the same Lhs/Rhs layout; confirm against the LIR defs.
  Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
  Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
  Register temp = ToRegister(lir->temp());
  Register64 output = ToOutRegister64(lir);

  MOZ_ASSERT(output == ReturnReg64);

  // Prevent divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    masm.branchTest64(Assembler::NonZero, rhs, rhs, temp, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

  // Call the builtin: each i64 argument is passed as two 32-bit halves.
  masm.setupWasmABICall();
  masm.passABIArg(lhs.high);
  masm.passABIArg(lhs.low);
  masm.passABIArg(rhs.high);
  masm.passABIArg(rhs.low);

  MOZ_ASSERT(gen->compilingWasm());
  MDefinition* mir = lir->mir();
  if (mir->isMod()) {
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UModI64);
  } else {
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UDivI64);
  }

  // output in edx:eax, move to output register.
  masm.movl(edx, output.high);
  MOZ_ASSERT(eax == output.low);
}
900
visitWasmSelectI64(LWasmSelectI64 * lir)901 void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
902 MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
903
904 Register cond = ToRegister(lir->condExpr());
905 Register64 falseExpr = ToRegister64(lir->falseExpr());
906 Register64 out = ToOutRegister64(lir);
907
908 MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
909 "true expr is reused for input");
910
911 Label done;
912 masm.branchTest32(Assembler::NonZero, cond, cond, &done);
913 masm.movl(falseExpr.low, out.low);
914 masm.movl(falseExpr.high, out.high);
915 masm.bind(&done);
916 }
917
// Bitwise-reinterpret an int64 register pair as a double: spill both
// halves to the stack (high word pushed first, so the 64-bit value is
// laid out little-endian in memory), reload as a 64-bit float, then
// reclaim the 8 bytes of stack.
void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
  Register64 input = ToRegister64(lir->getInt64Operand(0));

  masm.Push(input.high);
  masm.Push(input.low);
  // Load the spilled 64 bits into the output float register unchanged.
  masm.vmovq(Operand(esp, 0), ToFloatRegister(lir->output()));
  masm.freeStack(sizeof(uint64_t));
}
928
// Bitwise-reinterpret a double as an int64 register pair: store the
// float register's 64 bits to freshly reserved stack, then pop the low
// and high words (in that order, matching little-endian layout). The
// two Pops also release the reserved stack space.
void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
  Register64 output = ToOutRegister64(lir);

  masm.reserveStack(sizeof(uint64_t));
  masm.vmovq(ToFloatRegister(lir->input()), Operand(esp, 0));
  masm.Pop(output.low);
  masm.Pop(output.high);
}
939
visitExtendInt32ToInt64(LExtendInt32ToInt64 * lir)940 void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
941 Register64 output = ToOutRegister64(lir);
942 Register input = ToRegister(lir->input());
943
944 if (lir->mir()->isUnsigned()) {
945 if (output.low != input) {
946 masm.movl(input, output.low);
947 }
948 masm.xorl(output.high, output.high);
949 } else {
950 MOZ_ASSERT(output.low == input);
951 MOZ_ASSERT(output.low == eax);
952 MOZ_ASSERT(output.high == edx);
953 masm.cdq();
954 }
955 }
956
visitSignExtendInt64(LSignExtendInt64 * lir)957 void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
958 #ifdef DEBUG
959 Register64 input = ToRegister64(lir->getInt64Operand(0));
960 Register64 output = ToOutRegister64(lir);
961 MOZ_ASSERT(input.low == eax);
962 MOZ_ASSERT(output.low == eax);
963 MOZ_ASSERT(input.high == edx);
964 MOZ_ASSERT(output.high == edx);
965 #endif
966 switch (lir->mode()) {
967 case MSignExtendInt64::Byte:
968 masm.move8SignExtend(eax, eax);
969 break;
970 case MSignExtendInt64::Half:
971 masm.move16SignExtend(eax, eax);
972 break;
973 case MSignExtendInt64::Word:
974 break;
975 }
976 masm.cdq();
977 }
978
visitWrapInt64ToInt32(LWrapInt64ToInt32 * lir)979 void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
980 const LInt64Allocation& input = lir->getInt64Operand(0);
981 Register output = ToRegister(lir->output());
982
983 if (lir->mir()->bottomHalf()) {
984 masm.movl(ToRegister(input.low()), output);
985 } else {
986 masm.movl(ToRegister(input.high()), output);
987 }
988 }
989
visitClzI64(LClzI64 * lir)990 void CodeGenerator::visitClzI64(LClzI64* lir) {
991 Register64 input = ToRegister64(lir->getInt64Operand(0));
992 Register64 output = ToOutRegister64(lir);
993
994 masm.clz64(input, output.low);
995 masm.xorl(output.high, output.high);
996 }
997
visitCtzI64(LCtzI64 * lir)998 void CodeGenerator::visitCtzI64(LCtzI64* lir) {
999 Register64 input = ToRegister64(lir->getInt64Operand(0));
1000 Register64 output = ToOutRegister64(lir);
1001
1002 masm.ctz64(input, output.low);
1003 masm.xorl(output.high, output.high);
1004 }
1005
visitNotI64(LNotI64 * lir)1006 void CodeGenerator::visitNotI64(LNotI64* lir) {
1007 Register64 input = ToRegister64(lir->getInt64Operand(0));
1008 Register output = ToRegister(lir->output());
1009
1010 if (input.high == output) {
1011 masm.orl(input.low, output);
1012 } else if (input.low == output) {
1013 masm.orl(input.high, output);
1014 } else {
1015 masm.movl(input.high, output);
1016 masm.orl(input.low, output);
1017 }
1018
1019 masm.cmpl(Imm32(0), output);
1020 masm.emitSet(Assembler::Equal, output);
1021 }
1022
visitWasmTruncateToInt64(LWasmTruncateToInt64 * lir)1023 void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
1024 FloatRegister input = ToFloatRegister(lir->input());
1025 Register64 output = ToOutRegister64(lir);
1026
1027 MWasmTruncateToInt64* mir = lir->mir();
1028 FloatRegister floatTemp = ToFloatRegister(lir->temp());
1029
1030 Label fail, convert;
1031
1032 MOZ_ASSERT(mir->input()->type() == MIRType::Double ||
1033 mir->input()->type() == MIRType::Float32);
1034
1035 auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
1036 addOutOfLineCode(ool, mir);
1037
1038 bool isSaturating = mir->isSaturating();
1039 if (mir->input()->type() == MIRType::Float32) {
1040 if (mir->isUnsigned()) {
1041 masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating,
1042 ool->entry(), ool->rejoin(), floatTemp);
1043 } else {
1044 masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, ool->entry(),
1045 ool->rejoin(), floatTemp);
1046 }
1047 } else {
1048 if (mir->isUnsigned()) {
1049 masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, ool->entry(),
1050 ool->rejoin(), floatTemp);
1051 } else {
1052 masm.wasmTruncateDoubleToInt64(input, output, isSaturating, ool->entry(),
1053 ool->rejoin(), floatTemp);
1054 }
1055 }
1056 }
1057
visitInt64ToFloatingPoint(LInt64ToFloatingPoint * lir)1058 void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
1059 Register64 input = ToRegister64(lir->getInt64Operand(0));
1060 FloatRegister output = ToFloatRegister(lir->output());
1061 Register temp =
1062 lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
1063
1064 MIRType outputType = lir->mir()->type();
1065 MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
1066
1067 if (outputType == MIRType::Double) {
1068 if (lir->mir()->isUnsigned()) {
1069 masm.convertUInt64ToDouble(input, output, temp);
1070 } else {
1071 masm.convertInt64ToDouble(input, output);
1072 }
1073 } else {
1074 if (lir->mir()->isUnsigned()) {
1075 masm.convertUInt64ToFloat32(input, output, temp);
1076 } else {
1077 masm.convertInt64ToFloat32(input, output);
1078 }
1079 }
1080 }
1081
// Branch on the truthiness of an int64: the value is truthy iff either
// half of the register pair is non-zero.
void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));

  // Jump to the true block as soon as the high word tests non-zero;
  // otherwise fall through and decide on the low word.
  masm.testl(input.high, input.high);
  jumpToBlock(lir->ifTrue(), Assembler::NonZero);
  masm.testl(input.low, input.low);
  emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
}
1090