/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips32/CodeGenerator-mips32.h"

#include "mozilla/MathAlgorithms.h"

#include "jit/CodeGenerator.h"
#include "jit/JitCompartment.h"
#include "jit/JitFrames.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "vm/Shape.h"
#include "vm/TraceLogging.h"

#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"

using namespace js;
using namespace js::jit;

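// On MIPS32 the engine uses the NUNBOX32 representation: a JS Value lives in
// a pair of 32-bit registers, one holding the type tag and one holding the
// payload. TYPE_INDEX and PAYLOAD_INDEX pick the matching halves out of an
// LInstruction's operand or temp lists.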
ValueOperand CodeGeneratorMIPS::ToValue(LInstruction* ins, size_t pos) {
  Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
  Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
  return ValueOperand(typeReg, payloadReg);
}

ValueOperand CodeGeneratorMIPS::ToTempValue(LInstruction* ins, size_t pos) {
  Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
  Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
  return ValueOperand(typeReg, payloadReg);
}

void CodeGeneratorMIPS::visitBox(LBox* box) {
  const LDefinition* type = box->getDef(TYPE_INDEX);

  MOZ_ASSERT(!box->getOperand(0)->isConstant());

  // For NUNBOX32, the input operand and the output payload have the same
  // virtual register. All that needs to be written is the type tag for
  // the type definition.
  masm.move32(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
}

void CodeGeneratorMIPS::visitBoxFloatingPoint(LBoxFloatingPoint* box) {
  const AnyRegister in = ToAnyRegister(box->getOperand(0));
  const ValueOperand out = ToOutValue(box);

  masm.moveValue(TypedOrValueRegister(box->type(), in), out);
}

void CodeGeneratorMIPS::visitUnbox(LUnbox* unbox) {
  // Note that for unbox, the type and payload indexes are switched on the
  // inputs.
  MUnbox* mir = unbox->mir();
  Register type = ToRegister(unbox->type());

  if (mir->fallible()) {
    bailoutCmp32(Assembler::NotEqual, type, Imm32(MIRTypeToTag(mir->type())),
                 unbox->snapshot());
  }
}

void CodeGeneratorMIPS::splitTagForTest(const ValueOperand& value,
                                        ScratchTagScope& tag) {
  MOZ_ASSERT(value.typeReg() == tag);
}

void CodeGeneratorMIPS::visitCompareB(LCompareB* lir) {
  MCompare* mir = lir->mir();

  const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
  const LAllocation* rhs = lir->rhs();
  const Register output = ToRegister(lir->output());

  MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());

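  // A strict comparison against a boolean can only hold when the lhs is
  // itself a boolean; otherwise the answer is known statically (false for
  // JSOP_STRICTEQ, true for JSOP_STRICTNE).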
  Label notBoolean, done;
  masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
  {
    if (rhs->isConstant())
      masm.cmp32Set(cond, lhs.payloadReg(),
                    Imm32(rhs->toConstant()->toBoolean()), output);
    else
      masm.cmp32Set(cond, lhs.payloadReg(), ToRegister(rhs), output);
    masm.jump(&done);
  }

  masm.bind(&notBoolean);
  { masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output); }

  masm.bind(&done);
}

void CodeGeneratorMIPS::visitCompareBAndBranch(LCompareBAndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
  const LAllocation* rhs = lir->rhs();

  MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

  MBasicBlock* mirNotBoolean =
      (mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue();
  branchToBlock(lhs.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), mirNotBoolean,
                Assembler::NotEqual);

  Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
  if (rhs->isConstant())
    emitBranch(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()), cond,
               lir->ifTrue(), lir->ifFalse());
  else
    emitBranch(lhs.payloadReg(), ToRegister(rhs), cond, lir->ifTrue(),
               lir->ifFalse());
}

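// Two NUNBOX32 values are bitwise-identical exactly when both 32-bit halves
// match, so the comparison is done in two steps: branch to the constant
// answer if the type tags differ, then compare the payload words.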
void CodeGeneratorMIPS::visitCompareBitwise(LCompareBitwise* lir) {
  MCompare* mir = lir->mir();
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
  const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
  const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
  const Register output = ToRegister(lir->output());

  MOZ_ASSERT(IsEqualityOp(mir->jsop()));

  Label notEqual, done;
  masm.ma_b(lhs.typeReg(), rhs.typeReg(), &notEqual, Assembler::NotEqual,
            ShortJump);
  {
    masm.cmp32Set(cond, lhs.payloadReg(), rhs.payloadReg(), output);
    masm.ma_b(&done, ShortJump);
  }
  masm.bind(&notEqual);
  { masm.move32(Imm32(cond == Assembler::NotEqual), output); }

  masm.bind(&done);
}

void CodeGeneratorMIPS::visitCompareBitwiseAndBranch(
    LCompareBitwiseAndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
  const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
  const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);

  MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
             mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

  MBasicBlock* notEqual =
      (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();

  branchToBlock(lhs.typeReg(), rhs.typeReg(), notEqual, Assembler::NotEqual);
  emitBranch(lhs.payloadReg(), rhs.payloadReg(), cond, lir->ifTrue(),
             lir->ifFalse());
}

void CodeGeneratorMIPS::visitCompareI64(LCompareI64* lir) {
  MCompare* mir = lir->mir();
  MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
             mir->compareType() == MCompare::Compare_UInt64);

  const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
  Register64 lhsRegs = ToRegister64(lhs);
  Register output = ToRegister(lir->output());

  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
  Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);

  if (IsConstant(rhs)) {
    Imm64 imm = Imm64(ToInt64(rhs));
    masm.cmp64Set(condition, lhsRegs, imm, output);
  } else {
    Register64 rhsRegs = ToRegister64(rhs);
    masm.cmp64Set(condition, lhsRegs, rhsRegs, output);
  }
}

void CodeGeneratorMIPS::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
             mir->compareType() == MCompare::Compare_UInt64);

  const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
  Register64 lhsRegs = ToRegister64(lhs);

  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
  Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);

  Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
  Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());

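  // Prefer falling through: if one successor is the next block in emission
  // order, drop its branch, inverting the condition when the fall-through
  // block is the true target.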
  if (isNextBlock(lir->ifFalse()->lir())) {
    falseLabel = nullptr;
  } else if (isNextBlock(lir->ifTrue()->lir())) {
    condition = Assembler::InvertCondition(condition);
    trueLabel = falseLabel;
    falseLabel = nullptr;
  }

  if (IsConstant(rhs)) {
    Imm64 imm = Imm64(ToInt64(rhs));
    masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
  } else {
    Register64 rhsRegs = ToRegister64(rhs);
    masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
  }
}

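// MIPS32 has no 64-bit divide instruction, so signed 64-bit division and
// modulus are performed by calling a runtime builtin. The cases wasm
// requires to trap (division by zero and INT64_MIN / -1) are filtered out
// inline first; INT64_MIN % -1 does not trap and is defined to be 0.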
void CodeGeneratorMIPS::visitDivOrModI64(LDivOrModI64* lir) {
  Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
  Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
  Register64 output = ToOutRegister64(lir);

  MOZ_ASSERT(output == ReturnReg64);

  Label done;

  // Handle divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    masm.branchTest64(Assembler::NonZero, rhs, rhs, InvalidReg, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

  // Handle an integer overflow exception from INT64_MIN / -1.
  if (lir->canBeNegativeOverflow()) {
    Label notOverflow;
    masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notOverflow);
    masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notOverflow);
    if (lir->mir()->isMod())
      masm.xor64(output, output);
    else
      masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
    masm.jump(&done);
    masm.bind(&notOverflow);
  }

  masm.setupWasmABICall();
  masm.passABIArg(lhs.high);
  masm.passABIArg(lhs.low);
  masm.passABIArg(rhs.high);
  masm.passABIArg(rhs.low);

  MOZ_ASSERT(gen->compilingWasm());
  if (lir->mir()->isMod())
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::ModI64);
  else
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::DivI64);
  MOZ_ASSERT(ReturnReg64 == output);

  masm.bind(&done);
}

void CodeGeneratorMIPS::visitUDivOrModI64(LUDivOrModI64* lir) {
  Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
  Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));

  MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);

  // Prevent divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    masm.branchTest64(Assembler::NonZero, rhs, rhs, InvalidReg, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

  masm.setupWasmABICall();
  masm.passABIArg(lhs.high);
  masm.passABIArg(lhs.low);
  masm.passABIArg(rhs.high);
  masm.passABIArg(rhs.low);

  MOZ_ASSERT(gen->compilingWasm());
  if (lir->mir()->isMod())
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UModI64);
  else
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UDivI64);
}

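// Accesses the MIR marks as potentially unaligned go through a separate
// macro-assembler path that tolerates unaligned addresses and needs an
// extra temp register; aligned accesses use the plain load/store sequence.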
template <typename T>
void CodeGeneratorMIPS::emitWasmLoadI64(T* lir) {
  const MWasmLoad* mir = lir->mir();

  Register ptrScratch = InvalidReg;
  if (!lir->ptrCopy()->isBogusTemp()) {
    ptrScratch = ToRegister(lir->ptrCopy());
  }

  if (IsUnaligned(mir->access())) {
    masm.wasmUnalignedLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()),
                              ptrScratch, ToOutRegister64(lir),
                              ToRegister(lir->getTemp(1)));
  } else {
    masm.wasmLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()), ptrScratch,
                     ToOutRegister64(lir));
  }
}

void CodeGeneratorMIPS::visitWasmLoadI64(LWasmLoadI64* lir) {
  emitWasmLoadI64(lir);
}

void CodeGeneratorMIPS::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir) {
  emitWasmLoadI64(lir);
}

template <typename T>
void CodeGeneratorMIPS::emitWasmStoreI64(T* lir) {
  const MWasmStore* mir = lir->mir();

  Register ptrScratch = InvalidReg;
  if (!lir->ptrCopy()->isBogusTemp()) {
    ptrScratch = ToRegister(lir->ptrCopy());
  }

  if (IsUnaligned(mir->access())) {
    masm.wasmUnalignedStoreI64(mir->access(), ToRegister64(lir->value()),
                               HeapReg, ToRegister(lir->ptr()), ptrScratch,
                               ToRegister(lir->getTemp(1)));
  } else {
    masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
                      ToRegister(lir->ptr()), ptrScratch);
  }
}

void CodeGeneratorMIPS::visitWasmStoreI64(LWasmStoreI64* lir) {
  emitWasmStoreI64(lir);
}

void CodeGeneratorMIPS::visitWasmUnalignedStoreI64(
    LWasmUnalignedStoreI64* lir) {
  emitWasmStoreI64(lir);
}

void CodeGeneratorMIPS::visitWasmSelectI64(LWasmSelectI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
  Register cond = ToRegister(lir->condExpr());
  const LInt64Allocation trueExpr = lir->trueExpr();
  const LInt64Allocation falseExpr = lir->falseExpr();

  Register64 output = ToOutRegister64(lir);

  masm.move64(ToRegister64(trueExpr), output);

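  // Having speculatively moved the true value into the output, conditionally
  // overwrite it with the false value: MIPS movz copies its source into the
  // destination only when the third (condition) register is zero.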
  if (falseExpr.low().isRegister()) {
    masm.as_movz(output.low, ToRegister(falseExpr.low()), cond);
    masm.as_movz(output.high, ToRegister(falseExpr.high()), cond);
  } else {
    Label done;
    masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
    masm.loadPtr(ToAddress(falseExpr.low()), output.low);
    masm.loadPtr(ToAddress(falseExpr.high()), output.high);
    masm.bind(&done);
  }
}

void CodeGeneratorMIPS::visitWasmReinterpretFromI64(
    LWasmReinterpretFromI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  FloatRegister output = ToFloatRegister(lir->output());

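  // A bit-for-bit reinterpretation, not a numeric conversion: the two GPR
  // halves are moved directly into the low and high words of the double
  // register.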
  masm.moveToDoubleLo(input.low, output);
  masm.moveToDoubleHi(input.high, output);
}

void CodeGeneratorMIPS::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
  FloatRegister input = ToFloatRegister(lir->getOperand(0));
  Register64 output = ToOutRegister64(lir);

  masm.moveFromDoubleLo(input, output.low);
  masm.moveFromDoubleHi(input, output.high);
}

void CodeGeneratorMIPS::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
  Register input = ToRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

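  // The low word is the input itself; the high word is zero for an unsigned
  // extension, or the input's sign bit replicated by an arithmetic right
  // shift for a signed one.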
  if (input != output.low) masm.move32(input, output.low);
  if (lir->mir()->isUnsigned())
    masm.move32(Imm32(0), output.high);
  else
    masm.ma_sra(output.high, output.low, Imm32(31));
}

void CodeGeneratorMIPS::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
  const LInt64Allocation& input = lir->getInt64Operand(0);
  Register output = ToRegister(lir->output());

  if (lir->mir()->bottomHalf())
    masm.move32(ToRegister(input.low()), output);
  else
    masm.move32(ToRegister(input.high()), output);
}

void CodeGeneratorMIPS::visitSignExtendInt64(LSignExtendInt64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register64 output = ToOutRegister64(lir);
  switch (lir->mode()) {
    case MSignExtendInt64::Byte:
      masm.move8SignExtend(input.low, output.low);
      break;
    case MSignExtendInt64::Half:
      masm.move16SignExtend(input.low, output.low);
      break;
    case MSignExtendInt64::Word:
      masm.move32(input.low, output.low);
      break;
  }
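  // Whatever the source width, the high word is the sign bit of the now
  // sign-extended low word, replicated by an arithmetic shift right of 31.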
  masm.ma_sra(output.high, output.low, Imm32(31));
}

void CodeGeneratorMIPS::visitClzI64(LClzI64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register64 output = ToOutRegister64(lir);
  masm.clz64(input, output.low);
  masm.move32(Imm32(0), output.high);
}

void CodeGeneratorMIPS::visitCtzI64(LCtzI64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register64 output = ToOutRegister64(lir);
  masm.ctz64(input, output.low);
  masm.move32(Imm32(0), output.high);
}

void CodeGeneratorMIPS::visitNotI64(LNotI64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register output = ToRegister(lir->output());

  masm.as_or(output, input.low, input.high);
  masm.cmp32Set(Assembler::Equal, output, Imm32(0), output);
}

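// As with 64-bit division, float-to-int64 truncation is done via a runtime
// callout on MIPS32; Float32 inputs are widened to double first because the
// callouts take a double argument. The saturating form can never trap, so it
// needs no failure check; the non-saturating form must detect a failed
// conversion and jump to the out-of-line check, which re-examines the input
// to decide between trapping and a legitimate INT64_MIN result.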
void CodeGeneratorMIPS::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  FloatRegister arg = input;
  Register64 output = ToOutRegister64(lir);
  MWasmTruncateToInt64* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  auto* ool = new (alloc())
      OutOfLineWasmTruncateCheck(mir, input, Register64::Invalid());
  addOutOfLineCode(ool, mir);

  if (fromType == MIRType::Float32) {
    arg = ScratchDoubleReg;
    masm.convertFloat32ToDouble(input, arg);
  }

  if (!lir->mir()->isSaturating()) {
    masm.Push(input);

    masm.setupWasmABICall();
    masm.passABIArg(arg, MoveOp::DOUBLE);

    if (lir->mir()->isUnsigned())
      masm.callWithABI(mir->bytecodeOffset(),
                       wasm::SymbolicAddress::TruncateDoubleToUint64);
    else
      masm.callWithABI(mir->bytecodeOffset(),
                       wasm::SymbolicAddress::TruncateDoubleToInt64);

    masm.Pop(input);

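    // The non-saturating callout signals failure by returning INT64_MIN
    // (0x8000000000000000). XORing the high word with 0x80000000 and ORing
    // in the low word yields zero exactly for that bit pattern, in which
    // case we branch to the out-of-line check.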
    masm.ma_xor(ScratchRegister, output.high, Imm32(0x80000000));
    masm.ma_or(ScratchRegister, output.low);
    masm.ma_b(ScratchRegister, Imm32(0), ool->entry(), Assembler::Equal);

    masm.bind(ool->rejoin());
  } else {
    masm.setupWasmABICall();
    masm.passABIArg(arg, MoveOp::DOUBLE);
    if (lir->mir()->isUnsigned())
      masm.callWithABI(mir->bytecodeOffset(),
                       wasm::SymbolicAddress::SaturatingTruncateDoubleToUint64);
    else
      masm.callWithABI(mir->bytecodeOffset(),
                       wasm::SymbolicAddress::SaturatingTruncateDoubleToInt64);
  }

  MOZ_ASSERT(ReturnReg64 == output);
}

void CodeGeneratorMIPS::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  mozilla::DebugOnly<FloatRegister> output = ToFloatRegister(lir->output());

  MInt64ToFloatingPoint* mir = lir->mir();
  MIRType toType = mir->type();

  masm.setupWasmABICall();
  masm.passABIArg(input.high);
  masm.passABIArg(input.low);

  if (lir->mir()->isUnsigned())
    if (toType == MIRType::Double)
      masm.callWithABI(mir->bytecodeOffset(),
                       wasm::SymbolicAddress::Uint64ToDouble, MoveOp::DOUBLE);
    else
      masm.callWithABI(mir->bytecodeOffset(),
                       wasm::SymbolicAddress::Uint64ToFloat32, MoveOp::FLOAT32);
  else if (toType == MIRType::Double)
    masm.callWithABI(mir->bytecodeOffset(),
                     wasm::SymbolicAddress::Int64ToDouble, MoveOp::DOUBLE);
  else
    masm.callWithABI(mir->bytecodeOffset(),
                     wasm::SymbolicAddress::Int64ToFloat32, MoveOp::FLOAT32);

  MOZ_ASSERT_IF(toType == MIRType::Double, *(&output) == ReturnDoubleReg);
  MOZ_ASSERT_IF(toType == MIRType::Float32, *(&output) == ReturnFloat32Reg);
}

void CodeGeneratorMIPS::visitTestI64AndBranch(LTestI64AndBranch* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));

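  // A 64-bit value is true iff either half is non-zero: take the true edge
  // on a non-zero high word, then branch on the low word.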
  branchToBlock(input.high, Imm32(0), lir->ifTrue(), Assembler::NonZero);
  emitBranch(input.low, Imm32(0), Assembler::NonZero, lir->ifTrue(),
             lir->ifFalse());
}

void CodeGeneratorMIPS::setReturnDoubleRegs(LiveRegisterSet* regs) {
  MOZ_ASSERT(ReturnFloat32Reg.code_ == ReturnDoubleReg.code_);
  regs->add(ReturnFloat32Reg);
  regs->add(ReturnDoubleReg);
}

void CodeGeneratorMIPS::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* lir) {
  Register ptr = ToRegister(lir->ptr());
  Register64 output = ToOutRegister64(lir);
  uint32_t offset = lir->mir()->access().offset();

  BaseIndex addr(HeapReg, ptr, TimesOne, offset);

  masm.atomicLoad64(Synchronization::Full(), addr, Register64::Invalid(),
                    output);
}

void CodeGeneratorMIPS::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* lir) {
  Register ptr = ToRegister(lir->ptr());
  Register64 value = ToRegister64(lir->value());
  Register tmp = ToRegister(lir->tmp());
  uint32_t offset = lir->mir()->access().offset();

  BaseIndex addr(HeapReg, ptr, TimesOne, offset);

  masm.atomicStore64(addr, tmp, value);
}