/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips-shared/CodeGenerator-mips-shared.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"

#include "jsnum.h"

#include "jit/CodeGenerator.h"
#include "jit/InlineScriptTree.h"
#include "jit/JitRuntime.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "vm/JSContext.h"
#include "vm/Realm.h"
#include "vm/Shape.h"
#include "vm/TraceLogging.h"

#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
#include "vm/JSScript-inl.h"

using namespace js;
using namespace js::jit;

using JS::GenericNaN;
using JS::ToInt32;
using mozilla::DebugOnly;
using mozilla::FloorLog2;
using mozilla::NegativeInfinity;

// shared
CodeGeneratorMIPSShared::CodeGeneratorMIPSShared(MIRGenerator* gen,
                                                 LIRGraph* graph,
                                                 MacroAssembler* masm)
    : CodeGeneratorShared(gen, graph, masm) {}

Operand CodeGeneratorMIPSShared::ToOperand(const LAllocation& a) {
  if (a.isGeneralReg()) {
    return Operand(a.toGeneralReg()->reg());
  }
  if (a.isFloatReg()) {
    return Operand(a.toFloatReg()->reg());
  }
  return Operand(ToAddress(a));
}

Operand CodeGeneratorMIPSShared::ToOperand(const LAllocation* a) {
  return ToOperand(*a);
}

Operand CodeGeneratorMIPSShared::ToOperand(const LDefinition* def) {
  return ToOperand(def->output());
}

#ifdef JS_PUNBOX64
Operand CodeGeneratorMIPSShared::ToOperandOrRegister64(
    const LInt64Allocation input) {
  return ToOperand(input.value());
}
#else
Register64 CodeGeneratorMIPSShared::ToOperandOrRegister64(
    const LInt64Allocation input) {
  return ToRegister64(input);
}
#endif

void CodeGeneratorMIPSShared::branchToBlock(Assembler::FloatFormat fmt,
                                            FloatRegister lhs,
                                            FloatRegister rhs, MBasicBlock* mir,
                                            Assembler::DoubleCondition cond) {
  // Skip past trivial blocks.
  Label* label = skipTrivialBlocks(mir)->lir()->label();
  if (fmt == Assembler::DoubleFloat) {
    masm.branchDouble(cond, lhs, rhs, label);
  } else {
    masm.branchFloat(cond, lhs, rhs, label);
  }
}

FrameSizeClass FrameSizeClass::FromDepth(uint32_t frameDepth) {
  return FrameSizeClass::None();
}

FrameSizeClass FrameSizeClass::ClassLimit() { return FrameSizeClass(0); }

uint32_t FrameSizeClass::frameSize() const {
  MOZ_CRASH("MIPS does not use frame size classes");
}

void OutOfLineBailout::accept(CodeGeneratorMIPSShared* codegen) {
  codegen->visitOutOfLineBailout(this);
}

void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
  const LAllocation* opd = test->getOperand(0);
  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  emitBranch(ToRegister(opd), Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
}

void CodeGenerator::visitCompare(LCompare* comp) {
  MCompare* mir = comp->mir();
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
  const LAllocation* left = comp->getOperand(0);
  const LAllocation* right = comp->getOperand(1);
  const LDefinition* def = comp->getDef(0);

#ifdef JS_CODEGEN_MIPS64
  if (mir->compareType() == MCompare::Compare_Object ||
      mir->compareType() == MCompare::Compare_Symbol ||
      mir->compareType() == MCompare::Compare_UIntPtr) {
    if (right->isConstant()) {
      MOZ_ASSERT(mir->compareType() == MCompare::Compare_UIntPtr);
      masm.cmpPtrSet(cond, ToRegister(left), Imm32(ToInt32(right)),
                     ToRegister(def));
    } else if (right->isGeneralReg()) {
      masm.cmpPtrSet(cond, ToRegister(left), ToRegister(right),
                     ToRegister(def));
    } else {
      masm.cmpPtrSet(cond, ToRegister(left), ToAddress(right), ToRegister(def));
    }
    return;
  }
#endif

  if (right->isConstant()) {
    masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)),
                  ToRegister(def));
  } else if (right->isGeneralReg()) {
    masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def));
  } else {
    masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def));
  }
}

void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
  MCompare* mir = comp->cmpMir();
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());

#ifdef JS_CODEGEN_MIPS64
  if (mir->compareType() == MCompare::Compare_Object ||
      mir->compareType() == MCompare::Compare_Symbol ||
      mir->compareType() == MCompare::Compare_UIntPtr) {
    if (comp->right()->isConstant()) {
      MOZ_ASSERT(mir->compareType() == MCompare::Compare_UIntPtr);
      emitBranch(ToRegister(comp->left()), Imm32(ToInt32(comp->right())), cond,
                 comp->ifTrue(), comp->ifFalse());
    } else if (comp->right()->isGeneralReg()) {
      emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
                 comp->ifTrue(), comp->ifFalse());
    } else {
      masm.loadPtr(ToAddress(comp->right()), ScratchRegister);
      emitBranch(ToRegister(comp->left()), ScratchRegister, cond,
                 comp->ifTrue(), comp->ifFalse());
    }
    return;
  }
#endif

  if (comp->right()->isConstant()) {
    emitBranch(ToRegister(comp->left()), Imm32(ToInt32(comp->right())), cond,
               comp->ifTrue(), comp->ifFalse());
  } else if (comp->right()->isGeneralReg()) {
    emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
               comp->ifTrue(), comp->ifFalse());
  } else {
    masm.load32(ToAddress(comp->right()), ScratchRegister);
    emitBranch(ToRegister(comp->left()), ScratchRegister, cond, comp->ifTrue(),
               comp->ifFalse());
  }
}

bool CodeGeneratorMIPSShared::generateOutOfLineCode() {
  if (!CodeGeneratorShared::generateOutOfLineCode()) {
    return false;
  }

  if (deoptLabel_.used()) {
    // All non-table-based bailouts will go here.
    masm.bind(&deoptLabel_);

    // Push the frame size, so the handler can recover the IonScript.
    // The frame size is stored in 'ra' and pushed by GenerateBailoutThunk.
    // We have to use 'ra' because generateBailoutTable will implicitly do
    // the same.
    masm.move32(Imm32(frameSize()), ra);

    TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
    masm.jump(handler);
  }

  return !masm.oom();
}

void CodeGeneratorMIPSShared::bailoutFrom(Label* label, LSnapshot* snapshot) {
  MOZ_ASSERT_IF(!masm.oom(), label->used());
  MOZ_ASSERT_IF(!masm.oom(), !label->bound());

  encode(snapshot);

  // Though the assembler doesn't track all frame pushes, at least make sure
  // the known value makes sense. We can't use bailout tables if the stack
  // isn't properly aligned to the static frame size.
  MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                frameClass_.frameSize() == masm.framePushed());

  // We don't use table bailouts because retargeting is easier this way.
  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  OutOfLineBailout* ool =
      new (alloc()) OutOfLineBailout(snapshot, masm.framePushed());
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  masm.retarget(label, ool->entry());
}

void CodeGeneratorMIPSShared::bailout(LSnapshot* snapshot) {
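  // Jump to a still-unbound label; bailoutFrom() then retargets that label
  // to the out-of-line bailout path, so this branch ends up at the OOL
  // entry once code generation finishes.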
  Label label;
  masm.jump(&label);
  bailoutFrom(&label, snapshot);
}

void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
  FloatRegister first = ToFloatRegister(ins->first());
  FloatRegister second = ToFloatRegister(ins->second());

  MOZ_ASSERT(first == ToFloatRegister(ins->output()));

  if (ins->mir()->isMax()) {
    masm.maxDouble(second, first, true);
  } else {
    masm.minDouble(second, first, true);
  }
}

void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
  FloatRegister first = ToFloatRegister(ins->first());
  FloatRegister second = ToFloatRegister(ins->second());

  MOZ_ASSERT(first == ToFloatRegister(ins->output()));

  if (ins->mir()->isMax()) {
    masm.maxFloat32(second, first, true);
  } else {
    masm.minFloat32(second, first, true);
  }
}

void CodeGenerator::visitAddI(LAddI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

  // If there is no snapshot, we don't need to check for overflow
  if (!ins->snapshot()) {
    if (rhs->isConstant()) {
      masm.ma_addu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    } else {
      masm.as_addu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    }
    return;
  }

  Label overflow;
  if (rhs->isConstant()) {
    masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              Imm32(ToInt32(rhs)), &overflow);
  } else {
    masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              ToRegister(rhs), &overflow);
  }

  bailoutFrom(&overflow, ins->snapshot());
}

void CodeGenerator::visitAddI64(LAddI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  if (IsConstant(rhs)) {
    masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
    return;
  }

  masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
}

void CodeGenerator::visitSubI(LSubI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

  // If there is no snapshot, we don't need to check for overflow
  if (!ins->snapshot()) {
    if (rhs->isConstant()) {
      masm.ma_subu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    } else {
      masm.as_subu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    }
    return;
  }

  Label overflow;
  if (rhs->isConstant()) {
    masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              Imm32(ToInt32(rhs)), &overflow);
  } else {
    masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              ToRegister(rhs), &overflow);
  }

  bailoutFrom(&overflow, ins->snapshot());
}

void CodeGenerator::visitSubI64(LSubI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  if (IsConstant(rhs)) {
    masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
    return;
  }

  masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
}

void CodeGenerator::visitMulI(LMulI* ins) {
  const LAllocation* lhs = ins->lhs();
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());
  MMul* mul = ins->mir();

  MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
                !mul->canBeNegativeZero() && !mul->canOverflow());

  if (rhs->isConstant()) {
    int32_t constant = ToInt32(rhs);
    Register src = ToRegister(lhs);

    // Bailout on -0.0
    if (mul->canBeNegativeZero() && constant <= 0) {
      Assembler::Condition cond =
          (constant == 0) ? Assembler::LessThan : Assembler::Equal;
      bailoutCmp32(cond, src, Imm32(0), ins->snapshot());
    }

    switch (constant) {
      case -1:
        if (mul->canOverflow()) {
          bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN),
                       ins->snapshot());
        }

        masm.ma_negu(dest, src);
        break;
      case 0:
        masm.move32(Imm32(0), dest);
        break;
      case 1:
        masm.move32(src, dest);
        break;
      case 2:
        if (mul->canOverflow()) {
          Label mulTwoOverflow;
          masm.ma_add32TestOverflow(dest, src, src, &mulTwoOverflow);

          bailoutFrom(&mulTwoOverflow, ins->snapshot());
        } else {
          masm.as_addu(dest, src, src);
        }
        break;
      default:
        uint32_t shift = FloorLog2(constant);

        if (!mul->canOverflow() && (constant > 0)) {
          // If it cannot overflow, we can do lots of optimizations.
          uint32_t rest = constant - (1 << shift);

          // See if the constant has one bit set, meaning it can be
          // encoded as a bitshift.
          if ((1 << shift) == constant) {
            masm.ma_sll(dest, src, Imm32(shift));
            return;
          }

          // If the constant cannot be encoded as (1<<C1), see if it can
          // be encoded as (1<<C1) | (1<<C2), which can be computed
          // using an add and a shift.
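          // For example, constant == 10 gives shift == 3 and rest == 2,
          // so shift_rest == 1 and dest == ((src << 2) + src) << 1,
          // which equals src * 10.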
          uint32_t shift_rest = FloorLog2(rest);
          if (src != dest && (1u << shift_rest) == rest) {
            masm.ma_sll(dest, src, Imm32(shift - shift_rest));
            masm.add32(src, dest);
            if (shift_rest != 0) {
              masm.ma_sll(dest, dest, Imm32(shift_rest));
            }
            return;
          }
        }

        if (mul->canOverflow() && (constant > 0) && (src != dest)) {
          // To stay on the safe side, only optimize things that are a
          // power of 2.

          if ((1 << shift) == constant) {
            // dest = lhs * pow(2, shift)
            masm.ma_sll(dest, src, Imm32(shift));
            // At runtime, check (lhs == dest >> shift), if this does
            // not hold, some bits were lost due to overflow, and the
            // computation should be resumed as a double.
            masm.ma_sra(ScratchRegister, dest, Imm32(shift));
            bailoutCmp32(Assembler::NotEqual, src, ScratchRegister,
                         ins->snapshot());
            return;
          }
        }

        if (mul->canOverflow()) {
          Label mulConstOverflow;
          masm.ma_mul32TestOverflow(dest, ToRegister(lhs), Imm32(ToInt32(rhs)),
                                    &mulConstOverflow);

          bailoutFrom(&mulConstOverflow, ins->snapshot());
        } else {
          masm.ma_mul(dest, src, Imm32(ToInt32(rhs)));
        }
        break;
    }
  } else {
    Label multRegOverflow;

    if (mul->canOverflow()) {
      masm.ma_mul32TestOverflow(dest, ToRegister(lhs), ToRegister(rhs),
                                &multRegOverflow);
      bailoutFrom(&multRegOverflow, ins->snapshot());
    } else {
      masm.as_mul(dest, ToRegister(lhs), ToRegister(rhs));
    }

    if (mul->canBeNegativeZero()) {
      Label done;
      masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);

      // The result is -0 if lhs or rhs is negative.
      // In that case the result must be a double value, so bail out.
      Register scratch = SecondScratchReg;
      masm.as_or(scratch, ToRegister(lhs), ToRegister(rhs));
      bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());

      masm.bind(&done);
    }
  }
}

void CodeGenerator::visitMulI64(LMulI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
  const Register64 output = ToOutRegister64(lir);

  if (IsConstant(rhs)) {
    int64_t constant = ToInt64(rhs);
    switch (constant) {
      case -1:
        masm.neg64(ToRegister64(lhs));
        return;
      case 0:
        masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
        return;
      case 1:
        // nop
        return;
      default:
        if (constant > 0) {
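          // Strength-reduce multiplications by constants of the form
          // 2^k - 1 or 2^k + 1 into a shift plus a subtract or add,
          // e.g. x * 7 == (x << 3) - x and x * 5 == (x << 2) + x.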
          if (mozilla::IsPowerOfTwo(static_cast<uint32_t>(constant + 1))) {
            masm.move64(ToRegister64(lhs), output);
            masm.lshift64(Imm32(FloorLog2(constant + 1)), output);
            masm.sub64(ToRegister64(lhs), output);
            return;
          } else if (mozilla::IsPowerOfTwo(
                         static_cast<uint32_t>(constant - 1))) {
            masm.move64(ToRegister64(lhs), output);
            masm.lshift64(Imm32(FloorLog2(constant - 1u)), output);
            masm.add64(ToRegister64(lhs), output);
            return;
          }
          // Use shift if constant is power of 2.
          int32_t shift = mozilla::FloorLog2(constant);
          if (int64_t(1) << shift == constant) {
            masm.lshift64(Imm32(shift), ToRegister64(lhs));
            return;
          }
        }
        Register temp = ToTempRegisterOrInvalid(lir->temp());
        masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
    }
  } else {
    Register temp = ToTempRegisterOrInvalid(lir->temp());
    masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
  }
}

void CodeGenerator::visitDivI(LDivI* ins) {
  // Extract the registers from this instruction
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register dest = ToRegister(ins->output());
  Register temp = ToRegister(ins->getTemp(0));
  MDiv* mir = ins->mir();

  Label done;

  // Handle divide by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->trapOnError()) {
      Label nonZero;
      masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
      masm.bind(&nonZero);
    } else if (mir->canTruncateInfinities()) {
      // Truncated division by zero is zero (Infinity|0 == 0)
      Label notzero;
      masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&notzero);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
    }
  }

  // Handle an integer overflow exception from -2147483648 / -1.
  if (mir->canBeNegativeOverflow()) {
    Label notMinInt;
    masm.move32(Imm32(INT32_MIN), temp);
    masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);

    masm.move32(Imm32(-1), temp);
    if (mir->trapOnError()) {
      Label ok;
      masm.ma_b(rhs, temp, &ok, Assembler::NotEqual);
      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
      masm.bind(&ok);
    } else if (mir->canTruncateOverflow()) {
      // (-INT32_MIN)|0 == INT32_MIN
      Label skip;
      masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(INT32_MIN), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot());
    }
    masm.bind(&notMinInt);
  }

  // Handle negative 0. (0/-Y)
  if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
    Label nonzero;
    masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
    bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
    masm.bind(&nonzero);
  }
  // Note: above safety checks could not be verified as Ion seems to be
  // smarter and requires double arithmetic in such cases.

  // All regular. Let's call div.
  if (mir->canTruncateRemainder()) {
#ifdef MIPSR6
    masm.as_div(dest, lhs, rhs);
#else
    masm.as_div(lhs, rhs);
    masm.as_mflo(dest);
#endif
  } else {
    MOZ_ASSERT(mir->fallible());

    Label remainderNonZero;
    masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero);
    bailoutFrom(&remainderNonZero, ins->snapshot());
  }

  masm.bind(&done);
}

void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
  Register lhs = ToRegister(ins->numerator());
  Register dest = ToRegister(ins->output());
  Register tmp = ToRegister(ins->getTemp(0));
  int32_t shift = ins->shift();

  if (shift != 0) {
    MDiv* mir = ins->mir();
    if (!mir->isTruncated()) {
      // If the remainder is going to be != 0, bailout since this must
      // be a double.
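      // Shifting left by (32 - shift) keeps only the low 'shift' bits;
      // a nonzero result means the division has a remainder.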
      masm.ma_sll(tmp, lhs, Imm32(32 - shift));
      bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
    }

    if (!mir->canBeNegativeDividend()) {
      // The numerator is never negative, so needs no adjusting. Do the shift.
      masm.ma_sra(dest, lhs, Imm32(shift));
      return;
    }

    // Adjust the value so that shifting produces a correctly rounded result
    // when the numerator is negative. See 10-1 "Signed Division by a Known
    // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
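    // For example, with shift == 2 and lhs == -7: tmp = (-7 >> 31) == -1,
    // then tmp >>> 30 == 3, and (-7 + 3) >> 2 == -1, which is the correctly
    // truncated result of -7 / 4 (a plain arithmetic shift would give -2).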
    if (shift > 1) {
      masm.ma_sra(tmp, lhs, Imm32(31));
      masm.ma_srl(tmp, tmp, Imm32(32 - shift));
      masm.add32(lhs, tmp);
    } else {
      masm.ma_srl(tmp, lhs, Imm32(32 - shift));
      masm.add32(lhs, tmp);
    }

    // Do the shift.
    masm.ma_sra(dest, tmp, Imm32(shift));
  } else {
    masm.move32(lhs, dest);
  }
}

void CodeGenerator::visitModI(LModI* ins) {
  // Extract the registers from this instruction
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register dest = ToRegister(ins->output());
  Register callTemp = ToRegister(ins->callTemp());
  MMod* mir = ins->mir();
  Label done, prevent;

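  // Save a copy of the dividend: the output may alias it, and the original
  // value is still needed after the division to detect the -0 case below.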
  masm.move32(lhs, callTemp);

  // Prevent INT_MIN % -1;
  // The integer division will give INT_MIN, but we want -(double)INT_MIN.
  if (mir->canBeNegativeDividend()) {
    masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump);
    if (mir->isTruncated()) {
      // (INT_MIN % -1)|0 == 0
      Label skip;
      masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot());
    }
    masm.bind(&prevent);
  }

  // 0/X (with X < 0) is bad because both of these values *should* be
  // doubles, and the result should be -0.0, which cannot be represented in
  // integers. X/0 is bad because it will give garbage (or abort), when it
  // should give either \infty, -\infty or NAN.

  // Prevent 0 / X (with X < 0) and X / 0
  // testing X / Y.  Compare Y with 0.
  // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
  // If (Y < 0), then we compare X with 0, and bail if X == 0
  // If (Y == 0), then we simply want to bail.
  // if (Y > 0), we don't bail.

  if (mir->canBeDivideByZero()) {
    if (mir->isTruncated()) {
      if (mir->trapOnError()) {
        Label nonZero;
        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
        masm.bind(&nonZero);
      } else {
        Label skip;
        masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
        masm.move32(Imm32(0), dest);
        masm.ma_b(&done, ShortJump);
        masm.bind(&skip);
      }
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
    }
  }

  if (mir->canBeNegativeDividend()) {
    Label notNegative;
    masm.ma_b(rhs, Imm32(0), &notNegative, Assembler::GreaterThan, ShortJump);
    if (mir->isTruncated()) {
      // NaN|0 == 0 and (0 % -X)|0 == 0
      Label skip;
      masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot());
    }
    masm.bind(&notNegative);
  }
#ifdef MIPSR6
  masm.as_mod(dest, lhs, rhs);
#else
  masm.as_div(lhs, rhs);
  masm.as_mfhi(dest);
#endif

  // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
  if (mir->canBeNegativeDividend()) {
    if (mir->isTruncated()) {
      // -0.0|0 == 0
    } else {
      MOZ_ASSERT(mir->fallible());
      // See if X < 0
      masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
      bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot());
    }
  }
  masm.bind(&done);
}

void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
  Register in = ToRegister(ins->getOperand(0));
  Register out = ToRegister(ins->getDef(0));
  MMod* mir = ins->mir();
  Label negative, done;

  masm.move32(in, out);
  masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
  // Switch based on sign of the lhs.
  // Positive numbers are just a bitmask
  masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
  {
    masm.and32(Imm32((1 << ins->shift()) - 1), out);
    masm.ma_b(&done, ShortJump);
  }

  // Negative numbers need a negate, bitmask, negate
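  // For example, in == -5 with shift == 2: -((5) & 3) == -1 == -5 % 4.
  // This matches C-style truncated modulus, which takes the dividend's sign.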
  {
    masm.bind(&negative);
    masm.neg32(out);
    masm.and32(Imm32((1 << ins->shift()) - 1), out);
    masm.neg32(out);
  }
  if (mir->canBeNegativeDividend()) {
    if (!mir->isTruncated()) {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
    } else {
      // -0|0 == 0
    }
  }
  masm.bind(&done);
}

void CodeGenerator::visitModMaskI(LModMaskI* ins) {
  Register src = ToRegister(ins->getOperand(0));
  Register dest = ToRegister(ins->getDef(0));
  Register tmp0 = ToRegister(ins->getTemp(0));
  Register tmp1 = ToRegister(ins->getTemp(1));
  MMod* mir = ins->mir();

  if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
    MOZ_ASSERT(mir->fallible());

    Label bail;
    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
    bailoutFrom(&bail, ins->snapshot());
  } else {
    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
  }
}

void CodeGenerator::visitBitNotI(LBitNotI* ins) {
  const LAllocation* input = ins->getOperand(0);
  const LDefinition* dest = ins->getDef(0);
  MOZ_ASSERT(!input->isConstant());

  masm.ma_not(ToRegister(dest), ToRegister(input));
}

void CodeGenerator::visitBitOpI(LBitOpI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);
  // all of these bitops should be either imm32's, or integer registers.
  switch (ins->bitop()) {
    case JSOp::BitOr:
      if (rhs->isConstant()) {
        masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
      } else {
        masm.as_or(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
      }
      break;
    case JSOp::BitXor:
      if (rhs->isConstant()) {
        masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
      } else {
        masm.as_xor(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
      }
      break;
    case JSOp::BitAnd:
      if (rhs->isConstant()) {
        masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
      } else {
        masm.as_and(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
      }
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}

void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  switch (lir->bitop()) {
    case JSOp::BitOr:
      if (IsConstant(rhs)) {
        masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
      } else {
        masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
      }
      break;
    case JSOp::BitXor:
      if (IsConstant(rhs)) {
        masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
      } else {
        masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
      }
      break;
    case JSOp::BitAnd:
      if (IsConstant(rhs)) {
        masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
      } else {
        masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
      }
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}

void CodeGenerator::visitShiftI(LShiftI* ins) {
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());

  if (rhs->isConstant()) {
    int32_t shift = ToInt32(rhs) & 0x1F;
    switch (ins->bitop()) {
      case JSOp::Lsh:
        if (shift) {
          masm.ma_sll(dest, lhs, Imm32(shift));
        } else {
          masm.move32(lhs, dest);
        }
        break;
      case JSOp::Rsh:
        if (shift) {
          masm.ma_sra(dest, lhs, Imm32(shift));
        } else {
          masm.move32(lhs, dest);
        }
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.ma_srl(dest, lhs, Imm32(shift));
        } else {
          // x >>> 0 can overflow.
          if (ins->mir()->toUrsh()->fallible()) {
            bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
          }
          masm.move32(lhs, dest);
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  } else {
    // The shift amounts should be AND'ed into the 0-31 range
    masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F));

    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.ma_sll(dest, lhs, dest);
        break;
      case JSOp::Rsh:
        masm.ma_sra(dest, lhs, dest);
        break;
      case JSOp::Ursh:
        masm.ma_srl(dest, lhs, dest);
        if (ins->mir()->toUrsh()->fallible()) {
          // x >>> 0 can overflow.
          bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}

void CodeGenerator::visitShiftI64(LShiftI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
  LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  if (rhs->isConstant()) {
    int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
    switch (lir->bitop()) {
      case JSOp::Lsh:
        if (shift) {
          masm.lshift64(Imm32(shift), ToRegister64(lhs));
        }
        break;
      case JSOp::Rsh:
        if (shift) {
          masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
        }
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.rshift64(Imm32(shift), ToRegister64(lhs));
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
    return;
  }

  switch (lir->bitop()) {
    case JSOp::Lsh:
      masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
      break;
    case JSOp::Rsh:
      masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
      break;
    case JSOp::Ursh:
      masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
      break;
    default:
      MOZ_CRASH("Unexpected shift op");
  }
}

void CodeGenerator::visitRotateI64(LRotateI64* lir) {
  MRotate* mir = lir->mir();
  LAllocation* count = lir->count();

  Register64 input = ToRegister64(lir->input());
  Register64 output = ToOutRegister64(lir);
  Register temp = ToTempRegisterOrInvalid(lir->temp());

#ifdef JS_CODEGEN_MIPS64
  MOZ_ASSERT(input == output);
#endif

  if (count->isConstant()) {
    int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
    if (!c) {
#ifdef JS_CODEGEN_MIPS32
      masm.move64(input, output);
#endif
      return;
    }
    if (mir->isLeftRotate()) {
      masm.rotateLeft64(Imm32(c), input, output, temp);
    } else {
      masm.rotateRight64(Imm32(c), input, output, temp);
    }
  } else {
    if (mir->isLeftRotate()) {
      masm.rotateLeft64(ToRegister(count), input, output, temp);
    } else {
      masm.rotateRight64(ToRegister(count), input, output, temp);
    }
  }
}

void CodeGenerator::visitUrshD(LUrshD* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp());

  const LAllocation* rhs = ins->rhs();
  FloatRegister out = ToFloatRegister(ins->output());

  if (rhs->isConstant()) {
    masm.ma_srl(temp, lhs, Imm32(ToInt32(rhs)));
  } else {
    masm.ma_srl(temp, lhs, ToRegister(rhs));
  }

  masm.convertUInt32ToDouble(temp, out);
}

void CodeGenerator::visitClzI(LClzI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  masm.as_clz(output, input);
}

void CodeGenerator::visitCtzI(LCtzI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  masm.ma_ctz(output, input);
}

void CodeGenerator::visitPopcntI(LPopcntI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());
  Register tmp = ToRegister(ins->temp());

  masm.popcnt32(input, output, tmp);
}

void CodeGenerator::visitPopcntI64(LPopcntI64* ins) {
  Register64 input = ToRegister64(ins->getInt64Operand(0));
  Register64 output = ToOutRegister64(ins);
  Register tmp = ToRegister(ins->getTemp(0));

  masm.popcnt64(input, output, tmp);
}

void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());

  Label done, skip;

  // Math.pow(-Infinity, 0.5) == Infinity.
  masm.loadConstantDouble(NegativeInfinity<double>(), ScratchDoubleReg);
  masm.ma_bc1d(input, ScratchDoubleReg, &skip,
               Assembler::DoubleNotEqualOrUnordered, ShortJump);
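  // ScratchDoubleReg still holds -Infinity here, so negating it yields the
  // required +Infinity result.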
  masm.as_negd(output, ScratchDoubleReg);
  masm.ma_b(&done, ShortJump);

  masm.bind(&skip);
  // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
  // Adding 0 converts any -0 to 0.
  masm.loadConstantDouble(0.0, ScratchDoubleReg);
  masm.as_addd(output, input, ScratchDoubleReg);
  masm.as_sqrtd(output, output);

  masm.bind(&done);
}

MoveOperand CodeGeneratorMIPSShared::toMoveOperand(LAllocation a) const {
  if (a.isGeneralReg()) {
    return MoveOperand(ToRegister(a));
  }
  if (a.isFloatReg()) {
    return MoveOperand(ToFloatRegister(a));
  }
  MoveOperand::Kind kind =
      a.isStackArea() ? MoveOperand::EFFECTIVE_ADDRESS : MoveOperand::MEMORY;
  Address address = ToAddress(a);
  MOZ_ASSERT((address.offset & 3) == 0);
  return MoveOperand(address, kind);
}

void CodeGenerator::visitMathD(LMathD* math) {
  FloatRegister src1 = ToFloatRegister(math->getOperand(0));
  FloatRegister src2 = ToFloatRegister(math->getOperand(1));
  FloatRegister output = ToFloatRegister(math->getDef(0));

  switch (math->jsop()) {
    case JSOp::Add:
      masm.as_addd(output, src1, src2);
      break;
    case JSOp::Sub:
      masm.as_subd(output, src1, src2);
      break;
    case JSOp::Mul:
      masm.as_muld(output, src1, src2);
      break;
    case JSOp::Div:
      masm.as_divd(output, src1, src2);
      break;
    default:
      MOZ_CRASH("unexpected opcode");
  }
}

void CodeGenerator::visitMathF(LMathF* math) {
  FloatRegister src1 = ToFloatRegister(math->getOperand(0));
  FloatRegister src2 = ToFloatRegister(math->getOperand(1));
  FloatRegister output = ToFloatRegister(math->getDef(0));

  switch (math->jsop()) {
    case JSOp::Add:
      masm.as_adds(output, src1, src2);
      break;
    case JSOp::Sub:
      masm.as_subs(output, src1, src2);
      break;
    case JSOp::Mul:
      masm.as_muls(output, src1, src2);
      break;
    case JSOp::Div:
      masm.as_divs(output, src1, src2);
      break;
    default:
      MOZ_CRASH("unexpected opcode");
  }
}

void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
  emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
                     ins->mir());
}

void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
  emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
                      ins->mir());
}

void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
    LWasmBuiltinTruncateDToInt32* lir) {
  emitTruncateDouble(ToFloatRegister(lir->getOperand(0)),
                     ToRegister(lir->getDef(0)), lir->mir());
}

void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
    LWasmBuiltinTruncateFToInt32* lir) {
  emitTruncateFloat32(ToFloatRegister(lir->getOperand(0)),
                      ToRegister(lir->getDef(0)), lir->mir());
}

void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
  auto input = ToFloatRegister(lir->input());
  auto output = ToRegister(lir->output());

  MWasmTruncateToInt32* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);

  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
  addOutOfLineCode(ool, mir);

  Label* oolEntry = ool->entry();
  if (mir->isUnsigned()) {
    if (fromType == MIRType::Double) {
      masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(),
                                      oolEntry);
    } else if (fromType == MIRType::Float32) {
      masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(),
                                       oolEntry);
    } else {
      MOZ_CRASH("unexpected type");
    }

    masm.bind(ool->rejoin());
    return;
  }

  if (fromType == MIRType::Double) {
    masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(),
                                   oolEntry);
  } else if (fromType == MIRType::Float32) {
    masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(),
                                    oolEntry);
  } else {
    MOZ_CRASH("unexpected type");
  }

  masm.bind(ool->rejoin());
}

void CodeGeneratorMIPSShared::visitOutOfLineBailout(OutOfLineBailout* ool) {
  // Push snapshotOffset and make sure stack is aligned.
  masm.subPtr(Imm32(sizeof(Value)), StackPointer);
  masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()),
                Address(StackPointer, 0));

  masm.jump(&deoptLabel_);
}

void CodeGeneratorMIPSShared::visitOutOfLineWasmTruncateCheck(
    OutOfLineWasmTruncateCheck* ool) {
  if (ool->toType() == MIRType::Int32) {
    masm.outOfLineWasmTruncateToInt32Check(
        ool->input(), ool->output(), ool->fromType(), ool->flags(),
        ool->rejoin(), ool->bytecodeOffset());
  } else {
    MOZ_ASSERT(ool->toType() == MIRType::Int64);
    masm.outOfLineWasmTruncateToInt64Check(
        ool->input(), ool->output64(), ool->fromType(), ool->flags(),
        ool->rejoin(), ool->bytecodeOffset());
  }
}

void CodeGenerator::visitCopySignF(LCopySignF* ins) {
  FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
  FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
  FloatRegister output = ToFloatRegister(ins->getDef(0));

  Register lhsi = ToRegister(ins->getTemp(0));
  Register rhsi = ToRegister(ins->getTemp(1));

  masm.moveFromFloat32(lhs, lhsi);
  masm.moveFromFloat32(rhs, rhsi);

  // Combine: insert the low 31 bits (exponent and mantissa) of lhs into
  // rhsi, keeping rhsi's sign bit.
  masm.ma_ins(rhsi, lhsi, 0, 31);

  masm.moveToFloat32(rhsi, output);
}

void CodeGenerator::visitCopySignD(LCopySignD* ins) {
  FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
  FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
  FloatRegister output = ToFloatRegister(ins->getDef(0));

  Register lhsi = ToRegister(ins->getTemp(0));
  Register rhsi = ToRegister(ins->getTemp(1));

  // Manipulate high words of double inputs.
  masm.moveFromDoubleHi(lhs, lhsi);
  masm.moveFromDoubleHi(rhs, rhsi);

  // Combine: insert the low 31 bits of lhs's high word into rhsi, keeping
  // rhsi's sign bit.
  masm.ma_ins(rhsi, lhsi, 0, 31);

  masm.moveToDoubleHi(rhsi, output);
}

void CodeGenerator::visitValue(LValue* value) {
  const ValueOperand out = ToOutValue(value);

  masm.moveValue(value->value(), out);
}

void CodeGenerator::visitDouble(LDouble* ins) {
  const LDefinition* out = ins->getDef(0);

  masm.loadConstantDouble(ins->getDouble(), ToFloatRegister(out));
}

void CodeGenerator::visitFloat32(LFloat32* ins) {
  const LDefinition* out = ins->getDef(0);
  masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out));
}

void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
  FloatRegister input = ToFloatRegister(test->input());

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.loadConstantDouble(0.0, ScratchDoubleReg);
  // If 0, or NaN, the result is false.

  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifTrue,
                  Assembler::DoubleNotEqual);
  } else {
    branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifFalse,
                  Assembler::DoubleEqualOrUnordered);
    jumpToBlock(ifTrue);
  }
}

void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
  FloatRegister input = ToFloatRegister(test->input());

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
  // If 0, or NaN, the result is false.

  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifTrue,
                  Assembler::DoubleNotEqual);
  } else {
    branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifFalse,
                  Assembler::DoubleEqualOrUnordered);
    jumpToBlock(ifTrue);
  }
}

void CodeGenerator::visitCompareD(LCompareD* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());
  Register dest = ToRegister(comp->output());

  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
  masm.ma_cmp_set_double(dest, lhs, rhs, cond);
}

void CodeGenerator::visitCompareF(LCompareF* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());
  Register dest = ToRegister(comp->output());

  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
  masm.ma_cmp_set_float32(dest, lhs, rhs, cond);
}

void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());

  Assembler::DoubleCondition cond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  MBasicBlock* ifTrue = comp->ifTrue();
  MBasicBlock* ifFalse = comp->ifFalse();

  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond);
  } else {
    branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse,
                  Assembler::InvertCondition(cond));
    jumpToBlock(ifTrue);
  }
}

void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());

  Assembler::DoubleCondition cond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  MBasicBlock* ifTrue = comp->ifTrue();
  MBasicBlock* ifFalse = comp->ifFalse();

  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(Assembler::SingleFloat, lhs, rhs, ifTrue, cond);
  } else {
    branchToBlock(Assembler::SingleFloat, lhs, rhs, ifFalse,
                  Assembler::InvertCondition(cond));
    jumpToBlock(ifTrue);
  }
}

void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir) {
  if (lir->right()->isConstant()) {
    masm.ma_and(ScratchRegister, ToRegister(lir->left()),
                Imm32(ToInt32(lir->right())));
  } else {
    masm.as_and(ScratchRegister, ToRegister(lir->left()),
                ToRegister(lir->right()));
  }
  emitBranch(ScratchRegister, ScratchRegister, lir->cond(), lir->ifTrue(),
             lir->ifFalse());
}

// See ../CodeGenerator.cpp for more information.
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {}

void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  masm.convertUInt32ToDouble(ToRegister(lir->input()),
                             ToFloatRegister(lir->output()));
}

void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
  masm.convertUInt32ToFloat32(ToRegister(lir->input()),
                              ToFloatRegister(lir->output()));
}

void CodeGenerator::visitNotI(LNotI* ins) {
  masm.cmp32Set(Assembler::Equal, ToRegister(ins->input()), Imm32(0),
                ToRegister(ins->output()));
}

void CodeGenerator::visitNotD(LNotD* ins) {
  // Since this is a logical NOT, we want to set a bit if
  // the double is falsey, which means 0.0, -0.0 or NaN.
  FloatRegister in = ToFloatRegister(ins->input());
  Register dest = ToRegister(ins->output());

  masm.loadConstantDouble(0.0, ScratchDoubleReg);
  masm.ma_cmp_set_double(dest, in, ScratchDoubleReg,
                         Assembler::DoubleEqualOrUnordered);
}

void CodeGenerator::visitNotF(LNotF* ins) {
  // Since this is a logical NOT, we want to set a bit if
  // the float32 is falsey, which means 0.0, -0.0 or NaN.
  FloatRegister in = ToFloatRegister(ins->input());
  Register dest = ToRegister(ins->output());

  masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
  masm.ma_cmp_set_float32(dest, in, ScratchFloat32Reg,
                          Assembler::DoubleEqualOrUnordered);
}

void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
  masm.memoryBarrier(ins->type());
}

void CodeGeneratorMIPSShared::generateInvalidateEpilogue() {
  // Ensure that there is enough space in the buffer for the OsiPoint
  // patching to occur. Otherwise, we could overwrite the invalidation
  // epilogue.
  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
    masm.nop();
  }

  masm.bind(&invalidate_);

  // Push the return address of the point that we bailed out at to the stack
  masm.Push(ra);

  // Push the Ion script onto the stack (when we determine what that
  // pointer is).
  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));

  // Jump to the invalidator which will replace the current frame.
  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
  masm.jump(thunk);
}

class js::jit::OutOfLineTableSwitch
    : public OutOfLineCodeBase<CodeGeneratorMIPSShared> {
  MTableSwitch* mir_;
  CodeLabel jumpLabel_;

  void accept(CodeGeneratorMIPSShared* codegen) {
    codegen->visitOutOfLineTableSwitch(this);
  }

 public:
  OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}

  MTableSwitch* mir() const { return mir_; }

  CodeLabel* jumpLabel() { return &jumpLabel_; }
};

void CodeGeneratorMIPSShared::visitOutOfLineTableSwitch(
    OutOfLineTableSwitch* ool) {
  MTableSwitch* mir = ool->mir();

  masm.haltingAlign(sizeof(void*));
  masm.bind(ool->jumpLabel());
  masm.addCodeLabel(*ool->jumpLabel());

  for (size_t i = 0; i < mir->numCases(); i++) {
    LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
    Label* caseheader = caseblock->label();
    uint32_t caseoffset = caseheader->offset();

    // The entries of the jump table need to be absolute addresses and thus
    // must be patched after codegen is finished.
    CodeLabel cl;
    masm.writeCodePointer(&cl);
    cl.target()->bind(caseoffset);
    masm.addCodeLabel(cl);
  }
}

void CodeGeneratorMIPSShared::emitTableSwitchDispatch(MTableSwitch* mir,
                                                      Register index,
                                                      Register base) {
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  // Subtract the low bound so the index is zero-based.
  if (mir->low() != 0) {
    masm.subPtr(Imm32(mir->low()), index);
  }

  // Jump to the default case if the input is out of range.
  int32_t cases = mir->numCases();
  masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);

  // To fill in the CodeLabels for the case entries, we need to first
  // generate the case entries (we don't yet know their offsets in the
  // instruction stream).
  OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
  addOutOfLineCode(ool, mir);

  // Load the base address of the jump table, then index into it by
  // pointer-sized entries.
  masm.ma_li(base, ool->jumpLabel());

  BaseIndex pointer(base, index, ScalePointer);

  // Jump to the right case.
  masm.branchToComputedAddress(pointer);
}
1508 
visitWasmHeapBase(LWasmHeapBase * ins)1509 void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) {
1510   MOZ_ASSERT(ins->tlsPtr()->isBogus());
1511   masm.movePtr(HeapReg, ToRegister(ins->output()));
1512 }
1513 
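// emitWasmLoad/emitWasmStore are templates shared by the aligned and
// unaligned LIR nodes. When lowering allocated a ptrCopy temp, it is passed
// through as a pointer scratch register (presumably so the access can
// adjust the pointer without clobbering the original); otherwise InvalidReg
// is passed.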
template <typename T>
void CodeGeneratorMIPSShared::emitWasmLoad(T* lir) {
  const MWasmLoad* mir = lir->mir();

  Register ptrScratch = InvalidReg;
  if (!lir->ptrCopy()->isBogusTemp()) {
    ptrScratch = ToRegister(lir->ptrCopy());
  }

  if (IsUnaligned(mir->access())) {
    if (IsFloatingPointType(mir->type())) {
      masm.wasmUnalignedLoadFP(mir->access(), HeapReg, ToRegister(lir->ptr()),
                               ptrScratch, ToFloatRegister(lir->output()),
                               ToRegister(lir->getTemp(1)), InvalidReg,
                               InvalidReg);
    } else {
      masm.wasmUnalignedLoad(mir->access(), HeapReg, ToRegister(lir->ptr()),
                             ptrScratch, ToRegister(lir->output()),
                             ToRegister(lir->getTemp(1)));
    }
  } else {
    masm.wasmLoad(mir->access(), HeapReg, ToRegister(lir->ptr()), ptrScratch,
                  ToAnyRegister(lir->output()));
  }
}

void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }

void CodeGenerator::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir) {
  emitWasmLoad(lir);
}

template <typename T>
void CodeGeneratorMIPSShared::emitWasmStore(T* lir) {
  const MWasmStore* mir = lir->mir();

  Register ptrScratch = InvalidReg;
  if (!lir->ptrCopy()->isBogusTemp()) {
    ptrScratch = ToRegister(lir->ptrCopy());
  }

  if (IsUnaligned(mir->access())) {
    if (mir->access().type() == Scalar::Float32 ||
        mir->access().type() == Scalar::Float64) {
      masm.wasmUnalignedStoreFP(mir->access(), ToFloatRegister(lir->value()),
                                HeapReg, ToRegister(lir->ptr()), ptrScratch,
                                ToRegister(lir->getTemp(1)));
    } else {
      masm.wasmUnalignedStore(mir->access(), ToRegister(lir->value()), HeapReg,
                              ToRegister(lir->ptr()), ptrScratch,
                              ToRegister(lir->getTemp(1)));
    }
  } else {
    masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), HeapReg,
                   ToRegister(lir->ptr()), ptrScratch);
  }
}

void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }

void CodeGenerator::visitWasmUnalignedStore(LWasmUnalignedStore* lir) {
  emitWasmStore(lir);
}

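// asm.js heap accesses differ from wasm ones in their out-of-bounds
// behaviour: rather than trapping, an out-of-range load produces a default
// value (NaN for floating-point loads, 0 for integer loads) and an
// out-of-range store is simply skipped. The explicit bounds-check branches
// below implement exactly that.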
void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  const MAsmJSLoadHeap* mir = ins->mir();
  const LAllocation* ptr = ins->ptr();
  const LDefinition* out = ins->output();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  bool isSigned;
  int size;
  bool isFloat = false;
  switch (mir->access().type()) {
    case Scalar::Int8:
      isSigned = true;
      size = 8;
      break;
    case Scalar::Uint8:
      isSigned = false;
      size = 8;
      break;
    case Scalar::Int16:
      isSigned = true;
      size = 16;
      break;
    case Scalar::Uint16:
      isSigned = false;
      size = 16;
      break;
    case Scalar::Int32:
      isSigned = true;
      size = 32;
      break;
    case Scalar::Uint32:
      isSigned = false;
      size = 32;
      break;
    case Scalar::Float64:
      isFloat = true;
      size = 64;
      break;
    case Scalar::Float32:
      isFloat = true;
      size = 32;
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  if (ptr->isConstant()) {
    MOZ_ASSERT(!mir->needsBoundsCheck());
    int32_t ptrImm = ptr->toConstant()->toInt32();
    MOZ_ASSERT(ptrImm >= 0);
    if (isFloat) {
      if (size == 32) {
        masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
      } else {
        masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
      }
    } else {
      masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
                   static_cast<LoadStoreSize>(size),
                   isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  Register ptrReg = ToRegister(ptr);

  if (!mir->needsBoundsCheck()) {
    if (isFloat) {
      if (size == 32) {
        masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
                         ToFloatRegister(out));
      } else {
        masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
                        ToFloatRegister(out));
      }
    } else {
      masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                   static_cast<LoadStoreSize>(size),
                   isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  Label done, outOfRange;
  masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
                         ToRegister(boundsCheckLimit), &outOfRange);
  // The offset is in bounds; load the value.
  if (isFloat) {
    if (size == 32) {
      masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
                       ToFloatRegister(out));
    } else {
      masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
                      ToFloatRegister(out));
    }
  } else {
    masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                 static_cast<LoadStoreSize>(size),
                 isSigned ? SignExtend : ZeroExtend);
  }
  masm.ma_b(&done, ShortJump);
  masm.bind(&outOfRange);
  // The offset is out of range; load the default value instead.
  if (isFloat) {
    if (size == 32) {
      masm.loadConstantFloat32(float(GenericNaN()), ToFloatRegister(out));
    } else {
      masm.loadConstantDouble(GenericNaN(), ToFloatRegister(out));
    }
  } else {
    masm.move32(Imm32(0), ToRegister(out));
  }
  masm.bind(&done);
}

void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
  const MAsmJSStoreHeap* mir = ins->mir();
  const LAllocation* value = ins->value();
  const LAllocation* ptr = ins->ptr();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  bool isSigned;
  int size;
  bool isFloat = false;
  switch (mir->access().type()) {
    case Scalar::Int8:
      isSigned = true;
      size = 8;
      break;
    case Scalar::Uint8:
      isSigned = false;
      size = 8;
      break;
    case Scalar::Int16:
      isSigned = true;
      size = 16;
      break;
    case Scalar::Uint16:
      isSigned = false;
      size = 16;
      break;
    case Scalar::Int32:
      isSigned = true;
      size = 32;
      break;
    case Scalar::Uint32:
      isSigned = false;
      size = 32;
      break;
    case Scalar::Float64:
      isFloat = true;
      size = 64;
      break;
    case Scalar::Float32:
      isFloat = true;
      size = 32;
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  if (ptr->isConstant()) {
    MOZ_ASSERT(!mir->needsBoundsCheck());
    int32_t ptrImm = ptr->toConstant()->toInt32();
    MOZ_ASSERT(ptrImm >= 0);

    if (isFloat) {
      FloatRegister freg = ToFloatRegister(value);
      Address addr(HeapReg, ptrImm);
      if (size == 32) {
        masm.storeFloat32(freg, addr);
      } else {
        masm.storeDouble(freg, addr);
      }
    } else {
      masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
                    static_cast<LoadStoreSize>(size),
                    isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  Register ptrReg = ToRegister(ptr);
  Address dstAddr(ptrReg, 0);

  if (!mir->needsBoundsCheck()) {
    if (isFloat) {
      FloatRegister freg = ToFloatRegister(value);
      BaseIndex bi(HeapReg, ptrReg, TimesOne);
      if (size == 32) {
        masm.storeFloat32(freg, bi);
      } else {
        masm.storeDouble(freg, bi);
      }
    } else {
      masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
                    static_cast<LoadStoreSize>(size),
                    isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  Label outOfRange;
  masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
                         ToRegister(boundsCheckLimit), &outOfRange);

  // The offset is in bounds; store the value.
  if (isFloat) {
    if (size == 32) {
      masm.storeFloat32(ToFloatRegister(value),
                        BaseIndex(HeapReg, ptrReg, TimesOne));
    } else {
      masm.storeDouble(ToFloatRegister(value),
                       BaseIndex(HeapReg, ptrReg, TimesOne));
    }
  } else {
    masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
                  static_cast<LoadStoreSize>(size),
                  isSigned ? SignExtend : ZeroExtend);
  }

  // An out-of-range store is simply skipped.
  masm.bind(&outOfRange);
}

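// The wasm atomics below thread valueTemp/offsetTemp/maskTemp through to
// the MacroAssembler. On MIPS these temps are presumably needed to build
// sub-word (8- and 16-bit) atomics out of word-sized LL/SC loops: they hold
// the shifted value, the bit offset of the lane within the word, and the
// lane mask. For word-sized accesses the allocator leaves them as bogus
// temps and InvalidReg is passed instead.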
void CodeGenerator::visitWasmCompareExchangeHeap(
    LWasmCompareExchangeHeap* ins) {
  MWasmCompareExchangeHeap* mir = ins->mir();
  Register ptrReg = ToRegister(ins->ptr());
  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  Register oldval = ToRegister(ins->oldValue());
  Register newval = ToRegister(ins->newValue());
  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());

  masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, valueTemp,
                           offsetTemp, maskTemp, ToRegister(ins->output()));
}

void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
  MWasmAtomicExchangeHeap* mir = ins->mir();
  Register ptrReg = ToRegister(ins->ptr());
  Register value = ToRegister(ins->value());
  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());

  masm.wasmAtomicExchange(mir->access(), srcAddr, value, valueTemp, offsetTemp,
                          maskTemp, ToRegister(ins->output()));
}

void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
  MOZ_ASSERT(ins->mir()->hasUses());
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  MWasmAtomicBinopHeap* mir = ins->mir();
  Register ptrReg = ToRegister(ins->ptr());
  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());

  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());

  masm.wasmAtomicFetchOp(mir->access(), mir->operation(),
                         ToRegister(ins->value()), srcAddr, valueTemp,
                         offsetTemp, maskTemp, ToRegister(ins->output()));
}

void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
    LWasmAtomicBinopHeapForEffect* ins) {
  MOZ_ASSERT(!ins->mir()->hasUses());
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  MWasmAtomicBinopHeap* mir = ins->mir();
  Register ptrReg = ToRegister(ins->ptr());
  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());

  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
  masm.wasmAtomicEffectOp(mir->access(), mir->operation(),
                          ToRegister(ins->value()), srcAddr, valueTemp,
                          offsetTemp, maskTemp);
}

void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
  const MWasmStackArg* mir = ins->mir();
  if (ins->arg()->isConstant()) {
    masm.storePtr(ImmWord(ToInt32(ins->arg())),
                  Address(StackPointer, mir->spOffset()));
  } else {
    if (ins->arg()->isGeneralReg()) {
      masm.storePtr(ToRegister(ins->arg()),
                    Address(StackPointer, mir->spOffset()));
    } else if (mir->input()->type() == MIRType::Double) {
      masm.storeDouble(ToFloatRegister(ins->arg()).doubleOverlay(),
                       Address(StackPointer, mir->spOffset()));
    } else {
      masm.storeFloat32(ToFloatRegister(ins->arg()),
                        Address(StackPointer, mir->spOffset()));
    }
  }
}

void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
  const MWasmStackArg* mir = ins->mir();
  Address dst(StackPointer, mir->spOffset());
  if (IsConstant(ins->arg())) {
    masm.store64(Imm64(ToInt64(ins->arg())), dst);
  } else {
    masm.store64(ToRegister64(ins->arg()), dst);
  }
}

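// WasmSelect relies on the register allocator reusing the true-expr input
// as the output register (asserted below); the false value is then
// conditionally moved in with movz (move if the condition register is
// zero), so the register-to-register case needs no branch at all.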
void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
  MIRType mirType = ins->mir()->type();

  Register cond = ToRegister(ins->condExpr());
  const LAllocation* falseExpr = ins->falseExpr();

  if (mirType == MIRType::Int32 || mirType == MIRType::RefOrNull) {
    Register out = ToRegister(ins->output());
    MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
               "true expr input is reused for output");
    if (falseExpr->isRegister()) {
      masm.as_movz(out, ToRegister(falseExpr), cond);
    } else {
      masm.cmp32Load32(Assembler::Zero, cond, cond, ToAddress(falseExpr), out);
    }
    return;
  }

  FloatRegister out = ToFloatRegister(ins->output());
  MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
             "true expr input is reused for output");

  if (falseExpr->isFloatReg()) {
    if (mirType == MIRType::Float32) {
      masm.as_movz(Assembler::SingleFloat, out, ToFloatRegister(falseExpr),
                   cond);
    } else if (mirType == MIRType::Double) {
      masm.as_movz(Assembler::DoubleFloat, out, ToFloatRegister(falseExpr),
                   cond);
    } else {
      MOZ_CRASH("unhandled type in visitWasmSelect!");
    }
  } else {
    Label done;
    masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);

    if (mirType == MIRType::Float32) {
      masm.loadFloat32(ToAddress(falseExpr), out);
    } else if (mirType == MIRType::Double) {
      masm.loadDouble(ToAddress(falseExpr), out);
    } else {
      MOZ_CRASH("unhandled type in visitWasmSelect!");
    }

    masm.bind(&done);
  }
}

void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
  emitWasmCompareAndSelect(ins);
}

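// Reinterpret moves raw bits between register files without conversion:
// mfc1 copies a 32-bit pattern from an FPR to a GPR, and mtc1 goes the
// other way. The Double/Int64 cases are lowered through a different LIR
// node, hence the MOZ_CRASH for them here.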
void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  MWasmReinterpret* ins = lir->mir();

  MIRType to = ins->type();
  DebugOnly<MIRType> from = ins->input()->type();

  switch (to) {
    case MIRType::Int32:
      MOZ_ASSERT(from == MIRType::Float32);
      masm.as_mfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
      break;
    case MIRType::Float32:
      MOZ_ASSERT(from == MIRType::Int32);
      masm.as_mtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
      break;
    case MIRType::Double:
    case MIRType::Int64:
      MOZ_CRASH("not handled by this LIR opcode");
    default:
      MOZ_CRASH("unexpected WasmReinterpret");
  }
}

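// Unsigned div/mod: pre-R6 MIPS divu writes the quotient to the LO register
// and the remainder to HI (read back with mflo/mfhi), while MIPSR6 dropped
// HI/LO in favour of three-operand divu/modu instructions, hence the
// #ifdef'd instruction selection below.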
void CodeGenerator::visitUDivOrMod(LUDivOrMod* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  Label done;

  // Prevent divide by zero.
  if (ins->canBeDivideByZero()) {
    if (ins->mir()->isTruncated()) {
      if (ins->trapOnError()) {
        Label nonZero;
        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->bytecodeOffset());
        masm.bind(&nonZero);
      } else {
        // Truncated division by zero yields zero (Infinity|0 == 0).
        Label notzero;
        masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
        masm.move32(Imm32(0), output);
        masm.ma_b(&done, ShortJump);
        masm.bind(&notzero);
      }
    } else {
      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
    }
  }

#ifdef MIPSR6
  masm.as_modu(output, lhs, rhs);
#else
  masm.as_divu(lhs, rhs);
  masm.as_mfhi(output);
#endif

  // If this is a division and the remainder is nonzero, bail out: the
  // result cannot be represented as an Int32 and must be a double.
  if (ins->mir()->isDiv()) {
    if (!ins->mir()->toDiv()->canTruncateRemainder()) {
      bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
    }
    // Get the quotient.
#ifdef MIPSR6
    masm.as_divu(output, lhs, rhs);
#else
    masm.as_mflo(output);
#endif
  }

  // An unsigned result with the high bit set cannot be represented as a
  // non-negative Int32, so bail out unless the result is truncated.
  if (!ins->mir()->isTruncated()) {
    bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
  }

  masm.bind(&done);
}

void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
  const MEffectiveAddress* mir = ins->mir();
  Register base = ToRegister(ins->base());
  Register index = ToRegister(ins->index());
  Register output = ToRegister(ins->output());

  BaseIndex address(base, index, mir->scale(), mir->displacement());
  masm.computeEffectiveAddress(address, output);
}

void CodeGenerator::visitNegI(LNegI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  masm.ma_negu(output, input);
}

void CodeGenerator::visitNegI64(LNegI64* ins) {
  Register64 input = ToRegister64(ins->getInt64Operand(0));
  MOZ_ASSERT(input == ToOutRegister64(ins));
  masm.neg64(input);
}

void CodeGenerator::visitNegD(LNegD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());

  masm.as_negd(output, input);
}

void CodeGenerator::visitNegF(LNegF* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());

  masm.as_negs(output, input);
}

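// The wasm offset is added with an explicit carry check: if base + offset
// wraps around 32 bits, the effective address would alias low memory, so
// we trap with OutOfBounds instead of computing a wrapped pointer.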
void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
  MWasmAddOffset* mir = lir->mir();
  Register base = ToRegister(lir->base());
  Register out = ToRegister(lir->output());

  Label ok;
  masm.ma_add32TestCarry(Assembler::CarryClear, out, base, Imm32(mir->offset()),
                         &ok);
  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
  masm.bind(&ok);
}

void CodeGenerator::visitAtomicTypedArrayElementBinop(
    LAtomicTypedArrayElementBinop* lir) {
  MOZ_ASSERT(!lir->mir()->isForEffect());

  AnyRegister output = ToAnyRegister(lir->output());
  Register elements = ToRegister(lir->elements());
  Register outTemp = ToTempRegisterOrInvalid(lir->temp2());
  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
  Register value = ToRegister(lir->value());

  Scalar::Type arrayType = lir->mir()->arrayType();

  if (lir->index()->isConstant()) {
    Address mem = ToAddress(elements, lir->index(), arrayType);
    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
                         lir->mir()->operation(), value, mem, valueTemp,
                         offsetTemp, maskTemp, outTemp, output);
  } else {
    BaseIndex mem(elements, ToRegister(lir->index()),
                  ScaleFromScalarType(arrayType));
    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
                         lir->mir()->operation(), value, mem, valueTemp,
                         offsetTemp, maskTemp, outTemp, output);
  }
}

void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
    LAtomicTypedArrayElementBinopForEffect* lir) {
  MOZ_ASSERT(lir->mir()->isForEffect());

  Register elements = ToRegister(lir->elements());
  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
  Register value = ToRegister(lir->value());
  Scalar::Type arrayType = lir->mir()->arrayType();

  if (lir->index()->isConstant()) {
    Address mem = ToAddress(elements, lir->index(), arrayType);
    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
                          lir->mir()->operation(), value, mem, valueTemp,
                          offsetTemp, maskTemp);
  } else {
    BaseIndex mem(elements, ToRegister(lir->index()),
                  ScaleFromScalarType(arrayType));
    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
                          lir->mir()->operation(), value, mem, valueTemp,
                          offsetTemp, maskTemp);
  }
}

void CodeGenerator::visitCompareExchangeTypedArrayElement(
    LCompareExchangeTypedArrayElement* lir) {
  Register elements = ToRegister(lir->elements());
  AnyRegister output = ToAnyRegister(lir->output());
  Register outTemp = ToTempRegisterOrInvalid(lir->temp());

  Register oldval = ToRegister(lir->oldval());
  Register newval = ToRegister(lir->newval());
  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());

  Scalar::Type arrayType = lir->mir()->arrayType();

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
                           newval, valueTemp, offsetTemp, maskTemp, outTemp,
                           output);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
                           newval, valueTemp, offsetTemp, maskTemp, outTemp,
                           output);
  }
}

void CodeGenerator::visitAtomicExchangeTypedArrayElement(
    LAtomicExchangeTypedArrayElement* lir) {
  Register elements = ToRegister(lir->elements());
  AnyRegister output = ToAnyRegister(lir->output());
  Register outTemp = ToTempRegisterOrInvalid(lir->temp());

  Register value = ToRegister(lir->value());
  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());

  Scalar::Type arrayType = lir->mir()->arrayType();

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
                          valueTemp, offsetTemp, maskTemp, outTemp, output);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
                          valueTemp, offsetTemp, maskTemp, outTemp, output);
  }
}

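// The 64-bit typed-array atomics operate on BigInt values: loadBigInt64
// first unboxes each BigInt operand into a Register64 temp, the atomic op
// then runs on the raw 64-bit payload, and emitCreateBigInt re-boxes the
// result into a fresh BigInt in the output register.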
void CodeGenerator::visitCompareExchangeTypedArrayElement64(
    LCompareExchangeTypedArrayElement64* lir) {
  Register elements = ToRegister(lir->elements());
  Register oldval = ToRegister(lir->oldval());
  Register newval = ToRegister(lir->newval());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register64 temp2 = ToRegister64(lir->temp2());
  Register out = ToRegister(lir->output());
  Register64 tempOut(out);

  Scalar::Type arrayType = lir->mir()->arrayType();

  masm.loadBigInt64(oldval, temp1);
  masm.loadBigInt64(newval, tempOut);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
                           temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
                           temp2);
  }

  emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
}

void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
    LAtomicExchangeTypedArrayElement64* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register64 temp2 = Register64(ToRegister(lir->temp2()));
  Register out = ToRegister(lir->output());

  Scalar::Type arrayType = lir->mir()->arrayType();

  masm.loadBigInt64(value, temp1);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
  }

  emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
}

void CodeGenerator::visitAtomicTypedArrayElementBinop64(
    LAtomicTypedArrayElementBinop64* lir) {
  MOZ_ASSERT(lir->mir()->hasUses());

  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register64 temp2 = ToRegister64(lir->temp2());
  Register out = ToRegister(lir->output());
  Register64 tempOut = Register64(out);

  Scalar::Type arrayType = lir->mir()->arrayType();
  AtomicOp atomicOp = lir->mir()->operation();

  masm.loadBigInt64(value, temp1);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
                         tempOut, temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
                         tempOut, temp2);
  }

  emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
}

void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
    LAtomicTypedArrayElementBinopForEffect64* lir) {
  MOZ_ASSERT(!lir->mir()->hasUses());

  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register64 temp2 = ToRegister64(lir->temp2());

  Scalar::Type arrayType = lir->mir()->arrayType();
  AtomicOp atomicOp = lir->mir()->operation();

  masm.loadBigInt64(value, temp1);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
                          temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
                          temp2);
  }
}

void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
  Register ptr = ToRegister(lir->ptr());
  Register64 oldValue = ToRegister64(lir->oldValue());
  Register64 newValue = ToRegister64(lir->newValue());
  Register64 output = ToOutRegister64(lir);
  uint32_t offset = lir->mir()->access().offset();

  BaseIndex addr(HeapReg, ptr, TimesOne, offset);
  masm.wasmCompareExchange64(lir->mir()->access(), addr, oldValue, newValue,
                             output);
}

void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
  Register ptr = ToRegister(lir->ptr());
  Register64 value = ToRegister64(lir->value());
  Register64 output = ToOutRegister64(lir);
  uint32_t offset = lir->mir()->access().offset();

  BaseIndex addr(HeapReg, ptr, TimesOne, offset);
  masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, output);
}

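// On MIPS32 a 64-bit temporary occupies a register pair, whereas MIPS64
// only needs a single register, hence the #ifdef when collecting the temp
// below.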
void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
  Register ptr = ToRegister(lir->ptr());
  Register64 value = ToRegister64(lir->value());
  Register64 output = ToOutRegister64(lir);
#ifdef JS_CODEGEN_MIPS32
  Register64 temp(ToRegister(lir->getTemp(0)), ToRegister(lir->getTemp(1)));
#else
  Register64 temp(ToRegister(lir->getTemp(0)));
#endif
  uint32_t offset = lir->mir()->access().offset();

  BaseIndex addr(HeapReg, ptr, TimesOne, offset);

  masm.wasmAtomicFetchOp64(lir->mir()->access(), lir->mir()->operation(), value,
                           addr, temp, output);
}

void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH("NYI"); }

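// Simd128 is not supported on MIPS; the stubs below crash if ever reached.
// Presumably lowering never emits these nodes when SIMD is unavailable.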
void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }

void CodeGenerator::visitWasmBitselectSimd128(LWasmBitselectSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128WithConstant(
    LWasmBinarySimd128WithConstant* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmVariableShiftSimd128(
    LWasmVariableShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmConstantShiftSimd128(
    LWasmConstantShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmSignReplicationSimd128(
    LWasmSignReplicationSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
    LWasmReplaceInt64LaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceAndBranchSimd128(
    LWasmReduceAndBranchSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128ToInt64(
    LWasmReduceSimd128ToInt64* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}