/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips-shared/Lowering-mips-shared.h"

#include "mozilla/MathAlgorithms.h"

#include "jit/Lowering.h"
#include "jit/MIR.h"

#include "jit/shared/Lowering-shared-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::FloorLog2;

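// Unlike x86, MIPS has no special byte-addressable register class, so the
// byte-op helpers simply defer to the generic register-use helpers.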
LAllocation LIRGeneratorMIPSShared::useByteOpRegister(MDefinition* mir) {
  return useRegister(mir);
}

LAllocation LIRGeneratorMIPSShared::useByteOpRegisterAtStart(MDefinition* mir) {
  return useRegisterAtStart(mir);
}

LAllocation LIRGeneratorMIPSShared::useByteOpRegisterOrNonDoubleConstant(
    MDefinition* mir) {
  return useRegisterOrNonDoubleConstant(mir);
}

LDefinition LIRGeneratorMIPSShared::tempByteOpRegister() { return temp(); }

// x = !y
void LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
                                         MDefinition* mir, MDefinition* input) {
  ins->setOperand(0, useRegister(input));
  define(
      ins, mir,
      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

// z = x+y
void LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
                                         MDefinition* mir, MDefinition* lhs,
                                         MDefinition* rhs) {
  ins->setOperand(0, useRegister(lhs));
  ins->setOperand(1, useRegisterOrConstant(rhs));
  define(
      ins, mir,
      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

void LIRGeneratorMIPSShared::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
    MDefinition* input) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(input));
  defineInt64ReuseInput(ins, mir, 0);
}

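// The output reuses the lhs register pair. A distinct rhs must therefore be
// kept live past the start of the instruction (not at-start), so it cannot
// be allocated to the registers being overwritten; when lhs and rhs may be
// the same LIR node, the rhs is used at-start, matching the lhs.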
void LIRGeneratorMIPSShared::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
                                         ? useInt64OrConstant(rhs)
                                         : useInt64OrConstantAtStart(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}

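// On MIPS32 a 64-bit multiply is synthesized from 32-bit multiplies and
// adds, which needs a scratch register; certain small and power-of-two
// related constants take shorter paths that avoid the temp (see the special
// cases in CodeGeneratorMIPSShared::visitMulI64).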
void LIRGeneratorMIPSShared::lowerForMulInt64(LMulI64* ins, MMul* mir,
                                              MDefinition* lhs,
                                              MDefinition* rhs) {
  bool needsTemp = false;
  bool cannotAliasRhs = false;
  bool reuseInput = true;

#ifdef JS_CODEGEN_MIPS32
  needsTemp = true;
  cannotAliasRhs = true;
  // See the documentation on willHaveDifferentLIRNodes; that test does not
  // allow additional constraints.
  MOZ_CRASH(
      "cannotAliasRhs cannot be used the way it is used in the guard below");
  if (rhs->isConstant()) {
    int64_t constant = rhs->toConstant()->toInt64();
    int32_t shift = mozilla::FloorLog2(constant);
    // See special cases in CodeGeneratorMIPSShared::visitMulI64
    if (constant >= -1 && constant <= 2) {
      needsTemp = false;
    }
    if (int64_t(1) << shift == constant) {
      needsTemp = false;
    }
    if (mozilla::IsPowerOfTwo(static_cast<uint32_t>(constant + 1)) ||
        mozilla::IsPowerOfTwo(static_cast<uint32_t>(constant - 1))) {
      reuseInput = false;
    }
  }
#endif
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES,
                       (willHaveDifferentLIRNodes(lhs, rhs) || cannotAliasRhs)
                           ? useInt64OrConstant(rhs)
                           : useInt64OrConstantAtStart(rhs));

  if (needsTemp) {
    ins->setTemp(0, temp());
  }
  if (reuseInput) {
    defineInt64ReuseInput(ins, mir, 0);
  } else {
    defineInt64(ins, mir);
  }
}

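// On MIPS32, 64-bit rotates cannot reuse the input register pair and need a
// temp when the rotate count is not a constant; plain shifts reuse the lhs.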
template <size_t Temps>
void LIRGeneratorMIPSShared::lowerForShiftInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
#ifdef JS_CODEGEN_MIPS32
  if (mir->isRotate()) {
    if (!rhs->isConstant()) {
      ins->setTemp(0, temp());
    }
    ins->setInt64Operand(0, useInt64Register(lhs));
  } else {
    ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  }
#else
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
#endif

  static_assert(LShiftI64::Rhs == INT64_PIECES,
                "Assume Rhs is located at INT64_PIECES.");
  static_assert(LRotateI64::Count == INT64_PIECES,
                "Assume Count is located at INT64_PIECES.");

  ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));

#ifdef JS_CODEGEN_MIPS32
  if (mir->isRotate()) {
    defineInt64(ins, mir);
  } else {
    defineInt64ReuseInput(ins, mir, 0);
  }
#else
  defineInt64ReuseInput(ins, mir, 0);
#endif
}

template void LIRGeneratorMIPSShared::lowerForShiftInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
template void LIRGeneratorMIPSShared::lowerForShiftInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 1>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs);

void LIRGeneratorMIPSShared::lowerForCompareI64AndBranch(
    MTest* mir, MCompare* comp, JSOp op, MDefinition* left, MDefinition* right,
    MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
  LCompareI64AndBranch* lir = new (alloc())
      LCompareI64AndBranch(comp, op, useInt64Register(left),
                           useInt64OrConstant(right), ifTrue, ifFalse);
  add(lir, mir);
}

void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
                                         MDefinition* mir, MDefinition* input) {
  ins->setOperand(0, useRegister(input));
  define(
      ins, mir,
      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

template <size_t Temps>
void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins,
                                         MDefinition* mir, MDefinition* lhs,
                                         MDefinition* rhs) {
  ins->setOperand(0, useRegister(lhs));
  ins->setOperand(1, useRegister(rhs));
  define(
      ins, mir,
      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

template void LIRGeneratorMIPSShared::lowerForFPU(
    LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs,
    MDefinition* rhs);
template void LIRGeneratorMIPSShared::lowerForFPU(
    LInstructionHelper<1, 2, 1>* ins, MDefinition* mir, MDefinition* lhs,
    MDefinition* rhs);

void LIRGeneratorMIPSShared::lowerForBitAndAndBranch(LBitAndAndBranch* baab,
                                                     MInstruction* mir,
                                                     MDefinition* lhs,
                                                     MDefinition* rhs) {
  baab->setOperand(0, useRegisterAtStart(lhs));
  baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
  add(baab, mir);
}

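// These truncations are lowered as calls to a wasm builtin, so the instance
// (TLS) pointer is pinned in its dedicated register, WasmTlsReg, for the
// call.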
void LIRGeneratorMIPSShared::lowerWasmBuiltinTruncateToInt32(
    MWasmBuiltinTruncateToInt32* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);

  if (opd->type() == MIRType::Double) {
    define(new (alloc()) LWasmBuiltinTruncateDToInt32(
               useRegister(opd), useFixed(ins->tls(), WasmTlsReg),
               LDefinition::BogusTemp()),
           ins);
    return;
  }

  define(new (alloc()) LWasmBuiltinTruncateFToInt32(
             useRegister(opd), useFixed(ins->tls(), WasmTlsReg),
             LDefinition::BogusTemp()),
         ins);
}

void LIRGeneratorMIPSShared::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
                                           MDefinition* mir, MDefinition* lhs,
                                           MDefinition* rhs) {
  ins->setOperand(0, useRegister(lhs));
  ins->setOperand(1, useRegisterOrConstant(rhs));
  define(ins, mir);
}

void LIRGeneratorMIPSShared::lowerDivI(MDiv* div) {
  if (div->isUnsigned()) {
    lowerUDiv(div);
    return;
  }

  // Division instructions are slow. Division by constant denominators can be
  // rewritten to use other instructions.
  if (div->rhs()->isConstant()) {
    int32_t rhs = div->rhs()->toConstant()->toInt32();
    // Check for division by a positive power of two, which is an easy and
    // important case to optimize. Note that other optimizations are also
    // possible; division by negative powers of two can be optimized in a
    // similar manner as positive powers of two, and division by other
    // constants can be optimized by a reciprocal multiplication technique.
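    // A sketch of the expansion LDivPowTwoI emits for a signed lhs
    // (rounding toward zero):
    //   if (lhs < 0) lhs += (1 << shift) - 1;
    //   result = lhs >> shift;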
    int32_t shift = FloorLog2(rhs);
    if (rhs > 0 && 1 << shift == rhs) {
      LDivPowTwoI* lir =
          new (alloc()) LDivPowTwoI(useRegister(div->lhs()), shift, temp());
      if (div->fallible()) {
        assignSnapshot(lir, div->bailoutKind());
      }
      define(lir, div);
      return;
    }
  }

  LDivI* lir = new (alloc())
      LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
  if (div->fallible()) {
    assignSnapshot(lir, div->bailoutKind());
  }
  define(lir, div);
}

void LIRGeneratorMIPSShared::lowerNegI(MInstruction* ins, MDefinition* input) {
  define(new (alloc()) LNegI(useRegisterAtStart(input)), ins);
}

void LIRGeneratorMIPSShared::lowerNegI64(MInstruction* ins,
                                         MDefinition* input) {
  defineInt64ReuseInput(new (alloc()) LNegI64(useInt64RegisterAtStart(input)),
                        ins, 0);
}

void LIRGenerator::visitAbs(MAbs* ins) {
  define(allocateAbs(ins, useRegisterAtStart(ins->input())), ins);
}

void LIRGeneratorMIPSShared::lowerMulI(MMul* mul, MDefinition* lhs,
                                       MDefinition* rhs) {
  LMulI* lir = new (alloc()) LMulI;
  if (mul->fallible()) {
    assignSnapshot(lir, mul->bailoutKind());
  }

  lowerForALU(lir, mul, lhs, rhs);
}

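// Signed modulus has two constant-rhs fast paths: a power-of-two rhs
// (LModPowTwoI) and an rhs of the form (1 << k) - 1 (LModMaskI, which
// reduces the dividend with masks instead of a divide instruction).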
void LIRGeneratorMIPSShared::lowerModI(MMod* mod) {
  if (mod->isUnsigned()) {
    lowerUMod(mod);
    return;
  }

  if (mod->rhs()->isConstant()) {
    int32_t rhs = mod->rhs()->toConstant()->toInt32();
    int32_t shift = FloorLog2(rhs);
    if (rhs > 0 && 1 << shift == rhs) {
      LModPowTwoI* lir =
          new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
      if (mod->fallible()) {
        assignSnapshot(lir, mod->bailoutKind());
      }
      define(lir, mod);
      return;
    } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
      LModMaskI* lir = new (alloc())
          LModMaskI(useRegister(mod->lhs()), temp(LDefinition::GENERAL),
                    temp(LDefinition::GENERAL), shift + 1);
      if (mod->fallible()) {
        assignSnapshot(lir, mod->bailoutKind());
      }
      define(lir, mod);
      return;
    }
  }
  LModI* lir =
      new (alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()),
                          temp(LDefinition::GENERAL));

  if (mod->fallible()) {
    assignSnapshot(lir, mod->bailoutKind());
  }
  define(lir, mod);
}

void LIRGenerator::visitPowHalf(MPowHalf* ins) {
  MDefinition* input = ins->input();
  MOZ_ASSERT(input->type() == MIRType::Double);
  LPowHalfD* lir = new (alloc()) LPowHalfD(useRegisterAtStart(input));
  defineReuseInput(lir, ins, 0);
}

void LIRGeneratorMIPSShared::lowerWasmSelectI(MWasmSelect* select) {
  auto* lir = new (alloc())
      LWasmSelect(useRegisterAtStart(select->trueExpr()),
                  useAny(select->falseExpr()), useRegister(select->condExpr()));
  defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
}

void LIRGeneratorMIPSShared::lowerWasmSelectI64(MWasmSelect* select) {
  auto* lir = new (alloc()) LWasmSelectI64(
      useInt64RegisterAtStart(select->trueExpr()),
      useInt64(select->falseExpr()), useRegister(select->condExpr()));
  defineInt64ReuseInput(lir, select, LWasmSelectI64::TrueExprIndex);
}

LTableSwitch* LIRGeneratorMIPSShared::newLTableSwitch(
    const LAllocation& in, const LDefinition& inputCopy,
    MTableSwitch* tableswitch) {
  return new (alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
}

LTableSwitchV* LIRGeneratorMIPSShared::newLTableSwitchV(
    MTableSwitch* tableswitch) {
  return new (alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)), temp(),
                                     tempDouble(), temp(), tableswitch);
}

void LIRGeneratorMIPSShared::lowerUrshD(MUrsh* mir) {
  MDefinition* lhs = mir->lhs();
  MDefinition* rhs = mir->rhs();

  MOZ_ASSERT(lhs->type() == MIRType::Int32);
  MOZ_ASSERT(rhs->type() == MIRType::Int32);

  LUrshD* lir = new (alloc())
      LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
  define(lir, mir);
}

void LIRGeneratorMIPSShared::lowerPowOfTwoI(MPow* mir) {
  int32_t base = mir->input()->toConstant()->toInt32();
  MDefinition* power = mir->power();

  auto* lir = new (alloc()) LPowOfTwoI(useRegister(power), base);
  assignSnapshot(lir, mir->bailoutKind());
  define(lir, mir);
}

void LIRGeneratorMIPSShared::lowerBigIntLsh(MBigIntLsh* ins) {
  auto* lir = new (alloc()) LBigIntLsh(
      useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
  define(lir, ins);
  assignSafepoint(lir, ins);
}

void LIRGeneratorMIPSShared::lowerBigIntRsh(MBigIntRsh* ins) {
  auto* lir = new (alloc()) LBigIntRsh(
      useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
  define(lir, ins);
  assignSafepoint(lir, ins);
}

void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
  if (ins->type() == MIRType::Int32) {
    define(new (alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
  } else if (ins->type() == MIRType::Float32) {
    define(new (alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
  } else {
    MOZ_ASSERT(ins->type() == MIRType::Double);
    define(new (alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
  }
}

void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
  auto* lir = new (alloc()) LWasmHeapBase(LAllocation());
  define(lir, ins);
}

void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
  MDefinition* base = ins->base();
  // 'base' is a GPR but may be of either type. If it is 32-bit, it is
  // sign-extended on the mips64 platform, so we must explicitly promote it
  // to 64-bit when using it as an index register in memory accesses.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  LAllocation ptr;
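  // On MIPS32 a 64-bit result occupies a register pair, and the first half
  // of the pair may be written before the base register has been fully
  // consumed, so the base must stay live past the start of the instruction.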
#ifdef JS_CODEGEN_MIPS32
  if (ins->type() == MIRType::Int64) {
    ptr = useRegister(base);
  } else {
    ptr = useRegisterAtStart(base);
  }
#else
  ptr = useRegisterAtStart(base);
#endif

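  // When the access carries a constant offset, the code generator computes
  // the effective address in a temp initialized with a copy of the base,
  // leaving the base register itself intact.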
  if (IsUnaligned(ins->access())) {
    if (ins->type() == MIRType::Int64) {
      auto* lir = new (alloc()) LWasmUnalignedLoadI64(ptr, temp());
      if (ins->access().offset()) {
        lir->setTemp(0, tempCopy(base, 0));
      }

      defineInt64(lir, ins);
      return;
    }

    auto* lir = new (alloc()) LWasmUnalignedLoad(ptr, temp());
    if (ins->access().offset()) {
      lir->setTemp(0, tempCopy(base, 0));
    }

    define(lir, ins);
    return;
  }

  if (ins->type() == MIRType::Int64) {
#ifdef JS_CODEGEN_MIPS32
    if (ins->access().isAtomic()) {
      auto* lir = new (alloc()) LWasmAtomicLoadI64(ptr);
      defineInt64(lir, ins);
      return;
    }
#endif
    auto* lir = new (alloc()) LWasmLoadI64(ptr);
    if (ins->access().offset()) {
      lir->setTemp(0, tempCopy(base, 0));
    }

    defineInt64(lir, ins);
    return;
  }

  auto* lir = new (alloc()) LWasmLoad(ptr);
  if (ins->access().offset()) {
    lir->setTemp(0, tempCopy(base, 0));
  }

  define(lir, ins);
}

void LIRGenerator::visitWasmStore(MWasmStore* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  MDefinition* value = ins->value();

  if (IsUnaligned(ins->access())) {
    LAllocation baseAlloc = useRegisterAtStart(base);
    if (ins->access().type() == Scalar::Int64) {
      LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
      auto* lir =
          new (alloc()) LWasmUnalignedStoreI64(baseAlloc, valueAlloc, temp());
      if (ins->access().offset()) {
        lir->setTemp(0, tempCopy(base, 0));
      }

      add(lir, ins);
      return;
    }

    LAllocation valueAlloc = useRegisterAtStart(value);
    auto* lir =
        new (alloc()) LWasmUnalignedStore(baseAlloc, valueAlloc, temp());
    if (ins->access().offset()) {
      lir->setTemp(0, tempCopy(base, 0));
    }

    add(lir, ins);
    return;
  }

  if (ins->access().type() == Scalar::Int64) {
#ifdef JS_CODEGEN_MIPS32
    if (ins->access().isAtomic()) {
      auto* lir = new (alloc()) LWasmAtomicStoreI64(
          useRegister(base), useInt64Register(value), temp());
      add(lir, ins);
      return;
    }
#endif

    LAllocation baseAlloc = useRegisterAtStart(base);
    LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
    auto* lir = new (alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
    if (ins->access().offset()) {
      lir->setTemp(0, tempCopy(base, 0));
    }

    add(lir, ins);
    return;
  }

  LAllocation baseAlloc = useRegisterAtStart(base);
  LAllocation valueAlloc = useRegisterAtStart(value);
  auto* lir = new (alloc()) LWasmStore(baseAlloc, valueAlloc);
  if (ins->access().offset()) {
    lir->setTemp(0, tempCopy(base, 0));
  }

  add(lir, ins);
}

void LIRGeneratorMIPSShared::lowerUDiv(MDiv* div) {
  MDefinition* lhs = div->getOperand(0);
  MDefinition* rhs = div->getOperand(1);

  LUDivOrMod* lir = new (alloc()) LUDivOrMod;
  lir->setOperand(0, useRegister(lhs));
  lir->setOperand(1, useRegister(rhs));
  if (div->fallible()) {
    assignSnapshot(lir, div->bailoutKind());
  }

  define(lir, div);
}

void LIRGeneratorMIPSShared::lowerUMod(MMod* mod) {
  MDefinition* lhs = mod->getOperand(0);
  MDefinition* rhs = mod->getOperand(1);

  LUDivOrMod* lir = new (alloc()) LUDivOrMod;
  lir->setOperand(0, useRegister(lhs));
  lir->setOperand(1, useRegister(rhs));
  if (mod->fallible()) {
    assignSnapshot(lir, mod->bailoutKind());
  }

  define(lir, mod);
}

void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
  LWasmUint32ToDouble* lir =
      new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
  define(lir, ins);
}

void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
  LWasmUint32ToFloat32* lir =
      new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
  define(lir, ins);
}

void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
  MOZ_ASSERT(ins->access().offset() == 0);

  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);
  LAllocation baseAlloc;
  LAllocation limitAlloc;
  // For MIPS it is best to keep the 'base' in a register if a bounds check
  // is needed.
  if (base->isConstant() && !ins->needsBoundsCheck()) {
    // A bounds check is only skipped for a positive index.
    MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
    baseAlloc = LAllocation(base->toConstant());
  } else {
    baseAlloc = useRegisterAtStart(base);
    if (ins->needsBoundsCheck()) {
      MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
      MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
      limitAlloc = useRegisterAtStart(boundsCheckLimit);
    }
  }

  define(new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, LAllocation()),
         ins);
}

void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
  MOZ_ASSERT(ins->access().offset() == 0);

  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);
  LAllocation baseAlloc;
  LAllocation limitAlloc;
  if (base->isConstant() && !ins->needsBoundsCheck()) {
    MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
    baseAlloc = LAllocation(base->toConstant());
  } else {
    baseAlloc = useRegisterAtStart(base);
    if (ins->needsBoundsCheck()) {
      MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
      MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
      limitAlloc = useRegisterAtStart(boundsCheckLimit);
    }
  }

  add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
                                    limitAlloc, LAllocation()),
      ins);
}

void LIRGenerator::visitSubstr(MSubstr* ins) {
  LSubstr* lir = new (alloc())
      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
              useRegister(ins->length()), temp(), temp(), tempByteOpRegister());
  define(lir, ins);
  assignSafepoint(lir, ins);
}

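// MIPS LL/SC only operate on naturally aligned words, so 8- and 16-bit
// atomics are emulated on the containing aligned word. The valueTemp,
// offsetTemp, and maskTemp registers used throughout the atomic lowerings
// below hold the value shifted into position, the element's bit offset
// within the word, and the mask selecting the element's bits.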
void LIRGenerator::visitCompareExchangeTypedArrayElement(
    MCompareExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
  MOZ_ASSERT(ins->arrayType() != Scalar::Float64);

  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->arrayType());

  const LAllocation newval = useRegister(ins->newval());
  const LAllocation oldval = useRegister(ins->oldval());

  if (Scalar::isBigIntType(ins->arrayType())) {
    LInt64Definition temp1 = tempInt64();
    LInt64Definition temp2 = tempInt64();

    auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
        elements, index, oldval, newval, temp1, temp2);
    define(lir, ins);
    assignSafepoint(lir, ins);
    return;
  }

  // If the target is a floating register then we need a temp at the
  // CodeGenerator level for creating the result.

  LDefinition outTemp = LDefinition::BogusTemp();
  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
    outTemp = temp();
  }

  if (Scalar::byteSize(ins->arrayType()) < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  LCompareExchangeTypedArrayElement* lir = new (alloc())
      LCompareExchangeTypedArrayElement(elements, index, oldval, newval,
                                        outTemp, valueTemp, offsetTemp,
                                        maskTemp);

  define(lir, ins);
}

void LIRGenerator::visitAtomicExchangeTypedArrayElement(
    MAtomicExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->arrayType());

  const LAllocation value = useRegister(ins->value());

  if (Scalar::isBigIntType(ins->arrayType())) {
    LInt64Definition temp1 = tempInt64();
    LDefinition temp2 = temp();

    auto* lir = new (alloc()) LAtomicExchangeTypedArrayElement64(
        elements, index, value, temp1, temp2);
    define(lir, ins);
    assignSafepoint(lir, ins);
    return;
  }

  // If the target is a floating register then we need a temp at the
  // CodeGenerator level for creating the result.

  MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);

  LDefinition outTemp = LDefinition::BogusTemp();
  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  if (ins->arrayType() == Scalar::Uint32) {
    MOZ_ASSERT(ins->type() == MIRType::Double);
    outTemp = temp();
  }

  if (Scalar::byteSize(ins->arrayType()) < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  LAtomicExchangeTypedArrayElement* lir =
      new (alloc()) LAtomicExchangeTypedArrayElement(
          elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);

  define(lir, ins);
}

void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc()) LWasmCompareExchangeI64(
        useRegister(base), useInt64Register(ins->oldValue()),
        useInt64Register(ins->newValue()));
    defineInt64(lir, ins);
    return;
  }

  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  if (ins->access().byteSize() < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  LWasmCompareExchangeHeap* lir = new (alloc()) LWasmCompareExchangeHeap(
      useRegister(base), useRegister(ins->oldValue()),
      useRegister(ins->newValue()), valueTemp, offsetTemp, maskTemp);

  define(lir, ins);
}

void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc()) LWasmAtomicExchangeI64(
        useRegister(base), useInt64Register(ins->value()));
    defineInt64(lir, ins);
    return;
  }

  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  if (ins->access().byteSize() < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  LWasmAtomicExchangeHeap* lir = new (alloc())
      LWasmAtomicExchangeHeap(useRegister(base), useRegister(ins->value()),
                              valueTemp, offsetTemp, maskTemp);
  define(lir, ins);
}

void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc())
        LWasmAtomicBinopI64(useRegister(base), useInt64Register(ins->value()));
    lir->setTemp(0, temp());
#ifdef JS_CODEGEN_MIPS32
    lir->setTemp(1, temp());
#endif
    defineInt64(lir, ins);
    return;
  }

  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  if (ins->access().byteSize() < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  if (!ins->hasUses()) {
    LWasmAtomicBinopHeapForEffect* lir = new (alloc())
        LWasmAtomicBinopHeapForEffect(useRegister(base),
                                      useRegister(ins->value()), valueTemp,
                                      offsetTemp, maskTemp);
    add(lir, ins);
    return;
  }

  LWasmAtomicBinopHeap* lir = new (alloc())
      LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
                           valueTemp, offsetTemp, maskTemp);

  define(lir, ins);
}

void LIRGenerator::visitAtomicTypedArrayElementBinop(
    MAtomicTypedArrayElementBinop* ins) {
  MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
  MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
  MOZ_ASSERT(ins->arrayType() != Scalar::Float64);

  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->arrayType());
  const LAllocation value = useRegister(ins->value());

  if (Scalar::isBigIntType(ins->arrayType())) {
    LInt64Definition temp1 = tempInt64();
    LInt64Definition temp2 = tempInt64();

    // Case 1: the result of the operation is not used.
    //
    // We can omit allocating the result BigInt.

    if (ins->isForEffect()) {
      auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
          elements, index, value, temp1, temp2);
      add(lir, ins);
      return;
    }

    // Case 2: the result of the operation is used.

    auto* lir = new (alloc())
        LAtomicTypedArrayElementBinop64(elements, index, value, temp1, temp2);
    define(lir, ins);
    assignSafepoint(lir, ins);
    return;
  }

  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  if (Scalar::byteSize(ins->arrayType()) < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  if (ins->isForEffect()) {
    LAtomicTypedArrayElementBinopForEffect* lir =
        new (alloc()) LAtomicTypedArrayElementBinopForEffect(
            elements, index, value, valueTemp, offsetTemp, maskTemp);
    add(lir, ins);
    return;
  }

  // For a Uint32Array with a known double result we need a temp for
  // the intermediate output.

  LDefinition outTemp = LDefinition::BogusTemp();

  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
    outTemp = temp();
  }

  LAtomicTypedArrayElementBinop* lir =
      new (alloc()) LAtomicTypedArrayElementBinop(
          elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
  define(lir, ins);
}

void LIRGenerator::visitCopySign(MCopySign* ins) {
  MDefinition* lhs = ins->lhs();
  MDefinition* rhs = ins->rhs();

  MOZ_ASSERT(IsFloatingPointType(lhs->type()));
  MOZ_ASSERT(lhs->type() == rhs->type());
  MOZ_ASSERT(lhs->type() == ins->type());

  LInstructionHelper<1, 2, 2>* lir;
  if (lhs->type() == MIRType::Double) {
    lir = new (alloc()) LCopySignD();
  } else {
    lir = new (alloc()) LCopySignF();
  }

  lir->setTemp(0, temp());
  lir->setTemp(1, temp());

  lir->setOperand(0, useRegisterAtStart(lhs));
  lir->setOperand(1, useRegister(rhs));
  defineReuseInput(lir, ins, 0);
}

void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
  defineInt64(
      new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
}

void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
  defineInt64(new (alloc())
                  LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
              ins);
}

// On MIPS we specialize only the cases where both the compare and the
// select operate on {U,}Int32 values.
bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
    MCompare::CompareType compTy, MIRType insTy) {
  return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
                                     compTy == MCompare::Compare_UInt32);
}

void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
                                                   MDefinition* lhs,
                                                   MDefinition* rhs,
                                                   MCompare::CompareType compTy,
                                                   JSOp jsop) {
  MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
  auto* lir = new (alloc()) LWasmCompareAndSelect(
      useRegister(lhs), useRegister(rhs), compTy, jsop,
      useRegisterAtStart(ins->trueExpr()), useRegister(ins->falseExpr()));
  defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
}

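// Wasm SIMD is not yet implemented on MIPS; the lowerings below are stubs
// that crash if reached.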
void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
  MOZ_CRASH("ternary SIMD NYI");
}

void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
  MOZ_CRASH("binary SIMD NYI");
}

#ifdef ENABLE_WASM_SIMD
bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
    int8_t shuffle[16]) {
  return false;
}
bool MWasmTernarySimd128::canRelaxBitselect() { return false; }
#endif

bool MWasmBinarySimd128::specializeForConstantRhs() {
  // There are probably many cases we will want to specialize here.
  return false;
}

void LIRGenerator::visitWasmBinarySimd128WithConstant(
    MWasmBinarySimd128WithConstant* ins) {
  MOZ_CRASH("binary SIMD with constant NYI");
}

void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
  MOZ_CRASH("shift SIMD NYI");
}

void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
  MOZ_CRASH("shuffle SIMD NYI");
}

void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
  MOZ_CRASH("replace-lane SIMD NYI");
}

void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
  MOZ_CRASH("scalar-to-SIMD NYI");
}

void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
  MOZ_CRASH("unary SIMD NYI");
}

void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
  MOZ_CRASH("reduce-SIMD NYI");
}

void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
  MOZ_CRASH("load-lane SIMD NYI");
}

void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
  MOZ_CRASH("store-lane SIMD NYI");
}