/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm/Lowering-arm.h"

#include "mozilla/MathAlgorithms.h"

#include "jit/arm/Assembler-arm.h"
#include "jit/Lowering.h"
#include "jit/MIR.h"
#include "jit/shared/Lowering-shared-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::FloorLog2;

LBoxAllocation LIRGeneratorARM::useBoxFixed(MDefinition* mir, Register reg1,
                                            Register reg2, bool useAtStart) {
  MOZ_ASSERT(mir->type() == MIRType::Value);
  MOZ_ASSERT(reg1 != reg2);

  ensureDefined(mir);
  return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
                        LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
}

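// Unlike x86, where only some registers have 8-bit encodings, every ARM
// general-purpose register can hold byte-sized operands, so the byte-op
// helpers below simply defer to the generic allocation policies.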
LAllocation LIRGeneratorARM::useByteOpRegister(MDefinition* mir) {
  return useRegister(mir);
}

LAllocation LIRGeneratorARM::useByteOpRegisterAtStart(MDefinition* mir) {
  return useRegisterAtStart(mir);
}

LAllocation LIRGeneratorARM::useByteOpRegisterOrNonDoubleConstant(
    MDefinition* mir) {
  return useRegisterOrNonDoubleConstant(mir);
}

LDefinition LIRGeneratorARM::tempByteOpRegister() { return temp(); }

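// On 32-bit ARM, a Value is a "nunbox": a 32-bit type tag and a 32-bit
// payload held in two registers and two consecutive virtual registers (the
// payload vreg follows the type vreg). Boxing and unboxing therefore shuffle
// register pairs around rather than masking tag bits as on 64-bit targets.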
void LIRGenerator::visitBox(MBox* box) {
  MDefinition* inner = box->getOperand(0);

  // If the box wraps a floating-point value, it needs a new register.
  if (IsFloatingPointType(inner->type())) {
    defineBox(new (alloc()) LBoxFloatingPoint(
                  useRegisterAtStart(inner), tempCopy(inner, 0), inner->type()),
              box);
    return;
  }

  if (box->canEmitAtUses()) {
    emitAtUses(box);
    return;
  }

  if (inner->isConstant()) {
    defineBox(new (alloc()) LValue(inner->toConstant()->toJSValue()), box);
    return;
  }

  LBox* lir = new (alloc()) LBox(use(inner), inner->type());

  // Otherwise, we should not define a new register for the payload portion
  // of the output, so bypass defineBox().
  uint32_t vreg = getVirtualRegister();

  // Note that because we're using BogusTemp(), we do not change the type of
  // the definition. We also do not define the first output as "TYPE",
  // because it has no corresponding payload at (vreg + 1). Also note that
  // although we copy the input's original type for the payload half of the
  // definition, this is only for clarity. BogusTemp() definitions are
  // ignored.
  lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
  lir->setDef(1, LDefinition::BogusTemp());
  box->setVirtualRegister(vreg);
  add(lir);
}

void LIRGenerator::visitUnbox(MUnbox* unbox) {
  MDefinition* inner = unbox->getOperand(0);

  if (inner->type() == MIRType::ObjectOrNull) {
    LUnboxObjectOrNull* lir =
        new (alloc()) LUnboxObjectOrNull(useRegisterAtStart(inner));
    if (unbox->fallible()) {
      assignSnapshot(lir, unbox->bailoutKind());
    }
    defineReuseInput(lir, unbox, 0);
    return;
  }

  // An unbox on arm reads in a type tag (either in memory or a register) and
  // a payload. Unlike most instructions consuming a box, we ask for the type
  // second, so that the result can re-use the first input.
  MOZ_ASSERT(inner->type() == MIRType::Value);

  ensureDefined(inner);

  if (IsFloatingPointType(unbox->type())) {
    LUnboxFloatingPoint* lir =
        new (alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
    if (unbox->fallible()) {
      assignSnapshot(lir, unbox->bailoutKind());
    }
    define(lir, unbox);
    return;
  }

  // Swap the order we use the box pieces so we can re-use the payload register.
  LUnbox* lir = new (alloc()) LUnbox;
  lir->setOperand(0, usePayloadInRegisterAtStart(inner));
  lir->setOperand(1, useType(inner, LUse::REGISTER));

  if (unbox->fallible()) {
    assignSnapshot(lir, unbox->bailoutKind());
  }

  // Types and payloads form two separate intervals. If the type becomes dead
  // before the payload, it could be used as a Value without the type being
  // recoverable. Unbox's purpose is to eagerly kill the definition of a type
  // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
  // Instead, we create a new virtual register.
  defineReuseInput(lir, unbox, 0);
}

void LIRGenerator::visitReturn(MReturn* ret) {
  MDefinition* opd = ret->getOperand(0);
  MOZ_ASSERT(opd->type() == MIRType::Value);

  LReturn* ins = new (alloc()) LReturn;
  ins->setOperand(0, LUse(JSReturnReg_Type));
  ins->setOperand(1, LUse(JSReturnReg_Data));
  fillBoxUses(ins, 0, opd);
  add(ins);
}

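// On this 32-bit target an Int64 occupies two consecutive virtual registers:
// the low half at INT64LOW_INDEX and the high half at INT64HIGH_INDEX (as the
// assertion below checks). A 64-bit phi is therefore lowered as a pair of
// 32-bit phis.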
void LIRGeneratorARM::defineInt64Phi(MPhi* phi, size_t lirIndex) {
  LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
  LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);

  uint32_t lowVreg = getVirtualRegister();

  phi->setVirtualRegister(lowVreg);

  uint32_t highVreg = getVirtualRegister();
  MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);

  low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
  high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
  annotate(high);
  annotate(low);
}

void LIRGeneratorARM::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
                                         LBlock* block, size_t lirIndex) {
  MDefinition* operand = phi->getOperand(inputPosition);
  LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
  LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
  low->setOperand(inputPosition,
                  LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
  high->setOperand(
      inputPosition,
      LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
}

// x = !y
void LIRGeneratorARM::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
                                  MDefinition* mir, MDefinition* input) {
  ins->setOperand(
      0, ins->snapshot() ? useRegister(input) : useRegisterAtStart(input));
  define(
      ins, mir,
      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

// z = x+y
void LIRGeneratorARM::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
                                  MDefinition* mir, MDefinition* lhs,
                                  MDefinition* rhs) {
  // Some operations (e.g. MulI) need to check their inputs after the result
  // has been written, but only on bailout paths, so the inputs may be used
  // at start when there is no snapshot.
  ins->setOperand(0,
                  ins->snapshot() ? useRegister(lhs) : useRegisterAtStart(lhs));
  ins->setOperand(1, ins->snapshot() ? useRegisterOrConstant(rhs)
                                     : useRegisterOrConstantAtStart(rhs));
  define(
      ins, mir,
      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

void LIRGeneratorARM::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}

void LIRGeneratorARM::lowerForMulInt64(LMulI64* ins, MMul* mir,
                                       MDefinition* lhs, MDefinition* rhs) {
  bool needsTemp = true;

  if (rhs->isConstant()) {
    int64_t constant = rhs->toConstant()->toInt64();
    int32_t shift = mozilla::FloorLog2(constant);
    // See special cases in CodeGeneratorARM::visitMulI64.
    if (constant >= -1 && constant <= 2) {
      needsTemp = false;
    }
    if (constant > 0 && int64_t(1) << shift == constant) {
      needsTemp = false;
    }
  }
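
  // Example: multiplying by a small power-of-two constant such as 8 lets the
  // code generator shift the (low, high) halves in place, roughly (a sketch,
  // not the exact emitted sequence):
  //   high = (high << 3) | (uint32_t(low) >> 29);
  //   low = low << 3;
  // so no scratch register is needed in those cases.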
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
  if (needsTemp) {
    ins->setTemp(0, temp());
  }

  defineInt64ReuseInput(ins, mir, 0);
}

void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
                                  MDefinition* mir, MDefinition* input) {
  ins->setOperand(0, useRegisterAtStart(input));
  define(
      ins, mir,
      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

template <size_t Temps>
void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins,
                                  MDefinition* mir, MDefinition* lhs,
                                  MDefinition* rhs) {
  ins->setOperand(0, useRegisterAtStart(lhs));
  ins->setOperand(1, useRegisterAtStart(rhs));
  define(
      ins, mir,
      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

template void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, 0>* ins,
                                           MDefinition* mir, MDefinition* lhs,
                                           MDefinition* rhs);
template void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, 1>* ins,
                                           MDefinition* mir, MDefinition* lhs,
                                           MDefinition* rhs);

void LIRGeneratorARM::lowerForBitAndAndBranch(LBitAndAndBranch* baab,
                                              MInstruction* mir,
                                              MDefinition* lhs,
                                              MDefinition* rhs) {
  baab->setOperand(0, useRegisterAtStart(lhs));
  baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
  add(baab, mir);
}

void LIRGeneratorARM::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
                                           LBlock* block, size_t lirIndex) {
  MDefinition* operand = phi->getOperand(inputPosition);
  LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
  LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
  type->setOperand(
      inputPosition,
      LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
  payload->setOperand(inputPosition,
                      LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
}

void LIRGeneratorARM::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
                                    MDefinition* mir, MDefinition* lhs,
                                    MDefinition* rhs) {
  ins->setOperand(0, useRegister(lhs));
  ins->setOperand(1, useRegisterOrConstant(rhs));
  define(ins, mir);
}

template <size_t Temps>
void LIRGeneratorARM::lowerForShiftInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  if (mir->isRotate() && !rhs->isConstant()) {
    ins->setTemp(0, temp());
  }

  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}

template void LIRGeneratorARM::lowerForShiftInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
template void LIRGeneratorARM::lowerForShiftInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 1>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs);

void LIRGeneratorARM::lowerDivI(MDiv* div) {
  if (div->isUnsigned()) {
    lowerUDiv(div);
    return;
  }

  // Division instructions are slow. Division by constant denominators can be
  // rewritten to use other instructions.
  if (div->rhs()->isConstant()) {
    int32_t rhs = div->rhs()->toConstant()->toInt32();
    // Check for division by a positive power of two, which is an easy and
    // important case to optimize. Note that other optimizations are also
    // possible; division by negative powers of two can be optimized in a
    // similar manner as positive powers of two, and division by other
    // constants can be optimized by a reciprocal multiplication technique.
    int32_t shift = FloorLog2(rhs);
    if (rhs > 0 && 1 << shift == rhs) {
      LDivPowTwoI* lir =
          new (alloc()) LDivPowTwoI(useRegisterAtStart(div->lhs()), shift);
      if (div->fallible()) {
        assignSnapshot(lir, Bailout_DoubleOutput);
      }
      define(lir, div);
      return;
    }
  }
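
  // For example, with rhs == 8 the lowering above reduces the division to an
  // arithmetic shift right by 3; for a fallible division, the snapshot lets
  // the code generator bail out when the result would be inexact. A sketch of
  // the idea (not the exact emitted code):
  //   if ((lhs & 7) != 0) { bailout(); }
  //   result = lhs >> 3;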
  if (HasIDIV()) {
    LDivI* lir = new (alloc())
        LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
    if (div->fallible()) {
      assignSnapshot(lir, Bailout_DoubleOutput);
    }
    define(lir, div);
    return;
  }

  LSoftDivI* lir = new (alloc()) LSoftDivI(useFixedAtStart(div->lhs(), r0),
                                           useFixedAtStart(div->rhs(), r1));

  if (div->fallible()) {
    assignSnapshot(lir, Bailout_DoubleOutput);
  }

  defineReturn(lir, div);
}

void LIRGeneratorARM::lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs) {
  LMulI* lir = new (alloc()) LMulI;
  if (mul->fallible()) {
    assignSnapshot(lir, Bailout_DoubleOutput);
  }
  lowerForALU(lir, mul, lhs, rhs);
}

void LIRGeneratorARM::lowerModI(MMod* mod) {
  if (mod->isUnsigned()) {
    lowerUMod(mod);
    return;
  }

  if (mod->rhs()->isConstant()) {
    int32_t rhs = mod->rhs()->toConstant()->toInt32();
    int32_t shift = FloorLog2(rhs);
    if (rhs > 0 && 1 << shift == rhs) {
      LModPowTwoI* lir =
          new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
      if (mod->fallible()) {
        assignSnapshot(lir, Bailout_DoubleOutput);
      }
      define(lir, mod);
      return;
    }
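
    // Divisors of the form (1 << k) - 1 (e.g. 3, 7, 15) can also avoid the
    // divide: since (1 << k) leaves a remainder of 1 when divided by rhs, the
    // dividend can be reduced by summing its base-(1 << k) digits. That is
    // the trick LModMaskI builds on, and why it takes two integer temps.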
    if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
      MOZ_ASSERT(rhs);
      LModMaskI* lir = new (alloc())
          LModMaskI(useRegister(mod->lhs()), temp(), temp(), shift + 1);
      if (mod->fallible()) {
        assignSnapshot(lir, Bailout_DoubleOutput);
      }
      define(lir, mod);
      return;
    }
  }

  if (HasIDIV()) {
    LModI* lir = new (alloc())
        LModI(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
    if (mod->fallible()) {
      assignSnapshot(lir, Bailout_DoubleOutput);
    }
    define(lir, mod);
    return;
  }

  LSoftModI* lir = new (alloc()) LSoftModI(
      useFixedAtStart(mod->lhs(), r0), useFixedAtStart(mod->rhs(), r1), temp());

  if (mod->fallible()) {
    assignSnapshot(lir, Bailout_DoubleOutput);
  }

  defineReturn(lir, mod);
}

void LIRGeneratorARM::lowerDivI64(MDiv* div) {
  if (div->isUnsigned()) {
    lowerUDivI64(div);
    return;
  }

  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
      useInt64RegisterAtStart(div->lhs()), useInt64RegisterAtStart(div->rhs()));
  defineReturn(lir, div);
}

void LIRGeneratorARM::lowerModI64(MMod* mod) {
  if (mod->isUnsigned()) {
    lowerUModI64(mod);
    return;
  }

  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
      useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs()));
  defineReturn(lir, mod);
}

void LIRGeneratorARM::lowerUDivI64(MDiv* div) {
  LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
      useInt64RegisterAtStart(div->lhs()), useInt64RegisterAtStart(div->rhs()));
  defineReturn(lir, div);
}

void LIRGeneratorARM::lowerUModI64(MMod* mod) {
  LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
      useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs()));
  defineReturn(lir, mod);
}

void LIRGenerator::visitPowHalf(MPowHalf* ins) {
  MDefinition* input = ins->input();
  MOZ_ASSERT(input->type() == MIRType::Double);
  LPowHalfD* lir = new (alloc()) LPowHalfD(useRegisterAtStart(input));
  defineReuseInput(lir, ins, 0);
}

LTableSwitch* LIRGeneratorARM::newLTableSwitch(const LAllocation& in,
                                               const LDefinition& inputCopy,
                                               MTableSwitch* tableswitch) {
  return new (alloc()) LTableSwitch(in, inputCopy, tableswitch);
}

LTableSwitchV* LIRGeneratorARM::newLTableSwitchV(MTableSwitch* tableswitch) {
  return new (alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)), temp(),
                                     tempDouble(), tableswitch);
}

void LIRGeneratorARM::lowerUrshD(MUrsh* mir) {
  MDefinition* lhs = mir->lhs();
  MDefinition* rhs = mir->rhs();

  MOZ_ASSERT(lhs->type() == MIRType::Int32);
  MOZ_ASSERT(rhs->type() == MIRType::Int32);

  LUrshD* lir = new (alloc())
      LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
  define(lir, mir);
}

void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
  if (ins->type() == MIRType::Int32) {
    define(new (alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
  } else if (ins->type() == MIRType::Float32) {
    define(new (alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
  } else {
    MOZ_ASSERT(ins->type() == MIRType::Double);
    define(new (alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
  }
}

void LIRGeneratorARM::lowerUDiv(MDiv* div) {
  MDefinition* lhs = div->getOperand(0);
  MDefinition* rhs = div->getOperand(1);

  if (HasIDIV()) {
    LUDiv* lir = new (alloc()) LUDiv;
    lir->setOperand(0, useRegister(lhs));
    lir->setOperand(1, useRegister(rhs));
    if (div->fallible()) {
      assignSnapshot(lir, Bailout_DoubleOutput);
    }
    define(lir, div);
    return;
  }

  LSoftUDivOrMod* lir = new (alloc())
      LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1));

  if (div->fallible()) {
    assignSnapshot(lir, Bailout_DoubleOutput);
  }

  defineReturn(lir, div);
}

void LIRGeneratorARM::lowerUMod(MMod* mod) {
  MDefinition* lhs = mod->getOperand(0);
  MDefinition* rhs = mod->getOperand(1);

  if (HasIDIV()) {
    LUMod* lir = new (alloc()) LUMod;
    lir->setOperand(0, useRegister(lhs));
    lir->setOperand(1, useRegister(rhs));
    if (mod->fallible()) {
      assignSnapshot(lir, Bailout_DoubleOutput);
    }
    define(lir, mod);
    return;
  }

  LSoftUDivOrMod* lir = new (alloc())
      LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1));

  if (mod->fallible()) {
    assignSnapshot(lir, Bailout_DoubleOutput);
  }

  defineReturn(lir, mod);
}

void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
  LWasmUint32ToDouble* lir =
      new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
  define(lir, ins);
}

void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
  LWasmUint32ToFloat32* lir =
      new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
  define(lir, ins);
}

void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
  auto* lir = new (alloc()) LWasmHeapBase(LAllocation());
  define(lir, ins);
}

void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
    auto* lir = new (alloc()) LWasmAtomicLoadI64(useRegisterAtStart(base));
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(IntArgReg1)),
                                      LAllocation(AnyRegister(IntArgReg0))));
    return;
  }

  LAllocation ptr = useRegisterAtStart(base);

  if (IsUnaligned(ins->access())) {
    MOZ_ASSERT(!ins->access().isAtomic());

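    // ARM gives no dependable hardware support for unaligned accesses: VFP
    // loads and stores require alignment outright, and whether integer
    // LDR/STR tolerate unaligned addresses depends on CPU configuration and
    // memory type. The access is therefore synthesized from byte transfers,
    // which is what the extra integer temps below are for.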
    // Unaligned access expected! Revert to a byte load.
    LDefinition ptrCopy = tempCopy(base, 0);

    LDefinition noTemp = LDefinition::BogusTemp();
    if (ins->type() == MIRType::Int64) {
      auto* lir = new (alloc())
          LWasmUnalignedLoadI64(ptr, ptrCopy, temp(), noTemp, noTemp);
      defineInt64(lir, ins);
      return;
    }

    LDefinition temp2 = noTemp;
    LDefinition temp3 = noTemp;
    if (IsFloatingPointType(ins->type())) {
      // For putting the low value in a GPR.
      temp2 = temp();
      // For putting the high value in a GPR.
      if (ins->type() == MIRType::Double) {
        temp3 = temp();
      }
    }

    auto* lir =
        new (alloc()) LWasmUnalignedLoad(ptr, ptrCopy, temp(), temp2, temp3);
    define(lir, ins);
    return;
  }

  if (ins->type() == MIRType::Int64) {
    auto* lir = new (alloc()) LWasmLoadI64(ptr);
    if (ins->access().offset() || ins->access().type() == Scalar::Int64) {
      lir->setTemp(0, tempCopy(base, 0));
    }
    defineInt64(lir, ins);
    return;
  }

  auto* lir = new (alloc()) LWasmLoad(ptr);
  if (ins->access().offset()) {
    lir->setTemp(0, tempCopy(base, 0));
  }

  define(lir, ins);
}

void LIRGenerator::visitWasmStore(MWasmStore* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
    auto* lir = new (alloc()) LWasmAtomicStoreI64(
        useRegister(base),
        useInt64Fixed(ins->value(), Register64(IntArgReg1, IntArgReg0)),
        tempFixed(IntArgReg2), tempFixed(IntArgReg3));
    add(lir, ins);
    return;
  }

  LAllocation ptr = useRegisterAtStart(base);

  if (IsUnaligned(ins->access())) {
    MOZ_ASSERT(!ins->access().isAtomic());

    // Unaligned access expected! Revert to a byte store.
    LDefinition ptrCopy = tempCopy(base, 0);

    MIRType valueType = ins->value()->type();
    if (valueType == MIRType::Int64) {
      LInt64Allocation value = useInt64RegisterAtStart(ins->value());
      auto* lir =
          new (alloc()) LWasmUnalignedStoreI64(ptr, value, ptrCopy, temp());
      add(lir, ins);
      return;
    }

    LAllocation value = useRegisterAtStart(ins->value());
    LDefinition valueHelper = IsFloatingPointType(valueType)
                                  ? temp()  // to do a FPU -> GPR move.
                                  : tempCopy(base, 1);  // to clobber the value.

    auto* lir =
        new (alloc()) LWasmUnalignedStore(ptr, value, ptrCopy, valueHelper);
    add(lir, ins);
    return;
  }

  if (ins->value()->type() == MIRType::Int64) {
    LInt64Allocation value = useInt64RegisterAtStart(ins->value());
    auto* lir = new (alloc()) LWasmStoreI64(ptr, value);
    if (ins->access().offset() || ins->access().type() == Scalar::Int64) {
      lir->setTemp(0, tempCopy(base, 0));
    }
    add(lir, ins);
    return;
  }

  LAllocation value = useRegisterAtStart(ins->value());
  auto* lir = new (alloc()) LWasmStore(ptr, value);

  if (ins->access().offset()) {
    lir->setTemp(0, tempCopy(base, 0));
  }

  add(lir, ins);
}

void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
  MOZ_ASSERT(ins->offset() == 0);

  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  // On ARM it is best to keep the 'base' in a register if a bounds check
  // is needed.
  LAllocation baseAlloc;
  LAllocation limitAlloc;

  if (base->isConstant() && !ins->needsBoundsCheck()) {
    // A bounds check is only skipped for a non-negative index.
    MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
    baseAlloc = LAllocation(base->toConstant());
  } else {
    baseAlloc = useRegisterAtStart(base);
    if (ins->needsBoundsCheck()) {
      MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
      MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
      limitAlloc = useRegisterAtStart(boundsCheckLimit);
    }
  }

  define(new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc), ins);
}

void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
  MOZ_ASSERT(ins->offset() == 0);

  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  LAllocation baseAlloc;
  LAllocation limitAlloc;

  if (base->isConstant() && !ins->needsBoundsCheck()) {
    MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
    baseAlloc = LAllocation(base->toConstant());
  } else {
    baseAlloc = useRegisterAtStart(base);
    if (ins->needsBoundsCheck()) {
      MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
      MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
      limitAlloc = useRegisterAtStart(boundsCheckLimit);
    }
  }

  add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
                                    limitAlloc),
      ins);
}

void LIRGeneratorARM::lowerTruncateDToInt32(MTruncateToInt32* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Double);

  define(new (alloc())
             LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()),
         ins);
}

void LIRGeneratorARM::lowerTruncateFToInt32(MTruncateToInt32* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Float32);

  define(new (alloc())
             LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()),
         ins);
}

void LIRGenerator::visitAtomicExchangeTypedArrayElement(
    MAtomicExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(HasLDSTREXBHD());
  MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);

  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::Int32);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index = useRegisterOrConstant(ins->index());

  // If the target is a floating register then we need a temp at the
  // CodeGenerator level for creating the result.

  const LAllocation value = useRegister(ins->value());
  LDefinition tempDef = LDefinition::BogusTemp();
  if (ins->arrayType() == Scalar::Uint32) {
    MOZ_ASSERT(ins->type() == MIRType::Double);
    tempDef = temp();
  }

  LAtomicExchangeTypedArrayElement* lir = new (alloc())
      LAtomicExchangeTypedArrayElement(elements, index, value, tempDef);

  define(lir, ins);
}

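// ARM atomic read-modify-write operations are emitted as retry loops built on
// LDREX/STREX (load-exclusive/store-exclusive); the 'flagTemp' requested
// below receives the store-exclusive status result that decides whether the
// loop must retry.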
void LIRGenerator::visitAtomicTypedArrayElementBinop(
    MAtomicTypedArrayElementBinop* ins) {
  MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
  MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
  MOZ_ASSERT(ins->arrayType() != Scalar::Float64);

  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::Int32);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index = useRegisterOrConstant(ins->index());
  const LAllocation value = useRegister(ins->value());

  if (!ins->hasUses()) {
    LAtomicTypedArrayElementBinopForEffect* lir = new (alloc())
        LAtomicTypedArrayElementBinopForEffect(elements, index, value,
                                               /* flagTemp= */ temp());
    add(lir, ins);
    return;
  }

  // For a Uint32Array with a known double result we need a temp for
  // the intermediate output.
  //
  // Optimization opportunity (bug 1077317): We can do better by
  // allowing 'value' to remain as an imm32 if it is small enough to
  // fit in an instruction.

  LDefinition flagTemp = temp();
  LDefinition outTemp = LDefinition::BogusTemp();

  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
    outTemp = temp();
  }

  // On ARM, map flagTemp to temp1 and outTemp to temp2, at least for now.

  LAtomicTypedArrayElementBinop* lir = new (alloc())
      LAtomicTypedArrayElementBinop(elements, index, value, flagTemp, outTemp);
  define(lir, ins);
}

void LIRGenerator::visitCompareExchangeTypedArrayElement(
    MCompareExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
  MOZ_ASSERT(ins->arrayType() != Scalar::Float64);

  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::Int32);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index = useRegisterOrConstant(ins->index());

  // If the target is a floating register then we need a temp at the
  // CodeGenerator level for creating the result.
  //
  // Optimization opportunity (bug 1077317): We could do better by
  // allowing oldval to remain an immediate, if it is small enough
  // to fit in an instruction.

  const LAllocation newval = useRegister(ins->newval());
  const LAllocation oldval = useRegister(ins->oldval());
  LDefinition tempDef = LDefinition::BogusTemp();
  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
    tempDef = temp();
  }

  LCompareExchangeTypedArrayElement* lir =
      new (alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval,
                                                      newval, tempDef);

  define(lir, ins);
}

void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  if (ins->access().type() == Scalar::Int64) {
    // The three register pairs must be distinct. (LDREXD/STREXD also
    // constrain each 64-bit operand to an even/odd register pair, which the
    // fixed allocations satisfy.)
    auto* lir = new (alloc()) LWasmCompareExchangeI64(
        useRegister(base), useInt64Fixed(ins->oldValue(), CmpXchgOld64),
        useInt64Fixed(ins->newValue(), CmpXchgNew64));
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(CmpXchgOutHi)),
                                      LAllocation(AnyRegister(CmpXchgOutLo))));
    return;
  }

  MOZ_ASSERT(ins->access().type() < Scalar::Float32);
  MOZ_ASSERT(HasLDSTREXBHD(), "by HasPlatformSupport() constraints");

  LWasmCompareExchangeHeap* lir = new (alloc())
      LWasmCompareExchangeHeap(useRegister(base), useRegister(ins->oldValue()),
                               useRegister(ins->newValue()));

  define(lir, ins);
}

void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
  MOZ_ASSERT(ins->base()->type() == MIRType::Int32);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc()) LWasmAtomicExchangeI64(
        useRegister(ins->base()), useInt64Fixed(ins->value(), XchgNew64),
        ins->access());
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(XchgOutHi)),
                                      LAllocation(AnyRegister(XchgOutLo))));
    return;
  }

  MOZ_ASSERT(ins->access().type() < Scalar::Float32);
  MOZ_ASSERT(HasLDSTREXBHD(), "by HasPlatformSupport() constraints");

  const LAllocation base = useRegister(ins->base());
  const LAllocation value = useRegister(ins->value());
  define(new (alloc()) LWasmAtomicExchangeHeap(base, value), ins);
}

void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc()) LWasmAtomicBinopI64(
        useRegister(ins->base()), useInt64Fixed(ins->value(), FetchOpVal64),
        tempFixed(FetchOpTmpLo), tempFixed(FetchOpTmpHi), ins->access(),
        ins->operation());
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(FetchOpOutHi)),
                                      LAllocation(AnyRegister(FetchOpOutLo))));
    return;
  }

  MOZ_ASSERT(ins->access().type() < Scalar::Float32);
  MOZ_ASSERT(HasLDSTREXBHD(), "by HasPlatformSupport() constraints");

  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  if (!ins->hasUses()) {
    LWasmAtomicBinopHeapForEffect* lir =
        new (alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base),
                                                    useRegister(ins->value()),
                                                    /* flagTemp= */ temp());
    add(lir, ins);
    return;
  }

  LWasmAtomicBinopHeap* lir = new (alloc())
      LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
                           /* temp = */ LDefinition::BogusTemp(),
                           /* flagTemp= */ temp());
  define(lir, ins);
}

void LIRGenerator::visitSubstr(MSubstr* ins) {
  LSubstr* lir = new (alloc())
      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
              useRegister(ins->length()), temp(), temp(), tempByteOpRegister());
  define(lir, ins);
  assignSafepoint(lir, ins);
}

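// Math.random() is lowered inline rather than as a call. SpiderMonkey's RNG
// is XorShift128+, whose two 64-bit state words explain the large number of
// integer temps needed on this 32-bit target; the double result comes back
// in ReturnDoubleReg.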
void LIRGenerator::visitRandom(MRandom* ins) {
  LRandom* lir = new (alloc()) LRandom(temp(), temp(), temp(), temp(), temp());
  defineFixed(lir, ins, LFloatReg(ReturnDoubleReg));
}

void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);

  defineReturn(new (alloc()) LWasmTruncateToInt64(useRegisterAtStart(opd)),
               ins);
}

void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
  MOZ_ASSERT(ins->type() == MIRType::Double || ins->type() == MIRType::Float32);

  auto* lir = new (alloc()) LInt64ToFloatingPointCall();
  lir->setInt64Operand(0, useInt64RegisterAtStart(ins->input()));
  defineReturn(lir, ins);
}

void LIRGenerator::visitCopySign(MCopySign* ins) {
  MDefinition* lhs = ins->lhs();
  MDefinition* rhs = ins->rhs();

  MOZ_ASSERT(IsFloatingPointType(lhs->type()));
  MOZ_ASSERT(lhs->type() == rhs->type());
  MOZ_ASSERT(lhs->type() == ins->type());

  LInstructionHelper<1, 2, 2>* lir;
  if (lhs->type() == MIRType::Double) {
    lir = new (alloc()) LCopySignD();
  } else {
    lir = new (alloc()) LCopySignF();
  }

  lir->setTemp(0, temp());
  lir->setTemp(1, temp());

  lowerForFPU(lir, ins, lhs, rhs);
}

void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
  auto* lir =
      new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input()));
  defineInt64(lir, ins);

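  // Redefine the low half of the result so that it reuses the input
  // register; only the high word needs a fresh register, which the code
  // generator fills with the sign (or zero) extension of the input.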
  LDefinition def(LDefinition::GENERAL, LDefinition::MUST_REUSE_INPUT);
  def.setReusedInput(0);
  def.setVirtualRegister(ins->virtualRegister());

  lir->setDef(0, def);
}

void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
  defineInt64(new (alloc())
                  LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
              ins);
}