/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm64/CodeGenerator-arm64.h"

#include "mozilla/MathAlgorithms.h"

#include "jsnum.h"

#include "jit/CodeGenerator.h"
#include "jit/JitFrames.h"
#include "jit/JitRealm.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "vm/JSContext.h"
#include "vm/Realm.h"
#include "vm/Shape.h"
#include "vm/TraceLogging.h"

#include "jit/shared/CodeGenerator-shared-inl.h"
#include "vm/JSScript-inl.h"

using namespace js;
using namespace js::jit;

using JS::GenericNaN;
using mozilla::FloorLog2;
using mozilla::NegativeInfinity;

// shared
CodeGeneratorARM64::CodeGeneratorARM64(MIRGenerator* gen, LIRGraph* graph,
                                       MacroAssembler* masm)
    : CodeGeneratorShared(gen, graph, masm) {}

bool CodeGeneratorARM64::generateOutOfLineCode() {
  if (!CodeGeneratorShared::generateOutOfLineCode()) {
    return false;
  }

  if (deoptLabel_.used()) {
    // All non-table-based bailouts will go here.
    masm.bind(&deoptLabel_);

    // Store the frame size, so the handler can recover the IonScript.
    masm.push(Imm32(frameSize()));

    TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
    masm.jump(handler);
  }

  return !masm.oom();
}

void CodeGeneratorARM64::emitBranch(Assembler::Condition cond,
                                    MBasicBlock* mirTrue,
                                    MBasicBlock* mirFalse) {
  if (isNextBlock(mirFalse->lir())) {
    jumpToBlock(mirTrue, cond);
  } else {
    jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
    jumpToBlock(mirTrue);
  }
}

void OutOfLineBailout::accept(CodeGeneratorARM64* codegen) {
  codegen->visitOutOfLineBailout(this);
}

void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
  Register input = ToRegister(test->input());
  MBasicBlock* mirTrue = test->ifTrue();
  MBasicBlock* mirFalse = test->ifFalse();

  masm.test32(input, input);

  // Jump to the True block if NonZero.
  // Jump to the False block if Zero.
  if (isNextBlock(mirFalse->lir())) {
    jumpToBlock(mirTrue, Assembler::NonZero);
  } else {
    jumpToBlock(mirFalse, Assembler::Zero);
    if (!isNextBlock(mirTrue->lir())) {
      jumpToBlock(mirTrue);
    }
  }
}

void CodeGenerator::visitCompare(LCompare* comp) {
  const MCompare* mir = comp->mir();
  const MCompare::CompareType type = mir->compareType();
  const Assembler::Condition cond = JSOpToCondition(type, comp->jsop());
  const Register leftreg = ToRegister(comp->getOperand(0));
  const LAllocation* right = comp->getOperand(1);
  const Register defreg = ToRegister(comp->getDef(0));

  if (type == MCompare::Compare_Object || type == MCompare::Compare_Symbol) {
    masm.cmpPtrSet(cond, leftreg, ToRegister(right), defreg);
    return;
  }

  if (right->isConstant()) {
    masm.cmp32Set(cond, leftreg, Imm32(ToInt32(right)), defreg);
  } else {
    masm.cmp32Set(cond, leftreg, ToRegister(right), defreg);
  }
}

void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
  const MCompare* mir = comp->cmpMir();
  const MCompare::CompareType type = mir->compareType();
  const LAllocation* left = comp->left();
  const LAllocation* right = comp->right();

  if (type == MCompare::Compare_Object || type == MCompare::Compare_Symbol) {
    masm.cmpPtr(ToRegister(left), ToRegister(right));
  } else if (right->isConstant()) {
    masm.cmp32(ToRegister(left), Imm32(ToInt32(right)));
  } else {
    masm.cmp32(ToRegister(left), ToRegister(right));
  }

  Assembler::Condition cond = JSOpToCondition(type, comp->jsop());
  emitBranch(cond, comp->ifTrue(), comp->ifFalse());
}

void CodeGeneratorARM64::bailoutIf(Assembler::Condition condition,
                                   LSnapshot* snapshot) {
  encode(snapshot);

  // Though the assembler doesn't track all frame pushes, at least make sure
  // the known value makes sense.
  MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None() && deoptTable_,
                frameClass_.frameSize() == masm.framePushed());

  // ARM64 doesn't use a bailout table.
  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  masm.B(ool->entry(), condition);
}

void CodeGeneratorARM64::bailoutFrom(Label* label, LSnapshot* snapshot) {
  MOZ_ASSERT_IF(!masm.oom(), label->used());
  MOZ_ASSERT_IF(!masm.oom(), !label->bound());

  encode(snapshot);

  // Though the assembler doesn't track all frame pushes, at least make sure
  // the known value makes sense.
  MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None() && deoptTable_,
                frameClass_.frameSize() == masm.framePushed());

  // ARM64 doesn't use a bailout table.
  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  masm.retarget(label, ool->entry());
}

void CodeGeneratorARM64::bailout(LSnapshot* snapshot) {
  Label label;
  masm.b(&label);
  bailoutFrom(&label, snapshot);
}

void CodeGeneratorARM64::visitOutOfLineBailout(OutOfLineBailout* ool) {
  masm.push(Imm32(ool->snapshot()->snapshotOffset()));
  masm.B(&deoptLabel_);
}

void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
  ARMFPRegister lhs(ToFloatRegister(ins->first()), 64);
  ARMFPRegister rhs(ToFloatRegister(ins->second()), 64);
  ARMFPRegister output(ToFloatRegister(ins->output()), 64);
  if (ins->mir()->isMax()) {
    masm.Fmax(output, lhs, rhs);
  } else {
    masm.Fmin(output, lhs, rhs);
  }
}

void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
  ARMFPRegister lhs(ToFloatRegister(ins->first()), 32);
  ARMFPRegister rhs(ToFloatRegister(ins->second()), 32);
  ARMFPRegister output(ToFloatRegister(ins->output()), 32);
  if (ins->mir()->isMax()) {
    masm.Fmax(output, lhs, rhs);
  } else {
    masm.Fmin(output, lhs, rhs);
  }
}
// FIXME: Uh, is this a static function? It looks like it is...
template <typename T>
ARMRegister toWRegister(const T* a) {
  return ARMRegister(ToRegister(a), 32);
}

// FIXME: Uh, is this a static function? It looks like it is...
template <typename T>
ARMRegister toXRegister(const T* a) {
  return ARMRegister(ToRegister(a), 64);
}

Operand toWOperand(const LAllocation* a) {
  if (a->isConstant()) {
    return Operand(ToInt32(a));
  }
  return Operand(toWRegister(a));
}

vixl::CPURegister ToCPURegister(const LAllocation* a, Scalar::Type type) {
  if (a->isFloatReg() && type == Scalar::Float64) {
    return ARMFPRegister(ToFloatRegister(a), 64);
  }
  if (a->isFloatReg() && type == Scalar::Float32) {
    return ARMFPRegister(ToFloatRegister(a), 32);
  }
  if (a->isGeneralReg()) {
    return ARMRegister(ToRegister(a), 32);
  }
  MOZ_CRASH("Unknown LAllocation");
}

vixl::CPURegister ToCPURegister(const LDefinition* d, Scalar::Type type) {
  return ToCPURegister(d->output(), type);
}

void CodeGenerator::visitAddI(LAddI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  // Platforms with three-operand arithmetic ops don't need recovery.
  MOZ_ASSERT(!ins->recoversInput());

  if (ins->snapshot()) {
    masm.Adds(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
    bailoutIf(Assembler::Overflow, ins->snapshot());
  } else {
    masm.Add(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
  }
}

void CodeGenerator::visitSubI(LSubI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  // Platforms with three-operand arithmetic ops don't need recovery.
  MOZ_ASSERT(!ins->recoversInput());

  if (ins->snapshot()) {
    masm.Subs(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
    bailoutIf(Assembler::Overflow, ins->snapshot());
  } else {
    masm.Sub(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
  }
}

void CodeGenerator::visitMulI(LMulI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);
  MMul* mul = ins->mir();
  MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
                !mul->canBeNegativeZero() && !mul->canOverflow());

  Register lhsreg = ToRegister(lhs);
  const ARMRegister lhsreg32 = ARMRegister(lhsreg, 32);
  Register destreg = ToRegister(dest);
  const ARMRegister destreg32 = ARMRegister(destreg, 32);

  if (rhs->isConstant()) {
    // Bailout on -0.0.
    int32_t constant = ToInt32(rhs);
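    // If the constant is 0, the result is -0 when lhs is negative (bail if
    // lhs < 0). If the constant is negative, the result is -0 when lhs is 0
    // (bail if lhs == 0). Hence the two conditions selected below.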
    if (mul->canBeNegativeZero() && constant <= 0) {
      Assembler::Condition bailoutCond =
          (constant == 0) ? Assembler::LessThan : Assembler::Equal;
      masm.Cmp(toWRegister(lhs), Operand(0));
      bailoutIf(bailoutCond, ins->snapshot());
    }

    switch (constant) {
      case -1:
        masm.Negs(destreg32, Operand(lhsreg32));
        break;  // Go to overflow check.
      case 0:
        masm.Mov(destreg32, wzr);
        return;  // Avoid overflow check.
      case 1:
        if (destreg != lhsreg) {
          masm.Mov(destreg32, lhsreg32);
        }
        return;  // Avoid overflow check.
      case 2:
        masm.Adds(destreg32, lhsreg32, Operand(lhsreg32));
        break;  // Go to overflow check.
      default:
        // Use a shift if the multiplication cannot overflow and the
        // constant is a positive power of 2.
        if (!mul->canOverflow() && constant > 0) {
          int32_t shift = FloorLog2(constant);
          if ((1 << shift) == constant) {
            masm.Lsl(destreg32, lhsreg32, shift);
            return;
          }
        }

        // Otherwise, just multiply. We have to check for overflow.
        // Negative zero was handled above.
        Label bailout;
        Label* onOverflow = mul->canOverflow() ? &bailout : nullptr;

        vixl::UseScratchRegisterScope temps(&masm.asVIXL());
        const Register scratch = temps.AcquireW().asUnsized();

        masm.move32(Imm32(constant), scratch);
        masm.mul32(lhsreg, scratch, destreg, onOverflow);

        if (onOverflow) {
          MOZ_ASSERT(lhsreg != destreg);
          bailoutFrom(&bailout, ins->snapshot());
        }
        return;
    }

    // Overflow check.
    if (mul->canOverflow()) {
      bailoutIf(Assembler::Overflow, ins->snapshot());
    }
  } else {
    Register rhsreg = ToRegister(rhs);
    const ARMRegister rhsreg32 = ARMRegister(rhsreg, 32);

    Label bailout;
    Label* onOverflow = mul->canOverflow() ? &bailout : nullptr;

    if (mul->canBeNegativeZero()) {
      // The product of two integer operands is negative zero iff one
      // operand is zero, and the other is negative. Therefore, the
      // sum of the two operands will also be negative (specifically,
      // it will be the non-zero operand). If the result of the
      // multiplication is 0, we can check the sign of the sum to
      // determine whether we should bail out.
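      //
      // For example (illustrative): lhs = -3, rhs = 0 gives a product of 0
      // and a sum of -3 < 0, so we bail; lhs = 3, rhs = 0 gives a sum of
      // 3 >= 0, so the +0 result is correct and we do not bail.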

      // This code can bailout, so lowering guarantees that the input
      // operands are not overwritten.
      MOZ_ASSERT(destreg != lhsreg);
      MOZ_ASSERT(destreg != rhsreg);

      // Do the multiplication.
      masm.mul32(lhsreg, rhsreg, destreg, onOverflow);

      // Set Zero flag if destreg is 0.
      masm.test32(destreg, destreg);

      // ccmn is 'conditional compare negative'.
      // If the Zero flag is set:
      //    perform a compare negative (compute lhs+rhs and set flags)
      // else:
      //    clear flags
      masm.Ccmn(lhsreg32, rhsreg32, vixl::NoFlag, Assembler::Zero);

      // Bails out if (lhs * rhs == 0) && (lhs + rhs < 0):
      bailoutIf(Assembler::LessThan, ins->snapshot());

    } else {
      masm.mul32(lhsreg, rhsreg, destreg, onOverflow);
    }
    if (onOverflow) {
      bailoutFrom(&bailout, ins->snapshot());
    }
  }
}

void CodeGenerator::visitDivI(LDivI* ins) {
  const Register lhs = ToRegister(ins->lhs());
  const Register rhs = ToRegister(ins->rhs());
  const Register output = ToRegister(ins->output());

  const ARMRegister lhs32 = toWRegister(ins->lhs());
  const ARMRegister rhs32 = toWRegister(ins->rhs());
  const ARMRegister temp32 = toWRegister(ins->getTemp(0));
  const ARMRegister output32 = toWRegister(ins->output());

  MDiv* mir = ins->mir();

  Label done;

  // Handle division by zero.
  if (mir->canBeDivideByZero()) {
    masm.test32(rhs, rhs);
    if (mir->trapOnError()) {
      Label nonZero;
      masm.j(Assembler::NonZero, &nonZero);
      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
      masm.bind(&nonZero);
    } else if (mir->canTruncateInfinities()) {
      // Truncated division by zero is zero: (Infinity|0 = 0).
      Label nonZero;
      masm.j(Assembler::NonZero, &nonZero);
      masm.Mov(output32, wzr);
      masm.jump(&done);
      masm.bind(&nonZero);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Zero, ins->snapshot());
    }
  }

  // Handle an integer overflow from (INT32_MIN / -1).
  // The integer division gives INT32_MIN, but should be -(double)INT32_MIN.
  if (mir->canBeNegativeOverflow()) {
    Label notOverflow;

    // Branch to handle the non-overflow cases.
    masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
    masm.branch32(Assembler::NotEqual, rhs, Imm32(-1), &notOverflow);

    // Handle overflow.
    if (mir->trapOnError()) {
      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
    } else if (mir->canTruncateOverflow()) {
      // (-INT32_MIN)|0 == INT32_MIN, which is already in lhs.
      masm.move32(lhs, output);
      masm.jump(&done);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailout(ins->snapshot());
    }
    masm.bind(&notOverflow);
  }

  // Handle negative zero: lhs == 0 && rhs < 0.
  if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
    Label nonZero;
    masm.branch32(Assembler::NotEqual, lhs, Imm32(0), &nonZero);
    masm.cmp32(rhs, Imm32(0));
    bailoutIf(Assembler::LessThan, ins->snapshot());
    masm.bind(&nonZero);
  }

  // Perform integer division.
  if (mir->canTruncateRemainder()) {
    masm.Sdiv(output32, lhs32, rhs32);
  } else {
    vixl::UseScratchRegisterScope temps(&masm.asVIXL());
    ARMRegister scratch32 = temps.AcquireW();

    // ARM does not provide an instruction that computes the remainder
    // directly; the instruction set reference suggests using a
    // multiplication to determine whether a remainder exists.
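    // For example, 7 / 2 yields scratch = 3; temp = 3 * 2 = 6 != 7, so we
    // bail because the true result 3.5 is not an integer.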
    masm.Sdiv(scratch32, lhs32, rhs32);
    masm.Mul(temp32, scratch32, rhs32);
    masm.Cmp(lhs32, temp32);
    bailoutIf(Assembler::NotEqual, ins->snapshot());
    masm.Mov(output32, scratch32);
  }

  masm.bind(&done);
}

void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
  const Register numerator = ToRegister(ins->numerator());
  const ARMRegister numerator32 = toWRegister(ins->numerator());
  const ARMRegister output32 = toWRegister(ins->output());

  int32_t shift = ins->shift();
  bool negativeDivisor = ins->negativeDivisor();
  MDiv* mir = ins->mir();

  if (!mir->isTruncated() && negativeDivisor) {
    // 0 divided by a negative number returns a -0 double.
    bailoutTest32(Assembler::Zero, numerator, numerator, ins->snapshot());
  }

  if (shift) {
    if (!mir->isTruncated()) {
      // If the remainder is != 0, bailout since this must be a double.
      bailoutTest32(Assembler::NonZero, numerator,
                    Imm32(UINT32_MAX >> (32 - shift)), ins->snapshot());
    }

    if (mir->isUnsigned()) {
      // shift right
      masm.Lsr(output32, numerator32, shift);
    } else {
      ARMRegister temp32 = numerator32;
      // Adjust the value so that shifting produces a correctly
      // rounded result when the numerator is negative. See 10-1
      // "Signed Division by a Known Power of 2" in Henry
      // S. Warren, Jr.'s Hacker's Delight.
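      //
      // For example, with shift = 1: -7 >> 1 is -4, but JS requires
      // (-7 / 2) | 0 == -3. Adding (2^shift - 1) = 1 first gives
      // -6 >> 1 == -3, as required.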
      if (mir->canBeNegativeDividend() && mir->isTruncated()) {
        if (shift > 1) {
          // Copy the sign bit of the numerator. (= (2^32 - 1) or 0)
          masm.Asr(output32, numerator32, 31);
          temp32 = output32;
        }
        // Divide by 2^(32 - shift)
        // i.e. (= (2^32 - 1) / 2^(32 - shift) or 0)
        // i.e. (= (2^shift - 1) or 0)
        masm.Lsr(output32, temp32, 32 - shift);
        // If the numerator is negative, this adds (2^shift - 1) to it so
        // that the arithmetic shift below rounds toward zero rather than
        // toward negative infinity.
        masm.Add(output32, output32, numerator32);
        temp32 = output32;
      }
      masm.Asr(output32, temp32, shift);

      if (negativeDivisor) {
        masm.Neg(output32, output32);
      }
    }
    return;
  }

  if (negativeDivisor) {
    // INT32_MIN / -1 overflows.
    if (!mir->isTruncated()) {
      masm.Negs(output32, numerator32);
      bailoutIf(Assembler::Overflow, ins->snapshot());
    } else if (mir->trapOnError()) {
      Label ok;
      masm.Negs(output32, numerator32);
      masm.branch(Assembler::NoOverflow, &ok);
      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
      masm.bind(&ok);
    } else {
      // Do not set condition flags.
      masm.Neg(output32, numerator32);
    }
  } else {
    if (mir->isUnsigned() && !mir->isTruncated()) {
      // Copy and set flags.
      masm.Adds(output32, numerator32, 0);
      // Unsigned division by 1 can overflow if output is not truncated, as we
      // do not have an Unsigned type for MIR instructions.
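      // For example, lhs = 0x80000000 is 2147483648 when read as unsigned,
      // which cannot be represented in the signed int32 result.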
      bailoutIf(Assembler::Signed, ins->snapshot());
    } else {
      // Copy the result.
      masm.Mov(output32, numerator32);
    }
  }
}

void CodeGenerator::visitDivConstantI(LDivConstantI* ins) {
  const ARMRegister lhs32 = toWRegister(ins->numerator());
  const ARMRegister lhs64 = toXRegister(ins->numerator());
  const ARMRegister const32 = toWRegister(ins->temp());
  const ARMRegister output32 = toWRegister(ins->output());
  const ARMRegister output64 = toXRegister(ins->output());
  int32_t d = ins->denominator();

  // The absolute value of the denominator isn't a power of 2.
  using mozilla::Abs;
  MOZ_ASSERT((Abs(d) & (Abs(d) - 1)) != 0);

  // We will first divide by Abs(d), and negate the answer if d is negative.
  // If desired, this can be avoided by generalizing computeDivisionConstants.
  ReciprocalMulConstants rmc =
      computeDivisionConstants(Abs(d), /* maxLog = */ 31);

  // We first compute (M * n) >> 32, where M = rmc.multiplier.
  masm.Mov(const32, int32_t(rmc.multiplier));
  if (rmc.multiplier > INT32_MAX) {
    MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 32));

    // We actually compute (int32_t(M) * n) instead, without the upper bit.
    // Thus, (M * n) = (int32_t(M) * n) + n << 32.
    //
    // ((int32_t(M) * n) + n << 32) can't overflow, as both operands have
    // opposite signs because int32_t(M) is negative.
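    //
    // (Here int32_t(M) == M - 2^32, so M * n == int32_t(M) * n + (n << 32).)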
    masm.Lsl(output64, lhs64, 32);

    // Store (M * n) in output64.
    masm.Smaddl(output64, const32, lhs32, output64);
  } else {
    // Store (M * n) in output64.
    masm.Smull(output64, const32, lhs32);
  }

  // (M * n) >> (32 + shift) is the truncated division answer if n is
  // non-negative, as proved in the comments of computeDivisionConstants. We
  // must add 1 later if n is negative to get the right answer in all cases.
  masm.Asr(output64, output64, 32 + rmc.shiftAmount);

  // We'll subtract -1 instead of adding 1, because (n < 0 ? -1 : 0) can be
  // computed with just a sign-extending shift of 31 bits.
  if (ins->canBeNegativeDividend()) {
    masm.Asr(const32, lhs32, 31);
    masm.Sub(output32, output32, const32);
  }

  // After this, output32 contains the correct truncated division result.
  if (d < 0) {
    masm.Neg(output32, output32);
  }

  if (!ins->mir()->isTruncated()) {
    // This is a division op. Multiply the obtained value by d to check if
    // the correct answer is an integer. This cannot overflow, since |d| > 1.
    masm.Mov(const32, d);
    masm.Msub(const32, output32, const32, lhs32);
    // bailout if (lhs - output * d != 0)
    masm.Cmp(const32, wzr);
    auto bailoutCond = Assembler::NonZero;

    // If lhs is zero and the divisor is negative, the answer should have
    // been -0.
    if (d < 0) {
      // or bailout if (lhs == 0).
      // ^                  ^
      // |                  '-- masm.Ccmp(lhs32, wzr, .., ..)
      // '-- masm.Ccmp(.., .., vixl::ZFlag, ! bailoutCond)
      masm.Ccmp(lhs32, wzr, vixl::ZFlag, Assembler::Zero);
      bailoutCond = Assembler::Zero;
    }

    // bailout if (lhs - output * d != 0) or (d < 0 && lhs == 0)
    bailoutIf(bailoutCond, ins->snapshot());
  }
}

void CodeGenerator::visitUDivConstantI(LUDivConstantI* ins) {
  const ARMRegister lhs32 = toWRegister(ins->numerator());
  const ARMRegister lhs64 = toXRegister(ins->numerator());
  const ARMRegister const32 = toWRegister(ins->temp());
  const ARMRegister output32 = toWRegister(ins->output());
  const ARMRegister output64 = toXRegister(ins->output());
  uint32_t d = ins->denominator();

  if (d == 0) {
    if (ins->mir()->isTruncated()) {
      if (ins->mir()->trapOnError()) {
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero,
                      ins->mir()->bytecodeOffset());
      } else {
        masm.Mov(output32, wzr);
      }
    } else {
      bailout(ins->snapshot());
    }
    return;
  }

  // The denominator isn't a power of 2 (see LDivPowTwoI).
  MOZ_ASSERT((d & (d - 1)) != 0);

  ReciprocalMulConstants rmc = computeDivisionConstants(d, /* maxLog = */ 32);

  // We first compute (M * n) >> 32, where M = rmc.multiplier.
  masm.Mov(const32, int32_t(rmc.multiplier));
  masm.Umull(output64, const32, lhs32);
  if (rmc.multiplier > UINT32_MAX) {
    // M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
    // ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d,
    // contradicting the proof of correctness in computeDivisionConstants.
    MOZ_ASSERT(rmc.shiftAmount > 0);
    MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 33));

    // We actually compute (uint32_t(M) * n) instead, without the upper bit.
    // Thus, (M * n) = (uint32_t(M) * n) + n << 32.
    //
    // ((uint32_t(M) * n) + n << 32) can overflow. Hacker's Delight explains a
    // trick to avoid this overflow case, but we can avoid it by computing the
    // addition on 64 bits registers.
    //
    // Compute (n + ((uint32_t(M) * n) >> 32)).
    masm.Add(output64, lhs64, Operand(output64, vixl::LSR, 32));

    // (M * n) >> (32 + shift) is the truncated division answer.
    masm.Asr(output64, output64, rmc.shiftAmount);
  } else {
    // (M * n) >> (32 + shift) is the truncated division answer.
    masm.Asr(output64, output64, 32 + rmc.shiftAmount);
  }

  // We now have the truncated division value. To check whether the division
  // resulted in an integer, we multiply the obtained value by d and check
  // the remainder.
  if (!ins->mir()->isTruncated()) {
    masm.Mov(const32, d);
    masm.Msub(const32, output32, const32, lhs32);
    // bailout if (lhs - output * d != 0)
    masm.Cmp(const32, wzr);
    bailoutIf(Assembler::NonZero, ins->snapshot());
  }
}

void CodeGeneratorARM64::modICommon(MMod* mir, Register lhs, Register rhs,
                                    Register output, LSnapshot* snapshot,
                                    Label& done) {
  MOZ_CRASH("CodeGeneratorARM64::modICommon");
}

void CodeGenerator::visitModI(LModI* ins) {
  if (gen->compilingWasm()) {
    MOZ_CRASH("visitModI while compilingWasm");
  }

  MMod* mir = ins->mir();
  ARMRegister lhs = toWRegister(ins->lhs());
  ARMRegister rhs = toWRegister(ins->rhs());
  ARMRegister output = toWRegister(ins->output());
  Label done;

  if (mir->canBeDivideByZero() && !mir->isTruncated()) {
    // Non-truncated division by zero produces a non-integer.
    masm.Cmp(rhs, Operand(0));
    bailoutIf(Assembler::Equal, ins->snapshot());
  } else if (mir->canBeDivideByZero()) {
    // Truncated division by zero yields integer zero.
    masm.Mov(output, rhs);
    masm.Cbz(rhs, &done);
  }

  // Signed division.
  masm.Sdiv(output, lhs, rhs);

  // Compute the remainder: output = lhs - (output * rhs).
  masm.Msub(output, output, rhs, lhs);

  if (mir->canBeNegativeDividend() && !mir->isTruncated()) {
    // If output == 0 and lhs < 0, then the result should be double -0.0.
    // Note that this guard handles lhs == INT_MIN and rhs == -1:
    //   output = INT_MIN - (INT_MIN / -1) * -1
    //          = INT_MIN - INT_MIN
    //          = 0
    masm.Cbnz(output, &done);
    bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
  }

  if (done.used()) {
    masm.bind(&done);
  }
}

void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
  Register lhs = ToRegister(ins->getOperand(0));
  ARMRegister lhsw = toWRegister(ins->getOperand(0));
  ARMRegister outw = toWRegister(ins->output());

  int32_t shift = ins->shift();
  bool canBeNegative =
      !ins->mir()->isUnsigned() && ins->mir()->canBeNegativeDividend();

  Label negative;
  if (canBeNegative) {
    // Switch based on sign of the lhs.
    // Positive numbers are just a bitmask.
    masm.branchTest32(Assembler::Signed, lhs, lhs, &negative);
  }

  masm.And(outw, lhsw, Operand((uint32_t(1) << shift) - 1));

  if (canBeNegative) {
    Label done;
    masm.jump(&done);

    // Negative numbers need a negate, bitmask, negate.
    masm.bind(&negative);
    masm.Neg(outw, Operand(lhsw));
    masm.And(outw, outw, Operand((uint32_t(1) << shift) - 1));

    // Since a % b has the same sign as a, and a is negative in this branch,
    // an answer of 0 means the correct result is actually -0. Bail out.
    if (!ins->mir()->isTruncated()) {
      masm.Negs(outw, Operand(outw));
      bailoutIf(Assembler::Zero, ins->snapshot());
    } else {
      masm.Neg(outw, Operand(outw));
    }

    masm.bind(&done);
  }
}

void CodeGenerator::visitModMaskI(LModMaskI* ins) {
  MMod* mir = ins->mir();
  int32_t shift = ins->shift();

  const Register src = ToRegister(ins->getOperand(0));
  const Register dest = ToRegister(ins->getDef(0));
  const Register hold = ToRegister(ins->getTemp(0));
  const Register remain = ToRegister(ins->getTemp(1));

  const ARMRegister src32 = ARMRegister(src, 32);
  const ARMRegister dest32 = ARMRegister(dest, 32);
  const ARMRegister remain32 = ARMRegister(remain, 32);

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  const ARMRegister scratch32 = temps.AcquireW();
  const Register scratch = scratch32.asUnsized();

  // We wish to compute x % ((1 << y) - 1) for a known constant, y.
  //
  // 1. Let b = (1 << y) and C = (1 << y) - 1; then think of the 32-bit
  // dividend as a number in base b, namely c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
  //
  // 2. Since both addition and multiplication commute with modulus:
  //   x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
  //    (c_0 % C) + (c_1 % C) * (b % C) + (c_2 % C) * (b^2 % C) ...
  //
  // 3. Since b == C + 1, b % C == 1, and b^n % C == 1, the whole thing
  // simplifies to: (c_0 + c_1 + c_2 + ... + c_n) % C
  //
  // Each c_n can easily be computed by a shift/bitextract, and the modulus
  // can be maintained by simply subtracting C whenever the number gets
  // over C.
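  //
  // For example, with y = 3 (b = 8, C = 7) and x = 77 == 1*64 + 1*8 + 5:
  // the digit sum is 1 + 1 + 5 == 7, and 7 % 7 == 0, matching 77 % 7 == 0.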
  int32_t mask = (1 << shift) - 1;
  Label loop;

  // Register 'hold' holds -1 if the value was negative, 1 otherwise.
  // The remain reg holds the remaining bits that have not been processed.
  // The scratch reg serves as a temporary location to store extracted bits.
  // The dest reg is the accumulator, becoming the final result.
  //
  // Move the whole value into the remain.
  masm.Mov(remain32, src32);
  // Zero out the dest.
  masm.Mov(dest32, wzr);
  // Set the hold appropriately.
  {
    Label negative;
    masm.branch32(Assembler::Signed, remain, Imm32(0), &negative);
    masm.move32(Imm32(1), hold);
    masm.jump(&loop);

    masm.bind(&negative);
    masm.move32(Imm32(-1), hold);
    masm.neg32(remain);
  }

  // Begin the main loop.
  masm.bind(&loop);
  {
    // Extract the bottom bits into scratch.
    masm.And(scratch32, remain32, Operand(mask));
    // Add those bits to the accumulator.
    masm.Add(dest32, dest32, scratch32);
    // Do a trial subtraction. This functions as a cmp but remembers the result.
    masm.Subs(scratch32, dest32, Operand(mask));
    // If (sum - C) >= 0, store sum - C back into sum, thus performing a
    // modulus.
    {
      Label sumSigned;
      masm.branch32(Assembler::Signed, scratch, scratch, &sumSigned);
      masm.Mov(dest32, scratch32);
      masm.bind(&sumSigned);
    }
    // Get rid of the bits that we extracted before.
    masm.Lsr(remain32, remain32, shift);
    // If the shift produced zero, finish; otherwise, continue in the loop.
    masm.branchTest32(Assembler::NonZero, remain, remain, &loop);
  }

  // Check the hold to see if we need to negate the result.
  {
    Label done;

    // If hold is negative, the dividend was negative; the result must be
    // negated to match JS expectations.
    masm.branchTest32(Assembler::NotSigned, hold, hold, &done);
    if (mir->canBeNegativeDividend() && !mir->isTruncated()) {
      // The dividend was negative: if the remainder is zero, the true
      // result is -0, which is not an int32. Bail out.
      bailoutTest32(Assembler::Zero, dest, dest, ins->snapshot());
    }

    masm.neg32(dest);
    masm.bind(&done);
  }
}

void CodeGenerator::visitBitNotI(LBitNotI* ins) {
  const LAllocation* input = ins->getOperand(0);
  const LDefinition* output = ins->getDef(0);
  masm.Mvn(toWRegister(output), toWOperand(input));
}

void CodeGenerator::visitBitOpI(LBitOpI* ins) {
  const ARMRegister lhs = toWRegister(ins->getOperand(0));
  const Operand rhs = toWOperand(ins->getOperand(1));
  const ARMRegister dest = toWRegister(ins->getDef(0));

  switch (ins->bitop()) {
    case JSOp::BitOr:
      masm.Orr(dest, lhs, rhs);
      break;
    case JSOp::BitXor:
      masm.Eor(dest, lhs, rhs);
      break;
    case JSOp::BitAnd:
      masm.And(dest, lhs, rhs);
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}

void CodeGenerator::visitShiftI(LShiftI* ins) {
  const ARMRegister lhs = toWRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  const ARMRegister dest = toWRegister(ins->output());

  if (rhs->isConstant()) {
    int32_t shift = ToInt32(rhs) & 0x1F;
    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.Lsl(dest, lhs, shift);
        break;
      case JSOp::Rsh:
        masm.Asr(dest, lhs, shift);
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.Lsr(dest, lhs, shift);
        } else if (ins->mir()->toUrsh()->fallible()) {
          // x >>> 0 can overflow.
          masm.Ands(dest, lhs, Operand(0xFFFFFFFF));
          bailoutIf(Assembler::Signed, ins->snapshot());
        } else {
          masm.Mov(dest, lhs);
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  } else {
    const ARMRegister rhsreg = toWRegister(rhs);
    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.Lsl(dest, lhs, rhsreg);
        break;
      case JSOp::Rsh:
        masm.Asr(dest, lhs, rhsreg);
        break;
      case JSOp::Ursh:
        masm.Lsr(dest, lhs, rhsreg);
        if (ins->mir()->toUrsh()->fallible()) {
          // x >>> 0 can overflow.
          masm.Cmp(dest, Operand(0));
          bailoutIf(Assembler::LessThan, ins->snapshot());
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}

void CodeGenerator::visitUrshD(LUrshD* ins) {
  const ARMRegister lhs = toWRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  const FloatRegister out = ToFloatRegister(ins->output());

  const Register temp = ToRegister(ins->temp());
  const ARMRegister temp32 = toWRegister(ins->temp());

  if (rhs->isConstant()) {
    int32_t shift = ToInt32(rhs) & 0x1F;
    if (shift) {
      masm.Lsr(temp32, lhs, shift);
      masm.convertUInt32ToDouble(temp, out);
    } else {
      masm.convertUInt32ToDouble(ToRegister(ins->lhs()), out);
    }
  } else {
    masm.And(temp32, toWRegister(rhs), Operand(0x1F));
    masm.Lsr(temp32, lhs, temp32);
    masm.convertUInt32ToDouble(temp, out);
  }
}

void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());

  ScratchDoubleScope scratch(masm);

  Label done, sqrt;

  if (!ins->mir()->operandIsNeverNegativeInfinity()) {
    // Branch if not -Infinity.
    masm.loadConstantDouble(NegativeInfinity<double>(), scratch);

    Assembler::DoubleCondition cond = Assembler::DoubleNotEqualOrUnordered;
    if (ins->mir()->operandIsNeverNaN()) {
      cond = Assembler::DoubleNotEqual;
    }
    masm.branchDouble(cond, input, scratch, &sqrt);

    // Math.pow(-Infinity, 0.5) == Infinity.
    masm.zeroDouble(output);
    masm.subDouble(scratch, output);
    masm.jump(&done);

    masm.bind(&sqrt);
  }

  if (!ins->mir()->operandIsNeverNegativeZero()) {
    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
    // Adding 0 converts any -0 to 0.
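    // (In IEEE-754 round-to-nearest mode, (-0) + (+0) == +0.)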
    masm.zeroDouble(scratch);
    masm.addDouble(input, scratch);
    masm.sqrtDouble(scratch, output);
  } else {
    masm.sqrtDouble(input, output);
  }

  masm.bind(&done);
}

MoveOperand CodeGeneratorARM64::toMoveOperand(const LAllocation a) const {
  if (a.isGeneralReg()) {
    return MoveOperand(ToRegister(a));
  }
  if (a.isFloatReg()) {
    return MoveOperand(ToFloatRegister(a));
  }
  MoveOperand::Kind kind =
      a.isStackArea() ? MoveOperand::EFFECTIVE_ADDRESS : MoveOperand::MEMORY;
  return MoveOperand(ToAddress(a), kind);
}

class js::jit::OutOfLineTableSwitch
    : public OutOfLineCodeBase<CodeGeneratorARM64> {
  MTableSwitch* mir_;
  CodeLabel jumpLabel_;

  void accept(CodeGeneratorARM64* codegen) override {
    codegen->visitOutOfLineTableSwitch(this);
  }

 public:
  explicit OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}

  MTableSwitch* mir() const { return mir_; }

  CodeLabel* jumpLabel() { return &jumpLabel_; }
};

void CodeGeneratorARM64::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool) {
  MTableSwitch* mir = ool->mir();

  // Prevent nop and pool sequences from appearing in the jump table.
  AutoForbidPoolsAndNops afp(
      &masm, (mir->numCases() + 1) * (sizeof(void*) / vixl::kInstructionSize));
  masm.haltingAlign(sizeof(void*));
  masm.bind(ool->jumpLabel());
  masm.addCodeLabel(*ool->jumpLabel());

  for (size_t i = 0; i < mir->numCases(); i++) {
    LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
    Label* caseheader = caseblock->label();
    uint32_t caseoffset = caseheader->offset();

    // The entries of the jump table need to be absolute addresses,
    // and thus must be patched after codegen is finished.
    CodeLabel cl;
    masm.writeCodePointer(&cl);
    cl.target()->bind(caseoffset);
    masm.addCodeLabel(cl);
  }
}

void CodeGeneratorARM64::emitTableSwitchDispatch(MTableSwitch* mir,
                                                 Register index,
                                                 Register base) {
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  // Let the lowest table entry be indexed at 0.
  if (mir->low() != 0) {
    masm.sub32(Imm32(mir->low()), index);
  }

  // Jump to the default case if input is out of range.
  int32_t cases = mir->numCases();
  masm.branch32(Assembler::AboveOrEqual, index, Imm32(cases), defaultcase);

  // Because the target code has not yet been generated, we cannot know the
  // instruction offsets for use as jump targets. Therefore we construct
  // an OutOfLineTableSwitch that winds up holding the jump table.
  //
  // Because the jump table is generated as part of out-of-line code,
  // it is generated after all the regular codegen, so the jump targets
  // are guaranteed to exist when generating the jump table.
  OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
  addOutOfLineCode(ool, mir);

  // Use the index to get the address of the jump target from the table.
  masm.mov(ool->jumpLabel(), base);
  BaseIndex pointer(base, index, ScalePointer);

  // Load the target from the jump table and branch to it.
  masm.branchToComputedAddress(pointer);
}

void CodeGenerator::visitMathD(LMathD* math) {
  ARMFPRegister lhs(ToFloatRegister(math->lhs()), 64);
  ARMFPRegister rhs(ToFloatRegister(math->rhs()), 64);
  ARMFPRegister output(ToFloatRegister(math->output()), 64);

  switch (math->jsop()) {
    case JSOp::Add:
      masm.Fadd(output, lhs, rhs);
      break;
    case JSOp::Sub:
      masm.Fsub(output, lhs, rhs);
      break;
    case JSOp::Mul:
      masm.Fmul(output, lhs, rhs);
      break;
    case JSOp::Div:
      masm.Fdiv(output, lhs, rhs);
      break;
    default:
      MOZ_CRASH("unexpected opcode");
  }
}

void CodeGenerator::visitMathF(LMathF* math) {
  ARMFPRegister lhs(ToFloatRegister(math->lhs()), 32);
  ARMFPRegister rhs(ToFloatRegister(math->rhs()), 32);
  ARMFPRegister output(ToFloatRegister(math->output()), 32);

  switch (math->jsop()) {
    case JSOp::Add:
      masm.Fadd(output, lhs, rhs);
      break;
    case JSOp::Sub:
      masm.Fsub(output, lhs, rhs);
      break;
    case JSOp::Mul:
      masm.Fmul(output, lhs, rhs);
      break;
    case JSOp::Div:
      masm.Fdiv(output, lhs, rhs);
      break;
    default:
      MOZ_CRASH("unexpected opcode");
  }
}

void CodeGenerator::visitTrunc(LTrunc* lir) {
  const FloatRegister input = ToFloatRegister(lir->input());
  const ARMFPRegister input64(input, 64);
  const Register output = ToRegister(lir->output());
  const ARMRegister output32(output, 32);
  const ARMRegister output64(output, 64);

  Label done, zeroCase;

  // Convert scalar to signed 32-bit fixed-point, rounding toward zero.
  // In the case of overflow, the output is saturated.
  // In the case of NaN and -0, the output is zero.
  masm.Fcvtzs(output32, input64);

  // If the output was zero, worry about special cases.
  masm.branch32(Assembler::Equal, output, Imm32(0), &zeroCase);

  // Bail on overflow cases.
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());

  // If the output was non-zero and wasn't saturated, just return it.
  masm.jump(&done);

  // Handle the case of a zero output:
  // 1. The input may have been NaN, requiring a bail.
  // 2. The input may have been in (-1,-0], requiring a bail.
  {
    masm.bind(&zeroCase);

    // If input is a negative number that truncated to zero, the real
    // output should be the non-integer -0.
    // The use of "lt" instead of "lo" also catches unordered NaN input.
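    // (An unordered FCMP sets NZCV to 0011, so N != V and "lt" holds for
    // NaN inputs as well.)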
    masm.Fcmp(input64, 0.0);
    bailoutIf(vixl::lt, lir->snapshot());

    // Check explicitly for -0, bitwise.
    masm.Fmov(output64, input64);
    bailoutTestPtr(Assembler::Signed, output, output, lir->snapshot());
    masm.movePtr(ImmPtr(0), output);
  }

  masm.bind(&done);
}

void CodeGenerator::visitTruncF(LTruncF* lir) {
  const FloatRegister input = ToFloatRegister(lir->input());
  const ARMFPRegister input32(input, 32);
  const Register output = ToRegister(lir->output());
  const ARMRegister output32(output, 32);

  Label done, zeroCase;

  // Convert scalar to signed 32-bit fixed-point, rounding toward zero.
  // In the case of overflow, the output is saturated.
  // In the case of NaN and -0, the output is zero.
  masm.Fcvtzs(output32, input32);

  // If the output was zero, worry about special cases.
  masm.branch32(Assembler::Equal, output, Imm32(0), &zeroCase);

  // Bail on overflow cases.
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());

  // If the output was non-zero and wasn't saturated, just return it.
  masm.jump(&done);

  // Handle the case of a zero output:
  // 1. The input may have been NaN, requiring a bail.
  // 2. The input may have been in (-1,-0], requiring a bail.
  {
    masm.bind(&zeroCase);

    // If input is a negative number that truncated to zero, the real
    // output should be the non-integer -0.
    // The use of "lt" instead of "lo" also catches unordered NaN input.
    masm.Fcmp(input32, 0.0f);
    bailoutIf(vixl::lt, lir->snapshot());

    // Check explicitly for -0, bitwise.
    masm.Fmov(output32, input32);
    bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
    masm.move32(Imm32(0), output);
  }

  masm.bind(&done);
}

void CodeGenerator::visitClzI(LClzI* lir) {
  ARMRegister input = toWRegister(lir->input());
  ARMRegister output = toWRegister(lir->output());
  masm.Clz(output, input);
}

void CodeGenerator::visitCtzI(LCtzI* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  masm.ctz32(input, output, /* knownNotZero = */ false);
}

void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
  emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
                     ins->mir());
}

void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
  emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
                      ins->mir());
}

FrameSizeClass FrameSizeClass::FromDepth(uint32_t frameDepth) {
  return FrameSizeClass::None();
}

FrameSizeClass FrameSizeClass::ClassLimit() { return FrameSizeClass(0); }

uint32_t FrameSizeClass::frameSize() const {
  MOZ_CRASH("arm64 does not use frame size classes");
}

ValueOperand CodeGeneratorARM64::ToValue(LInstruction* ins, size_t pos) {
  return ValueOperand(ToRegister(ins->getOperand(pos)));
}

ValueOperand CodeGeneratorARM64::ToTempValue(LInstruction* ins, size_t pos) {
  MOZ_CRASH("CodeGeneratorARM64::ToTempValue");
}

void CodeGenerator::visitValue(LValue* value) {
  ValueOperand result = ToOutValue(value);
  masm.moveValue(value->value(), result);
}

void CodeGenerator::visitBox(LBox* box) {
  const LAllocation* in = box->getOperand(0);
  ValueOperand result = ToOutValue(box);

  masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
}

void CodeGenerator::visitUnbox(LUnbox* unbox) {
  MUnbox* mir = unbox->mir();

  Register result = ToRegister(unbox->output());

  if (mir->fallible()) {
    const ValueOperand value = ToValue(unbox, LUnbox::Input);
    Label bail;
    switch (mir->type()) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(value, result, &bail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(value, result, &bail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(value, result, &bail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(value, result, &bail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(value, result, &bail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(value, result, &bail);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
    bailoutFrom(&bail, unbox->snapshot());
    return;
  }

  // Infallible unbox.

  ValueOperand input = ToValue(unbox, LUnbox::Input);

#ifdef DEBUG
  // Assert the types match.
  JSValueTag tag = MIRTypeToTag(mir->type());
  Label ok;
  ScratchTagScope scratch(masm, input);
  masm.splitTagForTest(input, scratch);
  masm.cmpTag(scratch, ImmTag(tag));
  masm.B(&ok, Assembler::Condition::Equal);
  masm.assumeUnreachable("Infallible unbox type mismatch");
  masm.bind(&ok);
#endif

  switch (mir->type()) {
    case MIRType::Int32:
      masm.unboxInt32(input, result);
      break;
    case MIRType::Boolean:
      masm.unboxBoolean(input, result);
      break;
    case MIRType::Object:
      masm.unboxObject(input, result);
      break;
    case MIRType::String:
      masm.unboxString(input, result);
      break;
    case MIRType::Symbol:
      masm.unboxSymbol(input, result);
      break;
    case MIRType::BigInt:
      masm.unboxBigInt(input, result);
      break;
    default:
      MOZ_CRASH("Given MIRType cannot be unboxed.");
  }
}

void CodeGenerator::visitDouble(LDouble* ins) {
  ARMFPRegister output(ToFloatRegister(ins->getDef(0)), 64);
  masm.Fmov(output, ins->getDouble());
}

void CodeGenerator::visitFloat32(LFloat32* ins) {
  ARMFPRegister output(ToFloatRegister(ins->getDef(0)), 32);
  masm.Fmov(output, ins->getFloat());
}

void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
  const LAllocation* opd = test->input();
  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.Fcmp(ARMFPRegister(ToFloatRegister(opd), 64), 0.0);

  // If the compare set the Z (zero) flag, then the result is definitely false.
  jumpToBlock(ifFalse, Assembler::Zero);

  // Overflow means one of the operands was NaN, which is also false.
  jumpToBlock(ifFalse, Assembler::Overflow);
  jumpToBlock(ifTrue);
}

void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
  const LAllocation* opd = test->input();
  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.Fcmp(ARMFPRegister(ToFloatRegister(opd), 32), 0.0);

  // If the compare set the Z (zero) flag, then the result is definitely false.
  jumpToBlock(ifFalse, Assembler::Zero);

  // Overflow means one of the operands was NaN, which is also false.
  jumpToBlock(ifFalse, Assembler::Overflow);
  jumpToBlock(ifTrue);
}

void CodeGenerator::visitCompareD(LCompareD* comp) {
  const FloatRegister left = ToFloatRegister(comp->left());
  const FloatRegister right = ToFloatRegister(comp->right());
  ARMRegister output = toWRegister(comp->output());
  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());

  masm.compareDouble(cond, left, right);
  masm.cset(output, Assembler::ConditionFromDoubleCondition(cond));
}

void CodeGenerator::visitCompareF(LCompareF* comp) {
  const FloatRegister left = ToFloatRegister(comp->left());
  const FloatRegister right = ToFloatRegister(comp->right());
  ARMRegister output = toWRegister(comp->output());
  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());

  masm.compareFloat(cond, left, right);
  masm.cset(output, Assembler::ConditionFromDoubleCondition(cond));
}

void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
  const FloatRegister left = ToFloatRegister(comp->left());
  const FloatRegister right = ToFloatRegister(comp->right());
  Assembler::DoubleCondition doubleCond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  Assembler::Condition cond =
      Assembler::ConditionFromDoubleCondition(doubleCond);

  masm.compareDouble(doubleCond, left, right);
  emitBranch(cond, comp->ifTrue(), comp->ifFalse());
}

void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
  const FloatRegister left = ToFloatRegister(comp->left());
  const FloatRegister right = ToFloatRegister(comp->right());
  Assembler::DoubleCondition doubleCond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  Assembler::Condition cond =
      Assembler::ConditionFromDoubleCondition(doubleCond);

  masm.compareFloat(doubleCond, left, right);
  emitBranch(cond, comp->ifTrue(), comp->ifFalse());
}
1446 
visitCompareB(LCompareB * lir)1447 void CodeGenerator::visitCompareB(LCompareB* lir) {
1448   MCompare* mir = lir->mir();
1449   const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
1450   const LAllocation* rhs = lir->rhs();
1451   const Register output = ToRegister(lir->output());
1452   const Assembler::Condition cond =
1453       JSOpToCondition(mir->compareType(), mir->jsop());
1454 
  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  const Register scratch = temps.AcquireX().asUnsized();

  MOZ_ASSERT(mir->jsop() == JSOp::StrictEq || mir->jsop() == JSOp::StrictNe);

  // Load boxed boolean into scratch.
  if (rhs->isConstant()) {
    masm.moveValue(rhs->toConstant()->toJSValue(), ValueOperand(scratch));
  } else {
    masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);
  }

  // Compare the entire Value.
  masm.cmpPtrSet(cond, lhs.valueReg(), scratch, output);
}

void CodeGenerator::visitCompareBAndBranch(LCompareBAndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
  const LAllocation* rhs = lir->rhs();
  const Assembler::Condition cond =
      JSOpToCondition(mir->compareType(), mir->jsop());

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  const Register scratch = temps.AcquireX().asUnsized();

  MOZ_ASSERT(mir->jsop() == JSOp::StrictEq || mir->jsop() == JSOp::StrictNe);

  // Load boxed boolean into scratch.
  if (rhs->isConstant()) {
    masm.moveValue(rhs->toConstant()->toJSValue(), ValueOperand(scratch));
  } else {
    masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);
  }

  // Compare the entire Value.
  masm.cmpPtr(lhs.valueReg(), scratch);
  emitBranch(cond, lir->ifTrue(), lir->ifFalse());
}

void CodeGenerator::visitCompareBitwise(LCompareBitwise* lir) {
  MCompare* mir = lir->mir();
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
  const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
  const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
  const Register output = ToRegister(lir->output());

  MOZ_ASSERT(IsEqualityOp(mir->jsop()));

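  // The two boxed Values are compared as raw 64-bit words; the MIR only emits
  // this op where a bitwise comparison gives the correct result.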
  masm.cmpPtrSet(cond, lhs.valueReg(), rhs.valueReg(), output);
}

void CodeGenerator::visitCompareBitwiseAndBranch(
    LCompareBitwiseAndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
  const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
  const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);

  MOZ_ASSERT(mir->jsop() == JSOp::Eq || mir->jsop() == JSOp::StrictEq ||
             mir->jsop() == JSOp::Ne || mir->jsop() == JSOp::StrictNe);

  masm.cmpPtr(lhs.valueReg(), rhs.valueReg());
  emitBranch(cond, lir->ifTrue(), lir->ifFalse());
}

void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
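  // Tst sets the NZ flags from (left & right) and discards the result, so no
  // scratch register is needed.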
  if (baab->right()->isConstant()) {
    masm.Tst(toWRegister(baab->left()), Operand(ToInt32(baab->right())));
  } else {
    masm.Tst(toWRegister(baab->left()), toWRegister(baab->right()));
  }
  emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
}

// See ../CodeGenerator.cpp for more information.
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {}

void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  masm.convertUInt32ToDouble(ToRegister(lir->input()),
                             ToFloatRegister(lir->output()));
}

void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
  masm.convertUInt32ToFloat32(ToRegister(lir->input()),
                              ToFloatRegister(lir->output()));
}

void CodeGenerator::visitNotI(LNotI* ins) {
  ARMRegister input = toWRegister(ins->input());
  ARMRegister output = toWRegister(ins->output());

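  // Logical not: set output to 1 if the input is zero, otherwise to 0.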
  masm.Cmp(input, ZeroRegister32);
  masm.Cset(output, Assembler::Zero);
}

//        NZCV
// NAN -> 0011
// ==  -> 0110
// <   -> 1000
// >   -> 0010
void CodeGenerator::visitNotD(LNotD* ins) {
  ARMFPRegister input(ToFloatRegister(ins->input()), 64);
  ARMRegister output = toWRegister(ins->output());

  // Set output to 1 if input compares equal to 0.0, else 0.
  masm.Fcmp(input, 0.0);
  masm.Cset(output, Assembler::Equal);

  // Comparison with NaN sets V in the NZCV register.
  // If the input was NaN, output must now be zero, so it can be incremented.
  // The instruction is read: "output = if NoOverflow then output else 0+1".
  masm.Csinc(output, output, ZeroRegister32, Assembler::NoOverflow);
}

void CodeGenerator::visitNotF(LNotF* ins) {
  ARMFPRegister input(ToFloatRegister(ins->input()), 32);
  ARMRegister output = toWRegister(ins->output());

  // Set output to 1 if input compares equal to 0.0, else 0.
  masm.Fcmp(input, 0.0);
  masm.Cset(output, Assembler::Equal);

  // Comparison with NaN sets V in the NZCV register.
  // If the input was NaN, output must now be zero, so it can be incremented.
  // The instruction is read: "output = if NoOverflow then output else 0+1".
  masm.Csinc(output, output, ZeroRegister32, Assembler::NoOverflow);
}

void CodeGeneratorARM64::storeElementTyped(const LAllocation* value,
                                           MIRType valueType,
                                           MIRType elementType,
                                           Register elements,
                                           const LAllocation* index) {
  MOZ_CRASH("CodeGeneratorARM64::storeElementTyped");
}

void CodeGeneratorARM64::generateInvalidateEpilogue() {
  // Ensure that there is enough space in the buffer for the OsiPoint patching
  // to occur. Otherwise, we could overwrite the invalidation epilogue.
  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
    masm.nop();
  }

  masm.bind(&invalidate_);

  // Push the return address of the point that we bailed out at onto the stack.
  masm.push(lr);

  // Push the Ion script onto the stack (when we determine what that pointer
  // is).
  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));

  // Jump to the invalidator which will replace the current frame.
  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
  masm.jump(thunk);
}

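// Helper intended for the asm.js/wasm heap-access visitors below: returns the
// register holding the base of the access (HeapReg for heap-based accesses),
// or InvalidReg for any other base.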
template <class U>
Register getBase(U* mir) {
  switch (mir->base()) {
    case U::Heap:
      return HeapReg;
  }
  return InvalidReg;
}

void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  MOZ_CRASH("visitAsmJSLoadHeap");
}

void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
  MOZ_CRASH("visitAsmJSStoreHeap");
}

void CodeGenerator::visitWasmCompareExchangeHeap(
    LWasmCompareExchangeHeap* ins) {
  MOZ_CRASH("visitWasmCompareExchangeHeap");
}

void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
  MOZ_CRASH("visitWasmAtomicBinopHeap");
}

void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
  MOZ_CRASH("visitWasmStackArg");
}

void CodeGenerator::visitUDiv(LUDiv* ins) {
  MDiv* mir = ins->mir();
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
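  // The operands are 32-bit integers, so the division is performed on the
  // W (32-bit) views of the registers.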
  ARMRegister lhs32 = ARMRegister(lhs, 32);
  ARMRegister rhs32 = ARMRegister(rhs, 32);
  ARMRegister output32 = ARMRegister(output, 32);

  // Prevent divide by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->isTruncated()) {
      if (mir->trapOnError()) {
        Label nonZero;
        masm.branchTest32(Assembler::NonZero, rhs, rhs, &nonZero);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
        masm.bind(&nonZero);
      } else {
        // The ARM64 UDIV instruction returns 0 when dividing by zero.
        // No need for extra tests.
      }
    } else {
      bailoutTest32(Assembler::Zero, rhs, rhs, ins->snapshot());
    }
  }

  // Unsigned division.
  masm.Udiv(output32, lhs32, rhs32);

  // If the remainder is > 0, bail out, since the result must be a double.
  if (!mir->canTruncateRemainder()) {
    Register remainder = ToRegister(ins->remainder());
    ARMRegister remainder32 = ARMRegister(remainder, 32);

    // Compute the remainder: remainder = lhs - (output * rhs).
    masm.Msub(remainder32, output32, rhs32, lhs32);

    bailoutTest32(Assembler::NonZero, remainder, remainder, ins->snapshot());
  }

  // Unsigned div can return a value that's not a signed int32.
  // If our users aren't expecting that, bail.
  if (!mir->isTruncated()) {
    bailoutTest32(Assembler::Signed, output, output, ins->snapshot());
  }
}

void CodeGenerator::visitUMod(LUMod* ins) {
  MMod* mir = ins->mir();
  ARMRegister lhs = toWRegister(ins->lhs());
  ARMRegister rhs = toWRegister(ins->rhs());
  ARMRegister output = toWRegister(ins->output());
  Label done;

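  // ARM64 has no integer remainder instruction, so the remainder is computed
  // below as lhs - (lhs / rhs) * rhs, using Udiv followed by Msub.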
  if (mir->canBeDivideByZero() && !mir->isTruncated()) {
    // Non-truncated division by zero produces a non-integer.
    masm.Cmp(rhs, Operand(0));
    bailoutIf(Assembler::Equal, ins->snapshot());
  } else if (mir->canBeDivideByZero()) {
    // Truncated division by zero yields integer zero.
    masm.Mov(output, rhs);
    masm.Cbz(rhs, &done);
  }

  // Unsigned division.
  masm.Udiv(output, lhs, rhs);

  // Compute the remainder: output = lhs - (output * rhs).
  masm.Msub(output, output, rhs, lhs);

  if (!mir->isTruncated()) {
    // Bail if the output would be negative.
    //
    // LUMod inputs may be Uint32, so care is taken to ensure the result
    // is not unexpectedly signed.
    bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
  }

  if (done.used()) {
    masm.bind(&done);
  }
}

void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
  const MEffectiveAddress* mir = ins->mir();
  const ARMRegister base = toWRegister(ins->base());
  const ARMRegister index = toWRegister(ins->index());
  const ARMRegister output = toWRegister(ins->output());

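  // Compute output = base + (index << scale) + displacement.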
  masm.Add(output, base, Operand(index, vixl::LSL, mir->scale()));
  masm.Add(output, output, Operand(mir->displacement()));
}

void CodeGenerator::visitNegI(LNegI* ins) {
  const ARMRegister input = toWRegister(ins->input());
  const ARMRegister output = toWRegister(ins->output());
  masm.Neg(output, input);
}

void CodeGenerator::visitNegD(LNegD* ins) {
  const ARMFPRegister input(ToFloatRegister(ins->input()), 64);
  const ARMFPRegister output(ToFloatRegister(ins->output()), 64);
  masm.Fneg(output, input);
}

void CodeGenerator::visitNegF(LNegF* ins) {
  const ARMFPRegister input(ToFloatRegister(ins->input()), 32);
  const ARMFPRegister output(ToFloatRegister(ins->output()), 32);
  masm.Fneg(output, input);
}

void CodeGenerator::visitCompareExchangeTypedArrayElement(
    LCompareExchangeTypedArrayElement* lir) {
  Register elements = ToRegister(lir->elements());
  AnyRegister output = ToAnyRegister(lir->output());
  Register temp =
      lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());

  Register oldval = ToRegister(lir->oldval());
  Register newval = ToRegister(lir->newval());

  Scalar::Type arrayType = lir->mir()->arrayType();
  size_t width = Scalar::byteSize(arrayType);

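  // A constant index is folded directly into the address; a register index is
  // scaled by the element width.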
  if (lir->index()->isConstant()) {
    Address dest(elements, ToInt32(lir->index()) * width);
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
                           newval, temp, output);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromElemWidth(width));
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
                           newval, temp, output);
  }
}

void CodeGenerator::visitAtomicExchangeTypedArrayElement(
    LAtomicExchangeTypedArrayElement* lir) {
  Register elements = ToRegister(lir->elements());
  AnyRegister output = ToAnyRegister(lir->output());
  Register temp =
      lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());

  Register value = ToRegister(lir->value());

  Scalar::Type arrayType = lir->mir()->arrayType();
  size_t width = Scalar::byteSize(arrayType);

  if (lir->index()->isConstant()) {
    Address dest(elements, ToInt32(lir->index()) * width);
    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp,
                          output);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromElemWidth(width));
    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp,
                          output);
  }
}

void CodeGenerator::visitAddI64(LAddI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitClzI64(LClzI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitCtzI64(LCtzI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitMulI64(LMulI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitNotI64(LNotI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitSubI64(LSubI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitPopcntI(LPopcntI*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitBitOpI64(LBitOpI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitShiftI64(LShiftI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitWasmLoad(LWasmLoad*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitCopySignD(LCopySignD*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitCopySignF(LCopySignF*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitPopcntI64(LPopcntI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitRotateI64(LRotateI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitWasmStore(LWasmStore*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitCompareI64(LCompareI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitWasmSelect(LWasmSelect*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitWasmLoadI64(LWasmLoadI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitWasmStoreI64(LWasmStoreI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
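  // Emit the fence required by this barrier type; on ARM64, memory fences
  // are DMB instructions.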
  masm.memoryBarrier(ins->type());
}

void CodeGenerator::visitWasmAddOffset(LWasmAddOffset*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitWasmSelectI64(LWasmSelectI64*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitSignExtendInt64(LSignExtendInt64*) {
  MOZ_CRASH("NYI");
}

void CodeGenerator::visitWasmReinterpret(LWasmReinterpret*) {
  MOZ_CRASH("NYI");
}

void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64*) {
  MOZ_CRASH("NYI");
}

void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch*) {
  MOZ_CRASH("NYI");
}

void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32*) {
  MOZ_CRASH("NYI");
}

void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64*) {
  MOZ_CRASH("NYI");
}

void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch*) {
  MOZ_CRASH("NYI");
}

void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32*) {
  MOZ_CRASH("NYI");
}

void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64*) {
  MOZ_CRASH("NYI");
}

void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap*) {
  MOZ_CRASH("NYI");
}

void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64*) {
  MOZ_CRASH("NYI");
}

void CodeGenerator::visitAtomicTypedArrayElementBinop(
    LAtomicTypedArrayElementBinop* lir) {
  MOZ_ASSERT(lir->mir()->hasUses());

  AnyRegister output = ToAnyRegister(lir->output());
  Register elements = ToRegister(lir->elements());
  Register flagTemp = ToRegister(lir->temp1());
  Register outTemp =
      lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
  Register value = ToRegister(lir->value());

  Scalar::Type arrayType = lir->mir()->arrayType();
  size_t width = Scalar::byteSize(arrayType);

  if (lir->index()->isConstant()) {
    Address mem(elements, ToInt32(lir->index()) * width);
    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
                         lir->mir()->operation(), value, mem, flagTemp, outTemp,
                         output);
  } else {
    BaseIndex mem(elements, ToRegister(lir->index()),
                  ScaleFromElemWidth(width));
    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
                         lir->mir()->operation(), value, mem, flagTemp, outTemp,
                         output);
  }
}

void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
    LWasmAtomicBinopHeapForEffect*) {
  MOZ_CRASH("NYI");
}

void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
    LAtomicTypedArrayElementBinopForEffect*) {
  MOZ_CRASH("NYI");
}