1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2  * vim: set ts=8 sts=4 et sw=4 tw=99:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #include "jit/arm/CodeGenerator-arm.h"
8 
9 #include "mozilla/MathAlgorithms.h"
10 
11 #include "jscntxt.h"
12 #include "jscompartment.h"
13 #include "jsnum.h"
14 
15 #include "jit/CodeGenerator.h"
16 #include "jit/JitCompartment.h"
17 #include "jit/JitFrames.h"
18 #include "jit/MIR.h"
19 #include "jit/MIRGraph.h"
20 #include "js/Conversions.h"
21 #include "vm/Shape.h"
22 #include "vm/TraceLogging.h"
23 
24 #include "jsscriptinlines.h"
25 
26 #include "jit/MacroAssembler-inl.h"
27 #include "jit/shared/CodeGenerator-shared-inl.h"
28 
29 using namespace js;
30 using namespace js::jit;
31 
32 using mozilla::FloorLog2;
33 using mozilla::NegativeInfinity;
34 using JS::GenericNaN;
35 using JS::ToInt32;
36 
37 // shared
CodeGeneratorARM::CodeGeneratorARM(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
39   : CodeGeneratorShared(gen, graph, masm)
40 {
41 }
42 
43 void
CodeGeneratorARM::emitBranch(Assembler::Condition cond, MBasicBlock* mirTrue, MBasicBlock* mirFalse)
45 {
46     if (isNextBlock(mirFalse->lir())) {
47         jumpToBlock(mirTrue, cond);
48     } else {
49         jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
50         jumpToBlock(mirTrue);
51     }
52 }
53 
54 void
OutOfLineBailout::accept(CodeGeneratorARM* codegen)
56 {
57     codegen->visitOutOfLineBailout(this);
58 }
59 
60 void
CodeGeneratorARM::visitTestIAndBranch(LTestIAndBranch* test)
62 {
63     const LAllocation* opd = test->getOperand(0);
64     MBasicBlock* ifTrue = test->ifTrue();
65     MBasicBlock* ifFalse = test->ifFalse();
66 
67     // Test the operand
68     masm.ma_cmp(ToRegister(opd), Imm32(0));
69 
70     if (isNextBlock(ifFalse->lir())) {
71         jumpToBlock(ifTrue, Assembler::NonZero);
72     } else if (isNextBlock(ifTrue->lir())) {
73         jumpToBlock(ifFalse, Assembler::Zero);
74     } else {
75         jumpToBlock(ifFalse, Assembler::Zero);
76         jumpToBlock(ifTrue);
77     }
78 }
79 
80 void
CodeGeneratorARM::visitCompare(LCompare* comp)
82 {
83     Assembler::Condition cond = JSOpToCondition(comp->mir()->compareType(), comp->jsop());
84     const LAllocation* left = comp->getOperand(0);
85     const LAllocation* right = comp->getOperand(1);
86     const LDefinition* def = comp->getDef(0);
87 
88     if (right->isConstant())
89         masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)));
90     else
91         masm.ma_cmp(ToRegister(left), ToOperand(right));
92     masm.ma_mov(Imm32(0), ToRegister(def));
93     masm.ma_mov(Imm32(1), ToRegister(def), LeaveCC, cond);
94 }
95 
96 void
CodeGeneratorARM::visitCompareAndBranch(LCompareAndBranch* comp)
98 {
99     Assembler::Condition cond = JSOpToCondition(comp->cmpMir()->compareType(), comp->jsop());
100     if (comp->right()->isConstant())
101         masm.ma_cmp(ToRegister(comp->left()), Imm32(ToInt32(comp->right())));
102     else
103         masm.ma_cmp(ToRegister(comp->left()), ToOperand(comp->right()));
104     emitBranch(cond, comp->ifTrue(), comp->ifFalse());
105 }
106 
107 bool
CodeGeneratorARM::generateOutOfLineCode()
109 {
110     if (!CodeGeneratorShared::generateOutOfLineCode())
111         return false;
112 
113     if (deoptLabel_.used()) {
114         // All non-table-based bailouts will go here.
115         masm.bind(&deoptLabel_);
116 
        // Load the frame size into lr so the handler can recover the IonScript.
118         masm.ma_mov(Imm32(frameSize()), lr);
119 
120         JitCode* handler = gen->jitRuntime()->getGenericBailoutHandler();
121         masm.branch(handler);
122     }
123 
124     return !masm.oom();
125 }
126 
127 void
CodeGeneratorARM::bailoutIf(Assembler::Condition condition, LSnapshot* snapshot)
129 {
130     encode(snapshot);
131 
132     // Though the assembler doesn't track all frame pushes, at least make sure
133     // the known value makes sense. We can't use bailout tables if the stack
134     // isn't properly aligned to the static frame size.
135     MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
136                   frameClass_.frameSize() == masm.framePushed());
137 
138     if (assignBailoutId(snapshot)) {
139         uint8_t* bailoutTable = Assembler::BailoutTableStart(deoptTable_->raw());
140         uint8_t* code = bailoutTable + snapshot->bailoutId() * BAILOUT_TABLE_ENTRY_SIZE;
141         masm.ma_b(code, condition);
142         return;
143     }
144 
145     // We could not use a jump table, either because all bailout IDs were
146     // reserved, or a jump table is not optimal for this frame size or
147     // platform. Whatever, we will generate a lazy bailout.
148     InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
149     OutOfLineBailout* ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
150 
151     // All bailout code is associated with the bytecodeSite of the block we are
152     // bailing out from.
153     addOutOfLineCode(ool, new(alloc()) BytecodeSite(tree, tree->script()->code()));
154 
155     masm.ma_b(ool->entry(), condition);
156 }
157 
158 void
CodeGeneratorARM::bailoutFrom(Label* label, LSnapshot* snapshot)
160 {
161     if (masm.bailed())
162         return;
163 
164     MOZ_ASSERT_IF(!masm.oom(), label->used());
165     MOZ_ASSERT_IF(!masm.oom(), !label->bound());
166 
167     encode(snapshot);
168 
169     // Though the assembler doesn't track all frame pushes, at least make sure
170     // the known value makes sense. We can't use bailout tables if the stack
171     // isn't properly aligned to the static frame size.
172     MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
173                   frameClass_.frameSize() == masm.framePushed());
174 
175     // On ARM we don't use a bailout table.
176     InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
177     OutOfLineBailout* ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
178 
179     // All bailout code is associated with the bytecodeSite of the block we are
180     // bailing out from.
181     addOutOfLineCode(ool, new(alloc()) BytecodeSite(tree, tree->script()->code()));
182 
183     masm.retarget(label, ool->entry());
184 }
185 
186 void
CodeGeneratorARM::bailout(LSnapshot* snapshot)
188 {
189     Label label;
190     masm.ma_b(&label);
191     bailoutFrom(&label, snapshot);
192 }
193 
194 void
CodeGeneratorARM::visitOutOfLineBailout(OutOfLineBailout* ool)
196 {
197     ScratchRegisterScope scratch(masm);
198     masm.ma_mov(Imm32(ool->snapshot()->snapshotOffset()), scratch);
199     masm.ma_push(scratch); // BailoutStack::padding_
200     masm.ma_push(scratch); // BailoutStack::snapshotOffset_
201     masm.ma_b(&deoptLabel_);
202 }
203 
204 void
CodeGeneratorARM::visitMinMaxD(LMinMaxD* ins)
206 {
207     FloatRegister first = ToFloatRegister(ins->first());
208     FloatRegister second = ToFloatRegister(ins->second());
209     FloatRegister output = ToFloatRegister(ins->output());
210 
211     MOZ_ASSERT(first == output);
212 
213     Assembler::Condition cond = ins->mir()->isMax()
214         ? Assembler::VFP_LessThanOrEqual
215         : Assembler::VFP_GreaterThanOrEqual;
216     Label nan, equal, returnSecond, done;
217 
218     masm.compareDouble(first, second);
219     // First or second is NaN, result is NaN.
220     masm.ma_b(&nan, Assembler::VFP_Unordered);
221     // Make sure we handle -0 and 0 right.
222     masm.ma_b(&equal, Assembler::VFP_Equal);
223     masm.ma_b(&returnSecond, cond);
224     masm.ma_b(&done);
225 
226     // Check for zero.
227     masm.bind(&equal);
228     masm.compareDouble(first, NoVFPRegister);
229     // First wasn't 0 or -0, so just return it.
230     masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered);
231     // So now both operands are either -0 or 0.
232     if (ins->mir()->isMax()) {
233         // -0 + -0 = -0 and -0 + 0 = 0.
234         masm.ma_vadd(second, first, first);
235     } else {
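        // For the min case, compute -((-first) - second): with both operands
        // zero this yields -0 unless both inputs are +0, matching the
        // expected handling of signed zeroes.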
236         masm.ma_vneg(first, first);
237         masm.ma_vsub(first, second, first);
238         masm.ma_vneg(first, first);
239     }
240     masm.ma_b(&done);
241 
242     masm.bind(&nan);
243     masm.loadConstantDouble(GenericNaN(), output);
244     masm.ma_b(&done);
245 
246     masm.bind(&returnSecond);
247     masm.ma_vmov(second, output);
248 
249     masm.bind(&done);
250 }
251 
252 void
CodeGeneratorARM::visitMinMaxF(LMinMaxF* ins)
254 {
255     FloatRegister first = ToFloatRegister(ins->first());
256     FloatRegister second = ToFloatRegister(ins->second());
257     FloatRegister output = ToFloatRegister(ins->output());
258 
259     MOZ_ASSERT(first == output);
260 
261     Assembler::Condition cond = ins->mir()->isMax()
262         ? Assembler::VFP_LessThanOrEqual
263         : Assembler::VFP_GreaterThanOrEqual;
264     Label nan, equal, returnSecond, done;
265 
266     masm.compareFloat(first, second);
267     // First or second is NaN, result is NaN.
268     masm.ma_b(&nan, Assembler::VFP_Unordered);
269     // Make sure we handle -0 and 0 right.
270     masm.ma_b(&equal, Assembler::VFP_Equal);
271     masm.ma_b(&returnSecond, cond);
272     masm.ma_b(&done);
273 
274     // Check for zero.
275     masm.bind(&equal);
276     masm.compareFloat(first, NoVFPRegister);
277     // First wasn't 0 or -0, so just return it.
278     masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered);
279     // So now both operands are either -0 or 0.
280     if (ins->mir()->isMax()) {
281         // -0 + -0 = -0 and -0 + 0 = 0.
282         masm.ma_vadd_f32(second, first, first);
283     } else {
284         masm.ma_vneg_f32(first, first);
285         masm.ma_vsub_f32(first, second, first);
286         masm.ma_vneg_f32(first, first);
287     }
288     masm.ma_b(&done);
289 
290     masm.bind(&nan);
291     masm.loadConstantFloat32(GenericNaN(), output);
292     masm.ma_b(&done);
293 
294     masm.bind(&returnSecond);
295     masm.ma_vmov_f32(second, output);
296 
297     masm.bind(&done);
298 }
299 
300 void
CodeGeneratorARM::visitAbsD(LAbsD* ins)
302 {
303     FloatRegister input = ToFloatRegister(ins->input());
304     MOZ_ASSERT(input == ToFloatRegister(ins->output()));
305     masm.ma_vabs(input, input);
306 }
307 
308 void
CodeGeneratorARM::visitAbsF(LAbsF* ins)
310 {
311     FloatRegister input = ToFloatRegister(ins->input());
312     MOZ_ASSERT(input == ToFloatRegister(ins->output()));
313     masm.ma_vabs_f32(input, input);
314 }
315 
316 void
CodeGeneratorARM::visitSqrtD(LSqrtD* ins)
318 {
319     FloatRegister input = ToFloatRegister(ins->input());
320     FloatRegister output = ToFloatRegister(ins->output());
321     masm.ma_vsqrt(input, output);
322 }
323 
324 void
CodeGeneratorARM::visitSqrtF(LSqrtF* ins)
326 {
327     FloatRegister input = ToFloatRegister(ins->input());
328     FloatRegister output = ToFloatRegister(ins->output());
329     masm.ma_vsqrt_f32(input, output);
330 }
331 
332 void
CodeGeneratorARM::visitAddI(LAddI* ins)
334 {
335     const LAllocation* lhs = ins->getOperand(0);
336     const LAllocation* rhs = ins->getOperand(1);
337     const LDefinition* dest = ins->getDef(0);
338 
339     if (rhs->isConstant())
340         masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCC);
341     else
342         masm.ma_add(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCC);
343 
344     if (ins->snapshot())
345         bailoutIf(Assembler::Overflow, ins->snapshot());
346 }
347 
348 void
CodeGeneratorARM::visitSubI(LSubI* ins)
350 {
351     const LAllocation* lhs = ins->getOperand(0);
352     const LAllocation* rhs = ins->getOperand(1);
353     const LDefinition* dest = ins->getDef(0);
354 
355     if (rhs->isConstant())
356         masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCC);
357     else
358         masm.ma_sub(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCC);
359 
360     if (ins->snapshot())
361         bailoutIf(Assembler::Overflow, ins->snapshot());
362 }
363 
364 void
CodeGeneratorARM::visitMulI(LMulI* ins)
366 {
367     const LAllocation* lhs = ins->getOperand(0);
368     const LAllocation* rhs = ins->getOperand(1);
369     const LDefinition* dest = ins->getDef(0);
370     MMul* mul = ins->mir();
371     MOZ_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());
372 
373     if (rhs->isConstant()) {
374         // Bailout when this condition is met.
375         Assembler::Condition c = Assembler::Overflow;
376         // Bailout on -0.0
377         int32_t constant = ToInt32(rhs);
378         if (mul->canBeNegativeZero() && constant <= 0) {
379             Assembler::Condition bailoutCond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
380             masm.ma_cmp(ToRegister(lhs), Imm32(0));
381             bailoutIf(bailoutCond, ins->snapshot());
382         }
383         // TODO: move these to ma_mul.
384         switch (constant) {
385           case -1:
386             masm.ma_rsb(ToRegister(lhs), Imm32(0), ToRegister(dest), SetCC);
387             break;
388           case 0:
389             masm.ma_mov(Imm32(0), ToRegister(dest));
390             return; // Escape overflow check;
391           case 1:
392             // Nop
393             masm.ma_mov(ToRegister(lhs), ToRegister(dest));
394             return; // Escape overflow check;
395           case 2:
396             masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCC);
397             // Overflow is handled later.
398             break;
399           default: {
400             bool handled = false;
401             if (constant > 0) {
402                 // Try shift and add sequences for a positive constant.
403                 if (!mul->canOverflow()) {
404                     // If it cannot overflow, we can do lots of optimizations.
405                     Register src = ToRegister(lhs);
406                     uint32_t shift = FloorLog2(constant);
407                     uint32_t rest = constant - (1 << shift);
408                     // See if the constant has one bit set, meaning it can be
409                     // encoded as a bitshift.
410                     if ((1 << shift) == constant) {
411                         masm.ma_lsl(Imm32(shift), src, ToRegister(dest));
412                         handled = true;
413                     } else {
414                         // If the constant cannot be encoded as (1 << C1), see
415                         // if it can be encoded as (1 << C1) | (1 << C2), which
416                         // can be computed using an add and a shift.
417                         uint32_t shift_rest = FloorLog2(rest);
418                         if ((1u << shift_rest) == rest) {
419                             masm.as_add(ToRegister(dest), src, lsl(src, shift-shift_rest));
420                             if (shift_rest != 0)
421                                 masm.ma_lsl(Imm32(shift_rest), ToRegister(dest), ToRegister(dest));
422                             handled = true;
423                         }
424                     }
425                 } else if (ToRegister(lhs) != ToRegister(dest)) {
426                     // To stay on the safe side, only optimize things that are a
427                     // power of 2.
428 
429                     uint32_t shift = FloorLog2(constant);
430                     if ((1 << shift) == constant) {
431                         // dest = lhs * pow(2,shift)
432                         masm.ma_lsl(Imm32(shift), ToRegister(lhs), ToRegister(dest));
433                         // At runtime, check (lhs == dest >> shift), if this
434                         // does not hold, some bits were lost due to overflow,
435                         // and the computation should be resumed as a double.
436                         masm.as_cmp(ToRegister(lhs), asr(ToRegister(dest), shift));
437                         c = Assembler::NotEqual;
438                         handled = true;
439                     }
440                 }
441             }
442 
443             if (!handled) {
444                 if (mul->canOverflow())
445                     c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), c);
446                 else
447                     masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest));
448             }
449           }
450         }
451         // Bailout on overflow.
452         if (mul->canOverflow())
453             bailoutIf(c, ins->snapshot());
454     } else {
455         Assembler::Condition c = Assembler::Overflow;
456 
457         // masm.imull(ToOperand(rhs), ToRegister(lhs));
458         if (mul->canOverflow())
459             c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), c);
460         else
461             masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));
462 
463         // Bailout on overflow.
464         if (mul->canOverflow())
465             bailoutIf(c, ins->snapshot());
466 
467         if (mul->canBeNegativeZero()) {
468             Label done;
469             masm.ma_cmp(ToRegister(dest), Imm32(0));
470             masm.ma_b(&done, Assembler::NotEqual);
471 
472             // Result is -0 if lhs or rhs is negative.
473             masm.ma_cmn(ToRegister(lhs), ToRegister(rhs));
474             bailoutIf(Assembler::Signed, ins->snapshot());
475 
476             masm.bind(&done);
477         }
478     }
479 }
480 
481 void
CodeGeneratorARM::divICommon(MDiv* mir, Register lhs, Register rhs, Register output,
                             LSnapshot* snapshot, Label& done)
484 {
485     if (mir->canBeNegativeOverflow()) {
486         // Handle INT32_MIN / -1;
487         // The integer division will give INT32_MIN, but we want -(double)INT32_MIN.
488 
489         // Sets EQ if lhs == INT32_MIN.
490         masm.ma_cmp(lhs, Imm32(INT32_MIN));
491         // If EQ (LHS == INT32_MIN), sets EQ if rhs == -1.
492         masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal);
493         if (mir->canTruncateOverflow()) {
494             // (-INT32_MIN)|0 = INT32_MIN
495             Label skip;
496             masm.ma_b(&skip, Assembler::NotEqual);
497             masm.ma_mov(Imm32(INT32_MIN), output);
498             masm.ma_b(&done);
499             masm.bind(&skip);
500         } else {
501             MOZ_ASSERT(mir->fallible());
502             bailoutIf(Assembler::Equal, snapshot);
503         }
504     }
505 
506     // Handle divide by zero.
507     if (mir->canBeDivideByZero()) {
508         masm.ma_cmp(rhs, Imm32(0));
509         if (mir->canTruncateInfinities()) {
510             // Infinity|0 == 0
511             Label skip;
512             masm.ma_b(&skip, Assembler::NotEqual);
513             masm.ma_mov(Imm32(0), output);
514             masm.ma_b(&done);
515             masm.bind(&skip);
516         } else {
517             MOZ_ASSERT(mir->fallible());
518             bailoutIf(Assembler::Equal, snapshot);
519         }
520     }
521 
522     // Handle negative 0.
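    // (0 / negative) yields -0, which cannot be represented as an int32.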
523     if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
524         Label nonzero;
525         masm.ma_cmp(lhs, Imm32(0));
526         masm.ma_b(&nonzero, Assembler::NotEqual);
527         masm.ma_cmp(rhs, Imm32(0));
528         MOZ_ASSERT(mir->fallible());
529         bailoutIf(Assembler::LessThan, snapshot);
530         masm.bind(&nonzero);
531     }
532 }
533 
534 void
CodeGeneratorARM::visitDivI(LDivI* ins)
536 {
537     // Extract the registers from this instruction.
538     Register lhs = ToRegister(ins->lhs());
539     Register rhs = ToRegister(ins->rhs());
540     Register temp = ToRegister(ins->getTemp(0));
541     Register output = ToRegister(ins->output());
542     MDiv* mir = ins->mir();
543 
544     Label done;
545     divICommon(mir, lhs, rhs, output, ins->snapshot(), done);
546 
547     if (mir->canTruncateRemainder()) {
548         masm.ma_sdiv(lhs, rhs, output);
549     } else {
550         {
551             ScratchRegisterScope scratch(masm);
552             masm.ma_sdiv(lhs, rhs, temp);
553             masm.ma_mul(temp, rhs, scratch);
554             masm.ma_cmp(lhs, scratch);
555         }
556         bailoutIf(Assembler::NotEqual, ins->snapshot());
557         masm.ma_mov(temp, output);
558     }
559 
560     masm.bind(&done);
561 }
562 
563 extern "C" {
564     extern MOZ_EXPORT int64_t __aeabi_idivmod(int,int);
565     extern MOZ_EXPORT int64_t __aeabi_uidivmod(int,int);
566 }
567 
568 void
CodeGeneratorARM::visitSoftDivI(LSoftDivI* ins)
570 {
571     // Extract the registers from this instruction.
572     Register lhs = ToRegister(ins->lhs());
573     Register rhs = ToRegister(ins->rhs());
574     Register output = ToRegister(ins->output());
575     MDiv* mir = ins->mir();
576 
577     Label done;
578     divICommon(mir, lhs, rhs, output, ins->snapshot(), done);
579 
580     masm.setupAlignedABICall();
581     masm.passABIArg(lhs);
582     masm.passABIArg(rhs);
583     if (gen->compilingAsmJS())
584         masm.callWithABI(wasm::SymbolicAddress::aeabi_idivmod);
585     else
586         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, __aeabi_idivmod));
587 
588     // idivmod returns the quotient in r0, and the remainder in r1.
589     if (!mir->canTruncateRemainder()) {
590         MOZ_ASSERT(mir->fallible());
591         masm.ma_cmp(r1, Imm32(0));
592         bailoutIf(Assembler::NonZero, ins->snapshot());
593     }
594 
595     masm.bind(&done);
596 }
597 
598 void
CodeGeneratorARM::visitDivPowTwoI(LDivPowTwoI* ins)
600 {
601     MDiv* mir = ins->mir();
602     Register lhs = ToRegister(ins->numerator());
603     Register output = ToRegister(ins->output());
604     int32_t shift = ins->shift();
605 
606     if (shift == 0) {
607         masm.ma_mov(lhs, output);
608         return;
609     }
610 
611     if (!mir->isTruncated()) {
612         // If the remainder is != 0, bailout since this must be a double.
613         {
614             // The bailout code also needs the scratch register.
615             // Here it is only used as a dummy target to set CC flags.
616             ScratchRegisterScope scratch(masm);
617             masm.as_mov(scratch, lsl(lhs, 32 - shift), SetCC);
618         }
619         bailoutIf(Assembler::NonZero, ins->snapshot());
620     }
621 
622     if (!mir->canBeNegativeDividend()) {
623         // Numerator is unsigned, so needs no adjusting. Do the shift.
624         masm.as_mov(output, asr(lhs, shift));
625         return;
626     }
627 
628     // Adjust the value so that shifting produces a correctly rounded result
629     // when the numerator is negative. See 10-1 "Signed Division by a Known
630     // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
631     ScratchRegisterScope scratch(masm);
632 
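    // If the numerator is negative, add (2^shift - 1) to it before shifting:
    // the asr by 31 yields an all-ones mask for negative inputs and the lsr
    // by (32 - shift) turns that mask into (2^shift - 1). For shift == 1 the
    // numerator's own sign bit supplies the +1 directly.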
633     if (shift > 1) {
634         masm.as_mov(scratch, asr(lhs, 31));
635         masm.as_add(scratch, lhs, lsr(scratch, 32 - shift));
636     } else {
637         masm.as_add(scratch, lhs, lsr(lhs, 32 - shift));
638     }
639 
640     // Do the shift.
641     masm.as_mov(output, asr(scratch, shift));
642 }
643 
644 void
CodeGeneratorARM::modICommon(MMod* mir, Register lhs, Register rhs, Register output,
                             LSnapshot* snapshot, Label& done)
647 {
648     // 0/X (with X < 0) is bad because both of these values *should* be doubles,
649     // and the result should be -0.0, which cannot be represented in integers.
650     // X/0 is bad because it will give garbage (or abort), when it should give
651     // either \infty, -\infty or NAN.
652 
    // Prevent 0 / X (with X < 0) and X / 0.
    // When testing X / Y, compare Y with 0.
655     // There are three cases: (Y < 0), (Y == 0) and (Y > 0).
656     // If (Y < 0), then we compare X with 0, and bail if X == 0.
657     // If (Y == 0), then we simply want to bail. Since this does not set the
658     // flags necessary for LT to trigger, we don't test X, and take the bailout
659     // because the EQ flag is set.
660     // If (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take
661     // the bailout.
662     if (mir->canBeDivideByZero() || mir->canBeNegativeDividend()) {
663         masm.ma_cmp(rhs, Imm32(0));
664         masm.ma_cmp(lhs, Imm32(0), Assembler::LessThan);
665         if (mir->isTruncated()) {
666             // NaN|0 == 0 and (0 % -X)|0 == 0
667             Label skip;
668             masm.ma_b(&skip, Assembler::NotEqual);
669             masm.ma_mov(Imm32(0), output);
670             masm.ma_b(&done);
671             masm.bind(&skip);
672         } else {
673             MOZ_ASSERT(mir->fallible());
674             bailoutIf(Assembler::Equal, snapshot);
675         }
676     }
677 }
678 
679 void
CodeGeneratorARM::visitModI(LModI* ins)
681 {
682     Register lhs = ToRegister(ins->lhs());
683     Register rhs = ToRegister(ins->rhs());
684     Register output = ToRegister(ins->output());
685     Register callTemp = ToRegister(ins->callTemp());
686     MMod* mir = ins->mir();
687 
688     // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
689     masm.ma_mov(lhs, callTemp);
690 
691     Label done;
692     modICommon(mir, lhs, rhs, output, ins->snapshot(), done);
693 
694     masm.ma_smod(lhs, rhs, output);
695 
696     // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0.
697     if (mir->canBeNegativeDividend()) {
698         if (mir->isTruncated()) {
699             // -0.0|0 == 0
700         } else {
701             MOZ_ASSERT(mir->fallible());
702             // See if X < 0
703             masm.ma_cmp(output, Imm32(0));
704             masm.ma_b(&done, Assembler::NotEqual);
705             masm.ma_cmp(callTemp, Imm32(0));
706             bailoutIf(Assembler::Signed, ins->snapshot());
707         }
708     }
709 
710     masm.bind(&done);
711 }
712 
713 void
CodeGeneratorARM::visitSoftModI(LSoftModI* ins)
715 {
716     // Extract the registers from this instruction.
717     Register lhs = ToRegister(ins->lhs());
718     Register rhs = ToRegister(ins->rhs());
719     Register output = ToRegister(ins->output());
720     Register callTemp = ToRegister(ins->callTemp());
721     MMod* mir = ins->mir();
722     Label done;
723 
724     // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
725     MOZ_ASSERT(callTemp.code() > r3.code() && callTemp.code() < r12.code());
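    // The temp must survive the ABI call below, hence the check above that it
    // lies outside the r0-r3 argument/return registers and is not the scratch
    // register.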
726     masm.ma_mov(lhs, callTemp);
727 
728     // Prevent INT_MIN % -1;
729     // The integer division will give INT_MIN, but we want -(double)INT_MIN.
730     if (mir->canBeNegativeDividend()) {
731         // Sets EQ if lhs == INT_MIN
732         masm.ma_cmp(lhs, Imm32(INT_MIN));
733         // If EQ (LHS == INT_MIN), sets EQ if rhs == -1
734         masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal);
735         if (mir->isTruncated()) {
736             // (INT_MIN % -1)|0 == 0
737             Label skip;
738             masm.ma_b(&skip, Assembler::NotEqual);
739             masm.ma_mov(Imm32(0), output);
740             masm.ma_b(&done);
741             masm.bind(&skip);
742         } else {
743             MOZ_ASSERT(mir->fallible());
744             bailoutIf(Assembler::Equal, ins->snapshot());
745         }
746     }
747 
748     modICommon(mir, lhs, rhs, output, ins->snapshot(), done);
749 
750     masm.setupAlignedABICall();
751     masm.passABIArg(lhs);
752     masm.passABIArg(rhs);
753     if (gen->compilingAsmJS())
754         masm.callWithABI(wasm::SymbolicAddress::aeabi_idivmod);
755     else
756         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, __aeabi_idivmod));
757 
758     // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
759     if (mir->canBeNegativeDividend()) {
760         if (mir->isTruncated()) {
761             // -0.0|0 == 0
762         } else {
763             MOZ_ASSERT(mir->fallible());
764             // See if X < 0
765             masm.ma_cmp(r1, Imm32(0));
766             masm.ma_b(&done, Assembler::NotEqual);
767             masm.ma_cmp(callTemp, Imm32(0));
768             bailoutIf(Assembler::Signed, ins->snapshot());
769         }
770     }
771     masm.bind(&done);
772 }
773 
774 void
CodeGeneratorARM::visitModPowTwoI(LModPowTwoI* ins)
776 {
777     Register in = ToRegister(ins->getOperand(0));
778     Register out = ToRegister(ins->getDef(0));
779     MMod* mir = ins->mir();
780     Label fin;
781     // bug 739870, jbramley has a different sequence that may help with speed
782     // here.
783     masm.ma_mov(in, out, SetCC);
784     masm.ma_b(&fin, Assembler::Zero);
785     masm.ma_rsb(Imm32(0), out, LeaveCC, Assembler::Signed);
786     masm.ma_and(Imm32((1 << ins->shift()) - 1), out);
787     masm.ma_rsb(Imm32(0), out, SetCC, Assembler::Signed);
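    // Both conditional rsb instructions are predicated on the sign recorded
    // by the initial flag-setting move: a negative input is negated before
    // the mask is applied and negated back afterwards. The final SetCC lets
    // the bailout below detect a -0 result.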
788     if (mir->canBeNegativeDividend()) {
789         if (!mir->isTruncated()) {
790             MOZ_ASSERT(mir->fallible());
791             bailoutIf(Assembler::Zero, ins->snapshot());
792         } else {
793             // -0|0 == 0
794         }
795     }
796     masm.bind(&fin);
797 }
798 
799 void
CodeGeneratorARM::visitModMaskI(LModMaskI* ins)
801 {
802     Register src = ToRegister(ins->getOperand(0));
803     Register dest = ToRegister(ins->getDef(0));
804     Register tmp1 = ToRegister(ins->getTemp(0));
805     Register tmp2 = ToRegister(ins->getTemp(1));
806     MMod* mir = ins->mir();
807     masm.ma_mod_mask(src, dest, tmp1, tmp2, ins->shift());
808     if (mir->canBeNegativeDividend()) {
809         if (!mir->isTruncated()) {
810             MOZ_ASSERT(mir->fallible());
811             bailoutIf(Assembler::Zero, ins->snapshot());
812         } else {
813             // -0|0 == 0
814         }
815     }
816 }
817 
818 void
CodeGeneratorARM::visitBitNotI(LBitNotI* ins)
820 {
821     const LAllocation* input = ins->getOperand(0);
822     const LDefinition* dest = ins->getDef(0);
    // This need not be true on ARM: we could MVN an imm8m constant directly
    // in order to handle a wider range of immediates.
825     MOZ_ASSERT(!input->isConstant());
826 
827     masm.ma_mvn(ToRegister(input), ToRegister(dest));
828 }
829 
830 void
CodeGeneratorARM::visitBitOpI(LBitOpI* ins)
832 {
833     const LAllocation* lhs = ins->getOperand(0);
834     const LAllocation* rhs = ins->getOperand(1);
835     const LDefinition* dest = ins->getDef(0);
836     // All of these bitops should be either imm32's, or integer registers.
837     switch (ins->bitop()) {
838       case JSOP_BITOR:
839         if (rhs->isConstant())
840             masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
841         else
842             masm.ma_orr(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
843         break;
844       case JSOP_BITXOR:
845         if (rhs->isConstant())
846             masm.ma_eor(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
847         else
848             masm.ma_eor(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
849         break;
850       case JSOP_BITAND:
851         if (rhs->isConstant())
852             masm.ma_and(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
853         else
854             masm.ma_and(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
855         break;
856       default:
857         MOZ_CRASH("unexpected binary opcode");
858     }
859 }
860 
861 void
CodeGeneratorARM::visitShiftI(LShiftI* ins)
863 {
864     Register lhs = ToRegister(ins->lhs());
865     const LAllocation* rhs = ins->rhs();
866     Register dest = ToRegister(ins->output());
867 
868     if (rhs->isConstant()) {
869         int32_t shift = ToInt32(rhs) & 0x1F;
870         switch (ins->bitop()) {
871           case JSOP_LSH:
872             if (shift)
873                 masm.ma_lsl(Imm32(shift), lhs, dest);
874             else
875                 masm.ma_mov(lhs, dest);
876             break;
877           case JSOP_RSH:
878             if (shift)
879                 masm.ma_asr(Imm32(shift), lhs, dest);
880             else
881                 masm.ma_mov(lhs, dest);
882             break;
883           case JSOP_URSH:
884             if (shift) {
885                 masm.ma_lsr(Imm32(shift), lhs, dest);
886             } else {
887                 // x >>> 0 can overflow.
888                 masm.ma_mov(lhs, dest);
889                 if (ins->mir()->toUrsh()->fallible()) {
890                     masm.ma_cmp(dest, Imm32(0));
891                     bailoutIf(Assembler::LessThan, ins->snapshot());
892                 }
893             }
894             break;
895           default:
896             MOZ_CRASH("Unexpected shift op");
897         }
898     } else {
899         // The shift amounts should be AND'ed into the 0-31 range since arm
900         // shifts by the lower byte of the register (it will attempt to shift by
901         // 250 if you ask it to).
902         masm.ma_and(Imm32(0x1F), ToRegister(rhs), dest);
903 
904         switch (ins->bitop()) {
905           case JSOP_LSH:
906             masm.ma_lsl(dest, lhs, dest);
907             break;
908           case JSOP_RSH:
909             masm.ma_asr(dest, lhs, dest);
910             break;
911           case JSOP_URSH:
912             masm.ma_lsr(dest, lhs, dest);
913             if (ins->mir()->toUrsh()->fallible()) {
914                 // x >>> 0 can overflow.
915                 masm.ma_cmp(dest, Imm32(0));
916                 bailoutIf(Assembler::LessThan, ins->snapshot());
917             }
918             break;
919           default:
920             MOZ_CRASH("Unexpected shift op");
921         }
922     }
923 }
924 
925 void
CodeGeneratorARM::visitUrshD(LUrshD* ins)
927 {
928     Register lhs = ToRegister(ins->lhs());
929     Register temp = ToRegister(ins->temp());
930 
931     const LAllocation* rhs = ins->rhs();
932     FloatRegister out = ToFloatRegister(ins->output());
933 
934     if (rhs->isConstant()) {
935         int32_t shift = ToInt32(rhs) & 0x1F;
936         if (shift)
937             masm.ma_lsr(Imm32(shift), lhs, temp);
938         else
939             masm.ma_mov(lhs, temp);
940     } else {
941         masm.ma_and(Imm32(0x1F), ToRegister(rhs), temp);
942         masm.ma_lsr(temp, lhs, temp);
943     }
944 
945     masm.convertUInt32ToDouble(temp, out);
946 }
947 
948 void
CodeGeneratorARM::visitClzI(LClzI* ins)
950 {
951     Register input = ToRegister(ins->input());
952     Register output = ToRegister(ins->output());
953 
954     masm.ma_clz(input, output);
955 }
956 
957 void
CodeGeneratorARM::visitPowHalfD(LPowHalfD* ins)
959 {
960     FloatRegister input = ToFloatRegister(ins->input());
961     FloatRegister output = ToFloatRegister(ins->output());
962     ScratchDoubleScope scratch(masm);
963 
964     Label done;
965 
    // Math.pow(-Infinity, 0.5) == Infinity.
967     masm.ma_vimm(NegativeInfinity<double>(), scratch);
968     masm.compareDouble(input, scratch);
969     masm.ma_vneg(scratch, output, Assembler::Equal);
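    // scratch still holds -Infinity here, so the conditional negate above
    // materializes +Infinity in the output for this case.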
970     masm.ma_b(&done, Assembler::Equal);
971 
972     // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
973     // Adding 0 converts any -0 to 0.
974     masm.ma_vimm(0.0, scratch);
975     masm.ma_vadd(scratch, input, output);
976     masm.ma_vsqrt(output, output);
977 
978     masm.bind(&done);
979 }
980 
981 MoveOperand
CodeGeneratorARM::toMoveOperand(LAllocation a) const
983 {
984     if (a.isGeneralReg())
985         return MoveOperand(ToRegister(a));
986     if (a.isFloatReg())
987         return MoveOperand(ToFloatRegister(a));
988     int32_t offset = ToStackOffset(a);
989     MOZ_ASSERT((offset & 3) == 0);
990     return MoveOperand(StackPointer, offset);
991 }
992 
993 class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorARM>
994 {
995     MTableSwitch* mir_;
996     Vector<CodeLabel, 8, JitAllocPolicy> codeLabels_;
997 
    void accept(CodeGeneratorARM* codegen) {
999         codegen->visitOutOfLineTableSwitch(this);
1000     }
1001 
1002   public:
    OutOfLineTableSwitch(TempAllocator& alloc, MTableSwitch* mir)
1004       : mir_(mir),
1005         codeLabels_(alloc)
1006     {}
1007 
    MTableSwitch* mir() const {
1009         return mir_;
1010     }
1011 
    bool addCodeLabel(CodeLabel label) {
1013         return codeLabels_.append(label);
1014     }
    CodeLabel codeLabel(unsigned i) {
1016         return codeLabels_[i];
1017     }
1018 };
1019 
1020 void
CodeGeneratorARM::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool)
1022 {
1023     MTableSwitch* mir = ool->mir();
1024 
1025     size_t numCases = mir->numCases();
1026     for (size_t i = 0; i < numCases; i++) {
1027         LBlock* caseblock = skipTrivialBlocks(mir->getCase(numCases - 1 - i))->lir();
1028         Label* caseheader = caseblock->label();
1029         uint32_t caseoffset = caseheader->offset();
1030 
1031         // The entries of the jump table need to be absolute addresses and thus
1032         // must be patched after codegen is finished.
1033         CodeLabel cl = ool->codeLabel(i);
1034         cl.target()->bind(caseoffset);
1035         masm.addCodeLabel(cl);
1036     }
1037 }
1038 
1039 void
CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base)
1041 {
1042     // The code generated by this is utter hax.
1043     // The end result looks something like:
1044     // SUBS index, input, #base
1045     // RSBSPL index, index, #max
1046     // LDRPL pc, pc, index lsl 2
1047     // B default
1048 
    // If the range of targets is N through M, we first subtract off the lowest
    // case (N), which both shifts the arguments into the range 0 to (M - N)
    // and sets the Minus flag if the argument was out of range on the low end.

    // Then we do a reverse subtract with the size of the jump table, which
    // reverses the range (it becomes size through 0, rather than 0 through
    // size). The main purpose of this is that we set the same flag as the
    // lower bound check for the upper bound check. Lastly, we do this
    // conditionally on the previous check succeeding.
1059 
1060     // Then we conditionally load the pc offset by the (reversed) index (times
1061     // the address size) into the pc, which branches to the correct case. NOTE:
1062     // when we go to read the pc, the value that we get back is the pc of the
1063     // current instruction *PLUS 8*. This means that ldr foo, [pc, +0] reads
1064     // $pc+8. In other words, there is an empty word after the branch into the
1065     // switch table before the table actually starts. Since the only other
1066     // unhandled case is the default case (both out of range high and out of
1067     // range low) I then insert a branch to default case into the extra slot,
1068     // which ensures we don't attempt to execute the address table.
1069     Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
1070 
1071     int32_t cases = mir->numCases();
    // Subtract off the lowest case value so the cases start at zero.
1073     masm.ma_sub(index, Imm32(mir->low()), index, SetCC);
1074     masm.ma_rsb(index, Imm32(cases - 1), index, SetCC, Assembler::NotSigned);
1075     // Inhibit pools within the following sequence because we are indexing into
1076     // a pc relative table. The region will have one instruction for ma_ldr, one
1077     // for ma_b, and each table case takes one word.
1078     AutoForbidPools afp(&masm, 1 + 1 + cases);
1079     masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset, Assembler::NotSigned);
1080     masm.ma_b(defaultcase);
1081 
1082     // To fill in the CodeLabels for the case entries, we need to first generate
1083     // the case entries (we don't yet know their offsets in the instruction
1084     // stream).
1085     OutOfLineTableSwitch* ool = new(alloc()) OutOfLineTableSwitch(alloc(), mir);
1086     for (int32_t i = 0; i < cases; i++) {
1087         CodeLabel cl;
1088         masm.writeCodePointer(cl.patchAt());
1089         masm.propagateOOM(ool->addCodeLabel(cl));
1090     }
1091     addOutOfLineCode(ool, mir);
1092 }
1093 
1094 void
CodeGeneratorARM::visitMathD(LMathD* math)
1096 {
1097     const LAllocation* src1 = math->getOperand(0);
1098     const LAllocation* src2 = math->getOperand(1);
1099     const LDefinition* output = math->getDef(0);
1100 
1101     switch (math->jsop()) {
1102       case JSOP_ADD:
1103         masm.ma_vadd(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
1104         break;
1105       case JSOP_SUB:
1106         masm.ma_vsub(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
1107         break;
1108       case JSOP_MUL:
1109         masm.ma_vmul(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
1110         break;
1111       case JSOP_DIV:
1112         masm.ma_vdiv(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
1113         break;
1114       default:
1115         MOZ_CRASH("unexpected opcode");
1116     }
1117 }
1118 
1119 void
CodeGeneratorARM::visitMathF(LMathF* math)
1121 {
1122     const LAllocation* src1 = math->getOperand(0);
1123     const LAllocation* src2 = math->getOperand(1);
1124     const LDefinition* output = math->getDef(0);
1125 
1126     switch (math->jsop()) {
1127       case JSOP_ADD:
1128         masm.ma_vadd_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
1129         break;
1130       case JSOP_SUB:
1131         masm.ma_vsub_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
1132         break;
1133       case JSOP_MUL:
1134         masm.ma_vmul_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
1135         break;
1136       case JSOP_DIV:
1137         masm.ma_vdiv_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
1138         break;
1139       default:
1140         MOZ_CRASH("unexpected opcode");
1141     }
1142 }
1143 
1144 void
CodeGeneratorARM::visitFloor(LFloor* lir)
1146 {
1147     FloatRegister input = ToFloatRegister(lir->input());
1148     Register output = ToRegister(lir->output());
1149     Label bail;
1150     masm.floor(input, output, &bail);
1151     bailoutFrom(&bail, lir->snapshot());
1152 }
1153 
1154 void
CodeGeneratorARM::visitFloorF(LFloorF* lir)
1156 {
1157     FloatRegister input = ToFloatRegister(lir->input());
1158     Register output = ToRegister(lir->output());
1159     Label bail;
1160     masm.floorf(input, output, &bail);
1161     bailoutFrom(&bail, lir->snapshot());
1162 }
1163 
1164 void
CodeGeneratorARM::visitCeil(LCeil* lir)
1166 {
1167     FloatRegister input = ToFloatRegister(lir->input());
1168     Register output = ToRegister(lir->output());
1169     Label bail;
1170     masm.ceil(input, output, &bail);
1171     bailoutFrom(&bail, lir->snapshot());
1172 }
1173 
1174 void
CodeGeneratorARM::visitCeilF(LCeilF* lir)
1176 {
1177     FloatRegister input = ToFloatRegister(lir->input());
1178     Register output = ToRegister(lir->output());
1179     Label bail;
1180     masm.ceilf(input, output, &bail);
1181     bailoutFrom(&bail, lir->snapshot());
1182 }
1183 
1184 void
CodeGeneratorARM::visitRound(LRound* lir)
1186 {
1187     FloatRegister input = ToFloatRegister(lir->input());
1188     Register output = ToRegister(lir->output());
1189     FloatRegister tmp = ToFloatRegister(lir->temp());
1190     Label bail;
1191     // Output is either correct, or clamped. All -0 cases have been translated
1192     // to a clamped case.
1193     masm.round(input, output, &bail, tmp);
1194     bailoutFrom(&bail, lir->snapshot());
1195 }
1196 
1197 void
CodeGeneratorARM::visitRoundF(LRoundF* lir)
1199 {
1200     FloatRegister input = ToFloatRegister(lir->input());
1201     Register output = ToRegister(lir->output());
1202     FloatRegister tmp = ToFloatRegister(lir->temp());
1203     Label bail;
1204     // Output is either correct, or clamped. All -0 cases have been translated
1205     // to a clamped case.
1206     masm.roundf(input, output, &bail, tmp);
1207     bailoutFrom(&bail, lir->snapshot());
1208 }
1209 
1210 void
CodeGeneratorARM::emitRoundDouble(FloatRegister src, Register dest, Label* fail)
1212 {
1213     ScratchDoubleScope scratch(masm);
1214 
1215     masm.ma_vcvt_F64_I32(src, scratch);
1216     masm.ma_vxfer(scratch, dest);
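    // vcvt saturates out-of-range inputs to INT32_MAX or INT32_MIN, so treat
    // either sentinel value as a failed conversion and take the fail path.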
1217     masm.ma_cmp(dest, Imm32(0x7fffffff));
1218     masm.ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
1219     masm.ma_b(fail, Assembler::Equal);
1220 }
1221 
1222 void
CodeGeneratorARM::visitTruncateDToInt32(LTruncateDToInt32* ins)
1224 {
1225     emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()), ins->mir());
1226 }
1227 
1228 void
CodeGeneratorARM::visitTruncateFToInt32(LTruncateFToInt32* ins)
1230 {
1231     emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()), ins->mir());
1232 }
1233 
1234 static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };
1235 
1236 FrameSizeClass
FrameSizeClass::FromDepth(uint32_t frameDepth)
1238 {
1239     for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) {
1240         if (frameDepth < FrameSizes[i])
1241             return FrameSizeClass(i);
1242     }
1243 
1244     return FrameSizeClass::None();
1245 }
1246 
1247 FrameSizeClass
FrameSizeClass::ClassLimit()
1249 {
1250     return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
1251 }
1252 
1253 uint32_t
FrameSizeClass::frameSize() const
1255 {
1256     MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
1257     MOZ_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));
1258 
1259     return FrameSizes[class_];
1260 }
1261 
1262 ValueOperand
CodeGeneratorARM::ToValue(LInstruction* ins, size_t pos)
1264 {
1265     Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
1266     Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
1267     return ValueOperand(typeReg, payloadReg);
1268 }
1269 
1270 ValueOperand
CodeGeneratorARM::ToOutValue(LInstruction* ins)
1272 {
1273     Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
1274     Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
1275     return ValueOperand(typeReg, payloadReg);
1276 }
1277 
1278 ValueOperand
CodeGeneratorARM::ToTempValue(LInstruction* ins, size_t pos)
1280 {
1281     Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
1282     Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
1283     return ValueOperand(typeReg, payloadReg);
1284 }
1285 
1286 void
CodeGeneratorARM::visitValue(LValue* value)
1288 {
1289     const ValueOperand out = ToOutValue(value);
1290 
1291     masm.moveValue(value->value(), out);
1292 }
1293 
1294 void
CodeGeneratorARM::visitBox(LBox* box)
1296 {
1297     const LDefinition* type = box->getDef(TYPE_INDEX);
1298 
1299     MOZ_ASSERT(!box->getOperand(0)->isConstant());
1300 
    // The input operand and the output payload share the same virtual
    // register. All that needs to be written is the type tag for the type
    // definition.
1304     masm.ma_mov(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
1305 }
1306 
1307 void
CodeGeneratorARM::visitBoxFloatingPoint(LBoxFloatingPoint* box)
1309 {
1310     const LDefinition* payload = box->getDef(PAYLOAD_INDEX);
1311     const LDefinition* type = box->getDef(TYPE_INDEX);
1312     const LAllocation* in = box->getOperand(0);
1313     FloatRegister reg = ToFloatRegister(in);
1314 
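    // ma_vxfer splits the 64-bit value across two core registers: the low
    // word becomes the payload and the high word becomes the type tag of the
    // boxed value.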
1315     if (box->type() == MIRType_Float32) {
1316         ScratchFloat32Scope scratch(masm);
1317         masm.convertFloat32ToDouble(reg, scratch);
1318         masm.ma_vxfer(VFPRegister(scratch), ToRegister(payload), ToRegister(type));
1319     } else {
1320         masm.ma_vxfer(VFPRegister(reg), ToRegister(payload), ToRegister(type));
1321     }
1322 }
1323 
1324 void
CodeGeneratorARM::visitUnbox(LUnbox* unbox)
1326 {
1327     // Note that for unbox, the type and payload indexes are switched on the
1328     // inputs.
1329     MUnbox* mir = unbox->mir();
1330     Register type = ToRegister(unbox->type());
1331 
1332     if (mir->fallible()) {
1333         masm.ma_cmp(type, Imm32(MIRTypeToTag(mir->type())));
1334         bailoutIf(Assembler::NotEqual, unbox->snapshot());
1335     }
1336 }
1337 
1338 void
CodeGeneratorARM::visitDouble(LDouble* ins)
1340 {
1341     const LDefinition* out = ins->getDef(0);
1342 
1343     masm.ma_vimm(ins->getDouble(), ToFloatRegister(out));
1344 }
1345 
1346 void
CodeGeneratorARM::visitFloat32(LFloat32* ins)
1348 {
1349     const LDefinition* out = ins->getDef(0);
1350     masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out));
1351 }
1352 
1353 Register
CodeGeneratorARM::splitTagForTest(const ValueOperand& value)
1355 {
1356     return value.typeReg();
1357 }
1358 
1359 void
CodeGeneratorARM::visitTestDAndBranch(LTestDAndBranch* test)
1361 {
1362     const LAllocation* opd = test->input();
1363     masm.ma_vcmpz(ToFloatRegister(opd));
1364     masm.as_vmrs(pc);
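    // Transfer the VFP status flags into the APSR so the integer conditional
    // branches below can test the result of the compare.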
1365 
1366     MBasicBlock* ifTrue = test->ifTrue();
1367     MBasicBlock* ifFalse = test->ifFalse();
    // If the compare set the zero bit, then the result is definitely false.
1369     jumpToBlock(ifFalse, Assembler::Zero);
1370     // It is also false if one of the operands is NAN, which is shown as
1371     // Overflow.
1372     jumpToBlock(ifFalse, Assembler::Overflow);
1373     jumpToBlock(ifTrue);
1374 }
1375 
1376 void
CodeGeneratorARM::visitTestFAndBranch(LTestFAndBranch* test)
1378 {
1379     const LAllocation* opd = test->input();
1380     masm.ma_vcmpz_f32(ToFloatRegister(opd));
1381     masm.as_vmrs(pc);
1382 
1383     MBasicBlock* ifTrue = test->ifTrue();
1384     MBasicBlock* ifFalse = test->ifFalse();
    // If the compare set the zero bit, then the result is definitely false.
1386     jumpToBlock(ifFalse, Assembler::Zero);
1387     // It is also false if one of the operands is NAN, which is shown as
1388     // Overflow.
1389     jumpToBlock(ifFalse, Assembler::Overflow);
1390     jumpToBlock(ifTrue);
1391 }
1392 
1393 void
CodeGeneratorARM::visitCompareD(LCompareD* comp)
1395 {
1396     FloatRegister lhs = ToFloatRegister(comp->left());
1397     FloatRegister rhs = ToFloatRegister(comp->right());
1398 
1399     Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
1400     masm.compareDouble(lhs, rhs);
1401     masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()));
1402 }
1403 
1404 void
CodeGeneratorARM::visitCompareF(LCompareF* comp)
1406 {
1407     FloatRegister lhs = ToFloatRegister(comp->left());
1408     FloatRegister rhs = ToFloatRegister(comp->right());
1409 
1410     Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
1411     masm.compareFloat(lhs, rhs);
1412     masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()));
1413 }
1414 
1415 void
CodeGeneratorARM::visitCompareDAndBranch(LCompareDAndBranch* comp)
1417 {
1418     FloatRegister lhs = ToFloatRegister(comp->left());
1419     FloatRegister rhs = ToFloatRegister(comp->right());
1420 
1421     Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
1422     masm.compareDouble(lhs, rhs);
1423     emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse());
1424 }
1425 
1426 void
CodeGeneratorARM::visitCompareFAndBranch(LCompareFAndBranch* comp)
1428 {
1429     FloatRegister lhs = ToFloatRegister(comp->left());
1430     FloatRegister rhs = ToFloatRegister(comp->right());
1431 
1432     Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
1433     masm.compareFloat(lhs, rhs);
1434     emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse());
1435 }
1436 
1437 void
CodeGeneratorARM::visitCompareB(LCompareB* lir)
1439 {
1440     MCompare* mir = lir->mir();
1441 
1442     const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
1443     const LAllocation* rhs = lir->rhs();
1444     const Register output = ToRegister(lir->output());
1445 
1446     MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
1447 
1448     Label notBoolean, done;
1449     masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
1450     {
1451         if (rhs->isConstant())
1452             masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
1453         else
1454             masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
1455         masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
1456         masm.jump(&done);
1457     }
1458 
1459     masm.bind(&notBoolean);
1460     {
1461         masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
1462     }
1463 
1464     masm.bind(&done);
1465 }
1466 
1467 void
CodeGeneratorARM::visitCompareBAndBranch(LCompareBAndBranch* lir)
1469 {
1470     MCompare* mir = lir->cmpMir();
1471     const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
1472     const LAllocation* rhs = lir->rhs();
1473 
1474     MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
1475 
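    // If the lhs is not a boolean, the answer is already known: a strict
    // equality is false and a strict inequality is true, so branch directly.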
1476     Assembler::Condition cond = masm.testBoolean(Assembler::NotEqual, lhs);
1477     jumpToBlock((mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue(), cond);
1478 
1479     if (rhs->isConstant())
1480         masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
1481     else
1482         masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
1483     emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
1484 }
1485 
1486 void
CodeGeneratorARM::visitCompareBitwise(LCompareBitwise* lir)
1488 {
1489     MCompare* mir = lir->mir();
1490     Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
1491     const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
1492     const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
1493     const Register output = ToRegister(lir->output());
1494 
1495     MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
1496                mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
1497 
1498     Label notEqual, done;
1499     masm.cmp32(lhs.typeReg(), rhs.typeReg());
1500     masm.j(Assembler::NotEqual, &notEqual);
1501     {
1502         masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
1503         masm.emitSet(cond, output);
1504         masm.jump(&done);
1505     }
1506     masm.bind(&notEqual);
1507     {
1508         masm.move32(Imm32(cond == Assembler::NotEqual), output);
1509     }
1510 
1511     masm.bind(&done);
1512 }
1513 
1514 void
1515 CodeGeneratorARM::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
1516 {
1517     MCompare* mir = lir->cmpMir();
1518     Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
1519     const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
1520     const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);
1521 
1522     MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
1523                mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
1524 
1525     MBasicBlock* notEqual = (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();
1526 
1527     masm.cmp32(lhs.typeReg(), rhs.typeReg());
1528     jumpToBlock(notEqual, Assembler::NotEqual);
1529     masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
1530     emitBranch(cond, lir->ifTrue(), lir->ifFalse());
1531 }
1532 
1533 void
1534 CodeGeneratorARM::visitBitAndAndBranch(LBitAndAndBranch* baab)
1535 {
1536     if (baab->right()->isConstant())
1537         masm.ma_tst(ToRegister(baab->left()), Imm32(ToInt32(baab->right())));
1538     else
1539         masm.ma_tst(ToRegister(baab->left()), ToRegister(baab->right()));
1540     emitBranch(Assembler::NonZero, baab->ifTrue(), baab->ifFalse());
1541 }
1542 
1543 void
1544 CodeGeneratorARM::visitAsmJSUInt32ToDouble(LAsmJSUInt32ToDouble* lir)
1545 {
1546     masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
1547 }
1548 
1549 void
1550 CodeGeneratorARM::visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32* lir)
1551 {
1552     masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
1553 }
1554 
1555 void
1556 CodeGeneratorARM::visitNotI(LNotI* ins)
1557 {
1558     // It is hard to optimize !x, so just do it the basic way for now.
1559     masm.ma_cmp(ToRegister(ins->input()), Imm32(0));
1560     masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
1561 }
1562 
1563 void
1564 CodeGeneratorARM::visitNotD(LNotD* ins)
1565 {
1566     // Since this operation is a logical NOT, we want to set a bit if the double
1567     // is falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an input
1568     // of 0 will set the Z bit (30) and NaN will set the V bit (28) of the FPSCR.
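    // (VMRS can copy these flags either into a general-purpose register, as in
    // the first variant below, or, with PC as the destination register, directly
    // into the APSR condition flags, as in the second variant.)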
1569     FloatRegister opd = ToFloatRegister(ins->input());
1570     Register dest = ToRegister(ins->output());
1571 
1572     // Do the compare.
1573     masm.ma_vcmpz(opd);
1574     // TODO There are three variations here to compare performance-wise.
1575     bool nocond = true;
1576     if (nocond) {
1577         // Load the FPSCR flags into the dest register.
1578         masm.as_vmrs(dest);
1579         masm.ma_lsr(Imm32(28), dest, dest);
1580         // After the shift V sits in bit 0 and Z in bit 2 (28 + 2 = 30); OR Z down into bit 0.
1581         masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
1582         masm.ma_and(Imm32(1), dest);
1583     } else {
1584         masm.as_vmrs(pc);
1585         masm.ma_mov(Imm32(0), dest);
1586         masm.ma_mov(Imm32(1), dest, LeaveCC, Assembler::Equal);
1587         masm.ma_mov(Imm32(1), dest, LeaveCC, Assembler::Overflow);
1588     }
1589 }
1590 
1591 void
1592 CodeGeneratorARM::visitNotF(LNotF* ins)
1593 {
1594     // Since this operation is a logical NOT, we want to set a bit if the float
1595     // is falsey, which means 0.0f, -0.0f or NaN. When comparing with 0, an input
1596     // of 0 will set the Z bit (30) and NaN will set the V bit (28) of the FPSCR.
1597     FloatRegister opd = ToFloatRegister(ins->input());
1598     Register dest = ToRegister(ins->output());
1599 
1600     // Do the compare.
1601     masm.ma_vcmpz_f32(opd);
1602     // TODO There are three variations here to compare performance-wise.
1603     bool nocond = true;
1604     if (nocond) {
1605         // Load the FPSCR flags into the dest register (same trick as visitNotD).
1606         masm.as_vmrs(dest);
1607         masm.ma_lsr(Imm32(28), dest, dest);
1608         // After the shift V sits in bit 0 and Z in bit 2 (28 + 2 = 30); OR Z down into bit 0.
1609         masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
1610         masm.ma_and(Imm32(1), dest);
1611     } else {
1612         masm.as_vmrs(pc);
1613         masm.ma_mov(Imm32(0), dest);
1614         masm.ma_mov(Imm32(1), dest, LeaveCC, Assembler::Equal);
1615         masm.ma_mov(Imm32(1), dest, LeaveCC, Assembler::Overflow);
1616     }
1617 }
1618 
1619 void
1620 CodeGeneratorARM::visitGuardShape(LGuardShape* guard)
1621 {
1622     Register obj = ToRegister(guard->input());
1623     Register tmp = ToRegister(guard->tempInt());
1624 
1625     masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfShape())), tmp);
1626     masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->shape()));
1627 
1628     bailoutIf(Assembler::NotEqual, guard->snapshot());
1629 }
1630 
1631 void
1632 CodeGeneratorARM::visitGuardObjectGroup(LGuardObjectGroup* guard)
1633 {
1634     Register obj = ToRegister(guard->input());
1635     Register tmp = ToRegister(guard->tempInt());
1636     MOZ_ASSERT(obj != tmp);
1637 
1638     masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfGroup())), tmp);
1639     masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->group()));
1640 
1641     Assembler::Condition cond =
1642         guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
1643     bailoutIf(cond, guard->snapshot());
1644 }
1645 
1646 void
1647 CodeGeneratorARM::visitGuardClass(LGuardClass* guard)
1648 {
1649     Register obj = ToRegister(guard->input());
1650     Register tmp = ToRegister(guard->tempInt());
1651 
1652     masm.loadObjClass(obj, tmp);
1653     masm.ma_cmp(tmp, Imm32((uint32_t)guard->mir()->getClass()));
1654     bailoutIf(Assembler::NotEqual, guard->snapshot());
1655 }
1656 
1657 void
1658 CodeGeneratorARM::generateInvalidateEpilogue()
1659 {
1660     // Ensure that there is enough space in the buffer for the OsiPoint patching
1661     // to occur. Otherwise, we could overwrite the invalidation epilogue.
1662     for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize())
1663         masm.nop();
1664 
1665     masm.bind(&invalidate_);
1666 
1667     // Push the return address of the bailout point onto the stack.
1668     masm.Push(lr);
1669 
1670     // Push the Ion script onto the stack (when we determine what that pointer is).
1671     invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
1672     JitCode* thunk = gen->jitRuntime()->getInvalidationThunk();
1673 
1674     masm.branch(thunk);
1675 
1676     // We should never reach this point in JIT code -- the invalidation thunk
1677     // should pop the invalidated JS frame and return directly to its caller.
1678     masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
1679 }
1680 
1681 void
1682 CodeGeneratorARM::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
1683 {
1684     MOZ_CRASH("NYI");
1685 }
1686 
1687 void
1688 CodeGeneratorARM::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins)
1689 {
1690     MOZ_CRASH("NYI");
1691 }
1692 
1693 void
1694 CodeGeneratorARM::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir)
1695 {
1696     Register elements = ToRegister(lir->elements());
1697     AnyRegister output = ToAnyRegister(lir->output());
1698     Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
1699 
1700     Register oldval = ToRegister(lir->oldval());
1701     Register newval = ToRegister(lir->newval());
1702 
1703     Scalar::Type arrayType = lir->mir()->arrayType();
1704     int width = Scalar::byteSize(arrayType);
1705 
1706     if (lir->index()->isConstant()) {
1707         Address dest(elements, ToInt32(lir->index()) * width);
1708         masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
1709     } else {
1710         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
1711         masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
1712     }
1713 }
1714 
1715 void
1716 CodeGeneratorARM::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir)
1717 {
1718     Register elements = ToRegister(lir->elements());
1719     AnyRegister output = ToAnyRegister(lir->output());
1720     Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
1721 
1722     Register value = ToRegister(lir->value());
1723 
1724     Scalar::Type arrayType = lir->mir()->arrayType();
1725     int width = Scalar::byteSize(arrayType);
1726 
1727     if (lir->index()->isConstant()) {
1728         Address dest(elements, ToInt32(lir->index()) * width);
1729         masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
1730     } else {
1731         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
1732         masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
1733     }
1734 }
1735 
1736 template<typename S, typename T>
1737 void
1738 CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
1739                                              const S& value, const T& mem, Register flagTemp,
1740                                              Register outTemp, AnyRegister output)
1741 {
1742     MOZ_ASSERT(flagTemp != InvalidReg);
1743     MOZ_ASSERT_IF(arrayType == Scalar::Uint32, outTemp != InvalidReg);
1744 
1745     switch (arrayType) {
1746       case Scalar::Int8:
1747         switch (op) {
1748           case AtomicFetchAddOp:
1749             masm.atomicFetchAdd8SignExtend(value, mem, flagTemp, output.gpr());
1750             break;
1751           case AtomicFetchSubOp:
1752             masm.atomicFetchSub8SignExtend(value, mem, flagTemp, output.gpr());
1753             break;
1754           case AtomicFetchAndOp:
1755             masm.atomicFetchAnd8SignExtend(value, mem, flagTemp, output.gpr());
1756             break;
1757           case AtomicFetchOrOp:
1758             masm.atomicFetchOr8SignExtend(value, mem, flagTemp, output.gpr());
1759             break;
1760           case AtomicFetchXorOp:
1761             masm.atomicFetchXor8SignExtend(value, mem, flagTemp, output.gpr());
1762             break;
1763           default:
1764             MOZ_CRASH("Invalid typed array atomic operation");
1765         }
1766         break;
1767       case Scalar::Uint8:
1768         switch (op) {
1769           case AtomicFetchAddOp:
1770             masm.atomicFetchAdd8ZeroExtend(value, mem, flagTemp, output.gpr());
1771             break;
1772           case AtomicFetchSubOp:
1773             masm.atomicFetchSub8ZeroExtend(value, mem, flagTemp, output.gpr());
1774             break;
1775           case AtomicFetchAndOp:
1776             masm.atomicFetchAnd8ZeroExtend(value, mem, flagTemp, output.gpr());
1777             break;
1778           case AtomicFetchOrOp:
1779             masm.atomicFetchOr8ZeroExtend(value, mem, flagTemp, output.gpr());
1780             break;
1781           case AtomicFetchXorOp:
1782             masm.atomicFetchXor8ZeroExtend(value, mem, flagTemp, output.gpr());
1783             break;
1784           default:
1785             MOZ_CRASH("Invalid typed array atomic operation");
1786         }
1787         break;
1788       case Scalar::Int16:
1789         switch (op) {
1790           case AtomicFetchAddOp:
1791             masm.atomicFetchAdd16SignExtend(value, mem, flagTemp, output.gpr());
1792             break;
1793           case AtomicFetchSubOp:
1794             masm.atomicFetchSub16SignExtend(value, mem, flagTemp, output.gpr());
1795             break;
1796           case AtomicFetchAndOp:
1797             masm.atomicFetchAnd16SignExtend(value, mem, flagTemp, output.gpr());
1798             break;
1799           case AtomicFetchOrOp:
1800             masm.atomicFetchOr16SignExtend(value, mem, flagTemp, output.gpr());
1801             break;
1802           case AtomicFetchXorOp:
1803             masm.atomicFetchXor16SignExtend(value, mem, flagTemp, output.gpr());
1804             break;
1805           default:
1806             MOZ_CRASH("Invalid typed array atomic operation");
1807         }
1808         break;
1809       case Scalar::Uint16:
1810         switch (op) {
1811           case AtomicFetchAddOp:
1812             masm.atomicFetchAdd16ZeroExtend(value, mem, flagTemp, output.gpr());
1813             break;
1814           case AtomicFetchSubOp:
1815             masm.atomicFetchSub16ZeroExtend(value, mem, flagTemp, output.gpr());
1816             break;
1817           case AtomicFetchAndOp:
1818             masm.atomicFetchAnd16ZeroExtend(value, mem, flagTemp, output.gpr());
1819             break;
1820           case AtomicFetchOrOp:
1821             masm.atomicFetchOr16ZeroExtend(value, mem, flagTemp, output.gpr());
1822             break;
1823           case AtomicFetchXorOp:
1824             masm.atomicFetchXor16ZeroExtend(value, mem, flagTemp, output.gpr());
1825             break;
1826           default:
1827             MOZ_CRASH("Invalid typed array atomic operation");
1828         }
1829         break;
1830       case Scalar::Int32:
1831         switch (op) {
1832           case AtomicFetchAddOp:
1833             masm.atomicFetchAdd32(value, mem, flagTemp, output.gpr());
1834             break;
1835           case AtomicFetchSubOp:
1836             masm.atomicFetchSub32(value, mem, flagTemp, output.gpr());
1837             break;
1838           case AtomicFetchAndOp:
1839             masm.atomicFetchAnd32(value, mem, flagTemp, output.gpr());
1840             break;
1841           case AtomicFetchOrOp:
1842             masm.atomicFetchOr32(value, mem, flagTemp, output.gpr());
1843             break;
1844           case AtomicFetchXorOp:
1845             masm.atomicFetchXor32(value, mem, flagTemp, output.gpr());
1846             break;
1847           default:
1848             MOZ_CRASH("Invalid typed array atomic operation");
1849         }
1850         break;
1851       case Scalar::Uint32:
1852         // At the moment, the code in MCallOptimize.cpp requires the output
1853         // type to be double for uint32 arrays.  See bug 1077305.
1854         MOZ_ASSERT(output.isFloat());
1855         switch (op) {
1856           case AtomicFetchAddOp:
1857             masm.atomicFetchAdd32(value, mem, flagTemp, outTemp);
1858             break;
1859           case AtomicFetchSubOp:
1860             masm.atomicFetchSub32(value, mem, flagTemp, outTemp);
1861             break;
1862           case AtomicFetchAndOp:
1863             masm.atomicFetchAnd32(value, mem, flagTemp, outTemp);
1864             break;
1865           case AtomicFetchOrOp:
1866             masm.atomicFetchOr32(value, mem, flagTemp, outTemp);
1867             break;
1868           case AtomicFetchXorOp:
1869             masm.atomicFetchXor32(value, mem, flagTemp, outTemp);
1870             break;
1871           default:
1872             MOZ_CRASH("Invalid typed array atomic operation");
1873         }
1874         masm.convertUInt32ToDouble(outTemp, output.fpu());
1875         break;
1876       default:
1877         MOZ_CRASH("Invalid typed array type");
1878     }
1879 }
1880 
1881 template void
1882 CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
1883                                              const Imm32& value, const Address& mem,
1884                                              Register flagTemp, Register outTemp,
1885                                              AnyRegister output);
1886 template void
1887 CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
1888                                              const Imm32& value, const BaseIndex& mem,
1889                                              Register flagTemp, Register outTemp,
1890                                              AnyRegister output);
1891 template void
1892 CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
1893                                              const Register& value, const Address& mem,
1894                                              Register flagTemp, Register outTemp,
1895                                              AnyRegister output);
1896 template void
1897 CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
1898                                              const Register& value, const BaseIndex& mem,
1899                                              Register flagTemp, Register outTemp,
1900                                              AnyRegister output);
1901 
1902 // Binary operation for effect, result discarded.
1903 template<typename S, typename T>
1904 void
1905 CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
1906                                              const T& mem, Register flagTemp)
1907 {
1908     MOZ_ASSERT(flagTemp != InvalidReg);
1909 
1910     switch (arrayType) {
1911       case Scalar::Int8:
1912       case Scalar::Uint8:
1913         switch (op) {
1914           case AtomicFetchAddOp:
1915             masm.atomicAdd8(value, mem, flagTemp);
1916             break;
1917           case AtomicFetchSubOp:
1918             masm.atomicSub8(value, mem, flagTemp);
1919             break;
1920           case AtomicFetchAndOp:
1921             masm.atomicAnd8(value, mem, flagTemp);
1922             break;
1923           case AtomicFetchOrOp:
1924             masm.atomicOr8(value, mem, flagTemp);
1925             break;
1926           case AtomicFetchXorOp:
1927             masm.atomicXor8(value, mem, flagTemp);
1928             break;
1929           default:
1930             MOZ_CRASH("Invalid typed array atomic operation");
1931         }
1932         break;
1933       case Scalar::Int16:
1934       case Scalar::Uint16:
1935         switch (op) {
1936           case AtomicFetchAddOp:
1937             masm.atomicAdd16(value, mem, flagTemp);
1938             break;
1939           case AtomicFetchSubOp:
1940             masm.atomicSub16(value, mem, flagTemp);
1941             break;
1942           case AtomicFetchAndOp:
1943             masm.atomicAnd16(value, mem, flagTemp);
1944             break;
1945           case AtomicFetchOrOp:
1946             masm.atomicOr16(value, mem, flagTemp);
1947             break;
1948           case AtomicFetchXorOp:
1949             masm.atomicXor16(value, mem, flagTemp);
1950             break;
1951           default:
1952             MOZ_CRASH("Invalid typed array atomic operation");
1953         }
1954         break;
1955       case Scalar::Int32:
1956       case Scalar::Uint32:
1957         switch (op) {
1958           case AtomicFetchAddOp:
1959             masm.atomicAdd32(value, mem, flagTemp);
1960             break;
1961           case AtomicFetchSubOp:
1962             masm.atomicSub32(value, mem, flagTemp);
1963             break;
1964           case AtomicFetchAndOp:
1965             masm.atomicAnd32(value, mem, flagTemp);
1966             break;
1967           case AtomicFetchOrOp:
1968             masm.atomicOr32(value, mem, flagTemp);
1969             break;
1970           case AtomicFetchXorOp:
1971             masm.atomicXor32(value, mem, flagTemp);
1972             break;
1973           default:
1974             MOZ_CRASH("Invalid typed array atomic operation");
1975         }
1976         break;
1977       default:
1978         MOZ_CRASH("Invalid typed array type");
1979     }
1980 }
1981 
1982 template void
1983 CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
1984                                              const Imm32& value, const Address& mem,
1985                                              Register flagTemp);
1986 template void
1987 CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
1988                                              const Imm32& value, const BaseIndex& mem,
1989                                              Register flagTemp);
1990 template void
1991 CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
1992                                              const Register& value, const Address& mem,
1993                                              Register flagTemp);
1994 template void
1995 CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
1996                                              const Register& value, const BaseIndex& mem,
1997                                              Register flagTemp);
1998 
1999 
2000 template <typename T>
2001 static inline void
2002 AtomicBinopToTypedArray(CodeGeneratorARM* cg, AtomicOp op,
2003                         Scalar::Type arrayType, const LAllocation* value, const T& mem,
2004                         Register flagTemp, Register outTemp, AnyRegister output)
2005 {
2006     if (value->isConstant())
2007         cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, flagTemp, outTemp, output);
2008     else
2009         cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, flagTemp, outTemp, output);
2010 }
2011 
2012 void
2013 CodeGeneratorARM::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
2014 {
2015     MOZ_ASSERT(lir->mir()->hasUses());
2016 
2017     AnyRegister output = ToAnyRegister(lir->output());
2018     Register elements = ToRegister(lir->elements());
2019     Register flagTemp = ToRegister(lir->temp1());
2020     Register outTemp = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
2021     const LAllocation* value = lir->value();
2022 
2023     Scalar::Type arrayType = lir->mir()->arrayType();
2024     int width = Scalar::byteSize(arrayType);
2025 
2026     if (lir->index()->isConstant()) {
2027         Address mem(elements, ToInt32(lir->index()) * width);
2028         AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp, output);
2029     } else {
2030         BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
2031         AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp, output);
2032     }
2033 }
2034 
2035 template <typename T>
2036 static inline void
2037 AtomicBinopToTypedArray(CodeGeneratorARM* cg, AtomicOp op, Scalar::Type arrayType,
2038                         const LAllocation* value, const T& mem, Register flagTemp)
2039 {
2040     if (value->isConstant())
2041         cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, flagTemp);
2042     else
2043         cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, flagTemp);
2044 }
2045 
2046 void
2047 CodeGeneratorARM::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
2048 {
2049     MOZ_ASSERT(!lir->mir()->hasUses());
2050 
2051     Register elements = ToRegister(lir->elements());
2052     Register flagTemp = ToRegister(lir->flagTemp());
2053     const LAllocation* value = lir->value();
2054     Scalar::Type arrayType = lir->mir()->arrayType();
2055     int width = Scalar::byteSize(arrayType);
2056 
2057     if (lir->index()->isConstant()) {
2058         Address mem(elements, ToInt32(lir->index()) * width);
2059         AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp);
2060     } else {
2061         BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
2062         AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp);
2063     }
2064 }
2065 
2066 void
2067 CodeGeneratorARM::visitAsmJSCall(LAsmJSCall* ins)
2068 {
2069     MAsmJSCall* mir = ins->mir();
2070 
2071     if (UseHardFpABI() || mir->callee().which() != MAsmJSCall::Callee::Builtin) {
2072         emitAsmJSCall(ins);
2073         return;
2074     }
2075 
2076     // The soft ABI passes floating point arguments in GPRs. Since basically
2077     // nothing is set up to handle this, the values are placed in the
2078     // corresponding VFP registers, then transferred to GPRs immediately
2079     // before the call. The mapping is sN <-> rN, where double registers
2080     // can be treated as their two component single registers.
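    //
    // For example, roughly the sequence produced by the loop and epilogue below
    // for a builtin taking a double in d0 and a single in s2 and returning a
    // double (illustrative only):
    //
    //   vmov r0, r1, d0     ; double argument d0 -> r0/r1
    //   vmov r2, s2         ; single argument s2 -> r2
    //   bl   <builtin>
    //   vmov d0, r0, r1     ; double return value -> d0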
2081 
2082     for (unsigned i = 0, e = ins->numOperands(); i < e; i++) {
2083         LAllocation* a = ins->getOperand(i);
2084         if (a->isFloatReg()) {
2085             FloatRegister fr = ToFloatRegister(a);
2086             if (fr.isDouble()) {
2087                 uint32_t srcId = fr.singleOverlay().id();
2088                 masm.ma_vxfer(fr, Register::FromCode(srcId), Register::FromCode(srcId + 1));
2089             } else {
2090                 uint32_t srcId = fr.id();
2091                 masm.ma_vxfer(fr, Register::FromCode(srcId));
2092             }
2093         }
2094     }
2095 
2096     emitAsmJSCall(ins);
2097 
2098     switch (mir->type()) {
2099       case MIRType_Double:
2100         masm.ma_vxfer(r0, r1, d0);
2101         break;
2102       case MIRType_Float32:
2103         masm.as_vxfer(r0, InvalidReg, VFPRegister(d0).singleOverlay(), Assembler::CoreToFloat);
2104         break;
2105       default:
2106         break;
2107     }
2108 }
2109 
2110 void
2111 CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
2112 {
2113     const MAsmJSLoadHeap* mir = ins->mir();
2114     bool isSigned;
2115     int size;
2116     bool isFloat = false;
2117     switch (mir->accessType()) {
2118       case Scalar::Int8:    isSigned = true;  size =  8; break;
2119       case Scalar::Uint8:   isSigned = false; size =  8; break;
2120       case Scalar::Int16:   isSigned = true;  size = 16; break;
2121       case Scalar::Uint16:  isSigned = false; size = 16; break;
2122       case Scalar::Int32:
2123       case Scalar::Uint32:  isSigned = true;  size = 32; break;
2124       case Scalar::Float64: isFloat = true;   size = 64; break;
2125       case Scalar::Float32: isFloat = true;   size = 32; break;
2126       default: MOZ_CRASH("unexpected array type");
2127     }
2128 
2129     memoryBarrier(mir->barrierBefore());
2130 
2131     const LAllocation* ptr = ins->ptr();
2132 
2133     if (ptr->isConstant()) {
2134         MOZ_ASSERT(!mir->needsBoundsCheck());
2135         int32_t ptrImm = ptr->toConstant()->toInt32();
2136         MOZ_ASSERT(ptrImm >= 0);
2137         if (isFloat) {
2138             VFPRegister vd(ToFloatRegister(ins->output()));
2139             if (size == 32)
2140                 masm.ma_vldr(Address(HeapReg, ptrImm), vd.singleOverlay(), Assembler::Always);
2141             else
2142                 masm.ma_vldr(Address(HeapReg, ptrImm), vd, Assembler::Always);
2143         }  else {
2144             masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm),
2145                                   ToRegister(ins->output()), Offset, Assembler::Always);
2146         }
2147         memoryBarrier(mir->barrierAfter());
2148         return;
2149     }
2150 
2151     Register ptrReg = ToRegister(ptr);
2152 
2153     if (!mir->needsBoundsCheck()) {
2154         if (isFloat) {
2155             VFPRegister vd(ToFloatRegister(ins->output()));
2156             if (size == 32)
2157                 masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Always);
2158             else
2159                 masm.ma_vldr(vd, HeapReg, ptrReg, 0, Assembler::Always);
2160         } else {
2161             masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg,
2162                                   ToRegister(ins->output()), Offset, Assembler::Always);
2163         }
2164         memoryBarrier(mir->barrierAfter());
2165         return;
2166     }
2167 
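    // Bounds-checked path: out-of-bounds float loads yield NaN (loaded from the
    // global data area), out-of-bounds integer loads yield 0, and atomic integer
    // accesses branch to the out-of-bounds trap label instead.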
2168     BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
2169     if (isFloat) {
2170         FloatRegister dst = ToFloatRegister(ins->output());
2171         VFPRegister vd(dst);
2172         if (size == 32) {
2173             masm.ma_vldr(Address(GlobalReg, wasm::NaN32GlobalDataOffset - AsmJSGlobalRegBias),
2174                          vd.singleOverlay(), Assembler::AboveOrEqual);
2175             masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Below);
2176         } else {
2177             masm.ma_vldr(Address(GlobalReg, wasm::NaN64GlobalDataOffset - AsmJSGlobalRegBias),
2178                          vd, Assembler::AboveOrEqual);
2179             masm.ma_vldr(vd, HeapReg, ptrReg, 0, Assembler::Below);
2180         }
2181     } else {
2182         Register d = ToRegister(ins->output());
2183         if (mir->isAtomicAccess())
2184             masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
2185         else
2186             masm.ma_mov(Imm32(0), d, LeaveCC, Assembler::AboveOrEqual);
2187         masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, d, Offset, Assembler::Below);
2188     }
2189     memoryBarrier(mir->barrierAfter());
2190     masm.append(wasm::HeapAccess(bo.getOffset()));
2191 }
2192 
2193 void
2194 CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
2195 {
2196     const MAsmJSStoreHeap* mir = ins->mir();
2197     bool isSigned;
2198     int size;
2199     bool isFloat = false;
2200     switch (mir->accessType()) {
2201       case Scalar::Int8:
2202       case Scalar::Uint8:   isSigned = false; size = 8; break;
2203       case Scalar::Int16:
2204       case Scalar::Uint16:  isSigned = false; size = 16; break;
2205       case Scalar::Int32:
2206       case Scalar::Uint32:  isSigned = true;  size = 32; break;
2207       case Scalar::Float64: isFloat  = true;  size = 64; break;
2208       case Scalar::Float32: isFloat = true;   size = 32; break;
2209       default: MOZ_CRASH("unexpected array type");
2210     }
2211     const LAllocation* ptr = ins->ptr();
2212     memoryBarrier(mir->barrierBefore());
2213     if (ptr->isConstant()) {
2214         MOZ_ASSERT(!mir->needsBoundsCheck());
2215         int32_t ptrImm = ptr->toConstant()->toInt32();
2216         MOZ_ASSERT(ptrImm >= 0);
2217         if (isFloat) {
2218             VFPRegister vd(ToFloatRegister(ins->value()));
2219             if (size == 32)
2220                 masm.ma_vstr(vd.singleOverlay(), Address(HeapReg, ptrImm), Assembler::Always);
2221             else
2222                 masm.ma_vstr(vd, Address(HeapReg, ptrImm), Assembler::Always);
2223         } else {
2224             masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm),
2225                                   ToRegister(ins->value()), Offset, Assembler::Always);
2226         }
2227         memoryBarrier(mir->barrierAfter());
2228         return;
2229     }
2230 
2231     Register ptrReg = ToRegister(ptr);
2232 
2233     if (!mir->needsBoundsCheck()) {
2235         if (isFloat) {
2236             VFPRegister vd(ToFloatRegister(ins->value()));
2237             if (size == 32)
2238                 masm.ma_vstr(vd.singleOverlay(), HeapReg, ptrReg, 0, 0, Assembler::Always);
2239             else
2240                 masm.ma_vstr(vd, HeapReg, ptrReg, 0, 0, Assembler::Always);
2241         } else {
2242             masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg,
2243                                   ToRegister(ins->value()), Offset, Assembler::Always);
2244         }
2245         memoryBarrier(mir->barrierAfter());
2246         return;
2247     }
2248 
2249     BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
2250     if (isFloat) {
2251         VFPRegister vd(ToFloatRegister(ins->value()));
2252         if (size == 32)
2253             masm.ma_vstr(vd.singleOverlay(), HeapReg, ptrReg, 0, 0, Assembler::Below);
2254         else
2255             masm.ma_vstr(vd, HeapReg, ptrReg, 0, 0, Assembler::Below);
2256     } else {
2257         if (mir->isAtomicAccess())
2258             masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
2259         masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg,
2260                               ToRegister(ins->value()), Offset, Assembler::Below);
2261     }
2262     memoryBarrier(mir->barrierAfter());
2263     masm.append(wasm::HeapAccess(bo.getOffset()));
2264 }
2265 
2266 void
2267 CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
2268 {
2269     MAsmJSCompareExchangeHeap* mir = ins->mir();
2270     Scalar::Type vt = mir->accessType();
2271     const LAllocation* ptr = ins->ptr();
2272     Register ptrReg = ToRegister(ptr);
2273     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
2274     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
2275 
2276     Register oldval = ToRegister(ins->oldValue());
2277     Register newval = ToRegister(ins->newValue());
2278 
2279     uint32_t maybeCmpOffset = 0;
2280     if (mir->needsBoundsCheck()) {
2281         BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
2282         maybeCmpOffset = bo.getOffset();
2283         masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
2284     }
2285     masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
2286                                         srcAddr, oldval, newval, InvalidReg,
2287                                         ToAnyRegister(ins->output()));
2288     if (mir->needsBoundsCheck())
2289         masm.append(wasm::HeapAccess(maybeCmpOffset));
2290 }
2291 
2292 void
2293 CodeGeneratorARM::visitAsmJSCompareExchangeCallout(LAsmJSCompareExchangeCallout* ins)
2294 {
2295     const MAsmJSCompareExchangeHeap* mir = ins->mir();
2296     Scalar::Type viewType = mir->accessType();
2297     Register ptr = ToRegister(ins->ptr());
2298     Register oldval = ToRegister(ins->oldval());
2299     Register newval = ToRegister(ins->newval());
2300 
2301     MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
2302 
2303     masm.setupAlignedABICall();
2304     {
2305         ScratchRegisterScope scratch(masm);
2306         masm.ma_mov(Imm32(viewType), scratch);
2307         masm.passABIArg(scratch);
2308         masm.passABIArg(ptr);
2309         masm.passABIArg(oldval);
2310         masm.passABIArg(newval);
2311     }
2312     masm.callWithABI(wasm::SymbolicAddress::AtomicCmpXchg);
2313 }
2314 
2315 void
2316 CodeGeneratorARM::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
2317 {
2318     MAsmJSAtomicExchangeHeap* mir = ins->mir();
2319     Scalar::Type vt = mir->accessType();
2320     Register ptrReg = ToRegister(ins->ptr());
2321     Register value = ToRegister(ins->value());
2322     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
2323     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
2324 
2325     uint32_t maybeCmpOffset = 0;
2326     if (mir->needsBoundsCheck()) {
2327         BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
2328         maybeCmpOffset = bo.getOffset();
2329         masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
2330     }
2331 
2332     masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
2333                                        srcAddr, value, InvalidReg, ToAnyRegister(ins->output()));
2334 
2335     if (mir->needsBoundsCheck())
2336         masm.append(wasm::HeapAccess(maybeCmpOffset));
2337 }
2338 
2339 void
2340 CodeGeneratorARM::visitAsmJSAtomicExchangeCallout(LAsmJSAtomicExchangeCallout* ins)
2341 {
2342     const MAsmJSAtomicExchangeHeap* mir = ins->mir();
2343     Scalar::Type viewType = mir->accessType();
2344     Register ptr = ToRegister(ins->ptr());
2345     Register value = ToRegister(ins->value());
2346 
2347     MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
2348 
2349     masm.setupAlignedABICall();
2350     {
2351         ScratchRegisterScope scratch(masm);
2352         masm.ma_mov(Imm32(viewType), scratch);
2353         masm.passABIArg(scratch);
2354     }
2355     masm.passABIArg(ptr);
2356     masm.passABIArg(value);
2357 
2358     masm.callWithABI(wasm::SymbolicAddress::AtomicXchg);
2359 }
2360 
2361 void
2362 CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
2363 {
2364     MOZ_ASSERT(ins->mir()->hasUses());
2365     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
2366 
2367     MAsmJSAtomicBinopHeap* mir = ins->mir();
2368     Scalar::Type vt = mir->accessType();
2369     Register ptrReg = ToRegister(ins->ptr());
2370     Register flagTemp = ToRegister(ins->flagTemp());
2371     const LAllocation* value = ins->value();
2372     AtomicOp op = mir->operation();
2373 
2374     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
2375 
2376     uint32_t maybeCmpOffset = 0;
2377     if (mir->needsBoundsCheck()) {
2378         BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
2379         maybeCmpOffset = bo.getOffset();
2380         masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
2381     }
2382 
2383     if (value->isConstant())
2384         atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
2385                                    Imm32(ToInt32(value)), srcAddr, flagTemp, InvalidReg,
2386                                    ToAnyRegister(ins->output()));
2387     else
2388         atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
2389                                    ToRegister(value), srcAddr, flagTemp, InvalidReg,
2390                                    ToAnyRegister(ins->output()));
2391 
2392     if (mir->needsBoundsCheck())
2393         masm.append(wasm::HeapAccess(maybeCmpOffset));
2394 }
2395 
2396 void
2397 CodeGeneratorARM::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
2398 {
2399     MOZ_ASSERT(!ins->mir()->hasUses());
2400     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
2401 
2402     MAsmJSAtomicBinopHeap* mir = ins->mir();
2403     Scalar::Type vt = mir->accessType();
2404     Register ptrReg = ToRegister(ins->ptr());
2405     Register flagTemp = ToRegister(ins->flagTemp());
2406     const LAllocation* value = ins->value();
2407     AtomicOp op = mir->operation();
2408 
2409     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
2410 
2411     uint32_t maybeCmpOffset = 0;
2412     if (mir->needsBoundsCheck()) {
2413         BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
2414         maybeCmpOffset = bo.getOffset();
2415         masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
2416     }
2417 
2418     if (value->isConstant())
2419         atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp);
2420     else
2421         atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp);
2422 
2423     if (mir->needsBoundsCheck())
2424         masm.append(wasm::HeapAccess(maybeCmpOffset));
2425 }
2426 
2427 void
2428 CodeGeneratorARM::visitAsmJSAtomicBinopCallout(LAsmJSAtomicBinopCallout* ins)
2429 {
2430     const MAsmJSAtomicBinopHeap* mir = ins->mir();
2431     Scalar::Type viewType = mir->accessType();
2432     Register ptr = ToRegister(ins->ptr());
2433     Register value = ToRegister(ins->value());
2434 
2435     masm.setupAlignedABICall();
2436     {
2437         ScratchRegisterScope scratch(masm);
2438         masm.move32(Imm32(viewType), scratch);
2439         masm.passABIArg(scratch);
2440     }
2441     masm.passABIArg(ptr);
2442     masm.passABIArg(value);
2443 
2444     switch (mir->operation()) {
2445       case AtomicFetchAddOp:
2446         masm.callWithABI(wasm::SymbolicAddress::AtomicFetchAdd);
2447         break;
2448       case AtomicFetchSubOp:
2449         masm.callWithABI(wasm::SymbolicAddress::AtomicFetchSub);
2450         break;
2451       case AtomicFetchAndOp:
2452         masm.callWithABI(wasm::SymbolicAddress::AtomicFetchAnd);
2453         break;
2454       case AtomicFetchOrOp:
2455         masm.callWithABI(wasm::SymbolicAddress::AtomicFetchOr);
2456         break;
2457       case AtomicFetchXorOp:
2458         masm.callWithABI(wasm::SymbolicAddress::AtomicFetchXor);
2459         break;
2460       default:
2461         MOZ_CRASH("Unknown op");
2462     }
2463 }
2464 
2465 void
2466 CodeGeneratorARM::visitAsmJSPassStackArg(LAsmJSPassStackArg* ins)
2467 {
2468     const MAsmJSPassStackArg* mir = ins->mir();
2469     Address dst(StackPointer, mir->spOffset());
2470     if (ins->arg()->isConstant()) {
2471         //masm.as_bkpt();
2472         masm.ma_storeImm(Imm32(ToInt32(ins->arg())), dst);
2473     } else {
2474         if (ins->arg()->isGeneralReg())
2475             masm.ma_str(ToRegister(ins->arg()), dst);
2476         else
2477             masm.ma_vstr(ToFloatRegister(ins->arg()), dst);
2478     }
2479 }
2480 
2481 void
2482 CodeGeneratorARM::visitUDiv(LUDiv* ins)
2483 {
2484     Register lhs = ToRegister(ins->lhs());
2485     Register rhs = ToRegister(ins->rhs());
2486     Register output = ToRegister(ins->output());
2487 
2488     Label done;
2489     generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), ins->mir());
2490 
2491     masm.ma_udiv(lhs, rhs, output);
2492 
2493     // Check for a large unsigned result: bail out if it does not fit in an int32 (sign bit set), so it can be represented as a double.
2494     if (!ins->mir()->isTruncated()) {
2495         MOZ_ASSERT(ins->mir()->fallible());
2496         masm.ma_cmp(output, Imm32(0));
2497         bailoutIf(Assembler::LessThan, ins->snapshot());
2498     }
2499 
2500     // Check for non-zero remainder if not truncating to int.
2501     if (!ins->mir()->canTruncateRemainder()) {
2502         MOZ_ASSERT(ins->mir()->fallible());
2503         {
2504             ScratchRegisterScope scratch(masm);
2505             masm.ma_mul(rhs, output, scratch);
2506             masm.ma_cmp(scratch, lhs);
2507         }
2508         bailoutIf(Assembler::NotEqual, ins->snapshot());
2509     }
2510 
2511     if (done.used())
2512         masm.bind(&done);
2513 }
2514 
2515 void
2516 CodeGeneratorARM::visitUMod(LUMod* ins)
2517 {
2518     Register lhs = ToRegister(ins->lhs());
2519     Register rhs = ToRegister(ins->rhs());
2520     Register output = ToRegister(ins->output());
2521 
2522     Label done;
2523     generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), ins->mir());
2524 
2525     masm.ma_umod(lhs, rhs, output);
2526 
2527     // Check for a large unsigned result: bail out if it does not fit in an int32 (sign bit set), so it can be represented as a double.
2528     if (!ins->mir()->isTruncated()) {
2529         MOZ_ASSERT(ins->mir()->fallible());
2530         masm.ma_cmp(output, Imm32(0));
2531         bailoutIf(Assembler::LessThan, ins->snapshot());
2532     }
2533 
2534     if (done.used())
2535         masm.bind(&done);
2536 }
2537 
2538 template<class T>
2539 void
2540 CodeGeneratorARM::generateUDivModZeroCheck(Register rhs, Register output, Label* done,
2541                                            LSnapshot* snapshot, T* mir)
2542 {
2543     if (!mir)
2544         return;
2545     if (mir->canBeDivideByZero()) {
2546         masm.ma_cmp(rhs, Imm32(0));
2547         if (mir->isTruncated()) {
2548             Label skip;
2549             masm.ma_b(&skip, Assembler::NotEqual);
2550             // Infinity|0 == 0
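            // In JS both (x / 0) | 0 and (x % 0) | 0 evaluate to 0 (Infinity and
            // NaN truncate to 0), so the truncated result can simply be 0 rather
            // than bailing out.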
2551             masm.ma_mov(Imm32(0), output);
2552             masm.ma_b(done);
2553             masm.bind(&skip);
2554         } else {
2555             // Bailout for divide by zero
2556             MOZ_ASSERT(mir->fallible());
2557             bailoutIf(Assembler::Equal, snapshot);
2558         }
2559     }
2560 }
2561 
2562 void
2563 CodeGeneratorARM::visitSoftUDivOrMod(LSoftUDivOrMod* ins)
2564 {
2565     Register lhs = ToRegister(ins->lhs());
2566     Register rhs = ToRegister(ins->rhs());
2567     Register output = ToRegister(ins->output());
2568 
2569     MOZ_ASSERT(lhs == r0);
2570     MOZ_ASSERT(rhs == r1);
2571     MOZ_ASSERT(ins->mirRaw()->isDiv() || ins->mirRaw()->isMod());
2572     MOZ_ASSERT_IF(ins->mirRaw()->isDiv(), output == r0);
2573     MOZ_ASSERT_IF(ins->mirRaw()->isMod(), output == r1);
2574 
2575     Label done;
2576     MDiv* div = ins->mir()->isDiv() ? ins->mir()->toDiv() : nullptr;
2577     MMod* mod = !div ? ins->mir()->toMod() : nullptr;
2578 
2579     generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), div);
2580     generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), mod);
2581 
2582     masm.setupAlignedABICall();
2583     masm.passABIArg(lhs);
2584     masm.passABIArg(rhs);
2585     if (gen->compilingAsmJS())
2586         masm.callWithABI(wasm::SymbolicAddress::aeabi_uidivmod);
2587     else
2588         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, __aeabi_uidivmod));
2589 
2590     // __aeabi_uidivmod returns the quotient in r0 and the remainder in r1.
2591     if (div && !div->canTruncateRemainder()) {
2592         MOZ_ASSERT(div->fallible());
2593         masm.ma_cmp(r1, Imm32(0));
2594         bailoutIf(Assembler::NonZero, ins->snapshot());
2595     }
2596 
2597     // Bailout for big unsigned results
2598     if ((div && !div->isTruncated()) || (mod && !mod->isTruncated())) {
2599         DebugOnly<bool> isFallible = (div && div->fallible()) || (mod && mod->fallible());
2600         MOZ_ASSERT(isFallible);
2601         masm.ma_cmp(output, Imm32(0));
2602         bailoutIf(Assembler::LessThan, ins->snapshot());
2603     }
2604 
2605     masm.bind(&done);
2606 }
2607 
2608 void
2609 CodeGeneratorARM::visitEffectiveAddress(LEffectiveAddress* ins)
2610 {
2611     const MEffectiveAddress* mir = ins->mir();
2612     Register base = ToRegister(ins->base());
2613     Register index = ToRegister(ins->index());
2614     Register output = ToRegister(ins->output());
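    // Compute output = base + (index << scale) + displacement.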
2615     masm.as_add(output, base, lsl(index, mir->scale()));
2616     masm.ma_add(Imm32(mir->displacement()), output);
2617 }
2618 
2619 void
2620 CodeGeneratorARM::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins)
2621 {
2622     const MAsmJSLoadGlobalVar* mir = ins->mir();
2623     unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
2624     if (mir->type() == MIRType_Int32) {
2625         masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr), ToRegister(ins->output()));
2626     } else if (mir->type() == MIRType_Float32) {
2627         VFPRegister vd(ToFloatRegister(ins->output()));
2628         masm.ma_vldr(Address(GlobalReg, addr), vd.singleOverlay());
2629     } else {
2630         masm.ma_vldr(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
2631     }
2632 }
2633 
2634 void
2635 CodeGeneratorARM::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar* ins)
2636 {
2637     const MAsmJSStoreGlobalVar* mir = ins->mir();
2638 
2639     MIRType type = mir->value()->type();
2640     MOZ_ASSERT(IsNumberType(type));
2641 
2642     unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
2643     if (type == MIRType_Int32) {
2644         masm.ma_dtr(IsStore, GlobalReg, Imm32(addr), ToRegister(ins->value()));
2645     } else if (type == MIRType_Float32) {
2646         VFPRegister vd(ToFloatRegister(ins->value()));
2647         masm.ma_vstr(vd.singleOverlay(), Address(GlobalReg, addr));
2648     } else {
2649         masm.ma_vstr(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
2650     }
2651 }
2652 
2653 void
2654 CodeGeneratorARM::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr* ins)
2655 {
2656     const MAsmJSLoadFuncPtr* mir = ins->mir();
2657 
2658     Register index = ToRegister(ins->index());
2659     Register tmp = ToRegister(ins->temp());
2660     Register out = ToRegister(ins->output());
2661     unsigned addr = mir->globalDataOffset();
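    // The entry lives at GlobalReg + (globalDataOffset - AsmJSGlobalRegBias) +
    // index * 4, i.e. one word per table entry.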
2662     masm.ma_mov(Imm32(addr - AsmJSGlobalRegBias), tmp);
2663     masm.as_add(tmp, tmp, lsl(index, 2));
2664     masm.ma_ldr(DTRAddr(GlobalReg, DtrRegImmShift(tmp, LSL, 0)), out);
2665 }
2666 
2667 void
2668 CodeGeneratorARM::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc* ins)
2669 {
2670     const MAsmJSLoadFFIFunc* mir = ins->mir();
2671 
2672     masm.ma_ldr(Address(GlobalReg, mir->globalDataOffset() - AsmJSGlobalRegBias),
2673                 ToRegister(ins->output()));
2674 }
2675 
2676 void
2677 CodeGeneratorARM::visitNegI(LNegI* ins)
2678 {
2679     Register input = ToRegister(ins->input());
2680     masm.ma_neg(input, ToRegister(ins->output()));
2681 }
2682 
2683 void
2684 CodeGeneratorARM::visitNegD(LNegD* ins)
2685 {
2686     FloatRegister input = ToFloatRegister(ins->input());
2687     masm.ma_vneg(input, ToFloatRegister(ins->output()));
2688 }
2689 
2690 void
2691 CodeGeneratorARM::visitNegF(LNegF* ins)
2692 {
2693     FloatRegister input = ToFloatRegister(ins->input());
2694     masm.ma_vneg_f32(input, ToFloatRegister(ins->output()));
2695 }
2696 
2697 void
2698 CodeGeneratorARM::memoryBarrier(MemoryBarrierBits barrier)
2699 {
2700     // On ARMv6 the optional argument (BarrierST, etc) is ignored.
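    // Summary of the dispatch below: a synchronizing barrier uses DSB (which also
    // waits for outstanding accesses to complete), other barriers use DMB
    // (ordering only), and a store-store-only request uses the ST variant.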
2701     if (barrier == (MembarStoreStore|MembarSynchronizing))
2702         masm.ma_dsb(masm.BarrierST);
2703     else if (barrier & MembarSynchronizing)
2704         masm.ma_dsb();
2705     else if (barrier == MembarStoreStore)
2706         masm.ma_dmb(masm.BarrierST);
2707     else if (barrier)
2708         masm.ma_dmb();
2709 }
2710 
2711 void
2712 CodeGeneratorARM::visitMemoryBarrier(LMemoryBarrier* ins)
2713 {
2714     memoryBarrier(ins->type());
2715 }
2716 
2717 void
2718 CodeGeneratorARM::setReturnDoubleRegs(LiveRegisterSet* regs)
2719 {
2720     MOZ_ASSERT(ReturnFloat32Reg.code_ == FloatRegisters::s0);
2721     MOZ_ASSERT(ReturnDoubleReg.code_ == FloatRegisters::s0);
2722     FloatRegister s1 = {FloatRegisters::s1, VFPRegister::Single};
2723     regs->add(ReturnFloat32Reg);
2724     regs->add(s1);
2725     regs->add(ReturnDoubleReg);
2726 }
2727