/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips-shared/Lowering-mips-shared.h"

#include "mozilla/MathAlgorithms.h"

#include "jit/MIR.h"

#include "jit/shared/Lowering-shared-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::FloorLog2;

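// MIPS has no x86-style restriction that byte operations must use particular
// byte-addressable registers, so the byte-op helpers below simply defer to
// the generic register policies.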
LAllocation
LIRGeneratorMIPSShared::useByteOpRegister(MDefinition* mir)
{
    return useRegister(mir);
}

LAllocation
LIRGeneratorMIPSShared::useByteOpRegisterOrNonDoubleConstant(MDefinition* mir)
{
    return useRegisterOrNonDoubleConstant(mir);
}

LDefinition
LIRGeneratorMIPSShared::tempByteOpRegister()
{
    return temp();
}

// x = !y
void
LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
                                    MDefinition* mir, MDefinition* input)
{
    ins->setOperand(0, useRegister(input));
    define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

// z = x+y
void
LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
                                    MDefinition* lhs, MDefinition* rhs)
{
    ins->setOperand(0, useRegister(lhs));
    ins->setOperand(1, useRegisterOrConstant(rhs));
    define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

void
LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
                                    MDefinition* input)
{
    ins->setOperand(0, useRegister(input));
    define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

template<size_t Temps>
void
LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
                                    MDefinition* lhs, MDefinition* rhs)
{
    ins->setOperand(0, useRegister(lhs));
    ins->setOperand(1, useRegister(rhs));
    define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
}

template void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
                                                  MDefinition* lhs, MDefinition* rhs);
template void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 2, 1>* ins, MDefinition* mir,
                                                  MDefinition* lhs, MDefinition* rhs);

void
LIRGeneratorMIPSShared::lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
                                                MDefinition* lhs, MDefinition* rhs)
{
    baab->setOperand(0, useRegisterAtStart(lhs));
    baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
    add(baab, mir);
}

void
LIRGeneratorMIPSShared::lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
                                      MDefinition* lhs, MDefinition* rhs)
{
    ins->setOperand(0, useRegister(lhs));
    ins->setOperand(1, useRegisterOrConstant(rhs));
    define(ins, mir);
}

void
LIRGeneratorMIPSShared::lowerDivI(MDiv* div)
{
    if (div->isUnsigned()) {
        lowerUDiv(div);
        return;
    }

    // Division instructions are slow. Division by constant denominators can be
    // rewritten to use other instructions.
    if (div->rhs()->isConstant()) {
        int32_t rhs = div->rhs()->toConstant()->value().toInt32();
        // Check for division by a positive power of two, which is an easy and
        // important case to optimize. Note that other optimizations are also
        // possible; division by negative powers of two can be optimized in a
        // similar manner as positive powers of two, and division by other
        // constants can be optimized by a reciprocal multiplication technique.
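        // For example, x / 8 (shift == 3) lowers to LDivPowTwoI, which the
        // code generator can emit as an arithmetic right shift plus a fix-up
        // for negative dividends so the result still rounds toward zero.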
        int32_t shift = FloorLog2(rhs);
        if (rhs > 0 && 1 << shift == rhs) {
            LDivPowTwoI* lir = new(alloc()) LDivPowTwoI(useRegister(div->lhs()), shift, temp());
            if (div->fallible())
                assignSnapshot(lir, Bailout_DoubleOutput);
            define(lir, div);
            return;
        }
    }

    LDivI* lir = new(alloc()) LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
    if (div->fallible())
        assignSnapshot(lir, Bailout_DoubleOutput);
    define(lir, div);
}

void
LIRGeneratorMIPSShared::lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs)
{
    LMulI* lir = new(alloc()) LMulI;
    if (mul->fallible())
        assignSnapshot(lir, Bailout_DoubleOutput);

    lowerForALU(lir, mul, lhs, rhs);
}

void
LIRGeneratorMIPSShared::lowerModI(MMod* mod)
{
    if (mod->isUnsigned()) {
        lowerUMod(mod);
        return;
    }

    if (mod->rhs()->isConstant()) {
        int32_t rhs = mod->rhs()->toConstant()->value().toInt32();
        int32_t shift = FloorLog2(rhs);
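        // As with division, modulo by suitable constants avoids the slow div
        // instruction: a positive power of two is handled by masking the low
        // bits (LModPowTwoI), and a divisor of the form 2^k - 1 by the
        // mask-based LModMaskI sequence.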
        if (rhs > 0 && 1 << shift == rhs) {
            LModPowTwoI* lir = new(alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
            if (mod->fallible())
                assignSnapshot(lir, Bailout_DoubleOutput);
            define(lir, mod);
            return;
        } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
            LModMaskI* lir = new(alloc()) LModMaskI(useRegister(mod->lhs()),
                                                    temp(LDefinition::GENERAL),
                                                    temp(LDefinition::GENERAL),
                                                    shift + 1);
            if (mod->fallible())
                assignSnapshot(lir, Bailout_DoubleOutput);
            define(lir, mod);
            return;
        }
    }
    LModI* lir = new(alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()),
                                    temp(LDefinition::GENERAL));

    if (mod->fallible())
        assignSnapshot(lir, Bailout_DoubleOutput);
    define(lir, mod);
}

void
LIRGeneratorMIPSShared::visitPowHalf(MPowHalf* ins)
{
    MDefinition* input = ins->input();
    MOZ_ASSERT(input->type() == MIRType_Double);
    LPowHalfD* lir = new(alloc()) LPowHalfD(useRegisterAtStart(input));
    defineReuseInput(lir, ins, 0);
}

LTableSwitch*
LIRGeneratorMIPSShared::newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
                                        MTableSwitch* tableswitch)
{
    return new(alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
}

LTableSwitchV*
LIRGeneratorMIPSShared::newLTableSwitchV(MTableSwitch* tableswitch)
{
    return new(alloc()) LTableSwitchV(temp(), tempDouble(), temp(), tableswitch);
}

void
LIRGeneratorMIPSShared::visitGuardShape(MGuardShape* ins)
{
    MOZ_ASSERT(ins->obj()->type() == MIRType_Object);

    LDefinition tempObj = temp(LDefinition::OBJECT);
    LGuardShape* guard = new(alloc()) LGuardShape(useRegister(ins->obj()), tempObj);
    assignSnapshot(guard, ins->bailoutKind());
    add(guard, ins);
    redefine(ins, ins->obj());
}

void
LIRGeneratorMIPSShared::visitGuardObjectGroup(MGuardObjectGroup* ins)
{
    MOZ_ASSERT(ins->obj()->type() == MIRType_Object);

    LDefinition tempObj = temp(LDefinition::OBJECT);
    LGuardObjectGroup* guard = new(alloc()) LGuardObjectGroup(useRegister(ins->obj()), tempObj);
    assignSnapshot(guard, ins->bailoutKind());
    add(guard, ins);
    redefine(ins, ins->obj());
}

void
LIRGeneratorMIPSShared::lowerUrshD(MUrsh* mir)
{
    MDefinition* lhs = mir->lhs();
    MDefinition* rhs = mir->rhs();

    MOZ_ASSERT(lhs->type() == MIRType_Int32);
    MOZ_ASSERT(rhs->type() == MIRType_Int32);

    LUrshD* lir = new(alloc()) LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
    define(lir, mir);
}

void
LIRGeneratorMIPSShared::visitAsmJSNeg(MAsmJSNeg* ins)
{
    if (ins->type() == MIRType_Int32) {
        define(new(alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
    } else if (ins->type() == MIRType_Float32) {
        define(new(alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
    } else {
        MOZ_ASSERT(ins->type() == MIRType_Double);
        define(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
    }
}

void
LIRGeneratorMIPSShared::lowerUDiv(MDiv* div)
{
    MDefinition* lhs = div->getOperand(0);
    MDefinition* rhs = div->getOperand(1);

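    // Unsigned division and modulus share LUDivOrMod: MIPS divu leaves the
    // quotient in LO and the remainder in HI, so the same LIR instruction
    // serves both lowerUDiv and lowerUMod.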
    LUDivOrMod* lir = new(alloc()) LUDivOrMod;
    lir->setOperand(0, useRegister(lhs));
    lir->setOperand(1, useRegister(rhs));
    if (div->fallible())
        assignSnapshot(lir, Bailout_DoubleOutput);

    define(lir, div);
}

void
LIRGeneratorMIPSShared::lowerUMod(MMod* mod)
{
    MDefinition* lhs = mod->getOperand(0);
    MDefinition* rhs = mod->getOperand(1);

    LUDivOrMod* lir = new(alloc()) LUDivOrMod;
    lir->setOperand(0, useRegister(lhs));
    lir->setOperand(1, useRegister(rhs));
    if (mod->fallible())
        assignSnapshot(lir, Bailout_DoubleOutput);

    define(lir, mod);
}

void
LIRGeneratorMIPSShared::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins)
{
    MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
    LAsmJSUInt32ToDouble* lir = new(alloc()) LAsmJSUInt32ToDouble(useRegisterAtStart(ins->input()));
    define(lir, ins);
}

void
LIRGeneratorMIPSShared::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins)
{
    MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
    LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()));
    define(lir, ins);
}

void
LIRGeneratorMIPSShared::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
{
    MDefinition* ptr = ins->ptr();
    MOZ_ASSERT(ptr->type() == MIRType_Int32);
    LAllocation ptrAlloc;

    // For MIPS it is best to keep the 'ptr' in a register if a bounds check
    // is needed.
    if (ptr->isConstantValue() && !ins->needsBoundsCheck()) {
        // A bounds check is only skipped for a positive index.
        MOZ_ASSERT(ptr->constantValue().toInt32() >= 0);
        ptrAlloc = LAllocation(ptr->constantVp());
    } else
        ptrAlloc = useRegisterAtStart(ptr);

    define(new(alloc()) LAsmJSLoadHeap(ptrAlloc), ins);
}

void
LIRGeneratorMIPSShared::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
{
    MDefinition* ptr = ins->ptr();
    MOZ_ASSERT(ptr->type() == MIRType_Int32);
    LAllocation ptrAlloc;

    if (ptr->isConstantValue() && !ins->needsBoundsCheck()) {
        MOZ_ASSERT(ptr->constantValue().toInt32() >= 0);
        ptrAlloc = LAllocation(ptr->constantVp());
    } else
        ptrAlloc = useRegisterAtStart(ptr);

    add(new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value())), ins);
}

void
LIRGeneratorMIPSShared::visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr* ins)
{
    define(new(alloc()) LAsmJSLoadFuncPtr(useRegister(ins->index())), ins);
}

void
LIRGeneratorMIPSShared::visitSubstr(MSubstr* ins)
{
    LSubstr* lir = new (alloc()) LSubstr(useRegister(ins->string()),
                                         useRegister(ins->begin()),
                                         useRegister(ins->length()),
                                         temp(),
                                         temp(),
                                         tempByteOpRegister());
    define(lir, ins);
    assignSafepoint(lir, ins);
}

void
LIRGeneratorMIPSShared::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins)
{
    MOZ_CRASH("NYI");
}

void
LIRGeneratorMIPSShared::visitSimdBinaryArith(MSimdBinaryArith* ins)
{
    MOZ_CRASH("NYI");
}

void
LIRGeneratorMIPSShared::visitSimdSelect(MSimdSelect* ins)
{
    MOZ_CRASH("NYI");
}

void
LIRGeneratorMIPSShared::visitSimdSplatX4(MSimdSplatX4* ins)
{
    MOZ_CRASH("NYI");
}

void
LIRGeneratorMIPSShared::visitSimdValueX4(MSimdValueX4* ins)
{
    MOZ_CRASH("NYI");
}

void
LIRGeneratorMIPSShared::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins)
{
    MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
    MOZ_ASSERT(ins->arrayType() != Scalar::Float64);

    MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
    MOZ_ASSERT(ins->index()->type() == MIRType_Int32);

    const LUse elements = useRegister(ins->elements());
    const LAllocation index = useRegisterOrConstant(ins->index());

    // If the target is a floating register then we need a temp at the
    // CodeGenerator level for creating the result.

    const LAllocation newval = useRegister(ins->newval());
    const LAllocation oldval = useRegister(ins->oldval());
    LDefinition uint32Temp = LDefinition::BogusTemp();
    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
        uint32Temp = temp();

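    // The valueTemp/offsetTemp/maskTemp registers are MIPS-specific: ll/sc
    // only operate on aligned words, so 8- and 16-bit atomics are synthesized
    // by shifting and masking the subword within its containing word, and the
    // code generator needs scratch registers for that sequence.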
    LCompareExchangeTypedArrayElement* lir =
        new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, uint32Temp,
                                                       /* valueTemp= */ temp(), /* offsetTemp= */ temp(),
                                                       /* maskTemp= */ temp());

    define(lir, ins);
}

void
LIRGeneratorMIPSShared::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
{
    MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);

    MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
    MOZ_ASSERT(ins->index()->type() == MIRType_Int32);

    const LUse elements = useRegister(ins->elements());
    const LAllocation index = useRegisterOrConstant(ins->index());

    // If the target is a floating register then we need a temp at the
    // CodeGenerator level for creating the result.

    const LAllocation value = useRegister(ins->value());
    LDefinition uint32Temp = LDefinition::BogusTemp();
    if (ins->arrayType() == Scalar::Uint32) {
        MOZ_ASSERT(ins->type() == MIRType_Double);
        uint32Temp = temp();
    }

    LAtomicExchangeTypedArrayElement* lir =
        new(alloc()) LAtomicExchangeTypedArrayElement(elements, index, value, uint32Temp,
                                                      /* valueTemp= */ temp(), /* offsetTemp= */ temp(),
                                                      /* maskTemp= */ temp());

    define(lir, ins);
}

void
LIRGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
{
    MOZ_ASSERT(ins->accessType() < Scalar::Float32);

    MDefinition* ptr = ins->ptr();
    MOZ_ASSERT(ptr->type() == MIRType_Int32);

    LAsmJSCompareExchangeHeap* lir =
        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(ptr),
                                               useRegister(ins->oldValue()),
                                               useRegister(ins->newValue()),
                                               /* valueTemp= */ temp(),
                                               /* offsetTemp= */ temp(),
                                               /* maskTemp= */ temp());

    define(lir, ins);
}

void
LIRGeneratorMIPSShared::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
{
    MOZ_ASSERT(ins->ptr()->type() == MIRType_Int32);

    const LAllocation ptr = useRegister(ins->ptr());
    const LAllocation value = useRegister(ins->value());

    // The output may not be used but will be clobbered regardless,
    // so ignore the case where we're not using the value and just
    // use the output register as a temp.

    LAsmJSAtomicExchangeHeap* lir =
        new(alloc()) LAsmJSAtomicExchangeHeap(ptr, value,
                                              /* valueTemp= */ temp(),
                                              /* offsetTemp= */ temp(),
                                              /* maskTemp= */ temp());
    define(lir, ins);
}

void
LIRGeneratorMIPSShared::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
{
    MOZ_ASSERT(ins->accessType() < Scalar::Float32);

    MDefinition* ptr = ins->ptr();
    MOZ_ASSERT(ptr->type() == MIRType_Int32);

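    // When the result of the atomic operation is unused, emit the cheaper
    // for-effect form that does not produce an output value.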
    if (!ins->hasUses()) {
        LAsmJSAtomicBinopHeapForEffect* lir =
            new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(ptr),
                                                        useRegister(ins->value()),
                                                        /* flagTemp= */ temp(),
                                                        /* valueTemp= */ temp(),
                                                        /* offsetTemp= */ temp(),
                                                        /* maskTemp= */ temp());
        add(lir, ins);
        return;
    }

    LAsmJSAtomicBinopHeap* lir =
        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(ptr),
                                           useRegister(ins->value()),
                                           /* temp= */ LDefinition::BogusTemp(),
                                           /* flagTemp= */ temp(),
                                           /* valueTemp= */ temp(),
                                           /* offsetTemp= */ temp(),
                                           /* maskTemp= */ temp());

    define(lir, ins);
}

void
LIRGeneratorMIPSShared::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
{
    MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
    MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
    MOZ_ASSERT(ins->arrayType() != Scalar::Float64);

    MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
    MOZ_ASSERT(ins->index()->type() == MIRType_Int32);

    const LUse elements = useRegister(ins->elements());
    const LAllocation index = useRegisterOrConstant(ins->index());
    const LAllocation value = useRegister(ins->value());

    if (!ins->hasUses()) {
        LAtomicTypedArrayElementBinopForEffect* lir =
            new(alloc()) LAtomicTypedArrayElementBinopForEffect(elements, index, value,
                                                                /* flagTemp= */ temp(),
                                                                /* valueTemp= */ temp(),
                                                                /* offsetTemp= */ temp(),
                                                                /* maskTemp= */ temp());
        add(lir, ins);
        return;
    }

    // For a Uint32Array with a known double result we need a temp for
    // the intermediate output.

    LDefinition flagTemp = temp();
    LDefinition outTemp = LDefinition::BogusTemp();

    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
        outTemp = temp();

    // On mips, map flagTemp to temp1 and outTemp to temp2, at least for now.

    LAtomicTypedArrayElementBinop* lir =
        new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, flagTemp, outTemp,
                                                   /* valueTemp= */ temp(), /* offsetTemp= */ temp(),
                                                   /* maskTemp= */ temp());
    define(lir, ins);
}