/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips64/CodeGenerator-mips64.h"

#include "mozilla/MathAlgorithms.h"

#include "jit/CodeGenerator.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "vm/Shape.h"
#include "vm/TraceLogging.h"

#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"

using namespace js;
using namespace js::jit;

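// On MIPS64 a JS Value occupies a single 64-bit GPR, so these helpers simply
// wrap the operand/temp register as a ValueOperand.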
ValueOperand CodeGeneratorMIPS64::ToValue(LInstruction* ins, size_t pos) {
  return ValueOperand(ToRegister(ins->getOperand(pos)));
}

ValueOperand CodeGeneratorMIPS64::ToTempValue(LInstruction* ins, size_t pos) {
  return ValueOperand(ToRegister(ins->getTemp(pos)));
}

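// Boxing tags the typed payload register and writes the resulting 64-bit
// Value into the output register.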
void CodeGenerator::visitBox(LBox* box) {
  const LAllocation* in = box->getOperand(0);
  ValueOperand result = ToOutValue(box);

  masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
}

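// Unboxing: fallible unboxes check the Value's tag and bail out on a
// mismatch; infallible unboxes (register or memory input) just strip the tag
// from the payload.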
void CodeGenerator::visitUnbox(LUnbox* unbox) {
  MUnbox* mir = unbox->mir();

  Register result = ToRegister(unbox->output());

  if (mir->fallible()) {
    const ValueOperand value = ToValue(unbox, LUnbox::Input);
    Label bail;
    switch (mir->type()) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(value, result, &bail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(value, result, &bail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(value, result, &bail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(value, result, &bail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(value, result, &bail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(value, result, &bail);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
    bailoutFrom(&bail, unbox->snapshot());
    return;
  }

  LAllocation* input = unbox->getOperand(LUnbox::Input);
  if (input->isRegister()) {
    Register inputReg = ToRegister(input);
    switch (mir->type()) {
      case MIRType::Int32:
        masm.unboxInt32(inputReg, result);
        break;
      case MIRType::Boolean:
        masm.unboxBoolean(inputReg, result);
        break;
      case MIRType::Object:
        masm.unboxObject(inputReg, result);
        break;
      case MIRType::String:
        masm.unboxString(inputReg, result);
        break;
      case MIRType::Symbol:
        masm.unboxSymbol(inputReg, result);
        break;
      case MIRType::BigInt:
        masm.unboxBigInt(inputReg, result);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
    return;
  }

  Address inputAddr = ToAddress(input);
  switch (mir->type()) {
    case MIRType::Int32:
      masm.unboxInt32(inputAddr, result);
      break;
    case MIRType::Boolean:
      masm.unboxBoolean(inputAddr, result);
      break;
    case MIRType::Object:
      masm.unboxObject(inputAddr, result);
      break;
    case MIRType::String:
      masm.unboxString(inputAddr, result);
      break;
    case MIRType::Symbol:
      masm.unboxSymbol(inputAddr, result);
      break;
    case MIRType::BigInt:
      masm.unboxBigInt(inputAddr, result);
      break;
    default:
      MOZ_CRASH("Given MIRType cannot be unboxed.");
  }
}

void CodeGeneratorMIPS64::splitTagForTest(const ValueOperand& value,
                                          ScratchTagScope& tag) {
  masm.splitTag(value.valueReg(), tag);
}

void CodeGenerator::visitCompareI64(LCompareI64* lir) {
  MCompare* mir = lir->mir();
  MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
             mir->compareType() == MCompare::Compare_UInt64);

  const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
  Register lhsReg = ToRegister64(lhs).reg;
  Register output = ToRegister(lir->output());
  Register rhsReg;
  ScratchRegisterScope scratch(masm);

  if (IsConstant(rhs)) {
    rhsReg = scratch;
    masm.ma_li(rhsReg, ImmWord(ToInt64(rhs)));
  } else if (rhs.value().isGeneralReg()) {
    rhsReg = ToRegister64(rhs).reg;
  } else {
    rhsReg = scratch;
    masm.loadPtr(ToAddress(rhs.value()), rhsReg);
  }

  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
  masm.cmpPtrSet(JSOpToCondition(lir->jsop(), isSigned), lhsReg, rhsReg,
                 output);
}

void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
             mir->compareType() == MCompare::Compare_UInt64);

  const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
  Register lhsReg = ToRegister64(lhs).reg;
  Register rhsReg;
  ScratchRegisterScope scratch(masm);

  if (IsConstant(rhs)) {
    rhsReg = scratch;
    masm.ma_li(rhsReg, ImmWord(ToInt64(rhs)));
  } else if (rhs.value().isGeneralReg()) {
    rhsReg = ToRegister64(rhs).reg;
  } else {
    rhsReg = scratch;
    masm.loadPtr(ToAddress(rhs.value()), rhsReg);
  }

  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
  Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
  emitBranch(lhsReg, rhsReg, cond, lir->ifTrue(), lir->ifFalse());
}

void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());
  Register output = ToRegister(lir->output());

  Label done;

  // Handle divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

  // Handle an integer overflow exception from INT64_MIN / -1.
  if (lir->canBeNegativeOverflow()) {
    Label notOverflow;
    masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notOverflow);
    masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notOverflow);
    if (lir->mir()->isMod()) {
      masm.ma_xor(output, output);
    } else {
      masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
    }
    masm.jump(&done);
    masm.bind(&notOverflow);
  }

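  // MIPS R6 provides three-operand ddiv/dmod instructions that write the
  // result register directly; pre-R6 ddiv writes the HI/LO register pair, so
  // the quotient (mflo) or remainder (mfhi) has to be moved out afterwards.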
#ifdef MIPSR6
  if (lir->mir()->isMod()) {
    masm.as_dmod(output, lhs, rhs);
  } else {
    masm.as_ddiv(output, lhs, rhs);
  }
#else
  masm.as_ddiv(lhs, rhs);
  if (lir->mir()->isMod()) {
    masm.as_mfhi(output);
  } else {
    masm.as_mflo(output);
  }
#endif
  masm.bind(&done);
}

void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());
  Register output = ToRegister(lir->output());

  Label done;

  // Prevent divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

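  // Unsigned variant of the same ISA split: R6 ddivu/dmodu write the result
  // directly, pre-R6 reads it back from LO (quotient) or HI (remainder).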
#ifdef MIPSR6
  if (lir->mir()->isMod()) {
    masm.as_dmodu(output, lhs, rhs);
  } else {
    masm.as_ddivu(output, lhs, rhs);
  }
#else
  masm.as_ddivu(lhs, rhs);
  if (lir->mir()->isMod()) {
    masm.as_mfhi(output);
  } else {
    masm.as_mflo(output);
  }
#endif
  masm.bind(&done);
}

void CodeGeneratorMIPS64::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
                                        Register divisor, Register output,
                                        Label* fail) {
  // Callers handle division by zero and integer overflow.

#ifdef MIPSR6
  masm.as_ddiv(/* result= */ dividend, dividend, divisor);
#else
  masm.as_ddiv(dividend, divisor);
  masm.as_mflo(dividend);
#endif

  // Create and return the result.
  masm.newGCBigInt(output, divisor, fail, bigIntsCanBeInNursery());
  masm.initializeBigInt(output, dividend);
}

void CodeGeneratorMIPS64::emitBigIntMod(LBigIntMod* ins, Register dividend,
                                        Register divisor, Register output,
                                        Label* fail) {
  // Callers handle division by zero and integer overflow.

#ifdef MIPSR6
  masm.as_dmod(/* result= */ dividend, dividend, divisor);
#else
  masm.as_ddiv(dividend, divisor);
  masm.as_mfhi(dividend);
#endif

  // Create and return the result.
  masm.newGCBigInt(output, divisor, fail, bigIntsCanBeInNursery());
  masm.initializeBigInt(output, dividend);
}

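// Wasm i64 loads and stores: aligned accesses use a single 64-bit memory
// operation; unaligned accesses take an extra temp register for the
// unaligned access sequence.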
template <typename T>
void CodeGeneratorMIPS64::emitWasmLoadI64(T* lir) {
  const MWasmLoad* mir = lir->mir();

  Register ptrScratch = InvalidReg;
  if (!lir->ptrCopy()->isBogusTemp()) {
    ptrScratch = ToRegister(lir->ptrCopy());
  }

  if (IsUnaligned(mir->access())) {
    masm.wasmUnalignedLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()),
                              ptrScratch, ToOutRegister64(lir),
                              ToRegister(lir->getTemp(1)));
  } else {
    masm.wasmLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()), ptrScratch,
                     ToOutRegister64(lir));
  }
}

void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
  emitWasmLoadI64(lir);
}

void CodeGenerator::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir) {
  emitWasmLoadI64(lir);
}

template <typename T>
void CodeGeneratorMIPS64::emitWasmStoreI64(T* lir) {
  const MWasmStore* mir = lir->mir();

  Register ptrScratch = InvalidReg;
  if (!lir->ptrCopy()->isBogusTemp()) {
    ptrScratch = ToRegister(lir->ptrCopy());
  }

  if (IsUnaligned(mir->access())) {
    masm.wasmUnalignedStoreI64(mir->access(), ToRegister64(lir->value()),
                               HeapReg, ToRegister(lir->ptr()), ptrScratch,
                               ToRegister(lir->getTemp(1)));
  } else {
    masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
                      ToRegister(lir->ptr()), ptrScratch);
  }
}

void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
  emitWasmStoreI64(lir);
}

void CodeGenerator::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir) {
  emitWasmStoreI64(lir);
}

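// select: the output register already holds the true-branch value (it is
// reused as an input), so only the false-branch value needs a conditional
// move (movz: move when the condition register is zero) or a guarded load.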
void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);

  Register cond = ToRegister(lir->condExpr());
  const LInt64Allocation falseExpr = lir->falseExpr();

  Register64 out = ToOutRegister64(lir);
  MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
             "true expr is reused for input");

  if (falseExpr.value().isRegister()) {
    masm.as_movz(out.reg, ToRegister(falseExpr.value()), cond);
  } else {
    Label done;
    masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
    masm.loadPtr(ToAddress(falseExpr.value()), out.reg);
    masm.bind(&done);
  }
}

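// Reinterpretations are raw 64-bit moves between the integer and floating
// point register files (dmtc1 / dmfc1); no numeric conversion is performed.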
void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
  masm.as_dmtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
}

void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
  masm.as_dmfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
}

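// Int32 -> Int64: dext zero-extends the low 32 bits; a word-sized sll by 0
// sign-extends, since MIPS64 32-bit operations sign-extend their result to
// 64 bits.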
void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
  const LAllocation* input = lir->getOperand(0);
  Register output = ToRegister(lir->output());

  if (lir->mir()->isUnsigned()) {
    masm.ma_dext(output, ToRegister(input), Imm32(0), Imm32(32));
  } else {
    masm.ma_sll(output, ToRegister(input), Imm32(0));
  }
}

void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
  const LAllocation* input = lir->getOperand(0);
  Register output = ToRegister(lir->output());

  if (lir->mir()->bottomHalf()) {
    if (input->isMemory()) {
      masm.load32(ToAddress(input), output);
    } else {
      masm.ma_sll(output, ToRegister(input), Imm32(0));
    }
  } else {
    MOZ_CRASH("Not implemented.");
  }
}

void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register64 output = ToOutRegister64(lir);
  switch (lir->mode()) {
    case MSignExtendInt64::Byte:
      masm.move32To64SignExtend(input.reg, output);
      masm.move8SignExtend(output.reg, output.reg);
      break;
    case MSignExtendInt64::Half:
      masm.move32To64SignExtend(input.reg, output);
      masm.move16SignExtend(output.reg, output.reg);
      break;
    case MSignExtendInt64::Word:
      masm.move32To64SignExtend(input.reg, output);
      break;
  }
}

void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index*) {
  MOZ_CRASH("Unused - no support on MIPS64 for indices > INT_MAX");
}

void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index*) {
  MOZ_CRASH("Unused - no support on MIPS64 for indices > INT_MAX");
}

void CodeGenerator::visitClzI64(LClzI64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register64 output = ToOutRegister64(lir);
  masm.clz64(input, output.reg);
}

void CodeGenerator::visitCtzI64(LCtzI64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register64 output = ToOutRegister64(lir);
  masm.ctz64(input, output.reg);
}

void CodeGenerator::visitNotI64(LNotI64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register output = ToRegister(lir->output());

  masm.cmp64Set(Assembler::Equal, input.reg, Imm32(0), output);
}

void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  MWasmTruncateToInt64* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);

  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
  addOutOfLineCode(ool, mir);

  Label* oolEntry = ool->entry();
  Label* oolRejoin = ool->rejoin();
  bool isSaturating = mir->isSaturating();

  if (fromType == MIRType::Double) {
    if (mir->isUnsigned()) {
      masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
                                      oolRejoin, InvalidFloatReg);
    } else {
      masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
                                     oolRejoin, InvalidFloatReg);
    }
  } else {
    if (mir->isUnsigned()) {
      masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
                                       oolRejoin, InvalidFloatReg);
    } else {
      masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
                                      oolRejoin, InvalidFloatReg);
    }
  }
}

void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  FloatRegister output = ToFloatRegister(lir->output());

  MIRType outputType = lir->mir()->type();
  MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);

  if (outputType == MIRType::Double) {
    if (lir->mir()->isUnsigned()) {
      masm.convertUInt64ToDouble(input, output, Register::Invalid());
    } else {
      masm.convertInt64ToDouble(input, output);
    }
  } else {
    if (lir->mir()->isUnsigned()) {
      masm.convertUInt64ToFloat32(input, output, Register::Invalid());
    } else {
      masm.convertInt64ToFloat32(input, output);
    }
  }
}

void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  emitBranch(input.reg, Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
}

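// Atomic 64-bit element access: the value is transferred with a single
// 64-bit load/store bracketed by the barriers the synchronization requires;
// a loaded value is then boxed into a BigInt, and a stored value is first
// unboxed from one.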
void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToRegister(lir->temp());
  Register64 temp64 = ToRegister64(lir->temp64());
  Register out = ToRegister(lir->output());
  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  auto sync = Synchronization::Load();
  masm.memoryBarrierBefore(sync);
  if (lir->index()->isConstant()) {
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.load64(source, temp64);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.load64(source, temp64);
  }
  masm.memoryBarrierAfter(sync);
  emitCreateBigInt(lir, storageType, temp64, out, temp);
}

void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());

  Scalar::Type writeType = lir->mir()->writeType();

  masm.loadBigInt64(value, temp1);
  auto sync = Synchronization::Store();
  masm.memoryBarrierBefore(sync);
  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), writeType);
    masm.store64(temp1, dest);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(writeType));
    masm.store64(temp1, dest);
  }
  masm.memoryBarrierAfter(sync);
}