/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips32/MacroAssembler-mips32.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/EndianUtils.h"
#include "mozilla/MathAlgorithms.h"

#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/MacroAssembler.h"
#include "jit/mips32/Simulator-mips32.h"
#include "jit/MoveEmitter.h"
#include "jit/SharedICRegisters.h"
#include "util/Memory.h"
#include "vm/JitActivation.h"  // js::jit::JitActivation
#include "vm/JSContext.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace jit;

using mozilla::Abs;

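// On MIPS32 a JS::Value uses the nunbox32 encoding: a 64-bit box made of a
// 32-bit type tag and a 32-bit payload. The offsets below select the payload
// or tag word of a boxed value in memory.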
static const int32_t PAYLOAD_OFFSET = NUNBOX32_PAYLOAD_OFFSET;
static const int32_t TAG_OFFSET = NUNBOX32_TYPE_OFFSET;

static_assert(sizeof(intptr_t) == 4, "Not 64-bit clean.");

void MacroAssemblerMIPSCompat::convertBoolToInt32(Register src,
                                                  Register dest) {
  // Note that C++ bool is only 1 byte, so zero extend it to clear the
  // higher-order bits.
  ma_and(dest, src, Imm32(0xff));
}

void MacroAssemblerMIPSCompat::convertInt32ToDouble(Register src,
                                                    FloatRegister dest) {
  as_mtc1(src, dest);
  as_cvtdw(dest, dest);
}

void MacroAssemblerMIPSCompat::convertInt32ToDouble(const Address& src,
                                                    FloatRegister dest) {
  ma_ls(dest, src);
  as_cvtdw(dest, dest);
}

void MacroAssemblerMIPSCompat::convertInt32ToDouble(const BaseIndex& src,
                                                    FloatRegister dest) {
  computeScaledAddress(src, ScratchRegister);
  convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
}

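// For a uint32 whose sign bit is set, the value lies in [2^31, 2^32) and the
// conversion below assembles the IEEE-754 double by hand: the high word gets
// the biased exponent (31 + bias) together with mantissa bits taken from
// src[30:11], and the low word holds the remaining mantissa bits,
// src[10:0] << 21 (bit 31 becomes the implicit leading 1). Inputs without
// the sign bit set take the ordinary signed int32 conversion path.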
void MacroAssemblerMIPSCompat::convertUInt32ToDouble(Register src,
                                                     FloatRegister dest) {
  Label positive, done;
  ma_b(src, src, &positive, NotSigned, ShortJump);

  const uint32_t kExponentShift =
      mozilla::FloatingPoint<double>::kExponentShift - 32;
  const uint32_t kExponent =
      (31 + mozilla::FloatingPoint<double>::kExponentBias);

  ma_ext(SecondScratchReg, src, 31 - kExponentShift, kExponentShift);
  ma_li(ScratchRegister, Imm32(kExponent << kExponentShift));
  ma_or(SecondScratchReg, ScratchRegister);
  ma_sll(ScratchRegister, src, Imm32(kExponentShift + 1));
  moveToDoubleHi(SecondScratchReg, dest);
  moveToDoubleLo(ScratchRegister, dest);

  ma_b(&done, ShortJump);

  bind(&positive);
  convertInt32ToDouble(src, dest);

  bind(&done);
}

void MacroAssemblerMIPSCompat::convertUInt32ToFloat32(Register src,
                                                      FloatRegister dest) {
  Label positive, done;
  ma_b(src, src, &positive, NotSigned, ShortJump);

  const uint32_t kExponentShift =
      mozilla::FloatingPoint<double>::kExponentShift - 32;
  const uint32_t kExponent =
      (31 + mozilla::FloatingPoint<double>::kExponentBias);

  ma_ext(SecondScratchReg, src, 31 - kExponentShift, kExponentShift);
  ma_li(ScratchRegister, Imm32(kExponent << kExponentShift));
  ma_or(SecondScratchReg, ScratchRegister);
  ma_sll(ScratchRegister, src, Imm32(kExponentShift + 1));
  FloatRegister destDouble = dest.asDouble();
  moveToDoubleHi(SecondScratchReg, destDouble);
  moveToDoubleLo(ScratchRegister, destDouble);

  convertDoubleToFloat32(destDouble, dest);

  ma_b(&done, ShortJump);

  bind(&positive);
  convertInt32ToFloat32(src, dest);

  bind(&done);
}

void MacroAssemblerMIPSCompat::convertDoubleToFloat32(FloatRegister src,
                                                      FloatRegister dest) {
  as_cvtsd(dest, src);
}

void MacroAssemblerMIPSCompat::convertDoubleToPtr(FloatRegister src,
                                                  Register dest, Label* fail,
                                                  bool negativeZeroCheck) {
  convertDoubleToInt32(src, dest, fail, negativeZeroCheck);
}

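// Helpers for inspecting the FCSR cause field: CauseBitPos/CauseBitCount
// select the whole cause field starting at the Inexact (I) bit, and
// CauseIOrVMask keeps only the Inexact and Invalid-Operation (V) bits within
// the extracted field.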
const int CauseBitPos = int(Assembler::CauseI);
const int CauseBitCount = 1 + int(Assembler::CauseV) - int(Assembler::CauseI);
const int CauseIOrVMask = ((1 << int(Assembler::CauseI)) |
                           (1 << int(Assembler::CauseV))) >>
                          int(Assembler::CauseI);

// Checks whether a double is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
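// The negative-zero check exploits the fact that -0.0 is the only double
// whose high word is 0x80000000 (== INT32_MIN) while its low word is zero:
// xoring the high word with INT32_MIN and oring in the low word yields zero
// exactly for -0.0. Inexact and out-of-range inputs are detected afterwards
// from the cause bits that trunc.w.d raises in the FCSR.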
void MacroAssemblerMIPSCompat::convertDoubleToInt32(FloatRegister src,
                                                    Register dest, Label* fail,
                                                    bool negativeZeroCheck) {
  if (negativeZeroCheck) {
    moveFromDoubleHi(src, dest);
    moveFromDoubleLo(src, SecondScratchReg);
    ma_xor(dest, Imm32(INT32_MIN));
    ma_or(dest, SecondScratchReg);
    ma_b(dest, Imm32(0), fail, Assembler::Equal);
  }

  // Truncate the double to an int; if the result is inexact or invalid, fail.
  as_truncwd(ScratchFloat32Reg, src);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, dest);
  ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
  // The masking andi is only a precaution: for trunc.*.* the only possible
  // floating-point exceptions are Inexact, Invalid Operation and
  // Unimplemented Operation, so leaving it out would also be correct.
  as_andi(ScratchRegister, ScratchRegister, CauseIOrVMask);
  ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
}

// Checks whether a float32 is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerMIPSCompat::convertFloat32ToInt32(FloatRegister src,
                                                     Register dest, Label* fail,
                                                     bool negativeZeroCheck) {
  if (negativeZeroCheck) {
    moveFromFloat32(src, dest);
    ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
  }

  as_truncws(ScratchFloat32Reg, src);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, dest);
  ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
  as_andi(ScratchRegister, ScratchRegister, CauseIOrVMask);
  ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
}

void MacroAssemblerMIPSCompat::convertFloat32ToDouble(FloatRegister src,
                                                      FloatRegister dest) {
  as_cvtds(dest, src);
}

void MacroAssemblerMIPSCompat::convertInt32ToFloat32(Register src,
                                                     FloatRegister dest) {
  as_mtc1(src, dest);
  as_cvtsw(dest, dest);
}

void MacroAssemblerMIPSCompat::convertInt32ToFloat32(const Address& src,
                                                     FloatRegister dest) {
  ma_ls(dest, src);
  as_cvtsw(dest, dest);
}

void MacroAssemblerMIPS::ma_li(Register dest, CodeLabel* label) {
  BufferOffset bo = m_buffer.nextOffset();
  ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
  label->patchAt()->bind(bo.getOffset());
  label->setLinkMode(CodeLabel::MoveImmediate);
}

void MacroAssemblerMIPS::ma_li(Register dest, ImmWord imm) {
  ma_li(dest, Imm32(uint32_t(imm.value)));
}

void MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmPtr imm) {
  ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
}

void MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmWord imm) {
  ma_liPatchable(dest, Imm32(int32_t(imm.value)));
}

// Arithmetic-based ops.

// Add.
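// Signed 32-bit addition overflows exactly when both operands have the same
// sign and the result's sign differs, i.e. when ~(rs ^ rt) & (rt ^ rd) is
// negative. For example 0x7fffffff + 1 gives rd = 0x80000000; both
// ~(rs ^ rt) and (rt ^ rd) are negative, so the overflow branch is taken.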
void MacroAssemblerMIPS::ma_add32TestOverflow(Register rd, Register rs,
                                              Register rt, Label* overflow) {
  MOZ_ASSERT_IF(rs == rd, rs != rt);
  MOZ_ASSERT(rs != ScratchRegister);
  MOZ_ASSERT(rt != ScratchRegister);
  MOZ_ASSERT(rd != rt);
  MOZ_ASSERT(rd != ScratchRegister);
  MOZ_ASSERT(rd != SecondScratchReg);

  if (rs == rt) {
    as_addu(rd, rs, rs);
    as_xor(SecondScratchReg, rs, rd);
    ma_b(SecondScratchReg, Imm32(0), overflow, Assembler::LessThan);
    return;
  }

  // If the operands have different signs, they cannot overflow.
  as_xor(ScratchRegister, rs, rt);

  as_addu(rd, rs, rt);
  as_nor(ScratchRegister, ScratchRegister, zero);
  // If the result's sign differs from the operands', we overflowed.
  as_xor(SecondScratchReg, rt, rd);
  as_and(SecondScratchReg, SecondScratchReg, ScratchRegister);
  ma_b(SecondScratchReg, Imm32(0), overflow, Assembler::LessThan);
}

void MacroAssemblerMIPS::ma_add32TestOverflow(Register rd, Register rs,
                                              Imm32 imm, Label* overflow) {
  MOZ_ASSERT(rs != ScratchRegister);
  MOZ_ASSERT(rs != SecondScratchReg);
  MOZ_ASSERT(rd != ScratchRegister);
  MOZ_ASSERT(rd != SecondScratchReg);

  Register rs_copy = rs;

  if (imm.value > 0) {
    as_nor(ScratchRegister, rs, zero);
  } else if (rs == rd) {
    ma_move(ScratchRegister, rs);
    rs_copy = ScratchRegister;
  }

  if (Imm16::IsInSignedRange(imm.value)) {
    as_addiu(rd, rs, imm.value);
  } else {
    ma_li(SecondScratchReg, imm);
    as_addu(rd, rs, SecondScratchReg);
  }

  if (imm.value > 0) {
    as_and(ScratchRegister, ScratchRegister, rd);
  } else {
    as_nor(SecondScratchReg, rd, zero);
    as_and(ScratchRegister, rs_copy, SecondScratchReg);
  }

  ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
}

// Subtract.
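// Signed 32-bit subtraction rd = rs - rt overflows exactly when the operands
// have different signs and the result's sign differs from rs's, i.e. when
// (rs ^ rt) & (rs ^ rd) is negative. For example INT32_MIN - 1 gives
// rd = 0x7fffffff; both xors are negative, so the overflow branch is taken.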
void MacroAssemblerMIPS::ma_sub32TestOverflow(Register rd, Register rs,
                                              Register rt, Label* overflow) {
  // The rs == rt case should probably be folded at MIR stage.
  // Happens for Number_isInteger*. Not worth specializing here.
  MOZ_ASSERT_IF(rs == rd, rs != rt);
  MOZ_ASSERT(rs != SecondScratchReg);
  MOZ_ASSERT(rt != SecondScratchReg);
  MOZ_ASSERT(rd != rt);
  MOZ_ASSERT(rd != ScratchRegister);
  MOZ_ASSERT(rd != SecondScratchReg);

  Register rs_copy = rs;

  if (rs == rd) {
    ma_move(SecondScratchReg, rs);
    rs_copy = SecondScratchReg;
  }

  as_subu(rd, rs, rt);
  // If the operands have the same sign, the subtraction cannot overflow.
  as_xor(ScratchRegister, rs_copy, rt);
  // If the result's sign differs from rs's, we overflowed.
  as_xor(SecondScratchReg, rs_copy, rd);
  as_and(SecondScratchReg, SecondScratchReg, ScratchRegister);
  ma_b(SecondScratchReg, Imm32(0), overflow, Assembler::LessThan);
}

// Memory.

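// MIPS load/store instructions only encode a signed 16-bit immediate offset,
// so offsets outside [-32768, 32767] are first materialized into
// ScratchRegister and added to the base (Loongson additionally provides
// reg+reg addressed forms, used below where available).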
void MacroAssemblerMIPS::ma_load(Register dest, Address address,
                                 LoadStoreSize size,
                                 LoadStoreExtension extension) {
  int16_t encodedOffset;
  Register base;

  if (isLoongson() && ZeroExtend != extension &&
      !Imm16::IsInSignedRange(address.offset)) {
    ma_li(ScratchRegister, Imm32(address.offset));
    base = address.base;

    switch (size) {
      case SizeByte:
        as_gslbx(dest, base, ScratchRegister, 0);
        break;
      case SizeHalfWord:
        as_gslhx(dest, base, ScratchRegister, 0);
        break;
      case SizeWord:
        as_gslwx(dest, base, ScratchRegister, 0);
        break;
      case SizeDouble:
        as_gsldx(dest, base, ScratchRegister, 0);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_load");
    }
    return;
  }

  if (!Imm16::IsInSignedRange(address.offset)) {
    ma_li(ScratchRegister, Imm32(address.offset));
    as_addu(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = Imm16(0).encode();
  } else {
    encodedOffset = Imm16(address.offset).encode();
    base = address.base;
  }

  switch (size) {
    case SizeByte:
      if (ZeroExtend == extension) {
        as_lbu(dest, base, encodedOffset);
      } else {
        as_lb(dest, base, encodedOffset);
      }
      break;
    case SizeHalfWord:
      if (ZeroExtend == extension) {
        as_lhu(dest, base, encodedOffset);
      } else {
        as_lh(dest, base, encodedOffset);
      }
      break;
    case SizeWord:
      as_lw(dest, base, encodedOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_load");
  }
}

void MacroAssemblerMIPS::ma_store(Register data, Address address,
                                  LoadStoreSize size,
                                  LoadStoreExtension extension) {
  int16_t encodedOffset;
  Register base;

  if (isLoongson() && !Imm16::IsInSignedRange(address.offset)) {
    ma_li(ScratchRegister, Imm32(address.offset));
    base = address.base;

    switch (size) {
      case SizeByte:
        as_gssbx(data, base, ScratchRegister, 0);
        break;
      case SizeHalfWord:
        as_gsshx(data, base, ScratchRegister, 0);
        break;
      case SizeWord:
        as_gsswx(data, base, ScratchRegister, 0);
        break;
      case SizeDouble:
        as_gssdx(data, base, ScratchRegister, 0);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_store");
    }
    return;
  }

  if (!Imm16::IsInSignedRange(address.offset)) {
    ma_li(ScratchRegister, Imm32(address.offset));
    as_addu(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = Imm16(0).encode();
  } else {
    encodedOffset = Imm16(address.offset).encode();
    base = address.base;
  }

  switch (size) {
    case SizeByte:
      as_sb(data, base, encodedOffset);
      break;
    case SizeHalfWord:
      as_sh(data, base, encodedOffset);
      break;
    case SizeWord:
      as_sw(data, base, encodedOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_store");
  }
}

void MacroAssemblerMIPSCompat::computeScaledAddress(const BaseIndex& address,
                                                    Register dest) {
  int32_t shift = Imm32::ShiftOf(address.scale).value;
  if (shift) {
    ma_sll(ScratchRegister, address.index, Imm32(shift));
    as_addu(dest, address.base, ScratchRegister);
  } else {
    as_addu(dest, address.base, address.index);
  }
}

// Shortcut for when we know we're transferring 32 bits of data.
void MacroAssemblerMIPS::ma_lw(Register data, Address address) {
  ma_load(data, address, SizeWord);
}

void MacroAssemblerMIPS::ma_sw(Register data, Address address) {
  ma_store(data, address, SizeWord);
}

void MacroAssemblerMIPS::ma_sw(Imm32 imm, Address address) {
  MOZ_ASSERT(address.base != ScratchRegister);
  ma_li(ScratchRegister, imm);

  if (Imm16::IsInSignedRange(address.offset)) {
    as_sw(ScratchRegister, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != SecondScratchReg);

    ma_li(SecondScratchReg, Imm32(address.offset));
    as_addu(SecondScratchReg, address.base, SecondScratchReg);
    as_sw(ScratchRegister, SecondScratchReg, 0);
  }
}

void MacroAssemblerMIPS::ma_sw(Register data, BaseIndex& address) {
  ma_store(data, address, SizeWord);
}

void MacroAssemblerMIPS::ma_pop(Register r) {
  as_lw(r, StackPointer, 0);
  as_addiu(StackPointer, StackPointer, sizeof(intptr_t));
}

void MacroAssemblerMIPS::ma_push(Register r) {
  if (r == sp) {
    // Pushing sp requires one more instruction.
    ma_move(ScratchRegister, sp);
    r = ScratchRegister;
  }

  as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
  as_sw(r, StackPointer, 0);
}

// Branches when done from within mips-specific code.
void MacroAssemblerMIPS::ma_b(Register lhs, Address addr, Label* label,
                              Condition c, JumpKind jumpKind) {
  MOZ_ASSERT(lhs != ScratchRegister);
  ma_lw(ScratchRegister, addr);
  ma_b(lhs, ScratchRegister, label, c, jumpKind);
}

void MacroAssemblerMIPS::ma_b(Address addr, Imm32 imm, Label* label,
                              Condition c, JumpKind jumpKind) {
  ma_lw(SecondScratchReg, addr);
  ma_b(SecondScratchReg, imm, label, c, jumpKind);
}

void MacroAssemblerMIPS::ma_b(Address addr, ImmGCPtr imm, Label* label,
                              Condition c, JumpKind jumpKind) {
  ma_lw(SecondScratchReg, addr);
  ma_b(SecondScratchReg, imm, label, c, jumpKind);
}

void MacroAssemblerMIPS::ma_bal(Label* label, DelaySlotFill delaySlotFill) {
  spew("branch .Llabel %p\n", label);
  if (label->bound()) {
    // Generate the long jump for calls because the return address has to be
    // the address after the reserved block.
    addLongJump(nextOffset(), BufferOffset(label->offset()));
    ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
    as_jalr(ScratchRegister);
    if (delaySlotFill == FillDelaySlot) {
      as_nop();
    }
    return;
  }

  // Second word holds a pointer to the next branch in label's chain.
  uint32_t nextInChain =
      label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

  // Make the whole branch continuous in the buffer.
  m_buffer.ensureSpace(4 * sizeof(uint32_t));

  spew("bal .Llabel %p\n", label);
  BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
  writeInst(nextInChain);
  if (!oom()) {
    label->use(bo.getOffset());
  }
  // Leave space for the long jump.
  as_nop();
  if (delaySlotFill == FillDelaySlot) {
    as_nop();
  }
}

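// While a label is still unbound, every branch that targets it is emitted as
// a small record in the instruction stream: the branch instruction itself,
// followed by a word holding the offset of the next branch in the label's
// chain (a short jump is tagged by the otherwise unused branch offset of 4).
// Enough nops are reserved after the record so that bind() can later patch
// it in place into either a short branch or a long jump (lui/ori + jr).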
void MacroAssemblerMIPS::branchWithCode(InstImm code, Label* label,
                                        JumpKind jumpKind) {
  spew("branch .Llabel %p", label);
  MOZ_ASSERT(code.encode() !=
             InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
  InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));

  if (label->bound()) {
    int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();

    if (BOffImm16::IsInRange(offset)) {
      jumpKind = ShortJump;
    }

    if (jumpKind == ShortJump) {
      MOZ_ASSERT(BOffImm16::IsInRange(offset));
      code.setBOffImm16(BOffImm16(offset));
#ifdef JS_JITSPEW
      decodeBranchInstAndSpew(code);
#endif
      writeInst(code.encode());
      as_nop();
      return;
    }

    if (code.encode() == inst_beq.encode()) {
      // Handle the long jump.
      addLongJump(nextOffset(), BufferOffset(label->offset()));
      ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
      as_jr(ScratchRegister);
      as_nop();
      return;
    }

    // Handle the long conditional branch.
    spew("invert branch .Llabel %p", label);
    InstImm code_r = invertBranch(code, BOffImm16(5 * sizeof(uint32_t)));
#ifdef JS_JITSPEW
    decodeBranchInstAndSpew(code_r);
#endif
    writeInst(code_r.encode());

    // No need for a "nop" here because we can clobber scratch.
    addLongJump(nextOffset(), BufferOffset(label->offset()));
    ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
    as_jr(ScratchRegister);
    as_nop();
    return;
  }

  // Generate an open jump and link it to the label.

  // Second word holds a pointer to the next branch in label's chain.
  uint32_t nextInChain =
      label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

  if (jumpKind == ShortJump) {
    // Make the whole branch continuous in the buffer.
    m_buffer.ensureSpace(2 * sizeof(uint32_t));

    // Indicate that this is a short jump by using offset 4.
    code.setBOffImm16(BOffImm16(4));
#ifdef JS_JITSPEW
    decodeBranchInstAndSpew(code);
#endif
    BufferOffset bo = writeInst(code.encode());
    writeInst(nextInChain);
    if (!oom()) {
      label->use(bo.getOffset());
    }
    return;
  }

  bool conditional = code.encode() != inst_beq.encode();

  // Make the whole branch continuous in the buffer.
  m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));

#ifdef JS_JITSPEW
  decodeBranchInstAndSpew(code);
#endif
  BufferOffset bo = writeInst(code.encode());
  writeInst(nextInChain);
  if (!oom()) {
    label->use(bo.getOffset());
  }
  // Leave space for a potential long jump.
  as_nop();
  as_nop();
  if (conditional) {
    as_nop();
  }
}

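// 64-bit compare-and-set is composed from 32-bit operations: the high words
// decide the result unless they are equal, in which case the low words are
// compared unsigned. ma_cmp64 computes an intermediate value in dest plus
// the 32-bit condition (Zero or NonZero) under which the 64-bit condition
// holds; cmp64Set then normalizes that into a 0/1 boolean.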
void MacroAssemblerMIPSCompat::cmp64Set(Condition cond, Register64 lhs,
                                        Imm64 val, Register dest) {
  if (val.value == 0) {
    switch (cond) {
      case Assembler::Equal:
      case Assembler::BelowOrEqual:
        as_or(dest, lhs.high, lhs.low);
        as_sltiu(dest, dest, 1);
        break;
      case Assembler::NotEqual:
      case Assembler::Above:
        as_or(dest, lhs.high, lhs.low);
        as_sltu(dest, zero, dest);
        break;
      case Assembler::LessThan:
      case Assembler::GreaterThanOrEqual:
        as_slt(dest, lhs.high, zero);
        if (cond == Assembler::GreaterThanOrEqual) {
          as_xori(dest, dest, 1);
        }
        break;
      case Assembler::GreaterThan:
      case Assembler::LessThanOrEqual:
        as_or(SecondScratchReg, lhs.high, lhs.low);
        as_sra(ScratchRegister, lhs.high, 31);
        as_sltu(dest, ScratchRegister, SecondScratchReg);
        if (cond == Assembler::LessThanOrEqual) {
          as_xori(dest, dest, 1);
        }
        break;
      case Assembler::Below:
      case Assembler::AboveOrEqual:
        as_ori(dest, zero, cond == Assembler::AboveOrEqual ? 1 : 0);
        break;
      default:
        MOZ_CRASH("Condition code not supported");
        break;
    }
    return;
  }

  Condition c = ma_cmp64(cond, lhs, val, dest);

  switch (cond) {
    // For Equal/NotEqual, ma_cmp64 leaves a non-boolean value in dest that
    // still has to be reduced to 0 or 1.
    case Assembler::Equal:
      as_sltiu(dest, dest, 1);
      break;
    case Assembler::NotEqual:
      as_sltu(dest, zero, dest);
      break;
    default:
      if (c == Assembler::Zero) {
        as_xori(dest, dest, 1);
      }
      break;
  }
}

void MacroAssemblerMIPSCompat::cmp64Set(Condition cond, Register64 lhs,
                                        Register64 rhs, Register dest) {
  Condition c = ma_cmp64(cond, lhs, rhs, dest);

  switch (cond) {
    // For Equal/NotEqual, ma_cmp64 leaves a non-boolean value in dest that
    // still has to be reduced to 0 or 1.
    case Assembler::Equal:
      as_sltiu(dest, dest, 1);
      break;
    case Assembler::NotEqual:
      as_sltu(dest, zero, dest);
      break;
    default:
      if (c == Assembler::Zero) {
        as_xori(dest, dest, 1);
      }
      break;
  }
}

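// For the ordered conditions below, dest ends up as, e.g. for LessThan,
//   (lhs.high < rhs.high) | (!(lhs.high > rhs.high) & (lhs.low <u rhs.low)),
// where the middle term is produced by the slt of the two earlier
// comparison results; when the high words are equal this reduces to the
// unsigned comparison of the low words.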
Assembler::Condition MacroAssemblerMIPSCompat::ma_cmp64(Condition cond,
                                                        Register64 lhs,
                                                        Register64 rhs,
                                                        Register dest) {
  switch (cond) {
    case Assembler::Equal:
    case Assembler::NotEqual:
      as_xor(SecondScratchReg, lhs.high, rhs.high);
      as_xor(ScratchRegister, lhs.low, rhs.low);
      as_or(dest, SecondScratchReg, ScratchRegister);
      return (cond == Assembler::Equal) ? Assembler::Zero : Assembler::NonZero;
    case Assembler::LessThan:
    case Assembler::GreaterThanOrEqual:
      as_slt(SecondScratchReg, rhs.high, lhs.high);
      as_sltu(ScratchRegister, lhs.low, rhs.low);
      as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
      as_slt(ScratchRegister, lhs.high, rhs.high);
      as_or(dest, ScratchRegister, SecondScratchReg);
      return (cond == Assembler::GreaterThanOrEqual) ? Assembler::Zero
                                                     : Assembler::NonZero;
    case Assembler::GreaterThan:
    case Assembler::LessThanOrEqual:
      as_slt(SecondScratchReg, lhs.high, rhs.high);
      as_sltu(ScratchRegister, rhs.low, lhs.low);
      as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
      as_slt(ScratchRegister, rhs.high, lhs.high);
      as_or(dest, ScratchRegister, SecondScratchReg);
      return (cond == Assembler::LessThanOrEqual) ? Assembler::Zero
                                                  : Assembler::NonZero;
    case Assembler::Below:
    case Assembler::AboveOrEqual:
      as_sltu(SecondScratchReg, rhs.high, lhs.high);
      as_sltu(ScratchRegister, lhs.low, rhs.low);
      as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
      as_sltu(ScratchRegister, lhs.high, rhs.high);
      as_or(dest, ScratchRegister, SecondScratchReg);
      return (cond == Assembler::AboveOrEqual) ? Assembler::Zero
                                               : Assembler::NonZero;
    case Assembler::Above:
    case Assembler::BelowOrEqual:
      as_sltu(SecondScratchReg, lhs.high, rhs.high);
      as_sltu(ScratchRegister, rhs.low, lhs.low);
      as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
      as_sltu(ScratchRegister, rhs.high, lhs.high);
      as_or(dest, ScratchRegister, SecondScratchReg);
      return (cond == Assembler::BelowOrEqual) ? Assembler::Zero
                                               : Assembler::NonZero;
    default:
      MOZ_CRASH("Condition code not supported");
  }
}

Assembler::Condition MacroAssemblerMIPSCompat::ma_cmp64(Condition cond,
                                                        Register64 lhs,
                                                        Imm64 val,
                                                        Register dest) {
  MOZ_ASSERT(val.value != 0);

  switch (cond) {
    case Assembler::Equal:
    case Assembler::NotEqual:
      ma_xor(SecondScratchReg, lhs.high, val.hi());
      ma_xor(ScratchRegister, lhs.low, val.low());
      as_or(dest, SecondScratchReg, ScratchRegister);
      return (cond == Assembler::Equal) ? Assembler::Zero : Assembler::NonZero;
    case Assembler::LessThan:
    case Assembler::GreaterThanOrEqual:
      ma_li(SecondScratchReg, val.hi());
      as_slt(ScratchRegister, lhs.high, SecondScratchReg);
      as_slt(SecondScratchReg, SecondScratchReg, lhs.high);
      as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);
      ma_li(ScratchRegister, val.low());
      as_sltu(ScratchRegister, lhs.low, ScratchRegister);
      as_slt(dest, SecondScratchReg, ScratchRegister);
      return (cond == Assembler::GreaterThanOrEqual) ? Assembler::Zero
                                                     : Assembler::NonZero;
    case Assembler::GreaterThan:
    case Assembler::LessThanOrEqual:
      ma_li(SecondScratchReg, val.hi());
      as_slt(ScratchRegister, SecondScratchReg, lhs.high);
      as_slt(SecondScratchReg, lhs.high, SecondScratchReg);
      as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);
      ma_li(ScratchRegister, val.low());
      as_sltu(ScratchRegister, ScratchRegister, lhs.low);
      as_slt(dest, SecondScratchReg, ScratchRegister);
      return (cond == Assembler::LessThanOrEqual) ? Assembler::Zero
                                                  : Assembler::NonZero;
    case Assembler::Below:
    case Assembler::AboveOrEqual:
      ma_li(SecondScratchReg, val.hi());
      as_sltu(ScratchRegister, lhs.high, SecondScratchReg);
      as_sltu(SecondScratchReg, SecondScratchReg, lhs.high);
      as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);
      ma_li(ScratchRegister, val.low());
      as_sltu(ScratchRegister, lhs.low, ScratchRegister);
      as_slt(dest, SecondScratchReg, ScratchRegister);
      return (cond == Assembler::AboveOrEqual) ? Assembler::Zero
                                               : Assembler::NonZero;
    case Assembler::Above:
    case Assembler::BelowOrEqual:
      ma_li(SecondScratchReg, val.hi());
      as_sltu(ScratchRegister, SecondScratchReg, lhs.high);
      as_sltu(SecondScratchReg, lhs.high, SecondScratchReg);
      as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);
      ma_li(ScratchRegister, val.low());
      as_sltu(ScratchRegister, ScratchRegister, lhs.low);
      as_slt(dest, SecondScratchReg, ScratchRegister);
      return (cond == Assembler::BelowOrEqual) ? Assembler::Zero
                                               : Assembler::NonZero;
    default:
      MOZ_CRASH("Condition code not supported");
  }
}

// fp instructions
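// A double immediate is loaded by bitcasting it to two 32-bit words and
// moving each word into the matching half of the destination register pair,
// using $zero directly whenever a half is all zero bits.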
void MacroAssemblerMIPS::ma_lid(FloatRegister dest, double value) {
  struct DoubleStruct {
    uint32_t lo;
    uint32_t hi;
  };
  DoubleStruct intStruct = mozilla::BitwiseCast<DoubleStruct>(value);
#if MOZ_BIG_ENDIAN()
  std::swap(intStruct.hi, intStruct.lo);
#endif

  // Put the high part of the 64-bit value into the odd register.
  if (intStruct.hi == 0) {
    moveToDoubleHi(zero, dest);
  } else {
    ma_li(ScratchRegister, Imm32(intStruct.hi));
    moveToDoubleHi(ScratchRegister, dest);
  }

  // Put the low part of the 64-bit value into the even register.
  if (intStruct.lo == 0) {
    moveToDoubleLo(zero, dest);
  } else {
    ma_li(ScratchRegister, Imm32(intStruct.lo));
    moveToDoubleLo(ScratchRegister, dest);
  }
}

void MacroAssemblerMIPS::ma_mv(FloatRegister src, ValueOperand dest) {
  moveFromDoubleLo(src, dest.payloadReg());
  moveFromDoubleHi(src, dest.typeReg());
}

void MacroAssemblerMIPS::ma_mv(ValueOperand src, FloatRegister dest) {
  moveToDoubleLo(src.payloadReg(), dest);
  moveToDoubleHi(src.typeReg(), dest);
}

void MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_lwc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gslsx(ft, address.base, ScratchRegister, 0);
    } else {
      as_addu(ScratchRegister, address.base, ScratchRegister);
      as_lwc1(ft, ScratchRegister, 0);
    }
  }
}

void MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_ldc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gsldx(ft, address.base, ScratchRegister, 0);
    } else {
      as_addu(ScratchRegister, address.base, ScratchRegister);
      as_ldc1(ft, ScratchRegister, 0);
    }
  }
}

void MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_sdc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gssdx(ft, address.base, ScratchRegister, 0);
    } else {
      as_addu(ScratchRegister, address.base, ScratchRegister);
      as_sdc1(ft, ScratchRegister, 0);
    }
  }
}

void MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_swc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gsssx(ft, address.base, ScratchRegister, 0);
    } else {
      as_addu(ScratchRegister, address.base, ScratchRegister);
      as_swc1(ft, ScratchRegister, 0);
    }
  }
}

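// When a 64-bit slot is only known to be word-aligned, a double is
// transferred as two 32-bit accesses through the even/odd halves of the
// register pair instead of a single ldc1/sdc1, which require an 8-byte
// aligned address.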
void MacroAssemblerMIPS::ma_ldc1WordAligned(FloatRegister ft, Register base,
                                            int32_t off) {
  MOZ_ASSERT(Imm16::IsInSignedRange(off + PAYLOAD_OFFSET) &&
             Imm16::IsInSignedRange(off + TAG_OFFSET));

  as_lwc1(ft, base, off + PAYLOAD_OFFSET);
  as_lwc1(getOddPair(ft), base, off + TAG_OFFSET);
}

void MacroAssemblerMIPS::ma_sdc1WordAligned(FloatRegister ft, Register base,
                                            int32_t off) {
  MOZ_ASSERT(Imm16::IsInSignedRange(off + PAYLOAD_OFFSET) &&
             Imm16::IsInSignedRange(off + TAG_OFFSET));

  as_swc1(ft, base, off + PAYLOAD_OFFSET);
  as_swc1(getOddPair(ft), base, off + TAG_OFFSET);
}

void MacroAssemblerMIPS::ma_pop(FloatRegister f) {
  if (f.isDouble()) {
    ma_ldc1WordAligned(f, StackPointer, 0);
  } else {
    as_lwc1(f, StackPointer, 0);
  }

  as_addiu(StackPointer, StackPointer, f.size());
}

void MacroAssemblerMIPS::ma_push(FloatRegister f) {
  as_addiu(StackPointer, StackPointer, -f.size());

  if (f.isDouble()) {
    ma_sdc1WordAligned(f, StackPointer, 0);
  } else {
    as_swc1(f, StackPointer, 0);
  }
}

bool MacroAssemblerMIPSCompat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
  uint32_t descriptor = MakeFrameDescriptor(
      asMasm().framePushed(), FrameType::IonJS, ExitFrameLayout::Size());

  asMasm().Push(Imm32(descriptor));  // descriptor_
  asMasm().Push(ImmPtr(fakeReturnAddr));

  return true;
}

void MacroAssemblerMIPSCompat::move32(Imm32 imm, Register dest) {
  ma_li(dest, imm);
}

void MacroAssemblerMIPSCompat::move32(Register src, Register dest) {
  ma_move(dest, src);
}

void MacroAssemblerMIPSCompat::movePtr(Register src, Register dest) {
  ma_move(dest, src);
}

void MacroAssemblerMIPSCompat::movePtr(ImmWord imm, Register dest) {
  ma_li(dest, imm);
}

void MacroAssemblerMIPSCompat::movePtr(ImmGCPtr imm, Register dest) {
  ma_li(dest, imm);
}

void MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest) {
  movePtr(ImmWord(uintptr_t(imm.value)), dest);
}

void MacroAssemblerMIPSCompat::movePtr(wasm::SymbolicAddress imm,
                                       Register dest) {
  append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
  ma_liPatchable(dest, ImmWord(-1));
}

void MacroAssemblerMIPSCompat::load8ZeroExtend(const Address& address,
                                               Register dest) {
  ma_load(dest, address, SizeByte, ZeroExtend);
}

void MacroAssemblerMIPSCompat::load8ZeroExtend(const BaseIndex& src,
                                               Register dest) {
  ma_load(dest, src, SizeByte, ZeroExtend);
}

void MacroAssemblerMIPSCompat::load8SignExtend(const Address& address,
                                               Register dest) {
  ma_load(dest, address, SizeByte, SignExtend);
}

void MacroAssemblerMIPSCompat::load8SignExtend(const BaseIndex& src,
                                               Register dest) {
  ma_load(dest, src, SizeByte, SignExtend);
}

void MacroAssemblerMIPSCompat::load16ZeroExtend(const Address& address,
                                                Register dest) {
  ma_load(dest, address, SizeHalfWord, ZeroExtend);
}

void MacroAssemblerMIPSCompat::load16ZeroExtend(const BaseIndex& src,
                                                Register dest) {
  ma_load(dest, src, SizeHalfWord, ZeroExtend);
}

void MacroAssemblerMIPSCompat::load16SignExtend(const Address& address,
                                                Register dest) {
  ma_load(dest, address, SizeHalfWord, SignExtend);
}

void MacroAssemblerMIPSCompat::load16SignExtend(const BaseIndex& src,
                                                Register dest) {
  ma_load(dest, src, SizeHalfWord, SignExtend);
}

void MacroAssemblerMIPSCompat::load32(const Address& address, Register dest) {
  ma_load(dest, address, SizeWord);
}

void MacroAssemblerMIPSCompat::load32(const BaseIndex& address,
                                      Register dest) {
  ma_load(dest, address, SizeWord);
}

void MacroAssemblerMIPSCompat::load32(AbsoluteAddress address, Register dest) {
  movePtr(ImmPtr(address.addr), ScratchRegister);
  load32(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerMIPSCompat::load32(wasm::SymbolicAddress address,
                                      Register dest) {
  movePtr(address, ScratchRegister);
  load32(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerMIPSCompat::loadPtr(const Address& address, Register dest) {
  ma_load(dest, address, SizeWord);
}

void MacroAssemblerMIPSCompat::loadPtr(const BaseIndex& src, Register dest) {
  ma_load(dest, src, SizeWord);
}

void MacroAssemblerMIPSCompat::loadPtr(AbsoluteAddress address,
                                       Register dest) {
  movePtr(ImmPtr(address.addr), ScratchRegister);
  loadPtr(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerMIPSCompat::loadPtr(wasm::SymbolicAddress address,
                                       Register dest) {
  movePtr(address, ScratchRegister);
  loadPtr(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerMIPSCompat::loadPrivate(const Address& address,
                                           Register dest) {
  ma_lw(dest, Address(address.base, address.offset + PAYLOAD_OFFSET));
}

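// Unaligned accesses use the classic lwl/lwr (and swl/swr) pairs: on a
// little-endian target, lwl at offset + 3 fills the upper bytes of the word
// and lwr at offset fills the lower bytes, together assembling a 32-bit
// value from any alignment; a double is moved as two such words.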
void MacroAssemblerMIPSCompat::loadUnalignedDouble(
    const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
    FloatRegister dest) {
  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
  computeScaledAddress(src, SecondScratchReg);

  BufferOffset load;
  if (Imm16::IsInSignedRange(src.offset) &&
      Imm16::IsInSignedRange(src.offset + 7)) {
    load = as_lwl(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET + 3);
    as_lwr(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET);
    append(access, load.getOffset());
    moveToDoubleLo(temp, dest);
    load = as_lwl(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET + 3);
    as_lwr(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET);
    append(access, load.getOffset());
    moveToDoubleHi(temp, dest);
  } else {
    ma_li(ScratchRegister, Imm32(src.offset));
    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
    load = as_lwl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
    as_lwr(temp, ScratchRegister, INT64LOW_OFFSET);
    append(access, load.getOffset());
    moveToDoubleLo(temp, dest);
    load = as_lwl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
    as_lwr(temp, ScratchRegister, INT64HIGH_OFFSET);
    append(access, load.getOffset());
    moveToDoubleHi(temp, dest);
  }
}

void MacroAssemblerMIPSCompat::loadUnalignedFloat32(
    const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
    FloatRegister dest) {
  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
  computeScaledAddress(src, SecondScratchReg);
  BufferOffset load;
  if (Imm16::IsInSignedRange(src.offset) &&
      Imm16::IsInSignedRange(src.offset + 3)) {
    load = as_lwl(temp, SecondScratchReg, src.offset + 3);
    as_lwr(temp, SecondScratchReg, src.offset);
  } else {
    ma_li(ScratchRegister, Imm32(src.offset));
    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
    load = as_lwl(temp, ScratchRegister, 3);
    as_lwr(temp, ScratchRegister, 0);
  }
  append(access, load.getOffset());
  moveToFloat32(temp, dest);
}

void MacroAssemblerMIPSCompat::store8(Imm32 imm, const Address& address) {
  ma_li(SecondScratchReg, imm);
  ma_store(SecondScratchReg, address, SizeByte);
}

void MacroAssemblerMIPSCompat::store8(Register src, const Address& address) {
  ma_store(src, address, SizeByte);
}

void MacroAssemblerMIPSCompat::store8(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeByte);
}

void MacroAssemblerMIPSCompat::store8(Register src, const BaseIndex& dest) {
  ma_store(src, dest, SizeByte);
}

void MacroAssemblerMIPSCompat::store16(Imm32 imm, const Address& address) {
  ma_li(SecondScratchReg, imm);
  ma_store(SecondScratchReg, address, SizeHalfWord);
}

void MacroAssemblerMIPSCompat::store16(Register src, const Address& address) {
  ma_store(src, address, SizeHalfWord);
}

void MacroAssemblerMIPSCompat::store16(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeHalfWord);
}

void MacroAssemblerMIPSCompat::store16(Register src,
                                       const BaseIndex& address) {
  ma_store(src, address, SizeHalfWord);
}

void MacroAssemblerMIPSCompat::store32(Register src, AbsoluteAddress address) {
  movePtr(ImmPtr(address.addr), ScratchRegister);
  store32(src, Address(ScratchRegister, 0));
}

void MacroAssemblerMIPSCompat::store32(Register src, const Address& address) {
  ma_store(src, address, SizeWord);
}

void MacroAssemblerMIPSCompat::store32(Imm32 src, const Address& address) {
  move32(src, SecondScratchReg);
  ma_store(SecondScratchReg, address, SizeWord);
}

void MacroAssemblerMIPSCompat::store32(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeWord);
}

void MacroAssemblerMIPSCompat::store32(Register src, const BaseIndex& dest) {
  ma_store(src, dest, SizeWord);
}

template <typename T>
void MacroAssemblerMIPSCompat::storePtr(ImmWord imm, T address) {
  ma_li(SecondScratchReg, imm);
  ma_store(SecondScratchReg, address, SizeWord);
}

template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmWord imm,
                                                          Address address);
template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmWord imm,
                                                            BaseIndex address);

template <typename T>
void MacroAssemblerMIPSCompat::storePtr(ImmPtr imm, T address) {
  storePtr(ImmWord(uintptr_t(imm.value)), address);
}

template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmPtr imm,
                                                          Address address);
template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmPtr imm,
                                                            BaseIndex address);

template <typename T>
void MacroAssemblerMIPSCompat::storePtr(ImmGCPtr imm, T address) {
  movePtr(imm, SecondScratchReg);
  storePtr(SecondScratchReg, address);
}

template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmGCPtr imm,
                                                          Address address);
template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmGCPtr imm,
                                                            BaseIndex address);

void MacroAssemblerMIPSCompat::storePtr(Register src, const Address& address) {
  ma_store(src, address, SizeWord);
}

void MacroAssemblerMIPSCompat::storePtr(Register src,
                                        const BaseIndex& address) {
  ma_store(src, address, SizeWord);
}

void MacroAssemblerMIPSCompat::storePtr(Register src, AbsoluteAddress dest) {
  movePtr(ImmPtr(dest.addr), ScratchRegister);
  storePtr(src, Address(ScratchRegister, 0));
}

void MacroAssemblerMIPSCompat::storeUnalignedFloat32(
    const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
    const BaseIndex& dest) {
  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
  computeScaledAddress(dest, SecondScratchReg);
  moveFromFloat32(src, temp);

  BufferOffset store;
  if (Imm16::IsInSignedRange(dest.offset) &&
      Imm16::IsInSignedRange(dest.offset + 3)) {
    store = as_swl(temp, SecondScratchReg, dest.offset + 3);
    as_swr(temp, SecondScratchReg, dest.offset);
  } else {
    ma_li(ScratchRegister, Imm32(dest.offset));
    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
    store = as_swl(temp, ScratchRegister, 3);
    as_swr(temp, ScratchRegister, 0);
  }
  append(access, store.getOffset());
}

void MacroAssemblerMIPSCompat::storeUnalignedDouble(
    const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
    const BaseIndex& dest) {
  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
  computeScaledAddress(dest, SecondScratchReg);

  BufferOffset store;
  if (Imm16::IsInSignedRange(dest.offset) &&
      Imm16::IsInSignedRange(dest.offset + 7)) {
    moveFromDoubleHi(src, temp);
    store = as_swl(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET + 3);
    as_swr(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET);
    moveFromDoubleLo(src, temp);
    as_swl(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET + 3);
    as_swr(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET);
  } else {
    ma_li(ScratchRegister, Imm32(dest.offset));
    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
    moveFromDoubleHi(src, temp);
    store = as_swl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
    as_swr(temp, ScratchRegister, INT64HIGH_OFFSET);
    moveFromDoubleLo(src, temp);
    as_swl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
    as_swr(temp, ScratchRegister, INT64LOW_OFFSET);
  }
  append(access, store.getOffset());
}

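// Clamp a double to uint8 by rounding to the nearest integer and patching
// the out-of-range cases with conditional moves: movz replaces the result
// with 255 when the unsigned "result < 255" test failed, and movf replaces
// it with 0 when the FP test "0.0 < input" is false (input <= 0 or NaN).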
void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
  as_roundwd(ScratchDoubleReg, input);
  ma_li(ScratchRegister, Imm32(255));
  as_mfc1(output, ScratchDoubleReg);
  zeroDouble(ScratchDoubleReg);
  as_sltiu(SecondScratchReg, output, 255);
  as_colt(DoubleFloat, ScratchDoubleReg, input);
  // If the result is >= 255 (unsigned), clamp it to 255.
  as_movz(output, ScratchRegister, SecondScratchReg);
  // If !(input > 0), i.e. input <= 0 or NaN, set the result to 0.
  as_movf(output, zero);
}

// higher level tag testing code
Operand MacroAssemblerMIPSCompat::ToPayload(Operand base) {
  return Operand(Register::FromCode(base.base()), base.disp() + PAYLOAD_OFFSET);
}

Operand MacroAssemblerMIPSCompat::ToType(Operand base) {
  return Operand(Register::FromCode(base.base()), base.disp() + TAG_OFFSET);
}

void MacroAssemblerMIPSCompat::testNullSet(Condition cond,
                                           const ValueOperand& value,
                                           Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_NULL), cond);
}

void MacroAssemblerMIPSCompat::testObjectSet(Condition cond,
                                             const ValueOperand& value,
                                             Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_OBJECT), cond);
}

void MacroAssemblerMIPSCompat::testUndefinedSet(Condition cond,
                                                const ValueOperand& value,
                                                Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED), cond);
}

// unboxing code
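// With nunbox32, every non-double value already carries its payload in a
// single 32-bit word, so unboxing is just a register move or a load of the
// payload half; doubles are reassembled from both halves of the box.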
unboxNonDouble(const ValueOperand & operand,Register dest,JSValueType)1331 void MacroAssemblerMIPSCompat::unboxNonDouble(const ValueOperand& operand,
1332                                               Register dest, JSValueType) {
1333   if (operand.payloadReg() != dest) {
1334     ma_move(dest, operand.payloadReg());
1335   }
1336 }
1337 
unboxNonDouble(const Address & src,Register dest,JSValueType)1338 void MacroAssemblerMIPSCompat::unboxNonDouble(const Address& src, Register dest,
1339                                               JSValueType) {
1340   ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
1341 }
1342 
unboxNonDouble(const BaseIndex & src,Register dest,JSValueType)1343 void MacroAssemblerMIPSCompat::unboxNonDouble(const BaseIndex& src,
1344                                               Register dest, JSValueType) {
1345   computeScaledAddress(src, SecondScratchReg);
1346   ma_lw(dest, Address(SecondScratchReg, src.offset + PAYLOAD_OFFSET));
1347 }
1348 
unboxInt32(const ValueOperand & operand,Register dest)1349 void MacroAssemblerMIPSCompat::unboxInt32(const ValueOperand& operand,
1350                                           Register dest) {
1351   ma_move(dest, operand.payloadReg());
1352 }
1353 
unboxInt32(const Address & src,Register dest)1354 void MacroAssemblerMIPSCompat::unboxInt32(const Address& src, Register dest) {
1355   ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
1356 }
1357 
unboxBoolean(const ValueOperand & operand,Register dest)1358 void MacroAssemblerMIPSCompat::unboxBoolean(const ValueOperand& operand,
1359                                             Register dest) {
1360   ma_move(dest, operand.payloadReg());
1361 }
1362 
unboxBoolean(const Address & src,Register dest)1363 void MacroAssemblerMIPSCompat::unboxBoolean(const Address& src, Register dest) {
1364   ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
1365 }
1366 
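// A boxed double simply keeps its low 32 bits in the payload word and its
// high 32 bits in the tag word, so unboxing it is just a pair of GPR->FPR
// moves.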
void MacroAssemblerMIPSCompat::unboxDouble(const ValueOperand& operand,
                                           FloatRegister dest) {
  moveToDoubleLo(operand.payloadReg(), dest);
  moveToDoubleHi(operand.typeReg(), dest);
}

void MacroAssemblerMIPSCompat::unboxDouble(const Address& src,
                                           FloatRegister dest) {
  ma_lw(ScratchRegister, Address(src.base, src.offset + PAYLOAD_OFFSET));
  moveToDoubleLo(ScratchRegister, dest);
  ma_lw(ScratchRegister, Address(src.base, src.offset + TAG_OFFSET));
  moveToDoubleHi(ScratchRegister, dest);
}

void MacroAssemblerMIPSCompat::unboxDouble(const BaseIndex& src,
                                           FloatRegister dest) {
  loadDouble(src, dest);
}

void MacroAssemblerMIPSCompat::unboxString(const ValueOperand& operand,
                                           Register dest) {
  ma_move(dest, operand.payloadReg());
}

void MacroAssemblerMIPSCompat::unboxString(const Address& src, Register dest) {
  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

void MacroAssemblerMIPSCompat::unboxBigInt(const ValueOperand& operand,
                                           Register dest) {
  ma_move(dest, operand.payloadReg());
}

void MacroAssemblerMIPSCompat::unboxBigInt(const Address& src, Register dest) {
  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

void MacroAssemblerMIPSCompat::unboxObject(const ValueOperand& src,
                                           Register dest) {
  ma_move(dest, src.payloadReg());
}

void MacroAssemblerMIPSCompat::unboxObject(const Address& src, Register dest) {
  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

void MacroAssemblerMIPSCompat::unboxObjectOrNull(const Address& src,
                                                 Register dest) {
  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

void MacroAssemblerMIPSCompat::unboxValue(const ValueOperand& src,
                                          AnyRegister dest, JSValueType) {
  if (dest.isFloat()) {
    Label notInt32, end;
    asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
    convertInt32ToDouble(src.payloadReg(), dest.fpu());
    ma_b(&end, ShortJump);
    bind(&notInt32);
    unboxDouble(src, dest.fpu());
    bind(&end);
  } else if (src.payloadReg() != dest.gpr()) {
    ma_move(dest.gpr(), src.payloadReg());
  }
}

void MacroAssemblerMIPSCompat::boxDouble(FloatRegister src,
                                         const ValueOperand& dest,
                                         FloatRegister) {
  moveFromDoubleLo(src, dest.payloadReg());
  moveFromDoubleHi(src, dest.typeReg());
}

void MacroAssemblerMIPSCompat::boxNonDouble(JSValueType type, Register src,
                                            const ValueOperand& dest) {
  if (src != dest.payloadReg()) {
    ma_move(dest.payloadReg(), src);
  }
  ma_li(dest.typeReg(), ImmType(type));
}

void MacroAssemblerMIPSCompat::boolValueToDouble(const ValueOperand& operand,
                                                 FloatRegister dest) {
  convertBoolToInt32(operand.payloadReg(), ScratchRegister);
  convertInt32ToDouble(ScratchRegister, dest);
}

void MacroAssemblerMIPSCompat::int32ValueToDouble(const ValueOperand& operand,
                                                  FloatRegister dest) {
  convertInt32ToDouble(operand.payloadReg(), dest);
}

void MacroAssemblerMIPSCompat::boolValueToFloat32(const ValueOperand& operand,
                                                  FloatRegister dest) {
  convertBoolToInt32(operand.payloadReg(), ScratchRegister);
  convertInt32ToFloat32(ScratchRegister, dest);
}

void MacroAssemblerMIPSCompat::int32ValueToFloat32(const ValueOperand& operand,
                                                   FloatRegister dest) {
  convertInt32ToFloat32(operand.payloadReg(), dest);
}

void MacroAssemblerMIPSCompat::loadConstantFloat32(float f,
                                                   FloatRegister dest) {
  ma_lis(dest, f);
}

void MacroAssemblerMIPSCompat::loadInt32OrDouble(const Address& src,
                                                 FloatRegister dest) {
  Label notInt32, end;
  // If it's an int, convert it to double.
  ma_lw(SecondScratchReg, Address(src.base, src.offset + TAG_OFFSET));
  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
  ma_lw(SecondScratchReg, Address(src.base, src.offset + PAYLOAD_OFFSET));
  convertInt32ToDouble(SecondScratchReg, dest);
  ma_b(&end, ShortJump);

  // Not an int, just load as double.
  bind(&notInt32);
  ma_ld(dest, src);
  bind(&end);
}

void MacroAssemblerMIPSCompat::loadInt32OrDouble(Register base, Register index,
                                                 FloatRegister dest,
                                                 int32_t shift) {
  Label notInt32, end;

  // If it's an int, convert it to double.

  computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)),
                       SecondScratchReg);
  // Since we only have one scratch, we need to stomp over it with the tag.
  load32(Address(SecondScratchReg, TAG_OFFSET), SecondScratchReg);
  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);

  computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)),
                       SecondScratchReg);
  load32(Address(SecondScratchReg, PAYLOAD_OFFSET), SecondScratchReg);
  convertInt32ToDouble(SecondScratchReg, dest);
  ma_b(&end, ShortJump);

  // Not an int, just load as double.
  bind(&notInt32);
  // First, recompute the address that had been stored in the scratch register
  // since the scratch register was overwritten loading in the type.
  computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)),
                       SecondScratchReg);
  loadDouble(Address(SecondScratchReg, 0), dest);
  bind(&end);
}

void MacroAssemblerMIPSCompat::loadConstantDouble(double dp,
                                                  FloatRegister dest) {
  ma_lid(dest, dp);
}

Register MacroAssemblerMIPSCompat::extractObject(const Address& address,
                                                 Register scratch) {
  ma_lw(scratch, Address(address.base, address.offset + PAYLOAD_OFFSET));
  return scratch;
}

Register MacroAssemblerMIPSCompat::extractTag(const Address& address,
                                              Register scratch) {
  ma_lw(scratch, Address(address.base, address.offset + TAG_OFFSET));
  return scratch;
}

Register MacroAssemblerMIPSCompat::extractTag(const BaseIndex& address,
                                              Register scratch) {
  computeScaledAddress(address, scratch);
  return extractTag(Address(scratch, address.offset), scratch);
}

uint32_t MacroAssemblerMIPSCompat::getType(const Value& val) {
  return val.toNunboxTag();
}

void MacroAssemblerMIPSCompat::moveData(const Value& val, Register data) {
  if (val.isGCThing()) {
    ma_li(data, ImmGCPtr(val.toGCThing()));
  } else {
    ma_li(data, Imm32(val.toNunboxPayload()));
  }
}

/////////////////////////////////////////////////////////////////
// X86/X64-common/ARM/MIPS interface.
/////////////////////////////////////////////////////////////////
void MacroAssemblerMIPSCompat::storeValue(ValueOperand val, Operand dst) {
  storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
}

void MacroAssemblerMIPSCompat::storeValue(ValueOperand val,
                                          const BaseIndex& dest) {
  computeScaledAddress(dest, SecondScratchReg);
  storeValue(val, Address(SecondScratchReg, dest.offset));
}

void MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg,
                                          BaseIndex dest) {
  computeScaledAddress(dest, ScratchRegister);

  // Make sure that ma_sw doesn't clobber ScratchRegister
  int32_t offset = dest.offset;
  if (!Imm16::IsInSignedRange(offset)) {
    ma_li(SecondScratchReg, Imm32(offset));
    as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
    offset = 0;
  }

  storeValue(type, reg, Address(ScratchRegister, offset));
}

void MacroAssemblerMIPSCompat::storeValue(ValueOperand val,
                                          const Address& dest) {
  ma_sw(val.payloadReg(), Address(dest.base, dest.offset + PAYLOAD_OFFSET));
  ma_sw(val.typeReg(), Address(dest.base, dest.offset + TAG_OFFSET));
}

void MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg,
                                          Address dest) {
  MOZ_ASSERT(dest.base != SecondScratchReg);

  ma_sw(reg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
  ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
}

void MacroAssemblerMIPSCompat::storeValue(const Value& val, Address dest) {
  MOZ_ASSERT(dest.base != SecondScratchReg);

  ma_li(SecondScratchReg, Imm32(getType(val)));
  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
  moveData(val, SecondScratchReg);
  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
}

void MacroAssemblerMIPSCompat::storeValue(const Value& val, BaseIndex dest) {
  computeScaledAddress(dest, ScratchRegister);

  // Make sure that ma_sw doesn't clobber ScratchRegister
  int32_t offset = dest.offset;
  if (!Imm16::IsInSignedRange(offset)) {
    ma_li(SecondScratchReg, Imm32(offset));
    as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
    offset = 0;
  }
  storeValue(val, Address(ScratchRegister, offset));
}

void MacroAssemblerMIPSCompat::loadValue(const BaseIndex& addr,
                                         ValueOperand val) {
  computeScaledAddress(addr, SecondScratchReg);
  loadValue(Address(SecondScratchReg, addr.offset), val);
}

void MacroAssemblerMIPSCompat::loadValue(Address src, ValueOperand val) {
  // Ensure that loading the payload does not erase the pointer to the
  // Value in memory.
  if (src.base != val.payloadReg()) {
    ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
    ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
  } else {
    ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
    ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
  }
}

void MacroAssemblerMIPSCompat::tagValue(JSValueType type, Register payload,
                                        ValueOperand dest) {
  MOZ_ASSERT(dest.typeReg() != dest.payloadReg());
  if (payload != dest.payloadReg()) {
    ma_move(dest.payloadReg(), payload);
  }
  ma_li(dest.typeReg(), ImmType(type));
}

void MacroAssemblerMIPSCompat::pushValue(ValueOperand val) {
  // Allocate stack slots for type and payload. One for each.
  asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
  // Store type and payload.
  storeValue(val, Address(StackPointer, 0));
}

void MacroAssemblerMIPSCompat::pushValue(const Address& addr) {
  // Allocate stack slots for type and payload. One for each.
  ma_subu(StackPointer, StackPointer, Imm32(sizeof(Value)));
  // If the address is based on StackPointer, its offset needs to be adjusted
  // to account for the stack allocation we just made.
  int32_t offset =
      addr.base != StackPointer ? addr.offset : addr.offset + sizeof(Value);
  // Store type and payload.
  ma_lw(ScratchRegister, Address(addr.base, offset + TAG_OFFSET));
  ma_sw(ScratchRegister, Address(StackPointer, TAG_OFFSET));
  ma_lw(ScratchRegister, Address(addr.base, offset + PAYLOAD_OFFSET));
  ma_sw(ScratchRegister, Address(StackPointer, PAYLOAD_OFFSET));
}

void MacroAssemblerMIPSCompat::popValue(ValueOperand val) {
  // Load payload and type.
  as_lw(val.payloadReg(), StackPointer, PAYLOAD_OFFSET);
  as_lw(val.typeReg(), StackPointer, TAG_OFFSET);
  // Free stack.
  as_addiu(StackPointer, StackPointer, sizeof(Value));
}

void MacroAssemblerMIPSCompat::storePayload(const Value& val, Address dest) {
  moveData(val, SecondScratchReg);
  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
}

void MacroAssemblerMIPSCompat::storePayload(Register src, Address dest) {
  ma_sw(src, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
}

void MacroAssemblerMIPSCompat::storePayload(const Value& val,
                                            const BaseIndex& dest) {
  MOZ_ASSERT(dest.offset == 0);

  computeScaledAddress(dest, SecondScratchReg);

  moveData(val, ScratchRegister);

  as_sw(ScratchRegister, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
}

void MacroAssemblerMIPSCompat::storePayload(Register src,
                                            const BaseIndex& dest) {
  MOZ_ASSERT(dest.offset == 0);

  computeScaledAddress(dest, SecondScratchReg);
  as_sw(src, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
}

void MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Address dest) {
  ma_li(SecondScratchReg, tag);
  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
}

void MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, const BaseIndex& dest) {
  MOZ_ASSERT(dest.offset == 0);

  computeScaledAddress(dest, SecondScratchReg);
  ma_li(ScratchRegister, tag);
  as_sw(ScratchRegister, SecondScratchReg, TAG_OFFSET);
}

void MacroAssemblerMIPSCompat::breakpoint() { as_break(0); }

void MacroAssemblerMIPSCompat::ensureDouble(const ValueOperand& source,
                                            FloatRegister dest,
                                            Label* failure) {
  Label isDouble, done;
  asMasm().branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
  asMasm().branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);

  convertInt32ToDouble(source.payloadReg(), dest);
  jump(&done);

  bind(&isDouble);
  unboxDouble(source, dest);

  bind(&done);
}

void MacroAssemblerMIPSCompat::checkStackAlignment() {
#ifdef DEBUG
  Label aligned;
  as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
  ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
  as_break(BREAK_STACK_UNALIGNED);
  bind(&aligned);
#endif
}

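// alignStackPointer() stores the incoming (possibly unaligned) sp in the
// word just below the newly aligned sp; restoreStackPointer() reloads it
// from there, so the two must always be used as a matched pair.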
void MacroAssemblerMIPSCompat::alignStackPointer() {
  movePtr(StackPointer, SecondScratchReg);
  asMasm().subPtr(Imm32(sizeof(intptr_t)), StackPointer);
  asMasm().andPtr(Imm32(~(ABIStackAlignment - 1)), StackPointer);
  storePtr(SecondScratchReg, Address(StackPointer, 0));
}

void MacroAssemblerMIPSCompat::restoreStackPointer() {
  loadPtr(Address(StackPointer, 0), StackPointer);
}

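// Exception-handler tail: call HandleException with a stack-allocated
// ResumeFromException, then dispatch on the returned |kind| to the matching
// resume path (entry frame, catch, finally, forced return, bailout, or
// wasm).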
void MacroAssemblerMIPSCompat::handleFailureWithHandlerTail(
    Label* profilerExitTail) {
  // Reserve space for exception information.
  int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
             ~(ABIStackAlignment - 1);
  asMasm().subPtr(Imm32(size), StackPointer);
  ma_move(a0, StackPointer);  // Use a0 since it is the first function argument.

  // Call the handler.
  using Fn = void (*)(ResumeFromException* rfe);
  asMasm().setupUnalignedABICall(a1);
  asMasm().passABIArg(a0);
  asMasm().callWithABI<Fn, HandleException>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  Label entryFrame;
  Label catch_;
  Label finally;
  Label return_;
  Label bailout;
  Label wasm;
  Label wasmCatch;

  // Already clobbered a0, so use it...
  load32(Address(StackPointer, offsetof(ResumeFromException, kind)), a0);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
                    &entryFrame);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_CATCH), &catch_);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_FINALLY), &finally);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_WASM), &wasm);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_WASM_CATCH), &wasmCatch);

  breakpoint();  // Invalid kind.

  // No exception handler. Load the error value, load the new stack pointer
  // and return from the entry frame.
  bind(&entryFrame);
  asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);

  // We're going to be returning by the Ion calling convention.
  ma_pop(ra);
  as_jr(ra);
  as_nop();

  // If we found a catch handler, this must be a baseline frame. Restore
  // state and jump to the catch block.
  bind(&catch_);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, target)), a0);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  jump(a0);

  // If we found a finally block, this must be a baseline frame. Push
  // two values expected by JSOp::Retsub: BooleanValue(true) and the
  // exception.
  bind(&finally);
  ValueOperand exception = ValueOperand(a1, a2);
  loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);

  loadPtr(Address(sp, offsetof(ResumeFromException, target)), a0);
  loadPtr(Address(sp, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);

  pushValue(BooleanValue(true));
  pushValue(exception);
  jump(a0);

  // Only used in debug mode. Return BaselineFrame->returnValue() to the
  // caller.
  bind(&return_);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  loadValue(
      Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
      JSReturnOperand);
  ma_move(StackPointer, BaselineFrameReg);
  pop(BaselineFrameReg);

  // If profiling is enabled, then update the lastProfilingFrame to refer to
  // the caller frame before returning.
  {
    Label skipProfilingInstrumentation;
    // Test if profiler enabled.
    AbsoluteAddress addressOfEnabled(
        GetJitContext()->runtime->geckoProfiler().addressOfEnabled());
    asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
                      &skipProfilingInstrumentation);
    jump(profilerExitTail);
    bind(&skipProfilingInstrumentation);
  }

  ret();

  // If we are bailing out to baseline to handle an exception, jump to
  // the bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
  bind(&bailout);
  loadPtr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), a2);
  ma_li(ReturnReg, Imm32(1));
  loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
  jump(a1);

  // If we are throwing and the innermost frame was a wasm frame, reset SP and
  // FP; SP is pointing to the unwound return address to the wasm entry, so
  // we can just ret().
  bind(&wasm);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          FramePointer);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  ret();

  // Found a wasm catch handler, restore state and jump to it.
  bind(&wasmCatch);
  loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          FramePointer);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  jump(a1);
}

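// Toggled call sites are emitted at a fixed size so the site can later be
// patched between an active call (jalr plus delay-slot nop) and a pair of
// nops; that invariant is asserted against ToggledCallSize() below.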
CodeOffset MacroAssemblerMIPSCompat::toggledJump(Label* label) {
  CodeOffset ret(nextOffset().getOffset());
  ma_b(label);
  return ret;
}

CodeOffset MacroAssemblerMIPSCompat::toggledCall(JitCode* target,
                                                 bool enabled) {
  BufferOffset bo = nextOffset();
  CodeOffset offset(bo.getOffset());
  addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
  ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
  if (enabled) {
    as_jalr(ScratchRegister);
    as_nop();
  } else {
    as_nop();
    as_nop();
  }
  MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
                            ToggledCallSize(nullptr));
  return offset;
}

void MacroAssemblerMIPSCompat::profilerEnterFrame(Register framePtr,
                                                  Register scratch) {
  asMasm().loadJSContext(scratch);
  loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
  storePtr(framePtr,
           Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
  storePtr(ImmPtr(nullptr),
           Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
}

void MacroAssemblerMIPSCompat::profilerExitFrame() {
  jump(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
}

void MacroAssembler::subFromStackPtr(Imm32 imm32) {
  if (imm32.value) {
    asMasm().subPtr(imm32, StackPointer);
  }
}

//{{{ check_macroassembler_style
// ===============================================================
// Stack manipulation functions.

size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
  return set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
}

void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
  int32_t diffF = set.fpus().getPushSizeInBytes();
  int32_t diffG = set.gprs().size() * sizeof(intptr_t);

  reserveStack(diffG);
  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diffG -= sizeof(intptr_t);
    storePtr(*iter, Address(StackPointer, diffG));
  }
  MOZ_ASSERT(diffG == 0);

#ifdef ENABLE_WASM_SIMD
#  error "Needs more careful logic if SIMD is enabled"
#endif

  if (diffF > 0) {
    // Double values have to be aligned. We reserve extra space so that we can
    // start writing from the first aligned location.
    // We reserve a whole extra double so that the buffer has even size.
    ma_and(SecondScratchReg, sp, Imm32(~(ABIStackAlignment - 1)));
    reserveStack(diffF);

    diffF -= sizeof(double);

    for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush());
         iter.more(); ++iter) {
      as_sdc1(*iter, SecondScratchReg, -diffF);
      diffF -= sizeof(double);
    }

    MOZ_ASSERT(diffF == 0);
  }
}

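// The pop must mirror PushRegsInMask exactly: because of the alignment
// padding, the doubles' slots can't be addressed statically from sp, so the
// aligned base is recomputed here and the whole layout is re-walked, merely
// skipping the loads for registers in |ignore|.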
void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
                                         LiveRegisterSet ignore) {
  int32_t diffG = set.gprs().size() * sizeof(intptr_t);
  int32_t diffF = set.fpus().getPushSizeInBytes();
  const int32_t reservedG = diffG;
  const int32_t reservedF = diffF;

#ifdef ENABLE_WASM_SIMD
#  error "Needs more careful logic if SIMD is enabled"
#endif

  if (reservedF > 0) {
    // Read the buffer from the first aligned location.
    ma_addu(SecondScratchReg, sp, Imm32(reservedF));
    ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(ABIStackAlignment - 1)));

    diffF -= sizeof(double);

    LiveFloatRegisterSet fpignore(ignore.fpus().reduceSetForPush());
    for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush());
         iter.more(); ++iter) {
      if (!ignore.has(*iter)) {
        as_ldc1(*iter, SecondScratchReg, -diffF);
      }
      diffF -= sizeof(double);
    }
    freeStack(reservedF);
    MOZ_ASSERT(diffF == 0);
  }

  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diffG -= sizeof(intptr_t);
    if (!ignore.has(*iter)) {
      loadPtr(Address(StackPointer, diffG), *iter);
    }
  }
  freeStack(reservedG);
  MOZ_ASSERT(diffG == 0);
}

void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
                                     Register scratch) {
  int32_t diffF = set.fpus().getPushSizeInBytes();
  int32_t diffG = set.gprs().size() * sizeof(intptr_t);

  MOZ_ASSERT(dest.offset >= diffG + diffF);
  MOZ_ASSERT(dest.base == StackPointer);

  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diffG -= sizeof(intptr_t);
    dest.offset -= sizeof(intptr_t);
    storePtr(*iter, dest);
  }
  MOZ_ASSERT(diffG == 0);

#ifdef ENABLE_WASM_SIMD
#  error "Needs more careful logic if SIMD is enabled"
#endif

  if (diffF > 0) {
    computeEffectiveAddress(dest, scratch);
    ma_and(scratch, scratch, Imm32(~(ABIStackAlignment - 1)));

    diffF -= sizeof(double);

    for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush());
         iter.more(); ++iter) {
      as_sdc1(*iter, scratch, -diffF);
      diffF -= sizeof(double);
    }
    MOZ_ASSERT(diffF == 0);
  }
}
// ===============================================================
// ABI function calls.

void MacroAssembler::setupUnalignedABICall(Register scratch) {
  MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
  setupNativeABICall();
  dynamicAlignment_ = true;

  ma_move(scratch, StackPointer);

  // Force sp to be aligned.
  asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
  ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
  storePtr(scratch, Address(StackPointer, 0));
}

void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
  MOZ_ASSERT(inCall_);
  uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();

  // Reserve space for $ra.
  stackForCall += sizeof(intptr_t);

  if (dynamicAlignment_) {
    stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
  } else {
    uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
    stackForCall += ComputeByteAlignment(
        stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
  }

  *stackAdjust = stackForCall;
  reserveStack(stackForCall);

  // Save $ra because the call is going to clobber it. Restore it in
  // callWithABIPost. NOTE: This is needed for calls from SharedIC.
  // Maybe we can do this differently.
  storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));

  // Position all arguments.
  {
    enoughMemory_ &= moveResolver_.resolve();
    if (!enoughMemory_) {
      return;
    }

    MoveEmitter emitter(*this);
    emitter.emit(moveResolver_);
    emitter.finish();
  }

  assertStackAlignment(ABIStackAlignment);
}

void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
                                     bool callFromWasm) {
  // Restore ra value (as stored in callWithABIPre()).
  loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);

  if (dynamicAlignment_) {
    // Restore sp value from stack (as stored in setupUnalignedABICall()).
    loadPtr(Address(StackPointer, stackAdjust), StackPointer);
    // Use adjustFrame instead of freeStack because we already restored sp.
    adjustFrame(-stackAdjust);
  } else {
    freeStack(stackAdjust);
  }

#ifdef DEBUG
  MOZ_ASSERT(inCall_);
  inCall_ = false;
#endif
}

void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
  // Load the callee in t9; no instruction between the lw and call
  // should clobber it. Note that we can't use fun.base because it may
  // be one of the IntArg registers clobbered before the call.
  ma_move(t9, fun);
  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);
  call(t9);
  callWithABIPost(stackAdjust, result);
}

void MacroAssembler::callWithABINoProfiler(const Address& fun,
                                           MoveOp::Type result) {
  // Load the callee in t9, as above.
  loadPtr(Address(fun.base, fun.offset), t9);
  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);
  call(t9);
  callWithABIPost(stackAdjust, result);
}
// ===============================================================
// Move instructions

void MacroAssembler::moveValue(const TypedOrValueRegister& src,
                               const ValueOperand& dest) {
  if (src.hasValue()) {
    moveValue(src.valueReg(), dest);
    return;
  }

  MIRType type = src.type();
  AnyRegister reg = src.typedReg();

  if (!IsFloatingPointType(type)) {
    if (reg.gpr() != dest.payloadReg()) {
      move32(reg.gpr(), dest.payloadReg());
    }
    mov(ImmWord(MIRTypeToTag(type)), dest.typeReg());
    return;
  }

  ScratchDoubleScope scratch(*this);
  FloatRegister freg = reg.fpu();
  if (type == MIRType::Float32) {
    convertFloat32ToDouble(freg, scratch);
    freg = scratch;
  }
  boxDouble(freg, dest, scratch);
}

void MacroAssembler::moveValue(const ValueOperand& src,
                               const ValueOperand& dest) {
  Register s0 = src.typeReg();
  Register s1 = src.payloadReg();
  Register d0 = dest.typeReg();
  Register d1 = dest.payloadReg();

  // Either one or both of the source registers could be the same as a
  // destination register.
  if (s1 == d0) {
    if (s0 == d1) {
      // If both are, this is just a swap of two registers.
      ScratchRegisterScope scratch(*this);
      MOZ_ASSERT(d1 != scratch);
      MOZ_ASSERT(d0 != scratch);
      move32(d1, scratch);
      move32(d0, d1);
      move32(scratch, d0);
      return;
    }
    // If only one is, copy that source first.
    std::swap(s0, s1);
    std::swap(d0, d1);
  }

  if (s0 != d0) {
    move32(s0, d0);
  }
  if (s1 != d1) {
    move32(s1, d1);
  }
}

void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
  move32(Imm32(src.toNunboxTag()), dest.typeReg());
  if (src.isGCThing()) {
    movePtr(ImmGCPtr(src.toGCThing()), dest.payloadReg());
  } else {
    move32(Imm32(src.toNunboxPayload()), dest.payloadReg());
  }
}

// ===============================================================
// Branch functions

void MacroAssembler::branchValueIsNurseryCell(Condition cond,
                                              const Address& address,
                                              Register temp, Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

  Label done;

  branchTestGCThing(Assembler::NotEqual, address,
                    cond == Assembler::Equal ? &done : label);

  loadPtr(address, temp);
  branchPtrInNurseryChunk(cond, temp, InvalidReg, label);

  bind(&done);
}

void MacroAssembler::branchValueIsNurseryCell(Condition cond,
                                              ValueOperand value, Register temp,
                                              Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

  Label done;

  branchTestGCThing(Assembler::NotEqual, value,
                    cond == Assembler::Equal ? &done : label);
  branchPtrInNurseryChunk(cond, value.payloadReg(), temp, label);

  bind(&done);
}

void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
                                     const Value& rhs, Label* label) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  ScratchRegisterScope scratch(*this);
  moveData(rhs, scratch);

  if (cond == Equal) {
    Label done;
    ma_b(lhs.payloadReg(), scratch, &done, NotEqual, ShortJump);
    ma_b(lhs.typeReg(), Imm32(getType(rhs)), label, Equal);
    bind(&done);
  } else {
    ma_b(lhs.payloadReg(), scratch, label, NotEqual);
    ma_b(lhs.typeReg(), Imm32(getType(rhs)), label, NotEqual);
  }
}

// ========================================================================
// Memory access primitives.
template <typename T>
void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
                                       MIRType valueType, const T& dest,
                                       MIRType slotType) {
  if (valueType == MIRType::Double) {
    storeDouble(value.reg().typedReg().fpu(), dest);
    return;
  }

  // Store the type tag if needed.
  if (valueType != slotType) {
    storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);
  }

  // Store the payload.
  if (value.constant()) {
    storePayload(value.value(), dest);
  } else {
    storePayload(value.reg().typedReg().gpr(), dest);
  }
}

template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
                                                MIRType valueType,
                                                const Address& dest,
                                                MIRType slotType);
template void MacroAssembler::storeUnboxedValue(
    const ConstantOrRegister& value, MIRType valueType,
    const BaseObjectElementIndex& dest, MIRType slotType);

void MacroAssembler::PushBoxed(FloatRegister reg) { Push(reg); }

void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
                                       Register boundsCheckLimit, Label* ok) {
  ma_b(index, boundsCheckLimit, ok, cond);
}

void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
                                       Address boundsCheckLimit, Label* ok) {
  SecondScratchRegisterScope scratch2(*this);
  load32(boundsCheckLimit, SecondScratchReg);
  ma_b(index, SecondScratchReg, ok, cond);
}

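// Unsigned truncation is synthesized from the signed trunc instruction:
// values that truncate into [0, INT32_MAX) need no further work; larger
// values have 2^31 subtracted before truncating and added back (as
// INT32_MIN) afterwards, with the FCSR invalid-operation (CauseV) bit
// routing genuinely out-of-range inputs to the OOL entry.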
void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
                                                Register output,
                                                bool isSaturating,
                                                Label* oolEntry) {
  Label done;

  as_truncwd(ScratchFloat32Reg, input);
  ma_li(ScratchRegister, Imm32(INT32_MAX));
  moveFromFloat32(ScratchFloat32Reg, output);

  // For numbers in the (-1; INT32_MAX) range there is nothing more to do.
  ma_b(output, ScratchRegister, &done, Assembler::Below, ShortJump);

  loadConstantDouble(double(INT32_MAX + 1ULL), ScratchDoubleReg);
  ma_li(ScratchRegister, Imm32(INT32_MIN));
  as_subd(ScratchDoubleReg, input, ScratchDoubleReg);
  as_truncwd(ScratchFloat32Reg, ScratchDoubleReg);
  as_cfc1(SecondScratchReg, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, output);
  ma_ext(SecondScratchReg, SecondScratchReg, Assembler::CauseV, 1);
  ma_addu(output, ScratchRegister);

  ma_b(SecondScratchReg, Imm32(0), oolEntry, Assembler::NotEqual);

  bind(&done);
}

void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
                                                 Register output,
                                                 bool isSaturating,
                                                 Label* oolEntry) {
  Label done;

  as_truncws(ScratchFloat32Reg, input);
  ma_li(ScratchRegister, Imm32(INT32_MAX));
  moveFromFloat32(ScratchFloat32Reg, output);
  // For numbers in the (-1; INT32_MAX) range there is nothing more to do.
  ma_b(output, ScratchRegister, &done, Assembler::Below, ShortJump);

  loadConstantFloat32(float(INT32_MAX + 1ULL), ScratchFloat32Reg);
  ma_li(ScratchRegister, Imm32(INT32_MIN));
  as_subs(ScratchFloat32Reg, input, ScratchFloat32Reg);
  as_truncws(ScratchFloat32Reg, ScratchFloat32Reg);
  as_cfc1(SecondScratchReg, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, output);
  ma_ext(SecondScratchReg, SecondScratchReg, Assembler::CauseV, 1);
  ma_addu(output, ScratchRegister);

  // Guard against negative values that result in 0 due to precision loss.
  as_sltiu(ScratchRegister, output, 1);
  ma_or(SecondScratchReg, ScratchRegister);

  ma_b(SecondScratchReg, Imm32(0), oolEntry, Assembler::NotEqual);

  bind(&done);
}

void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
                                 Register memoryBase, Register ptr,
                                 Register ptrScratch, Register64 output) {
  wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
}

void MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
                                          Register memoryBase, Register ptr,
                                          Register ptrScratch,
                                          Register64 output, Register tmp) {
  wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, tmp);
}

void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
                                  Register64 value, Register memoryBase,
                                  Register ptr, Register ptrScratch) {
  wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
}

void MacroAssembler::wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
                                           Register64 value,
                                           Register memoryBase, Register ptr,
                                           Register ptrScratch, Register tmp) {
  wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, tmp);
}

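// 64-bit accesses on MIPS32 go through a register pair: the low word is
// loaded/stored at the access address itself and the high word at
// INT64HIGH_OFFSET from it, with unaligned accesses routed through the
// ma_load_unaligned/ma_store_unaligned helpers and a temp register.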
void MacroAssemblerMIPSCompat::wasmLoadI64Impl(
    const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
    Register ptrScratch, Register64 output, Register tmp) {
  uint32_t offset = access.offset();
  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

  MOZ_ASSERT(!access.isZeroExtendSimd128Load());
  MOZ_ASSERT(!access.isSplatSimd128Load());
  MOZ_ASSERT(!access.isWidenSimd128Load());

  // Maybe add the offset.
  if (offset) {
    asMasm().movePtr(ptr, ptrScratch);
    asMasm().addPtr(Imm32(offset), ptrScratch);
    ptr = ptrScratch;
  }

  unsigned byteSize = access.byteSize();
  bool isSigned;

  switch (access.type()) {
    case Scalar::Int8:
      isSigned = true;
      break;
    case Scalar::Uint8:
      isSigned = false;
      break;
    case Scalar::Int16:
      isSigned = true;
      break;
    case Scalar::Uint16:
      isSigned = false;
      break;
    case Scalar::Int32:
      isSigned = true;
      break;
    case Scalar::Uint32:
      isSigned = false;
      break;
    case Scalar::Int64:
      isSigned = true;
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  BaseIndex address(memoryBase, ptr, TimesOne);
  MOZ_ASSERT(INT64LOW_OFFSET == 0);
  if (IsUnaligned(access)) {
    MOZ_ASSERT(tmp != InvalidReg);
    if (byteSize <= 4) {
      asMasm().ma_load_unaligned(access, output.low, address, tmp,
                                 static_cast<LoadStoreSize>(8 * byteSize),
                                 isSigned ? SignExtend : ZeroExtend);
      if (!isSigned) {
        asMasm().move32(Imm32(0), output.high);
      } else {
        asMasm().ma_sra(output.high, output.low, Imm32(31));
      }
    } else {
      MOZ_ASSERT(output.low != ptr);
      asMasm().ma_load_unaligned(access, output.low, address, tmp, SizeWord,
                                 ZeroExtend);
      asMasm().ma_load_unaligned(
          access, output.high,
          BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), tmp, SizeWord,
          SignExtend);
    }
    return;
  }

  asMasm().memoryBarrierBefore(access.sync());
  if (byteSize <= 4) {
    asMasm().ma_load(output.low, address,
                     static_cast<LoadStoreSize>(8 * byteSize),
                     isSigned ? SignExtend : ZeroExtend);
    asMasm().append(access, asMasm().size() - 4);
    if (!isSigned) {
      asMasm().move32(Imm32(0), output.high);
    } else {
      asMasm().ma_sra(output.high, output.low, Imm32(31));
    }
  } else {
    MOZ_ASSERT(output.low != ptr);
    asMasm().ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
    asMasm().append(access, asMasm().size() - 4);
    asMasm().ma_load(output.high,
                     BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET),
                     SizeWord);
    asMasm().append(access, asMasm().size() - 4);
  }
  asMasm().memoryBarrierAfter(access.sync());
}

void MacroAssemblerMIPSCompat::wasmStoreI64Impl(
    const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
    Register ptr, Register ptrScratch, Register tmp) {
  uint32_t offset = access.offset();
  MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

  // Maybe add the offset.
  if (offset) {
    asMasm().addPtr(Imm32(offset), ptrScratch);
    ptr = ptrScratch;
  }

  unsigned byteSize = access.byteSize();
  bool isSigned;
  switch (access.type()) {
    case Scalar::Int8:
      isSigned = true;
      break;
    case Scalar::Uint8:
      isSigned = false;
      break;
    case Scalar::Int16:
      isSigned = true;
      break;
    case Scalar::Uint16:
      isSigned = false;
      break;
    case Scalar::Int32:
      isSigned = true;
      break;
    case Scalar::Uint32:
      isSigned = false;
      break;
    case Scalar::Int64:
      isSigned = true;
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  MOZ_ASSERT(INT64LOW_OFFSET == 0);
  BaseIndex address(memoryBase, ptr, TimesOne);
  if (IsUnaligned(access)) {
    MOZ_ASSERT(tmp != InvalidReg);
    if (byteSize <= 4) {
      asMasm().ma_store_unaligned(access, value.low, address, tmp,
                                  static_cast<LoadStoreSize>(8 * byteSize),
                                  isSigned ? SignExtend : ZeroExtend);
    } else {
      asMasm().ma_store_unaligned(
          access, value.high,
          BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), tmp, SizeWord,
          SignExtend);
      asMasm().ma_store_unaligned(access, value.low, address, tmp, SizeWord,
                                  ZeroExtend);
    }
    return;
  }

  asMasm().memoryBarrierBefore(access.sync());
  if (byteSize <= 4) {
    asMasm().ma_store(value.low, address,
                      static_cast<LoadStoreSize>(8 * byteSize));
    asMasm().append(access, asMasm().size() - 4);
  } else {
    asMasm().ma_store(value.high,
                      BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET),
                      SizeWord);
    asMasm().append(access, asMasm().size() - 4);
    asMasm().ma_store(value.low, address, SizeWord);
  }
  asMasm().memoryBarrierAfter(access.sync());
}

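// MIPS32 has no 64-bit LL/SC, so 64-bit wasm atomics are implemented under a
// global spinlock (js_jit_gAtomic64Lock): a dummy byte load first forces any
// memory trap before the lock is taken, then the lock word is acquired with
// a 32-bit LL/SC loop bracketed by full barriers.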
static void EnterAtomic64Region(MacroAssembler& masm,
                                const wasm::MemoryAccessDesc& access,
                                Register addr, Register spinlock,
                                Register scratch) {
  masm.movePtr(wasm::SymbolicAddress::js_jit_gAtomic64Lock, spinlock);

  masm.append(access, masm.size());
  // Force a memory trap on an invalid access before we enter the spinlock.
  masm.as_lbu(zero, addr, 7);

  Label tryLock;

  masm.memoryBarrier(MembarFull);

  masm.bind(&tryLock);

  masm.as_ll(scratch, spinlock, 0);
  masm.ma_b(scratch, scratch, &tryLock, Assembler::NonZero, ShortJump);
  masm.ma_li(scratch, Imm32(1));
  masm.as_sc(scratch, spinlock, 0);
  masm.ma_b(scratch, scratch, &tryLock, Assembler::Zero, ShortJump);

  masm.memoryBarrier(MembarFull);
}

static void ExitAtomic64Region(MacroAssembler& masm, Register spinlock) {
  masm.memoryBarrier(MembarFull);
  masm.as_sw(zero, spinlock, 0);
  masm.memoryBarrier(MembarFull);
}
2599 
template <typename T>
static void AtomicLoad64(MacroAssembler& masm,
                         const wasm::MemoryAccessDesc& access, const T& mem,
                         Register64 temp, Register64 output) {
  MOZ_ASSERT(temp.low == InvalidReg && temp.high == InvalidReg);

  masm.computeEffectiveAddress(mem, SecondScratchReg);

  EnterAtomic64Region(masm, access, /* addr= */ SecondScratchReg,
                      /* spinlock= */ ScratchRegister,
                      /* scratch= */ output.low);

  masm.load64(Address(SecondScratchReg, 0), output);

  ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
}

void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
                                      const Address& mem, Register64 temp,
                                      Register64 output) {
  AtomicLoad64(*this, access, mem, temp, output);
}

void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
                                      const BaseIndex& mem, Register64 temp,
                                      Register64 output) {
  AtomicLoad64(*this, access, mem, temp, output);
}

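// 'temp' serves as the spinlock scratch register here, since 'value' must
// stay live until the store64 below.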
template <typename T>
void MacroAssemblerMIPSCompat::wasmAtomicStore64(
    const wasm::MemoryAccessDesc& access, const T& mem, Register temp,
    Register64 value) {
  computeEffectiveAddress(mem, SecondScratchReg);

  EnterAtomic64Region(asMasm(), access, /* addr= */ SecondScratchReg,
                      /* spinlock= */ ScratchRegister, /* scratch= */ temp);

  store64(value, Address(SecondScratchReg, 0));

  ExitAtomic64Region(asMasm(), /* spinlock= */ ScratchRegister);
}

template void MacroAssemblerMIPSCompat::wasmAtomicStore64(
    const wasm::MemoryAccessDesc& access, const Address& mem, Register temp,
    Register64 value);
template void MacroAssemblerMIPSCompat::wasmAtomicStore64(
    const wasm::MemoryAccessDesc& access, const BaseIndex& mem, Register temp,
    Register64 value);

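// Compare-exchange under the spinlock: load the old value into 'output' and
// store 'replace' only if both halves match 'expect'. Either way, 'output'
// holds the old value on exit.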
template <typename T>
static void WasmCompareExchange64(MacroAssembler& masm,
                                  const wasm::MemoryAccessDesc& access,
                                  const T& mem, Register64 expect,
                                  Register64 replace, Register64 output) {
  MOZ_ASSERT(output != expect);
  MOZ_ASSERT(output != replace);

  Label exit;

  masm.computeEffectiveAddress(mem, SecondScratchReg);
  Address addr(SecondScratchReg, 0);

  EnterAtomic64Region(masm, access, /* addr= */ SecondScratchReg,
                      /* spinlock= */ ScratchRegister,
                      /* scratch= */ output.low);

  masm.load64(addr, output);

  masm.ma_b(output.low, expect.low, &exit, Assembler::NotEqual, ShortJump);
  masm.ma_b(output.high, expect.high, &exit, Assembler::NotEqual, ShortJump);
  masm.store64(replace, addr);
  masm.bind(&exit);
  ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
}

void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
                                           const Address& mem,
                                           Register64 expect,
                                           Register64 replace,
                                           Register64 output) {
  WasmCompareExchange64(*this, access, mem, expect, replace, output);
}

void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
                                           const BaseIndex& mem,
                                           Register64 expect,
                                           Register64 replace,
                                           Register64 output) {
  WasmCompareExchange64(*this, access, mem, expect, replace, output);
}

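// Exchange under the spinlock: read the old value into 'output', then
// unconditionally store 'src'.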
template <typename T>
static void WasmAtomicExchange64(MacroAssembler& masm,
                                 const wasm::MemoryAccessDesc& access,
                                 const T& mem, Register64 src,
                                 Register64 output) {
  masm.computeEffectiveAddress(mem, SecondScratchReg);
  Address addr(SecondScratchReg, 0);

  EnterAtomic64Region(masm, access, /* addr= */ SecondScratchReg,
                      /* spinlock= */ ScratchRegister,
                      /* scratch= */ output.low);

  masm.load64(addr, output);
  masm.store64(src, addr);

  ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
}

void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
                                          const Address& mem, Register64 src,
                                          Register64 output) {
  WasmAtomicExchange64(*this, access, mem, src, output);
}

void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
                                          const BaseIndex& mem, Register64 src,
                                          Register64 output) {
  WasmAtomicExchange64(*this, access, mem, src, output);
}

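// Read-modify-write under the spinlock: the old value lands in 'output', and
// the updated value is computed into 'temp' and stored back.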
template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm,
                            const wasm::MemoryAccessDesc& access, AtomicOp op,
                            Register64 value, const T& mem, Register64 temp,
                            Register64 output) {
  masm.computeEffectiveAddress(mem, SecondScratchReg);

  EnterAtomic64Region(masm, access, /* addr= */ SecondScratchReg,
                      /* spinlock= */ ScratchRegister,
                      /* scratch= */ output.low);

  masm.load64(Address(SecondScratchReg, 0), output);

  switch (op) {
    case AtomicFetchAddOp:
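      // Add in 32-bit halves: sltu computes the carry out of the low-word
      // addition, which is then folded into the high word.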
      masm.as_addu(temp.low, output.low, value.low);
      masm.as_sltu(temp.high, temp.low, output.low);
      masm.as_addu(temp.high, temp.high, output.high);
      masm.as_addu(temp.high, temp.high, value.high);
      break;
    case AtomicFetchSubOp:
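      // Subtract in 32-bit halves: sltu computes the borrow out of the
      // low-word subtraction.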
      masm.as_sltu(temp.high, output.low, value.low);
      masm.as_subu(temp.high, output.high, temp.high);
      masm.as_subu(temp.low, output.low, value.low);
      masm.as_subu(temp.high, temp.high, value.high);
      break;
    case AtomicFetchAndOp:
      masm.as_and(temp.low, output.low, value.low);
      masm.as_and(temp.high, output.high, value.high);
      break;
    case AtomicFetchOrOp:
      masm.as_or(temp.low, output.low, value.low);
      masm.as_or(temp.high, output.high, value.high);
      break;
    case AtomicFetchXorOp:
      masm.as_xor(temp.low, output.low, value.low);
      masm.as_xor(temp.high, output.high, value.high);
      break;
    default:
      MOZ_CRASH();
  }

  masm.store64(temp, Address(SecondScratchReg, 0));

  ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
}

void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
                                         AtomicOp op, Register64 value,
                                         const Address& mem, Register64 temp,
                                         Register64 output) {
  AtomicFetchOp64(*this, access, op, value, mem, temp, output);
}

void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
                                         AtomicOp op, Register64 value,
                                         const BaseIndex& mem, Register64 temp,
                                         Register64 output) {
  AtomicFetchOp64(*this, access, op, value, mem, temp, output);
}

// ========================================================================
// Convert floating point.

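// 2^32 as a double, used to scale the high word when a 64-bit integer is
// reassembled as high * 2^32 + low.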
static const double TO_DOUBLE_HIGH_SCALE = 0x100000000;

bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }

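// dest = double(src.high) * 2^32 + double(src.low), with both halves treated
// as unsigned.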
void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
                                           Register temp) {
  MOZ_ASSERT(temp == Register::Invalid());
  convertUInt32ToDouble(src.high, dest);
  loadConstantDouble(TO_DOUBLE_HIGH_SCALE, ScratchDoubleReg);
  mulDouble(ScratchDoubleReg, dest);
  convertUInt32ToDouble(src.low, ScratchDoubleReg);
  addDouble(ScratchDoubleReg, dest);
}

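// Same decomposition, but the high half is signed; the low half of a two's
// complement value is always treated as unsigned.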
void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
  convertInt32ToDouble(src.high, dest);
  loadConstantDouble(TO_DOUBLE_HIGH_SCALE, ScratchDoubleReg);
  mulDouble(ScratchDoubleReg, dest);
  convertUInt32ToDouble(src.low, ScratchDoubleReg);
  addDouble(ScratchDoubleReg, dest);
}

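// intptr_t is 32 bits on this target, so a plain int32 conversion suffices.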
void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
  convertInt32ToDouble(src, dest);
}

//}}} check_macroassembler_style