1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2  * vim: set ts=8 sts=2 et sw=2 tw=80:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #include "jit/mips64/MacroAssembler-mips64.h"
8 
9 #include "mozilla/DebugOnly.h"
10 #include "mozilla/MathAlgorithms.h"
11 
12 #include "jit/Bailouts.h"
13 #include "jit/BaselineFrame.h"
14 #include "jit/JitFrames.h"
15 #include "jit/JitRuntime.h"
16 #include "jit/MacroAssembler.h"
17 #include "jit/mips64/Simulator-mips64.h"
18 #include "jit/MoveEmitter.h"
19 #include "jit/SharedICRegisters.h"
20 #include "util/Memory.h"
21 #include "vm/JitActivation.h"  // js::jit::JitActivation
22 #include "vm/JSContext.h"
23 
24 #include "jit/MacroAssembler-inl.h"
25 
26 using namespace js;
27 using namespace jit;
28 
29 using mozilla::Abs;
30 
31 static_assert(sizeof(intptr_t) == 8, "Not 32-bit clean.");
32 
void MacroAssemblerMIPS64Compat::convertBoolToInt32(Register src,
                                                    Register dest) {
  // Note that C++ bool is only 1 byte, so zero extend it to clear the
  // higher-order bits.
  ma_and(dest, src, Imm32(0xff));
}
39 
void MacroAssemblerMIPS64Compat::convertInt32ToDouble(Register src,
                                                      FloatRegister dest) {
  // Move the 32-bit integer into the FPU, then convert word -> double.
  as_mtc1(src, dest);
  as_cvtdw(dest, dest);
}
45 
void MacroAssemblerMIPS64Compat::convertInt32ToDouble(const Address& src,
                                                      FloatRegister dest) {
  // Load the 32-bit integer directly into the FPU register, then convert
  // word -> double in place.
  ma_ls(dest, src);
  as_cvtdw(dest, dest);
}
51 
void MacroAssemblerMIPS64Compat::convertInt32ToDouble(const BaseIndex& src,
                                                      FloatRegister dest) {
  // Materialize base + (index << scale) into ScratchRegister, then reuse
  // the Address overload for the load + conversion.
  computeScaledAddress(src, ScratchRegister);
  convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
}
57 
void MacroAssemblerMIPS64Compat::convertUInt32ToDouble(Register src,
                                                       FloatRegister dest) {
  // Zero-extend the low 32 bits into the scratch register; the value then
  // fits in a signed 64-bit integer, so the int64 -> double path is exact.
  ma_dext(ScratchRegister, src, Imm32(0), Imm32(32));
  asMasm().convertInt64ToDouble(Register64(ScratchRegister), dest);
}
63 
convertUInt64ToDouble(Register src,FloatRegister dest)64 void MacroAssemblerMIPS64Compat::convertUInt64ToDouble(Register src,
65                                                        FloatRegister dest) {
66   Label positive, done;
67   ma_b(src, src, &positive, NotSigned, ShortJump);
68 
69   MOZ_ASSERT(src != ScratchRegister);
70   MOZ_ASSERT(src != SecondScratchReg);
71 
72   ma_and(ScratchRegister, src, Imm32(1));
73   ma_dsrl(SecondScratchReg, src, Imm32(1));
74   ma_or(ScratchRegister, SecondScratchReg);
75   as_dmtc1(ScratchRegister, dest);
76   as_cvtdl(dest, dest);
77   asMasm().addDouble(dest, dest);
78   ma_b(&done, ShortJump);
79 
80   bind(&positive);
81   as_dmtc1(src, dest);
82   as_cvtdl(dest, dest);
83 
84   bind(&done);
85 }
86 
void MacroAssemblerMIPS64Compat::convertUInt32ToFloat32(Register src,
                                                        FloatRegister dest) {
  // Zero-extend the low 32 bits so the value is a non-negative int64,
  // then use the exact int64 -> float32 conversion.
  ma_dext(ScratchRegister, src, Imm32(0), Imm32(32));
  asMasm().convertInt64ToFloat32(Register64(ScratchRegister), dest);
}
92 
void MacroAssemblerMIPS64Compat::convertDoubleToFloat32(FloatRegister src,
                                                        FloatRegister dest) {
  // Single FPU instruction: convert double -> single precision.
  as_cvtsd(dest, src);
}
97 
// Layout constants for the Cause field of the FCSR: bit position of the
// first cause bit (Inexact), the width of the whole cause field, and a
// mask selecting the Inexact and Invalid bits relative to CauseBitPos.
const int CauseBitPos = int(Assembler::CauseI);
const int CauseBitCount = 1 + int(Assembler::CauseV) - int(Assembler::CauseI);
const int CauseIOrVMask = ((1 << int(Assembler::CauseI)) |
                           (1 << int(Assembler::CauseV))) >>
                          int(Assembler::CauseI);
103 
104 // Checks whether a double is representable as a 32-bit integer. If so, the
105 // integer is written to the output register. Otherwise, a bailout is taken to
106 // the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerMIPS64Compat::convertDoubleToInt32(FloatRegister src,
                                                      Register dest,
                                                      Label* fail,
                                                      bool negativeZeroCheck) {
  if (negativeZeroCheck) {
    // -0.0 has only the sign bit set; rotating the raw bits left by one
    // turns that pattern into exactly 1, so a single compare detects it.
    moveFromDouble(src, dest);
    ma_drol(dest, dest, Imm32(1));
    ma_b(dest, Imm32(1), fail, Assembler::Equal);
  }

  // Truncate double to int ; if result is inexact or invalid fail.
  as_truncwd(ScratchFloat32Reg, src);
  // Read the FCSR to inspect the cause bits raised by the truncation.
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, dest);
  ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
  as_andi(ScratchRegister, ScratchRegister,
          CauseIOrVMask);  // masking for Inexact and Invalid flag.
  ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
}
126 
void MacroAssemblerMIPS64Compat::convertDoubleToPtr(FloatRegister src,
                                                    Register dest, Label* fail,
                                                    bool negativeZeroCheck) {
  if (negativeZeroCheck) {
    // Detect -0.0: rotate the raw bit pattern left by one so the lone
    // sign bit becomes the value 1, then compare.
    moveFromDouble(src, dest);
    ma_drol(dest, dest, Imm32(1));
    ma_b(dest, Imm32(1), fail, Assembler::Equal);
  }
  // Truncate double to a 64-bit integer; fail if the FCSR reports the
  // conversion was inexact or invalid (same scheme as convertDoubleToInt32).
  as_truncld(ScratchDoubleReg, src);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromDouble(ScratchDoubleReg, dest);
  ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
  as_andi(ScratchRegister, ScratchRegister, CauseIOrVMask);
  ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
}
142 
143 // Checks whether a float32 is representable as a 32-bit integer. If so, the
144 // integer is written to the output register. Otherwise, a bailout is taken to
145 // the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerMIPS64Compat::convertFloat32ToInt32(FloatRegister src,
                                                       Register dest,
                                                       Label* fail,
                                                       bool negativeZeroCheck) {
  if (negativeZeroCheck) {
    // For float32, -0.0's raw bits equal 0x80000000 == INT32_MIN, so a
    // direct compare of the moved bits suffices.
    moveFromFloat32(src, dest);
    ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
  }

  // Truncate to int32 and fail if the FCSR cause bits report the result
  // was inexact or invalid (NaN / out of range).
  as_truncws(ScratchFloat32Reg, src);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, dest);
  ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
  as_andi(ScratchRegister, ScratchRegister, CauseIOrVMask);
  ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
}
162 
void MacroAssemblerMIPS64Compat::convertFloat32ToDouble(FloatRegister src,
                                                        FloatRegister dest) {
  // Single FPU instruction: convert single -> double precision (exact).
  as_cvtds(dest, src);
}
167 
void MacroAssemblerMIPS64Compat::convertInt32ToFloat32(Register src,
                                                       FloatRegister dest) {
  // Move the integer into the FPU, then convert word -> single.
  as_mtc1(src, dest);
  as_cvtsw(dest, dest);
}
173 
void MacroAssemblerMIPS64Compat::convertInt32ToFloat32(const Address& src,
                                                       FloatRegister dest) {
  // Load the 32-bit integer straight into the FPU register, then convert
  // word -> single in place.
  ma_ls(dest, src);
  as_cvtsw(dest, dest);
}
179 
void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
  // On MIPS64 an intptr_t is 64 bits; reuse the int64 conversion.
  convertInt64ToDouble(Register64(src), dest);
}
183 
// Register-to-register 64-bit move (x86-style mnemonic kept for parity).
void MacroAssemblerMIPS64Compat::movq(Register rs, Register rd) {
  ma_move(rd, rs);
}
187 
void MacroAssemblerMIPS64::ma_li(Register dest, CodeLabel* label) {
  // Emit a patchable 64-bit immediate load and record its buffer offset so
  // the linker can write the label's final address into the instruction
  // stream later (link mode: MoveImmediate).
  BufferOffset bo = m_buffer.nextOffset();
  ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
  label->patchAt()->bind(bo.getOffset());
  label->setLinkMode(CodeLabel::MoveImmediate);
}
194 
// Load a 64-bit immediate using the shortest instruction sequence that can
// represent it, classified by how many significant (non-sign) bits it has.
void MacroAssemblerMIPS64::ma_li(Register dest, ImmWord imm) {
  int64_t value = imm.value;

  // Fits in 16 signed bits: one addiu from zero.
  if (-1 == (value >> 15) || 0 == (value >> 15)) {
    as_addiu(dest, zero, value);
    return;
  }
  // Fits in 16 unsigned bits: one ori from zero.
  if (0 == (value >> 16)) {
    as_ori(dest, zero, value);
    return;
  }

  if (-1 == (value >> 31) || 0 == (value >> 31)) {
    // Fits in 32 signed bits: lui sign-extends, low half added below.
    as_lui(dest, uint16_t(value >> 16));
  } else if (0 == (value >> 32)) {
    // Fits in 32 unsigned bits: lui then clear the sign-extended top word.
    as_lui(dest, uint16_t(value >> 16));
    as_dinsu(dest, zero, 32, 32);
  } else if (-1 == (value >> 47) || 0 == (value >> 47)) {
    // Fits in 48 signed bits: build bits 47..16, shift left 16.
    as_lui(dest, uint16_t(value >> 32));
    if (uint16_t(value >> 16)) {
      as_ori(dest, dest, uint16_t(value >> 16));
    }
    as_dsll(dest, dest, 16);
  } else if (0 == (value >> 48)) {
    // Fits in 48 unsigned bits: as above, but clear the top word first.
    as_lui(dest, uint16_t(value >> 32));
    as_dinsu(dest, zero, 32, 32);
    if (uint16_t(value >> 16)) {
      as_ori(dest, dest, uint16_t(value >> 16));
    }
    as_dsll(dest, dest, 16);
  } else {
    // Full 64 bits: assemble the three upper 16-bit chunks with
    // lui/ori/dsll, skipping chunks that are zero.
    as_lui(dest, uint16_t(value >> 48));
    if (uint16_t(value >> 32)) {
      as_ori(dest, dest, uint16_t(value >> 32));
    }
    if (uint16_t(value >> 16)) {
      as_dsll(dest, dest, 16);
      as_ori(dest, dest, uint16_t(value >> 16));
      as_dsll(dest, dest, 16);
    } else {
      as_dsll32(dest, dest, 32);
    }
  }
  // Finally OR in the low 16 bits if they are non-zero.
  if (uint16_t(value)) {
    as_ori(dest, dest, uint16_t(value));
  }
}
242 
243 // This method generates lui, dsll and ori instruction block that can be
244 // modified by UpdateLoad64Value, either during compilation (eg.
245 // Assembler::bind), or during execution (eg. jit::PatchJump).
ma_liPatchable(Register dest,ImmPtr imm)246 void MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmPtr imm) {
247   return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
248 }
249 
void MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmWord imm,
                                          LiFlags flags) {
  // The instruction count and shape of each sequence is fixed so that
  // UpdateLoad64Value can rewrite the embedded immediate in place; do not
  // shorten either form even when chunks of the immediate are zero.
  if (Li64 == flags) {
    // Full 64-bit form: 6 instructions (lui/ori/dsll/ori/dsll/ori).
    m_buffer.ensureSpace(6 * sizeof(uint32_t));
    as_lui(dest, Imm16::Upper(Imm32(imm.value >> 32)).encode());
    as_ori(dest, dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
    as_dsll(dest, dest, 16);
    as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
    as_dsll(dest, dest, 16);
    as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
  } else {
    // Compact 48-bit form: 4 instructions, using drotr32 to move the
    // assembled upper bits into place.
    m_buffer.ensureSpace(4 * sizeof(uint32_t));
    as_lui(dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
    as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
    as_drotr32(dest, dest, 48);
    as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
  }
}
268 
// 64-bit negate: rd = 0 - rs.
void MacroAssemblerMIPS64::ma_dnegu(Register rd, Register rs) {
  as_dsubu(rd, zero, rs);
}
272 
273 // Shifts
ma_dsll(Register rd,Register rt,Imm32 shift)274 void MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Imm32 shift) {
275   if (31 < shift.value) {
276     as_dsll32(rd, rt, shift.value);
277   } else {
278     as_dsll(rd, rt, shift.value);
279   }
280 }
281 
ma_dsrl(Register rd,Register rt,Imm32 shift)282 void MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Imm32 shift) {
283   if (31 < shift.value) {
284     as_dsrl32(rd, rt, shift.value);
285   } else {
286     as_dsrl(rd, rt, shift.value);
287   }
288 }
289 
ma_dsra(Register rd,Register rt,Imm32 shift)290 void MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Imm32 shift) {
291   if (31 < shift.value) {
292     as_dsra32(rd, rt, shift.value);
293   } else {
294     as_dsra(rd, rt, shift.value);
295   }
296 }
297 
ma_dror(Register rd,Register rt,Imm32 shift)298 void MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Imm32 shift) {
299   if (31 < shift.value) {
300     as_drotr32(rd, rt, shift.value);
301   } else {
302     as_drotr(rd, rt, shift.value);
303   }
304 }
305 
// 64-bit rotate left by immediate, implemented as a rotate right by the
// complementary amount (64 - shift).
// NOTE(review): shift.value == 0 would yield s == 64, which is outside the
// encodable range of drotr32 -- callers appear to pass non-zero shifts only;
// confirm before relying on shift == 0.
void MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Imm32 shift) {
  uint32_t s = 64 - shift.value;

  if (31 < s) {
    as_drotr32(rd, rt, s);
  } else {
    as_drotr(rd, rt, s);
  }
}
315 
// 64-bit left shift by a register amount (variable form).
void MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Register shift) {
  as_dsllv(rd, rt, shift);
}
319 
// 64-bit logical right shift by a register amount (variable form).
void MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Register shift) {
  as_dsrlv(rd, rt, shift);
}
323 
// 64-bit arithmetic right shift by a register amount (variable form).
void MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Register shift) {
  as_dsrav(rd, rt, shift);
}
327 
// 64-bit rotate right by a register amount (variable form).
void MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Register shift) {
  as_drotrv(rd, rt, shift);
}
331 
// 64-bit rotate left by a register amount: rotate right by the negated
// count (the hardware masks the amount to 6 bits, so -n == 64 - n).
void MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Register shift) {
  as_dsubu(ScratchRegister, zero, shift);
  as_drotrv(rd, rt, ScratchRegister);
}
336 
ma_dins(Register rt,Register rs,Imm32 pos,Imm32 size)337 void MacroAssemblerMIPS64::ma_dins(Register rt, Register rs, Imm32 pos,
338                                    Imm32 size) {
339   if (pos.value >= 0 && pos.value < 32) {
340     if (pos.value + size.value > 32) {
341       as_dinsm(rt, rs, pos.value, size.value);
342     } else {
343       as_dins(rt, rs, pos.value, size.value);
344     }
345   } else {
346     as_dinsu(rt, rs, pos.value, size.value);
347   }
348 }
349 
ma_dext(Register rt,Register rs,Imm32 pos,Imm32 size)350 void MacroAssemblerMIPS64::ma_dext(Register rt, Register rs, Imm32 pos,
351                                    Imm32 size) {
352   if (pos.value >= 0 && pos.value < 32) {
353     if (size.value > 32) {
354       as_dextm(rt, rs, pos.value, size.value);
355     } else {
356       as_dext(rt, rs, pos.value, size.value);
357     }
358   } else {
359     as_dextu(rt, rs, pos.value, size.value);
360   }
361 }
362 
// Swap the bytes within each halfword of rt (DSBH).
void MacroAssemblerMIPS64::ma_dsbh(Register rd, Register rt) {
  as_dsbh(rd, rt);
}
366 
// Swap the halfwords within the doubleword of rt (DSHD).
void MacroAssemblerMIPS64::ma_dshd(Register rd, Register rt) {
  as_dshd(rd, rt);
}
370 
ma_dctz(Register rd,Register rs)371 void MacroAssemblerMIPS64::ma_dctz(Register rd, Register rs) {
372   ma_dnegu(ScratchRegister, rs);
373   as_and(rd, ScratchRegister, rs);
374   as_dclz(rd, rd);
375   ma_dnegu(SecondScratchReg, rd);
376   ma_daddu(SecondScratchReg, Imm32(0x3f));
377 #ifdef MIPS64
378   as_selnez(SecondScratchReg, SecondScratchReg, ScratchRegister);
379   as_seleqz(rd, rd, ScratchRegister);
380   as_or(rd, rd, SecondScratchReg);
381 #else
382   as_movn(rd, SecondScratchReg, ScratchRegister);
383 #endif
384 }
385 
386 // Arithmetic-based ops.
387 
388 // Add.
ma_daddu(Register rd,Register rs,Imm32 imm)389 void MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs, Imm32 imm) {
390   if (Imm16::IsInSignedRange(imm.value)) {
391     as_daddiu(rd, rs, imm.value);
392   } else {
393     ma_li(ScratchRegister, imm);
394     as_daddu(rd, rs, ScratchRegister);
395   }
396 }
397 
// In-place 64-bit add: rd += rs.
void MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs) {
  as_daddu(rd, rd, rs);
}
401 
// In-place 64-bit add of an immediate: rd += imm.
void MacroAssemblerMIPS64::ma_daddu(Register rd, Imm32 imm) {
  ma_daddu(rd, rd, imm);
}
405 
void MacroAssemblerMIPS64::ma_add32TestOverflow(Register rd, Register rs,
                                                Register rt, Label* overflow) {
  // Compute the sum both in 64 bits (daddu) and as a sign-extended 32-bit
  // result (addu); they differ exactly when the 32-bit add overflowed.
  as_daddu(SecondScratchReg, rs, rt);
  as_addu(rd, rs, rt);
  ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
}
412 
void MacroAssemblerMIPS64::ma_add32TestOverflow(Register rd, Register rs,
                                                Imm32 imm, Label* overflow) {
  // Check for signed range because of as_daddiu
  if (Imm16::IsInSignedRange(imm.value)) {
    // Same 64-bit vs 32-bit comparison trick as the register overload.
    as_daddiu(SecondScratchReg, rs, imm.value);
    as_addiu(rd, rs, imm.value);
    ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
  } else {
    // Immediate too wide for daddiu: load it and defer to the register form.
    ma_li(ScratchRegister, imm);
    ma_add32TestOverflow(rd, rs, ScratchRegister, overflow);
  }
}
425 
void MacroAssemblerMIPS64::ma_addPtrTestOverflow(Register rd, Register rs,
                                                 Register rt, Label* overflow) {
  SecondScratchRegisterScope scratch2(asMasm());
  MOZ_ASSERT(rd != rt);
  MOZ_ASSERT(rd != scratch2);

  if (rs == rt) {
    // rd = 2*rs overflows iff the sign changed: (rs ^ rd) < 0.
    as_daddu(rd, rs, rs);
    as_xor(scratch2, rs, rd);
  } else {
    ScratchRegisterScope scratch(asMasm());
    MOZ_ASSERT(rs != scratch2);
    MOZ_ASSERT(rt != scratch2);

    // If the sign of rs and rt are different, no overflow
    as_xor(scratch2, rs, rt);
    as_nor(scratch2, scratch2, zero);

    // Overflow iff operands had the same sign and the result's sign
    // differs from theirs: scratch2 = ~(rs^rt) & (rd^rt), negative on
    // overflow.
    as_daddu(rd, rs, rt);
    as_xor(scratch, rd, rt);
    as_and(scratch, scratch, scratch2);
  }

  ma_b(scratch2, zero, overflow, Assembler::LessThan);
}
451 
// Immediate flavour: materialize imm and defer to the register overload.
void MacroAssemblerMIPS64::ma_addPtrTestOverflow(Register rd, Register rs,
                                                 Imm32 imm, Label* overflow) {
  ma_li(ScratchRegister, imm);
  ma_addPtrTestOverflow(rd, rs, ScratchRegister, overflow);
}
457 
void MacroAssemblerMIPS64::ma_addPtrTestCarry(Condition cond, Register rd,
                                              Register rs, Register rt,
                                              Label* overflow) {
  SecondScratchRegisterScope scratch2(asMasm());
  // Unsigned carry out of rd = rs + rt occurred iff the result is
  // (unsigned) smaller than one of the operands.
  as_daddu(rd, rs, rt);
  as_sltu(scratch2, rd, rt);
  ma_b(scratch2, scratch2, overflow,
       cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
}
467 
void MacroAssemblerMIPS64::ma_addPtrTestCarry(Condition cond, Register rd,
                                              Register rs, Imm32 imm,
                                              Label* overflow) {
  // Check for signed range because of as_daddiu
  if (Imm16::IsInSignedRange(imm.value)) {
    SecondScratchRegisterScope scratch2(asMasm());
    as_daddiu(rd, rs, imm.value);
    // sltiu compares against the sign-extended immediate, matching the
    // value daddiu just added, so rd < imm (unsigned) detects the carry.
    as_sltiu(scratch2, rd, imm.value);
    ma_b(scratch2, scratch2, overflow,
         cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
  } else {
    // Wide immediate: load it and defer to the register overload.
    ma_li(ScratchRegister, imm);
    ma_addPtrTestCarry(cond, rd, rs, ScratchRegister, overflow);
  }
}
483 
484 // Subtract.
ma_dsubu(Register rd,Register rs,Imm32 imm)485 void MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs, Imm32 imm) {
486   if (Imm16::IsInSignedRange(-imm.value)) {
487     as_daddiu(rd, rs, -imm.value);
488   } else {
489     ma_li(ScratchRegister, imm);
490     as_dsubu(rd, rs, ScratchRegister);
491   }
492 }
493 
// In-place 64-bit subtract: rd -= rs.
void MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs) {
  as_dsubu(rd, rd, rs);
}
497 
// In-place 64-bit subtract of an immediate: rd -= imm.
void MacroAssemblerMIPS64::ma_dsubu(Register rd, Imm32 imm) {
  ma_dsubu(rd, rd, imm);
}
501 
void MacroAssemblerMIPS64::ma_sub32TestOverflow(Register rd, Register rs,
                                                Register rt, Label* overflow) {
  // Compute the difference both in 64 bits and as a sign-extended 32-bit
  // value; a mismatch means the 32-bit subtraction overflowed.
  as_dsubu(SecondScratchReg, rs, rt);
  as_subu(rd, rs, rt);
  ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
}
508 
void MacroAssemblerMIPS64::ma_subPtrTestOverflow(Register rd, Register rs,
                                                 Register rt, Label* overflow) {
  SecondScratchRegisterScope scratch2(asMasm());
  MOZ_ASSERT_IF(rs == rd, rs != rt);
  MOZ_ASSERT(rd != rt);
  MOZ_ASSERT(rs != scratch2);
  MOZ_ASSERT(rt != scratch2);
  MOZ_ASSERT(rd != scratch2);

  Register rs_copy = rs;

  // rd may alias rs; preserve the original rs value for the sign checks.
  if (rs == rd) {
    ma_move(scratch2, rs);
    rs_copy = scratch2;
  }

  {
    ScratchRegisterScope scratch(asMasm());
    MOZ_ASSERT(rd != scratch);

    as_dsubu(rd, rs, rt);
    // If the sign of rs and rt are the same, no overflow
    as_xor(scratch, rs_copy, rt);
    // Check if the sign of rd and rs are the same
    as_xor(scratch2, rd, rs_copy);
    as_and(scratch2, scratch2, scratch);
  }

  // scratch2 is negative exactly when rs and rt had different signs AND
  // the result's sign differs from rs: a signed overflow.
  ma_b(scratch2, zero, overflow, Assembler::LessThan);
}
539 
// Immediate flavour: materialize imm and defer to the register overload.
void MacroAssemblerMIPS64::ma_subPtrTestOverflow(Register rd, Register rs,
                                                 Imm32 imm, Label* overflow) {
  ma_li(ScratchRegister, imm);
  ma_subPtrTestOverflow(rd, rs, ScratchRegister, overflow);
}
545 
// 64-bit multiply of rs by an immediate, leaving the full product where the
// ISA revision puts it: on R6 the low half ends up back in rs and the high
// half replaces rs's old role via dmuh; pre-R6 the product lands in HI/LO.
void MacroAssemblerMIPS64::ma_dmult(Register rs, Imm32 imm) {
  ma_li(ScratchRegister, imm);
#ifdef MIPSR6
  as_dmul(rs, ScratchRegister, SecondScratchReg);
  as_dmuh(rs, ScratchRegister, rs);
  ma_move(rs, SecondScratchReg);
#else
  as_dmult(rs, ScratchRegister);
#endif
}
556 
void MacroAssemblerMIPS64::ma_mulPtrTestOverflow(Register rd, Register rs,
                                                 Register rt, Label* overflow) {
#ifdef MIPSR6
  // dmul would clobber rs before dmuh reads it when rd aliases rs.
  if (rd == rs) {
    ma_move(SecondScratchReg, rs);
    rs = SecondScratchReg;
  }
  as_dmul(rd, rs, rt);
  as_dmuh(SecondScratchReg, rs, rt);
#else
  as_dmult(rs, rt);
  as_mflo(rd);
  as_mfhi(SecondScratchReg);
#endif
  // The 128-bit product fits in 64 bits iff the high half equals the
  // sign-extension of the low half (low >> 63 arithmetically).
  as_dsra32(ScratchRegister, rd, 63);
  ma_b(ScratchRegister, SecondScratchReg, overflow, Assembler::NotEqual);
}
574 
575 // Memory.
// Load of 1/2/4/8 bytes from base+offset with optional zero-extension.
// Offsets wider than 16 signed bits are handled either by Loongson's
// indexed loads or by adding the offset into ScratchRegister first.
void MacroAssemblerMIPS64::ma_load(Register dest, Address address,
                                   LoadStoreSize size,
                                   LoadStoreExtension extension) {
  int16_t encodedOffset;
  Register base;

  // Loongson's gs*x indexed loads take the offset in a register, avoiding
  // the extra daddu (they are sign-extending, hence the extension check).
  if (isLoongson() && ZeroExtend != extension &&
      !Imm16::IsInSignedRange(address.offset)) {
    ma_li(ScratchRegister, Imm32(address.offset));
    base = address.base;

    switch (size) {
      case SizeByte:
        as_gslbx(dest, base, ScratchRegister, 0);
        break;
      case SizeHalfWord:
        as_gslhx(dest, base, ScratchRegister, 0);
        break;
      case SizeWord:
        as_gslwx(dest, base, ScratchRegister, 0);
        break;
      case SizeDouble:
        as_gsldx(dest, base, ScratchRegister, 0);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_load");
    }
    return;
  }

  // Fold an out-of-range offset into the base via the scratch register.
  if (!Imm16::IsInSignedRange(address.offset)) {
    ma_li(ScratchRegister, Imm32(address.offset));
    as_daddu(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = Imm16(0).encode();
  } else {
    encodedOffset = Imm16(address.offset).encode();
    base = address.base;
  }

  switch (size) {
    case SizeByte:
      if (ZeroExtend == extension) {
        as_lbu(dest, base, encodedOffset);
      } else {
        as_lb(dest, base, encodedOffset);
      }
      break;
    case SizeHalfWord:
      if (ZeroExtend == extension) {
        as_lhu(dest, base, encodedOffset);
      } else {
        as_lh(dest, base, encodedOffset);
      }
      break;
    case SizeWord:
      if (ZeroExtend == extension) {
        as_lwu(dest, base, encodedOffset);
      } else {
        as_lw(dest, base, encodedOffset);
      }
      break;
    case SizeDouble:
      as_ld(dest, base, encodedOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_load");
  }
}
645 
// Store of 1/2/4/8 bytes to base+offset; mirrors ma_load's handling of
// wide offsets (Loongson indexed stores, or offset folded into the base).
void MacroAssemblerMIPS64::ma_store(Register data, Address address,
                                    LoadStoreSize size,
                                    LoadStoreExtension extension) {
  int16_t encodedOffset;
  Register base;

  // Loongson's gs*x indexed stores take the wide offset in a register.
  if (isLoongson() && !Imm16::IsInSignedRange(address.offset)) {
    ma_li(ScratchRegister, Imm32(address.offset));
    base = address.base;

    switch (size) {
      case SizeByte:
        as_gssbx(data, base, ScratchRegister, 0);
        break;
      case SizeHalfWord:
        as_gsshx(data, base, ScratchRegister, 0);
        break;
      case SizeWord:
        as_gsswx(data, base, ScratchRegister, 0);
        break;
      case SizeDouble:
        as_gssdx(data, base, ScratchRegister, 0);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_store");
    }
    return;
  }

  // Fold an out-of-range offset into the base via the scratch register.
  if (!Imm16::IsInSignedRange(address.offset)) {
    ma_li(ScratchRegister, Imm32(address.offset));
    as_daddu(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = Imm16(0).encode();
  } else {
    encodedOffset = Imm16(address.offset).encode();
    base = address.base;
  }

  switch (size) {
    case SizeByte:
      as_sb(data, base, encodedOffset);
      break;
    case SizeHalfWord:
      as_sh(data, base, encodedOffset);
      break;
    case SizeWord:
      as_sw(data, base, encodedOffset);
      break;
    case SizeDouble:
      as_sd(data, base, encodedOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_store");
  }
}
702 
// dest = base + (index << scale); the BaseIndex offset is NOT added here
// (see computeEffectiveAddress for that).
void MacroAssemblerMIPS64Compat::computeScaledAddress(const BaseIndex& address,
                                                      Register dest) {
  int32_t shift = Imm32::ShiftOf(address.scale).value;
  if (shift) {
    ma_dsll(ScratchRegister, address.index, Imm32(shift));
    as_daddu(dest, address.base, ScratchRegister);
  } else {
    // Scale of 1: skip the shift entirely.
    as_daddu(dest, address.base, address.index);
  }
}
713 
// dest = base + (index << scale) + offset.
void MacroAssemblerMIPS64Compat::computeEffectiveAddress(
    const BaseIndex& address, Register dest) {
  computeScaledAddress(address, dest);
  if (address.offset) {
    asMasm().addPtr(Imm32(address.offset), dest);
  }
}
721 
// Stack push/pop helpers; these transfer a full 64-bit word (ld/sd).
// Pop one 64-bit word: load from the stack top, then bump the stack
// pointer by the word size.
void MacroAssemblerMIPS64::ma_pop(Register r) {
  as_ld(r, StackPointer, 0);
  as_daddiu(StackPointer, StackPointer, sizeof(intptr_t));
}
727 
// Push one 64-bit word: decrement the stack pointer, then store.
void MacroAssemblerMIPS64::ma_push(Register r) {
  if (r == sp) {
    // Pushing sp requires one more instruction.
    // The store below would otherwise see the already-decremented sp.
    ma_move(ScratchRegister, sp);
    r = ScratchRegister;
  }

  as_daddiu(StackPointer, StackPointer, (int32_t) - sizeof(intptr_t));
  as_sd(r, StackPointer, 0);
}
738 
739 // Branches when done from within mips-specific code.
void MacroAssemblerMIPS64::ma_b(Register lhs, ImmWord imm, Label* label,
                                Condition c, JumpKind jumpKind) {
  // Small unsigned immediates take the cheaper Imm32 path; anything wider
  // must be materialized into the scratch register first.
  if (imm.value <= INT32_MAX) {
    ma_b(lhs, Imm32(uint32_t(imm.value)), label, c, jumpKind);
  } else {
    MOZ_ASSERT(lhs != ScratchRegister);
    ma_li(ScratchRegister, imm);
    ma_b(lhs, ScratchRegister, label, c, jumpKind);
  }
}
750 
// Compare lhs against the 64-bit word at addr and branch.
void MacroAssemblerMIPS64::ma_b(Register lhs, Address addr, Label* label,
                                Condition c, JumpKind jumpKind) {
  MOZ_ASSERT(lhs != ScratchRegister);
  ma_load(ScratchRegister, addr, SizeDouble);
  ma_b(lhs, ScratchRegister, label, c, jumpKind);
}
757 
// Compare the 64-bit word at addr against an immediate and branch.
void MacroAssemblerMIPS64::ma_b(Address addr, Imm32 imm, Label* label,
                                Condition c, JumpKind jumpKind) {
  ma_load(SecondScratchReg, addr, SizeDouble);
  ma_b(SecondScratchReg, imm, label, c, jumpKind);
}
763 
// Compare the 64-bit word at addr against a GC-pointer immediate and branch.
void MacroAssemblerMIPS64::ma_b(Address addr, ImmGCPtr imm, Label* label,
                                Condition c, JumpKind jumpKind) {
  ma_load(SecondScratchReg, addr, SizeDouble);
  ma_b(SecondScratchReg, imm, label, c, jumpKind);
}
769 
// Branch-and-link (call) to a label; always reserves the full long-jump
// footprint so the return address stays the address after the block.
void MacroAssemblerMIPS64::ma_bal(Label* label, DelaySlotFill delaySlotFill) {
  spew("branch .Llabel %p\n", label);
  if (label->bound()) {
    // Generate the long jump for calls because return address has to be
    // the address after the reserved block.
    addLongJump(nextOffset(), BufferOffset(label->offset()));
    ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
    as_jalr(ScratchRegister);
    if (delaySlotFill == FillDelaySlot) {
      as_nop();
    }
    return;
  }

  // Label not bound yet: emit a placeholder branch and thread this site
  // onto the label's chain of uses for later patching.
  // Second word holds a pointer to the next branch in label's chain.
  uint32_t nextInChain =
      label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

  // Make the whole branch continous in the buffer. The '6'
  // instructions are writing at below (contain delay slot).
  m_buffer.ensureSpace(6 * sizeof(uint32_t));

  spew("bal .Llabel %p\n", label);
  BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
  writeInst(nextInChain);
  if (!oom()) {
    label->use(bo.getOffset());
  }
  // Leave space for long jump.
  as_nop();
  as_nop();
  as_nop();
  if (delaySlotFill == FillDelaySlot) {
    as_nop();
  }
}
806 
// Emit the (possibly conditional) branch encoded in |code| targeting
// |label|.  Bound labels get either a short PC-relative branch or a long
// jump through ScratchRegister; unbound labels get a stub linked into the
// label's use-chain with space reserved for later patching.
void MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label,
                                          JumpKind jumpKind) {
  // simply output the pointer of one label as its id,
  // notice that after one label destructor, the pointer will be reused.
  spew("branch .Llabel %p", label);
  // Calls (bgezal zero == bal) must not come through here; presumably they
  // are expected to use ma_bal, which reserves call-sized space.
  MOZ_ASSERT(code.encode() !=
             InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
  InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));

  if (label->bound()) {
    int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();

    // Prefer the short form whenever the target fits the 16-bit offset.
    if (BOffImm16::IsInRange(offset)) {
      jumpKind = ShortJump;
    }

    if (jumpKind == ShortJump) {
      MOZ_ASSERT(BOffImm16::IsInRange(offset));
      code.setBOffImm16(BOffImm16(offset));
#ifdef JS_JITSPEW
      decodeBranchInstAndSpew(code);
#endif
      writeInst(code.encode());
      as_nop();  // Branch delay slot.
      return;
    }

    if (code.encode() == inst_beq.encode()) {
      // Handle long jump: the branch is unconditional, so simply jump
      // through the scratch register.
      addLongJump(nextOffset(), BufferOffset(label->offset()));
      ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
      as_jr(ScratchRegister);
      as_nop();  // Branch delay slot.
      return;
    }

    // Handle long conditional branch, the target offset is based on self,
    // point to next instruction of nop at below.
    spew("invert branch .Llabel %p", label);
    InstImm code_r = invertBranch(code, BOffImm16(7 * sizeof(uint32_t)));
#ifdef JS_JITSPEW
    decodeBranchInstAndSpew(code_r);
#endif
    writeInst(code_r.encode());
    // No need for a "nop" here because we can clobber scratch.
    addLongJump(nextOffset(), BufferOffset(label->offset()));
    ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
    as_jr(ScratchRegister);
    as_nop();  // Branch delay slot.
    return;
  }

  // Generate open jump and link it to a label.

  // Second word holds a pointer to the next branch in label's chain.
  uint32_t nextInChain =
      label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

  if (jumpKind == ShortJump) {
    // Make the whole branch continuous in the buffer.
    m_buffer.ensureSpace(2 * sizeof(uint32_t));

    // Indicate that this is short jump with offset 4.
    code.setBOffImm16(BOffImm16(4));
#ifdef JS_JITSPEW
    decodeBranchInstAndSpew(code);
#endif
    BufferOffset bo = writeInst(code.encode());
    writeInst(nextInChain);
    if (!oom()) {
      label->use(bo.getOffset());
    }
    return;
  }

  bool conditional = code.encode() != inst_beq.encode();

  // Make the whole branch continuous in the buffer.  The '7'
  // instructions are written below (including the conditional nop).
  m_buffer.ensureSpace(7 * sizeof(uint32_t));

#ifdef JS_JITSPEW
  decodeBranchInstAndSpew(code);
#endif
  BufferOffset bo = writeInst(code.encode());
  writeInst(nextInChain);
  if (!oom()) {
    label->use(bo.getOffset());
  }
  // Leave space for potential long jump.
  as_nop();
  as_nop();
  as_nop();
  as_nop();
  if (conditional) {
    as_nop();
  }
}
905 
// Set |rd| to the boolean result of comparing |rs| against a 64-bit
// immediate.  Immediates that fit in the Imm32 path reuse it; larger values
// are materialized into ScratchRegister first.
void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmWord imm,
                                      Condition c) {
  if (imm.value <= INT32_MAX) {
    ma_cmp_set(rd, rs, Imm32(uint32_t(imm.value)), c);
  } else {
    ma_li(ScratchRegister, imm);
    ma_cmp_set(rd, rs, ScratchRegister, c);
  }
}

// Pointer-immediate variant: forwards to the ImmWord overload.
void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmPtr imm,
                                      Condition c) {
  ma_cmp_set(rd, rs, ImmWord(uintptr_t(imm.value)), c);
}

// Compare a sign-extended 32-bit memory operand against an immediate and
// set |rd| to the result.  Clobbers ScratchRegister.
void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Address address, Imm32 imm,
                                      Condition c) {
  ma_load(ScratchRegister, address, SizeWord, SignExtend);
  ma_cmp_set(rd, ScratchRegister, imm, c);
}
926 
// fp instructions

// Load the double constant |value| into |dest|.  The all-zero bit pattern
// (i.e. +0.0) is moved directly from the zero register; any other pattern
// is materialized into ScratchRegister first.
void MacroAssemblerMIPS64::ma_lid(FloatRegister dest, double value) {
  ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));

  if (imm.value != 0) {
    ma_li(ScratchRegister, imm);
    moveToDouble(ScratchRegister, dest);
  } else {
    moveToDouble(zero, dest);
  }
}

// Move the raw 64 bits of a double register into a value register (dmfc1).
void MacroAssemblerMIPS64::ma_mv(FloatRegister src, ValueOperand dest) {
  as_dmfc1(dest.valueReg(), src);
}

// Move the raw 64 bits of a value register into a double register (dmtc1).
void MacroAssemblerMIPS64::ma_mv(ValueOperand src, FloatRegister dest) {
  as_dmtc1(src.valueReg(), dest);
}
946 
// Load a single-precision float from |address|.  Offsets that fit the
// 16-bit signed immediate use lwc1 directly; otherwise the offset is
// materialized and added to the base (Loongson has an indexed load that
// avoids the explicit add).  May clobber ScratchRegister.
void MacroAssemblerMIPS64::ma_ls(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_lwc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gslsx(ft, address.base, ScratchRegister, 0);
    } else {
      as_daddu(ScratchRegister, address.base, ScratchRegister);
      as_lwc1(ft, ScratchRegister, 0);
    }
  }
}

// Load a double-precision float from |address|; same offset strategy as
// ma_ls.  May clobber ScratchRegister.
void MacroAssemblerMIPS64::ma_ld(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_ldc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gsldx(ft, address.base, ScratchRegister, 0);
    } else {
      as_daddu(ScratchRegister, address.base, ScratchRegister);
      as_ldc1(ft, ScratchRegister, 0);
    }
  }
}

// Store a double-precision float to |address|; same offset strategy as
// ma_ls.  May clobber ScratchRegister.
void MacroAssemblerMIPS64::ma_sd(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_sdc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gssdx(ft, address.base, ScratchRegister, 0);
    } else {
      as_daddu(ScratchRegister, address.base, ScratchRegister);
      as_sdc1(ft, ScratchRegister, 0);
    }
  }
}

// Store a single-precision float to |address|; same offset strategy as
// ma_ls.  May clobber ScratchRegister.
void MacroAssemblerMIPS64::ma_ss(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_swc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gsssx(ft, address.base, ScratchRegister, 0);
    } else {
      as_daddu(ScratchRegister, address.base, ScratchRegister);
      as_swc1(ft, ScratchRegister, 0);
    }
  }
}
1006 
// Pop a double off the stack into |f| (load, then bump sp up).
void MacroAssemblerMIPS64::ma_pop(FloatRegister f) {
  as_ldc1(f, StackPointer, 0);
  as_daddiu(StackPointer, StackPointer, sizeof(double));
}

// Push the double in |f| onto the stack (bump sp down, then store).
void MacroAssemblerMIPS64::ma_push(FloatRegister f) {
  as_daddiu(StackPointer, StackPointer, (int32_t) - sizeof(double));
  as_sdc1(f, StackPointer, 0);
}
1016 
// Build a fake exit frame for an out-of-line VM call: push a frame
// descriptor describing the current frame as IonJS, then the fake return
// address.  Always succeeds (returns true).
bool MacroAssemblerMIPS64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
  uint32_t descriptor = MakeFrameDescriptor(
      asMasm().framePushed(), FrameType::IonJS, ExitFrameLayout::Size());

  asMasm().Push(Imm32(descriptor));  // descriptor_
  asMasm().Push(ImmPtr(fakeReturnAddr));

  return true;
}
1026 
// Load a 32-bit immediate into |dest|.
void MacroAssemblerMIPS64Compat::move32(Imm32 imm, Register dest) {
  ma_li(dest, imm);
}

// Register-to-register 32-bit move.
void MacroAssemblerMIPS64Compat::move32(Register src, Register dest) {
  ma_move(dest, src);
}

// Register-to-register pointer-width move.
void MacroAssemblerMIPS64Compat::movePtr(Register src, Register dest) {
  ma_move(dest, src);
}

// Load a 64-bit immediate into |dest|.
void MacroAssemblerMIPS64Compat::movePtr(ImmWord imm, Register dest) {
  ma_li(dest, imm);
}

// Load a GC-thing pointer immediate into |dest| (tracked for GC).
void MacroAssemblerMIPS64Compat::movePtr(ImmGCPtr imm, Register dest) {
  ma_li(dest, imm);
}

// Load a raw pointer immediate into |dest|.
void MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest) {
  movePtr(ImmWord(uintptr_t(imm.value)), dest);
}

// Load a wasm symbolic address as a patchable immediate.  The -1
// placeholder is patched at link time via the recorded SymbolicAccess.
void MacroAssemblerMIPS64Compat::movePtr(wasm::SymbolicAddress imm,
                                         Register dest) {
  append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
  ma_liPatchable(dest, ImmWord(-1));
}
1054 
// 8- and 16-bit loads with explicit zero- or sign-extension into the full
// 64-bit destination register.  All variants forward to ma_load.

void MacroAssemblerMIPS64Compat::load8ZeroExtend(const Address& address,
                                                 Register dest) {
  ma_load(dest, address, SizeByte, ZeroExtend);
}

void MacroAssemblerMIPS64Compat::load8ZeroExtend(const BaseIndex& src,
                                                 Register dest) {
  ma_load(dest, src, SizeByte, ZeroExtend);
}

void MacroAssemblerMIPS64Compat::load8SignExtend(const Address& address,
                                                 Register dest) {
  ma_load(dest, address, SizeByte, SignExtend);
}

void MacroAssemblerMIPS64Compat::load8SignExtend(const BaseIndex& src,
                                                 Register dest) {
  ma_load(dest, src, SizeByte, SignExtend);
}

void MacroAssemblerMIPS64Compat::load16ZeroExtend(const Address& address,
                                                  Register dest) {
  ma_load(dest, address, SizeHalfWord, ZeroExtend);
}

void MacroAssemblerMIPS64Compat::load16ZeroExtend(const BaseIndex& src,
                                                  Register dest) {
  ma_load(dest, src, SizeHalfWord, ZeroExtend);
}

void MacroAssemblerMIPS64Compat::load16SignExtend(const Address& address,
                                                  Register dest) {
  ma_load(dest, address, SizeHalfWord, SignExtend);
}

void MacroAssemblerMIPS64Compat::load16SignExtend(const BaseIndex& src,
                                                  Register dest) {
  ma_load(dest, src, SizeHalfWord, SignExtend);
}
1094 
// 32-bit loads.  The absolute/symbolic-address variants first materialize
// the address into ScratchRegister.

void MacroAssemblerMIPS64Compat::load32(const Address& address, Register dest) {
  ma_load(dest, address, SizeWord);
}

void MacroAssemblerMIPS64Compat::load32(const BaseIndex& address,
                                        Register dest) {
  ma_load(dest, address, SizeWord);
}

void MacroAssemblerMIPS64Compat::load32(AbsoluteAddress address,
                                        Register dest) {
  movePtr(ImmPtr(address.addr), ScratchRegister);
  load32(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerMIPS64Compat::load32(wasm::SymbolicAddress address,
                                        Register dest) {
  movePtr(address, ScratchRegister);
  load32(Address(ScratchRegister, 0), dest);
}
1115 
// Pointer-width (64-bit) loads.  The absolute/symbolic-address variants
// first materialize the address into ScratchRegister.

void MacroAssemblerMIPS64Compat::loadPtr(const Address& address,
                                         Register dest) {
  ma_load(dest, address, SizeDouble);
}

void MacroAssemblerMIPS64Compat::loadPtr(const BaseIndex& src, Register dest) {
  ma_load(dest, src, SizeDouble);
}

void MacroAssemblerMIPS64Compat::loadPtr(AbsoluteAddress address,
                                         Register dest) {
  movePtr(ImmPtr(address.addr), ScratchRegister);
  loadPtr(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerMIPS64Compat::loadPtr(wasm::SymbolicAddress address,
                                         Register dest) {
  movePtr(address, ScratchRegister);
  loadPtr(Address(ScratchRegister, 0), dest);
}

// Load a private value: on this platform it is simply a full-width load.
void MacroAssemblerMIPS64Compat::loadPrivate(const Address& address,
                                             Register dest) {
  loadPtr(address, dest);
}
1141 
// Unaligned 64-bit wasm load: use the ldl/ldr pair to assemble the value in
// |temp|, then move it into the double register.  If the offset (and
// offset+7) fits the 16-bit immediate the pair addresses SecondScratchReg
// directly; otherwise the full address is computed in ScratchRegister.
// The ldl is recorded (via append) as the faulting instruction for this
// wasm memory access.
void MacroAssemblerMIPS64Compat::loadUnalignedDouble(
    const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
    FloatRegister dest) {
  computeScaledAddress(src, SecondScratchReg);
  BufferOffset load;
  if (Imm16::IsInSignedRange(src.offset) &&
      Imm16::IsInSignedRange(src.offset + 7)) {
    load = as_ldl(temp, SecondScratchReg, src.offset + 7);
    as_ldr(temp, SecondScratchReg, src.offset);
  } else {
    ma_li(ScratchRegister, Imm32(src.offset));
    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
    load = as_ldl(temp, ScratchRegister, 7);
    as_ldr(temp, ScratchRegister, 0);
  }
  append(access, load.getOffset());
  moveToDouble(temp, dest);
}

// Unaligned 32-bit wasm load: same scheme as loadUnalignedDouble but with
// the lwl/lwr pair and a float32 destination.
void MacroAssemblerMIPS64Compat::loadUnalignedFloat32(
    const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
    FloatRegister dest) {
  computeScaledAddress(src, SecondScratchReg);
  BufferOffset load;
  if (Imm16::IsInSignedRange(src.offset) &&
      Imm16::IsInSignedRange(src.offset + 3)) {
    load = as_lwl(temp, SecondScratchReg, src.offset + 3);
    as_lwr(temp, SecondScratchReg, src.offset);
  } else {
    ma_li(ScratchRegister, Imm32(src.offset));
    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
    load = as_lwl(temp, ScratchRegister, 3);
    as_lwr(temp, ScratchRegister, 0);
  }
  append(access, load.getOffset());
  moveToFloat32(temp, dest);
}
1179 
// 8- and 16-bit stores.  Immediate variants materialize the value in
// SecondScratchReg (Address forms) or let ma_store handle it (BaseIndex).

void MacroAssemblerMIPS64Compat::store8(Imm32 imm, const Address& address) {
  ma_li(SecondScratchReg, imm);
  ma_store(SecondScratchReg, address, SizeByte);
}

void MacroAssemblerMIPS64Compat::store8(Register src, const Address& address) {
  ma_store(src, address, SizeByte);
}

void MacroAssemblerMIPS64Compat::store8(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeByte);
}

void MacroAssemblerMIPS64Compat::store8(Register src, const BaseIndex& dest) {
  ma_store(src, dest, SizeByte);
}

void MacroAssemblerMIPS64Compat::store16(Imm32 imm, const Address& address) {
  ma_li(SecondScratchReg, imm);
  ma_store(SecondScratchReg, address, SizeHalfWord);
}

void MacroAssemblerMIPS64Compat::store16(Register src, const Address& address) {
  ma_store(src, address, SizeHalfWord);
}

void MacroAssemblerMIPS64Compat::store16(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeHalfWord);
}

void MacroAssemblerMIPS64Compat::store16(Register src,
                                         const BaseIndex& address) {
  ma_store(src, address, SizeHalfWord);
}
1214 
// 32-bit stores.  The AbsoluteAddress variant materializes the address in
// ScratchRegister; the immediate/Address variant stages the value in
// SecondScratchReg.

void MacroAssemblerMIPS64Compat::store32(Register src,
                                         AbsoluteAddress address) {
  movePtr(ImmPtr(address.addr), ScratchRegister);
  store32(src, Address(ScratchRegister, 0));
}

void MacroAssemblerMIPS64Compat::store32(Register src, const Address& address) {
  ma_store(src, address, SizeWord);
}

void MacroAssemblerMIPS64Compat::store32(Imm32 src, const Address& address) {
  move32(src, SecondScratchReg);
  ma_store(SecondScratchReg, address, SizeWord);
}

void MacroAssemblerMIPS64Compat::store32(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeWord);
}

void MacroAssemblerMIPS64Compat::store32(Register src, const BaseIndex& dest) {
  ma_store(src, dest, SizeWord);
}
1237 
// Pointer-width (64-bit) stores.  The immediate templates stage the value
// in SecondScratchReg; each template is explicitly instantiated for the
// Address and BaseIndex destination forms used elsewhere.

template <typename T>
void MacroAssemblerMIPS64Compat::storePtr(ImmWord imm, T address) {
  ma_li(SecondScratchReg, imm);
  ma_store(SecondScratchReg, address, SizeDouble);
}

template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmWord imm,
                                                            Address address);
template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
    ImmWord imm, BaseIndex address);

// Raw-pointer immediate: forwards to the ImmWord form.
template <typename T>
void MacroAssemblerMIPS64Compat::storePtr(ImmPtr imm, T address) {
  storePtr(ImmWord(uintptr_t(imm.value)), address);
}

template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmPtr imm,
                                                            Address address);
template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
    ImmPtr imm, BaseIndex address);

// GC-thing pointer immediate: materialized via movePtr so the reference is
// tracked, then stored.
template <typename T>
void MacroAssemblerMIPS64Compat::storePtr(ImmGCPtr imm, T address) {
  movePtr(imm, SecondScratchReg);
  storePtr(SecondScratchReg, address);
}

template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmGCPtr imm,
                                                            Address address);
template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
    ImmGCPtr imm, BaseIndex address);

void MacroAssemblerMIPS64Compat::storePtr(Register src,
                                          const Address& address) {
  ma_store(src, address, SizeDouble);
}

void MacroAssemblerMIPS64Compat::storePtr(Register src,
                                          const BaseIndex& address) {
  ma_store(src, address, SizeDouble);
}

void MacroAssemblerMIPS64Compat::storePtr(Register src, AbsoluteAddress dest) {
  movePtr(ImmPtr(dest.addr), ScratchRegister);
  storePtr(src, Address(ScratchRegister, 0));
}
1284 
// Unaligned 32-bit wasm store: move the float bits into |temp| and store
// them with the swl/swr pair.  If offset (and offset+3) fits the 16-bit
// immediate the pair addresses SecondScratchReg directly; otherwise the
// full address is computed in ScratchRegister.  The swl is recorded (via
// append) as the faulting instruction for this wasm memory access.
void MacroAssemblerMIPS64Compat::storeUnalignedFloat32(
    const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
    const BaseIndex& dest) {
  computeScaledAddress(dest, SecondScratchReg);
  moveFromFloat32(src, temp);
  BufferOffset store;
  if (Imm16::IsInSignedRange(dest.offset) &&
      Imm16::IsInSignedRange(dest.offset + 3)) {
    store = as_swl(temp, SecondScratchReg, dest.offset + 3);
    as_swr(temp, SecondScratchReg, dest.offset);
  } else {
    ma_li(ScratchRegister, Imm32(dest.offset));
    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
    store = as_swl(temp, ScratchRegister, 3);
    as_swr(temp, ScratchRegister, 0);
  }
  append(access, store.getOffset());
}

// Unaligned 64-bit wasm store: same scheme as storeUnalignedFloat32 but
// with the sdl/sdr pair and a double source.
void MacroAssemblerMIPS64Compat::storeUnalignedDouble(
    const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
    const BaseIndex& dest) {
  computeScaledAddress(dest, SecondScratchReg);
  moveFromDouble(src, temp);

  BufferOffset store;
  if (Imm16::IsInSignedRange(dest.offset) &&
      Imm16::IsInSignedRange(dest.offset + 7)) {
    store = as_sdl(temp, SecondScratchReg, dest.offset + 7);
    as_sdr(temp, SecondScratchReg, dest.offset);
  } else {
    ma_li(ScratchRegister, Imm32(dest.offset));
    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
    store = as_sdl(temp, ScratchRegister, 7);
    as_sdr(temp, ScratchRegister, 0);
  }
  append(access, store.getOffset());
}
1323 
// Clamp a double to [0, 255] and convert it to an integer (the semantics
// used by Uint8ClampedArray stores): round with round.w.d, then clamp the
// integer result so values above 255 become 255 and non-positive inputs
// become 0.  Clobbers ScratchRegister, SecondScratchReg, ScratchDoubleReg.
void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
  as_roundwd(ScratchDoubleReg, input);
  ma_li(ScratchRegister, Imm32(255));
  as_mfc1(output, ScratchDoubleReg);
#ifdef MIPSR6
  // R6 replaces the movz/movf conditional moves with select instructions.
  as_slti(SecondScratchReg, output, 0);
  as_seleqz(output, output, SecondScratchReg);  // output < 0  ->  output = 0
  as_sltiu(SecondScratchReg, output, 255);
  as_selnez(output, output, SecondScratchReg);
  as_seleqz(ScratchRegister, ScratchRegister, SecondScratchReg);
  as_or(output, output, ScratchRegister);  // output >= 255  ->  output = 255
#else
  zeroDouble(ScratchDoubleReg);
  as_sltiu(SecondScratchReg, output, 255);
  as_colt(DoubleFloat, ScratchDoubleReg, input);
  // if res > 255; res = 255;
  as_movz(output, ScratchRegister, SecondScratchReg);
  // if !(input > 0); res = 0;
  as_movf(output, zero);
#endif
}
1345 
// Set |dest| to the result of testing the value's tag against
// JSVAL_TAG_NULL under |cond| (Equal or NotEqual only).  Clobbers
// SecondScratchReg.
void MacroAssemblerMIPS64Compat::testNullSet(Condition cond,
                                             const ValueOperand& value,
                                             Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  splitTag(value, SecondScratchReg);
  ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond);
}

// Same as testNullSet, but for JSVAL_TAG_OBJECT.
void MacroAssemblerMIPS64Compat::testObjectSet(Condition cond,
                                               const ValueOperand& value,
                                               Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  splitTag(value, SecondScratchReg);
  ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond);
}

// Same as testNullSet, but for JSVAL_TAG_UNDEFINED.
void MacroAssemblerMIPS64Compat::testUndefinedSet(Condition cond,
                                                  const ValueOperand& value,
                                                  Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  splitTag(value, SecondScratchReg);
  ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond);
}
1369 
// Unbox an int32 payload.  A shift-left by 0 (sll) sign-extends the low 32
// bits into the 64-bit destination on MIPS64, discarding the tag.
void MacroAssemblerMIPS64Compat::unboxInt32(const ValueOperand& operand,
                                            Register dest) {
  ma_sll(dest, operand.valueReg(), Imm32(0));
}

void MacroAssemblerMIPS64Compat::unboxInt32(Register src, Register dest) {
  ma_sll(dest, src, Imm32(0));
}

// Memory variants load only the low 32 bits of the value.
void MacroAssemblerMIPS64Compat::unboxInt32(const Address& src, Register dest) {
  load32(Address(src.base, src.offset), dest);
}

void MacroAssemblerMIPS64Compat::unboxInt32(const BaseIndex& src,
                                            Register dest) {
  computeScaledAddress(src, SecondScratchReg);
  load32(Address(SecondScratchReg, src.offset), dest);
}

// Unbox a boolean payload: zero-extend the low 32 bits (dext), discarding
// the tag.
void MacroAssemblerMIPS64Compat::unboxBoolean(const ValueOperand& operand,
                                              Register dest) {
  ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(32));
}

void MacroAssemblerMIPS64Compat::unboxBoolean(Register src, Register dest) {
  ma_dext(dest, src, Imm32(0), Imm32(32));
}

// Memory variants: 32-bit zero-extending load of the payload.
void MacroAssemblerMIPS64Compat::unboxBoolean(const Address& src,
                                              Register dest) {
  ma_load(dest, Address(src.base, src.offset), SizeWord, ZeroExtend);
}

void MacroAssemblerMIPS64Compat::unboxBoolean(const BaseIndex& src,
                                              Register dest) {
  computeScaledAddress(src, SecondScratchReg);
  ma_load(dest, Address(SecondScratchReg, src.offset), SizeWord, ZeroExtend);
}
1408 
// Unbox a double: the boxed representation is the raw double bits, so a
// 64-bit move into the FPU register suffices.
void MacroAssemblerMIPS64Compat::unboxDouble(const ValueOperand& operand,
                                             FloatRegister dest) {
  as_dmtc1(operand.valueReg(), dest);
}

void MacroAssemblerMIPS64Compat::unboxDouble(const Address& src,
                                             FloatRegister dest) {
  ma_ld(dest, Address(src.base, src.offset));
}

// BaseIndex variant: load the 64-bit value into the second scratch
// register, then reuse the ValueOperand path.
void MacroAssemblerMIPS64Compat::unboxDouble(const BaseIndex& src,
                                             FloatRegister dest) {
  SecondScratchRegisterScope scratch(asMasm());
  loadPtr(src, scratch);
  unboxDouble(ValueOperand(scratch), dest);
}
1424 
// Unboxers for the pointer-payload value types (string, symbol, BigInt,
// object).  Each simply forwards to unboxNonDouble with the matching
// JSValueType.

void MacroAssemblerMIPS64Compat::unboxString(const ValueOperand& operand,
                                             Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
}

void MacroAssemblerMIPS64Compat::unboxString(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
}

void MacroAssemblerMIPS64Compat::unboxString(const Address& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
}

void MacroAssemblerMIPS64Compat::unboxSymbol(const ValueOperand& operand,
                                             Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
}

void MacroAssemblerMIPS64Compat::unboxSymbol(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
}

void MacroAssemblerMIPS64Compat::unboxSymbol(const Address& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
}

void MacroAssemblerMIPS64Compat::unboxBigInt(const ValueOperand& operand,
                                             Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
}

void MacroAssemblerMIPS64Compat::unboxBigInt(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
}

void MacroAssemblerMIPS64Compat::unboxBigInt(const Address& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
}

void MacroAssemblerMIPS64Compat::unboxObject(const ValueOperand& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}

void MacroAssemblerMIPS64Compat::unboxObject(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}

void MacroAssemblerMIPS64Compat::unboxObject(const Address& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}
1480 
// Unbox |src| into |dest|.  A float destination may receive either a boxed
// int32 (converted to double) or a boxed double; a GPR destination gets the
// payload extracted as |type| via unboxNonDouble.
void MacroAssemblerMIPS64Compat::unboxValue(const ValueOperand& src,
                                            AnyRegister dest,
                                            JSValueType type) {
  if (dest.isFloat()) {
    Label notInt32, end;
    asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
    convertInt32ToDouble(src.valueReg(), dest.fpu());
    ma_b(&end, ShortJump);
    bind(&notInt32);
    unboxDouble(src, dest.fpu());
    bind(&end);
  } else {
    unboxNonDouble(src, dest.gpr(), type);
  }
}

// Box a double: the boxed form is the raw double bits, so a 64-bit move
// out of the FPU register suffices.  The FloatRegister scratch parameter is
// unused on this platform.
void MacroAssemblerMIPS64Compat::boxDouble(FloatRegister src,
                                           const ValueOperand& dest,
                                           FloatRegister) {
  as_dmfc1(dest.valueReg(), src);
}

// Box a non-double payload of the given type.  |src| must differ from the
// destination value register (asserted).
void MacroAssemblerMIPS64Compat::boxNonDouble(JSValueType type, Register src,
                                              const ValueOperand& dest) {
  MOZ_ASSERT(src != dest.valueReg());
  boxValue(type, src, dest.valueReg());
}
1508 
// Convert a boolean Value to a double: mask the payload to 0/1 (clearing any
// higher bits) via ScratchRegister, then convert that int32 to double.
void MacroAssemblerMIPS64Compat::boolValueToDouble(const ValueOperand& operand,
                                                   FloatRegister dest) {
  convertBoolToInt32(operand.valueReg(), ScratchRegister);
  convertInt32ToDouble(ScratchRegister, dest);
}
1514 
// Convert an int32 Value (payload in the low 32 bits) to a double in |dest|.
void MacroAssemblerMIPS64Compat::int32ValueToDouble(const ValueOperand& operand,
                                                    FloatRegister dest) {
  convertInt32ToDouble(operand.valueReg(), dest);
}
1519 
// Convert a boolean Value to a float32: normalize payload to 0/1 via
// ScratchRegister, then convert that int32 to single precision.
void MacroAssemblerMIPS64Compat::boolValueToFloat32(const ValueOperand& operand,
                                                    FloatRegister dest) {
  convertBoolToInt32(operand.valueReg(), ScratchRegister);
  convertInt32ToFloat32(ScratchRegister, dest);
}
1525 
// Convert an int32 Value to a float32 in |dest|.
void MacroAssemblerMIPS64Compat::int32ValueToFloat32(
    const ValueOperand& operand, FloatRegister dest) {
  convertInt32ToFloat32(operand.valueReg(), dest);
}
1530 
// Materialize the float32 constant |f| into FPR |dest|.
void MacroAssemblerMIPS64Compat::loadConstantFloat32(float f,
                                                     FloatRegister dest) {
  ma_lis(dest, f);
}
1535 
// Load a numeric Value from |src| into FPR |dest|: an int32 Value is
// converted to double, a double Value is loaded directly. Clobbers both
// ScratchRegister and SecondScratchReg.
void MacroAssemblerMIPS64Compat::loadInt32OrDouble(const Address& src,
                                                   FloatRegister dest) {
  Label notInt32, end;
  // If it's an int, convert it to double.
  loadPtr(Address(src.base, src.offset), ScratchRegister);
  // Shift the tag into the low bits so branchTestInt32 can inspect it.
  ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
  loadPtr(Address(src.base, src.offset), SecondScratchReg);
  convertInt32ToDouble(SecondScratchReg, dest);
  ma_b(&end, ShortJump);

  // Not an int, just load as double.
  bind(&notInt32);
  unboxDouble(src, dest);
  bind(&end);
}
1552 
// BaseIndex variant of loadInt32OrDouble. The scaled address has to be
// recomputed after each use because SecondScratchReg doubles as both the
// address register and the tag/payload register.
void MacroAssemblerMIPS64Compat::loadInt32OrDouble(const BaseIndex& addr,
                                                   FloatRegister dest) {
  Label notInt32, end;

  // If it's an int, convert it to double.
  computeScaledAddress(addr, SecondScratchReg);
  // Since we only have one scratch, we need to stomp over it with the tag.
  loadPtr(Address(SecondScratchReg, 0), ScratchRegister);
  ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);

  computeScaledAddress(addr, SecondScratchReg);
  loadPtr(Address(SecondScratchReg, 0), SecondScratchReg);
  convertInt32ToDouble(SecondScratchReg, dest);
  ma_b(&end, ShortJump);

  // Not an int, just load as double.
  bind(&notInt32);
  // First, recompute the offset that had been stored in the scratch register
  // since the scratch register was overwritten loading in the type.
  computeScaledAddress(addr, SecondScratchReg);
  unboxDouble(Address(SecondScratchReg, 0), dest);
  bind(&end);
}
1577 
// Materialize the double constant |dp| into FPR |dest|.
void MacroAssemblerMIPS64Compat::loadConstantDouble(double dp,
                                                    FloatRegister dest) {
  ma_lid(dest, dp);
}
1582 
// Load the Value at |address| and extract the object pointer payload (the
// low JSVAL_TAG_SHIFT bits) into |scratch|, which is also returned.
Register MacroAssemblerMIPS64Compat::extractObject(const Address& address,
                                                   Register scratch) {
  loadPtr(Address(address.base, address.offset), scratch);
  ma_dext(scratch, scratch, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
  return scratch;
}
1589 
// Load the Value at |address| and extract the tag (the upper
// 64 - JSVAL_TAG_SHIFT bits) into |scratch|, which is also returned.
Register MacroAssemblerMIPS64Compat::extractTag(const Address& address,
                                                Register scratch) {
  loadPtr(Address(address.base, address.offset), scratch);
  ma_dext(scratch, scratch, Imm32(JSVAL_TAG_SHIFT),
          Imm32(64 - JSVAL_TAG_SHIFT));
  return scratch;
}
1597 
// BaseIndex variant: compute base + (index << scale) into |scratch|, then
// extract the tag via the Address overload (which applies address.offset).
Register MacroAssemblerMIPS64Compat::extractTag(const BaseIndex& address,
                                                Register scratch) {
  computeScaledAddress(address, scratch);
  return extractTag(Address(scratch, address.offset), scratch);
}
1603 
1604 /////////////////////////////////////////////////////////////////
1605 // X86/X64-common/ARM/MIPS interface.
1606 /////////////////////////////////////////////////////////////////
// Store |val| through an Operand by converting it to a base+displacement
// Address.
void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, Operand dst) {
  storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
}
1610 
// Store |val| at a BaseIndex destination: compute the scaled address into
// SecondScratchReg and delegate to the Address overload.
void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val,
                                            const BaseIndex& dest) {
  computeScaledAddress(dest, SecondScratchReg);
  storeValue(val, Address(SecondScratchReg, dest.offset));
}
1616 
// Box payload |reg| with |type|'s tag and store at a BaseIndex destination.
// Offsets that don't fit a signed 16-bit immediate are folded into the
// address register first, since MIPS load/store offsets are Imm16.
void MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg,
                                            BaseIndex dest) {
  computeScaledAddress(dest, ScratchRegister);

  int32_t offset = dest.offset;
  if (!Imm16::IsInSignedRange(offset)) {
    ma_li(SecondScratchReg, Imm32(offset));
    as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
    offset = 0;
  }

  storeValue(type, reg, Address(ScratchRegister, offset));
}
1630 
// Store the already-boxed 64-bit Value in |val| to memory at |dest|.
void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val,
                                            const Address& dest) {
  storePtr(val.valueReg(), Address(dest.base, dest.offset));
}
1635 
// Box payload |reg| with |type|'s tag and store the Value at |dest|.
// Int32/boolean values take a two-store fast path: the 32-bit payload and the
// 32-bit tag half are written separately (offset +4 assumes little-endian
// layout). Other types build the full boxed word in SecondScratchReg.
void MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg,
                                            Address dest) {
  MOZ_ASSERT(dest.base != SecondScratchReg);

  if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
    store32(reg, dest);
    JSValueShiftedTag tag = (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
    // Upper 32 bits of the shifted tag form the high half of the Value.
    store32(((Imm64(tag)).secondHalf()), Address(dest.base, dest.offset + 4));
  } else {
    ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
    ma_dsll(SecondScratchReg, SecondScratchReg, Imm32(JSVAL_TAG_SHIFT));
    // Insert the payload into the low JSVAL_TAG_SHIFT bits under the tag.
    ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
    storePtr(SecondScratchReg, Address(dest.base, dest.offset));
  }
}
1651 
// Store the constant Value |val| at |dest|. GC things are loaded with a
// patchable instruction sequence and a data relocation so the GC can update
// the embedded pointer if the referent moves.
void MacroAssemblerMIPS64Compat::storeValue(const Value& val, Address dest) {
  if (val.isGCThing()) {
    writeDataRelocation(val);
    movWithPatch(ImmWord(val.asRawBits()), SecondScratchReg);
  } else {
    ma_li(SecondScratchReg, ImmWord(val.asRawBits()));
  }
  storePtr(SecondScratchReg, Address(dest.base, dest.offset));
}
1661 
// Store the constant Value |val| at a BaseIndex destination. Large offsets
// (outside Imm16 range) are folded into the address register first.
void MacroAssemblerMIPS64Compat::storeValue(const Value& val, BaseIndex dest) {
  computeScaledAddress(dest, ScratchRegister);

  int32_t offset = dest.offset;
  if (!Imm16::IsInSignedRange(offset)) {
    ma_li(SecondScratchReg, Imm32(offset));
    as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
    offset = 0;
  }
  storeValue(val, Address(ScratchRegister, offset));
}
1673 
// Load a Value from a BaseIndex address: compute the scaled address into
// SecondScratchReg and delegate to the Address overload.
void MacroAssemblerMIPS64Compat::loadValue(const BaseIndex& addr,
                                           ValueOperand val) {
  computeScaledAddress(addr, SecondScratchReg);
  loadValue(Address(SecondScratchReg, addr.offset), val);
}
1679 
// Load the boxed 64-bit Value at |src| into the value register.
void MacroAssemblerMIPS64Compat::loadValue(Address src, ValueOperand val) {
  loadPtr(Address(src.base, src.offset), val.valueReg());
}
1683 
// Box |payload| in place: move it to |dest| if needed, then insert |type|'s
// tag into the upper 64 - JSVAL_TAG_SHIFT bits. For int32/boolean the bits
// between the 32-bit payload and the tag are cleared so the boxed form is
// canonical.
void MacroAssemblerMIPS64Compat::tagValue(JSValueType type, Register payload,
                                          ValueOperand dest) {
  MOZ_ASSERT(dest.valueReg() != ScratchRegister);
  if (payload != dest.valueReg()) {
    ma_move(dest.valueReg(), payload);
  }
  ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type)));
  ma_dins(dest.valueReg(), ScratchRegister, Imm32(JSVAL_TAG_SHIFT),
          Imm32(64 - JSVAL_TAG_SHIFT));
  if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
    // Zero bits [32, JSVAL_TAG_SHIFT) left over from the 64-bit payload move.
    ma_dins(dest.valueReg(), zero, Imm32(32), Imm32(JSVAL_TAG_SHIFT - 32));
  }
}
1697 
// Push the boxed Value in |val| onto the stack.
void MacroAssemblerMIPS64Compat::pushValue(ValueOperand val) {
  // Allocate stack slots for Value. One for each.
  asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
  // Store Value
  storeValue(val, Address(StackPointer, 0));
}
1704 
// Push the Value stored at |addr| onto the stack.
void MacroAssemblerMIPS64Compat::pushValue(const Address& addr) {
  // Load the value before adjusting the stack: addr.base may be sp itself,
  // in which case moving sp first would invalidate the address.
  loadPtr(Address(addr.base, addr.offset), ScratchRegister);
  ma_dsubu(StackPointer, StackPointer, Imm32(sizeof(Value)));
  storePtr(ScratchRegister, Address(StackPointer, 0));
}
1711 
// Pop the top-of-stack Value into |val| and release its stack slot.
void MacroAssemblerMIPS64Compat::popValue(ValueOperand val) {
  as_ld(val.valueReg(), StackPointer, 0);
  as_daddiu(StackPointer, StackPointer, sizeof(Value));
}
1716 
// Emit a break instruction (code 0) to trap execution.
void MacroAssemblerMIPS64Compat::breakpoint() { as_break(0); }
1718 
// Ensure |source| holds a number and leave it as a double in |dest|.
// Doubles are unboxed directly; int32s are converted; any other tag jumps
// to |failure|. Clobbers ScratchRegister on the int32 path.
void MacroAssemblerMIPS64Compat::ensureDouble(const ValueOperand& source,
                                              FloatRegister dest,
                                              Label* failure) {
  Label isDouble, done;
  {
    ScratchTagScope tag(asMasm(), source);
    splitTagForTest(source, tag);
    asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
    asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
  }

  unboxInt32(source, ScratchRegister);
  convertInt32ToDouble(ScratchRegister, dest);
  jump(&done);

  bind(&isDouble);
  unboxDouble(source, dest);

  bind(&done);
}
1739 
// Debug-only runtime check: trap with BREAK_STACK_UNALIGNED if sp is not
// ABIStackAlignment-aligned. Emits nothing in release builds.
void MacroAssemblerMIPS64Compat::checkStackAlignment() {
#ifdef DEBUG
  Label aligned;
  as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
  ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
  as_break(BREAK_STACK_UNALIGNED);
  bind(&aligned);
#endif
}
1749 
// Exception-handling tail: allocate a ResumeFromException record on the
// stack, call HandleException on it, then dispatch on the resulting resume
// kind (entry frame / catch / finally / forced return / bailout / wasm /
// wasm catch), restoring sp/fp and jumping or returning as appropriate.
void MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(
    Label* profilerExitTail) {
  // Reserve space for exception information, rounded up to the ABI stack
  // alignment.
  int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
             ~(ABIStackAlignment - 1);
  asMasm().subPtr(Imm32(size), StackPointer);
  ma_move(a0, StackPointer);  // Use a0 since it is a first function argument

  // Call the handler.
  using Fn = void (*)(ResumeFromException * rfe);
  asMasm().setupUnalignedABICall(a1);
  asMasm().passABIArg(a0);
  asMasm().callWithABI<Fn, HandleException>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  Label entryFrame;
  Label catch_;
  Label finally;
  Label return_;
  Label bailout;
  Label wasm;
  Label wasmCatch;

  // Already clobbered a0, so use it...
  load32(Address(StackPointer, offsetof(ResumeFromException, kind)), a0);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
                    &entryFrame);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_CATCH), &catch_);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_FINALLY), &finally);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_WASM), &wasm);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_WASM_CATCH), &wasmCatch);

  breakpoint();  // Invalid kind.

  // No exception handler. Load the error value, load the new stack pointer
  // and return from the entry frame.
  bind(&entryFrame);
  asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);

  // We're going to be returning by the ion calling convention
  ma_pop(ra);
  as_jr(ra);
  as_nop();  // branch delay slot

  // If we found a catch handler, this must be a baseline frame. Restore
  // state and jump to the catch block.
  bind(&catch_);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, target)), a0);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  jump(a0);

  // If we found a finally block, this must be a baseline frame. Push
  // two values expected by JSOp::Retsub: BooleanValue(true) and the
  // exception.
  bind(&finally);
  ValueOperand exception = ValueOperand(a1);
  loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);

  loadPtr(Address(sp, offsetof(ResumeFromException, target)), a0);
  loadPtr(Address(sp, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);

  pushValue(BooleanValue(true));
  pushValue(exception);
  jump(a0);

  // Only used in debug mode. Return BaselineFrame->returnValue() to the
  // caller.
  bind(&return_);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  loadValue(
      Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
      JSReturnOperand);
  ma_move(StackPointer, BaselineFrameReg);
  pop(BaselineFrameReg);

  // If profiling is enabled, then update the lastProfilingFrame to refer to
  // caller frame before returning.
  {
    Label skipProfilingInstrumentation;
    // Test if profiler enabled.
    AbsoluteAddress addressOfEnabled(
        GetJitContext()->runtime->geckoProfiler().addressOfEnabled());
    asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
                      &skipProfilingInstrumentation);
    jump(profilerExitTail);
    bind(&skipProfilingInstrumentation);
  }

  ret();

  // If we are bailing out to baseline to handle an exception, jump to
  // the bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
  bind(&bailout);
  loadPtr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), a2);
  ma_li(ReturnReg, Imm32(1));
  loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
  jump(a1);

  // If we are throwing and the innermost frame was a wasm frame, reset SP and
  // FP; SP is pointing to the unwound return address to the wasm entry, so
  // we can just ret().
  bind(&wasm);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          FramePointer);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  ret();

  // Found a wasm catch handler, restore state and jump to it.
  bind(&wasmCatch);
  loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          FramePointer);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  jump(a1);
}
1886 
// Emit a jump to |label| and return the code offset of its first
// instruction so the jump can later be toggled (patched) in place.
CodeOffset MacroAssemblerMIPS64Compat::toggledJump(Label* label) {
  CodeOffset ret(nextOffset().getOffset());
  ma_b(label);
  return ret;
}
1892 
// Emit a patchable call to |target| that can be toggled on/off. The address
// load is always emitted (patchable); the call site is either jalr+nop when
// enabled or two nops when disabled, so both forms have identical size
// (asserted against ToggledCallSize).
CodeOffset MacroAssemblerMIPS64Compat::toggledCall(JitCode* target,
                                                   bool enabled) {
  BufferOffset bo = nextOffset();
  CodeOffset offset(bo.getOffset());
  addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
  ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
  if (enabled) {
    as_jalr(ScratchRegister);
    as_nop();
  } else {
    as_nop();
    as_nop();
  }
  MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
                            ToggledCallSize(nullptr));
  return offset;
}
1910 
// On frame entry with profiling active: record |framePtr| as the profiling
// activation's last profiling frame and clear the last profiling call site.
// Clobbers |scratch|.
void MacroAssemblerMIPS64Compat::profilerEnterFrame(Register framePtr,
                                                    Register scratch) {
  asMasm().loadJSContext(scratch);
  loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
  storePtr(framePtr,
           Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
  storePtr(ImmPtr(nullptr),
           Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
}
1920 
// Jump to the shared profiler exit-frame tail stub.
void MacroAssemblerMIPS64Compat::profilerExitFrame() {
  jump(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
}
1924 
// Subtract |imm32| from sp; emits nothing when the adjustment is zero.
void MacroAssembler::subFromStackPtr(Imm32 imm32) {
  if (imm32.value) {
    asMasm().subPtr(imm32, StackPointer);
  }
}
1930 
1931 //{{{ check_macroassembler_style
1932 // ===============================================================
1933 // Stack manipulation functions.
1934 
// Bytes PushRegsInMask will reserve for |set|: one pointer-sized slot per
// GPR plus the FPU set's push size.
size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
  return set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
}
1938 
// Reserve stack for every register in |set| and store them: GPRs first
// (highest offsets, iterated backward), then FPUs. |diff| walks down from
// the reserved size to 0, asserting an exact fit.
void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
  int32_t diff =
      set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
  const int32_t reserved = diff;

  reserveStack(reserved);
  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diff -= sizeof(intptr_t);
    storePtr(*iter, Address(StackPointer, diff));
  }

#ifdef ENABLE_WASM_SIMD
#  error "Needs more careful logic if SIMD is enabled"
#endif

  for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
       iter.more(); ++iter) {
    diff -= sizeof(double);
    storeDouble(*iter, Address(StackPointer, diff));
  }
  MOZ_ASSERT(diff == 0);
}
1961 
// Inverse of PushRegsInMask: reload every register in |set| from the stack
// (skipping those in |ignore|, whose slots are simply discarded) and free
// the reserved space. Iteration order mirrors the push exactly.
void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
                                         LiveRegisterSet ignore) {
  int32_t diff =
      set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
  const int32_t reserved = diff;

  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diff -= sizeof(intptr_t);
    if (!ignore.has(*iter)) {
      loadPtr(Address(StackPointer, diff), *iter);
    }
  }

#ifdef ENABLE_WASM_SIMD
#  error "Needs more careful logic if SIMD is enabled"
#endif

  for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
       iter.more(); ++iter) {
    diff -= sizeof(double);
    if (!ignore.has(*iter)) {
      loadDouble(Address(StackPointer, diff), *iter);
    }
  }
  MOZ_ASSERT(diff == 0);
  freeStack(reserved);
}
1989 
// Store every register in |set| to memory ending at |dest| (dest.offset is
// the exclusive top; stores walk downward). GPRs first, then FPUs sized per
// register. The trailing (unnamed) Register scratch is unused on MIPS64.
void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
                                     Register) {
  FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
  unsigned numFpu = fpuSet.size();
  int32_t diffF = fpuSet.getPushSizeInBytes();
  int32_t diffG = set.gprs().size() * sizeof(intptr_t);

  // Caller must have reserved at least enough room below dest.offset.
  MOZ_ASSERT(dest.offset >= diffG + diffF);

  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diffG -= sizeof(intptr_t);
    dest.offset -= sizeof(intptr_t);
    storePtr(*iter, dest);
  }
  MOZ_ASSERT(diffG == 0);

#ifdef ENABLE_WASM_SIMD
#  error "Needs more careful logic if SIMD is enabled"
#endif

  for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
    FloatRegister reg = *iter;
    diffF -= reg.size();
    numFpu -= 1;
    dest.offset -= reg.size();
    if (reg.isDouble()) {
      storeDouble(reg, dest);
    } else if (reg.isSingle()) {
      storeFloat32(reg, dest);
    } else {
      MOZ_CRASH("Unknown register type.");
    }
  }
  MOZ_ASSERT(numFpu == 0);
  // Any remainder must be sub-pointer padding; round it away before the
  // exact-fit assertion.
  diffF -= diffF % sizeof(uintptr_t);
  MOZ_ASSERT(diffF == 0);
}
2027 // ===============================================================
2028 // ABI function calls.
2029 
// Begin an ABI call from a possibly-unaligned sp: save the old sp in
// |scratch|, force-align sp to ABIStackAlignment, and spill the old sp to
// the stack so callWithABIPost can restore it.
void MacroAssembler::setupUnalignedABICall(Register scratch) {
  MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
  setupNativeABICall();
  dynamicAlignment_ = true;

  ma_move(scratch, StackPointer);

  // Force sp to be aligned
  asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
  ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
  storePtr(scratch, Address(StackPointer, 0));
}
2042 
// Finish ABI call setup: compute and reserve the outgoing stack area
// (argument space + a slot for $ra + alignment padding), save $ra, and
// emit the resolved argument moves. Writes the reserved size to
// |*stackAdjust| for callWithABIPost.
void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
  MOZ_ASSERT(inCall_);
  uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();

  // Reserve place for $ra.
  stackForCall += sizeof(intptr_t);

  if (dynamicAlignment_) {
    // sp was already aligned by setupUnalignedABICall; align the call area
    // by itself.
    stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
  } else {
    // Account for what has already been pushed since the last known-aligned
    // point (frame contents, plus the wasm frame header when applicable).
    uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
    stackForCall += ComputeByteAlignment(
        stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
  }

  *stackAdjust = stackForCall;
  reserveStack(stackForCall);

  // Save $ra because call is going to clobber it. Restore it in
  // callWithABIPost. NOTE: This is needed for calls from SharedIC.
  // Maybe we can do this differently.
  storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));

  // Position all arguments.
  {
    enoughMemory_ &= moveResolver_.resolve();
    if (!enoughMemory_) {
      return;
    }

    MoveEmitter emitter(*this);
    emitter.emit(moveResolver_);
    emitter.finish();
  }

  assertStackAlignment(ABIStackAlignment);
}
2080 
// Tear down after an ABI call: restore $ra from its saved slot, then either
// restore the pre-alignment sp (dynamic alignment) or free the reserved
// stack, and clear the in-call debug flag.
void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
                                     bool callFromWasm) {
  // Restore ra value (as stored in callWithABIPre()).
  loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);

  if (dynamicAlignment_) {
    // Restore sp value from stack (as stored in setupUnalignedABICall()).
    loadPtr(Address(StackPointer, stackAdjust), StackPointer);
    // Use adjustFrame instead of freeStack because we already restored sp.
    adjustFrame(-stackAdjust);
  } else {
    freeStack(stackAdjust);
  }

#ifdef DEBUG
  MOZ_ASSERT(inCall_);
  inCall_ = false;
#endif
}
2100 
// ABI call through a function pointer in |fun|, without profiler
// instrumentation.
void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
  // Load the callee in t9, no instruction between the lw and call
  // should clobber it. Note that we can't use fun.base because it may
  // be one of the IntArg registers clobbered before the call.
  ma_move(t9, fun);
  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);
  call(t9);
  callWithABIPost(stackAdjust, result);
}
2111 
// ABI call through a function pointer loaded from memory at |fun|, without
// profiler instrumentation.
void MacroAssembler::callWithABINoProfiler(const Address& fun,
                                           MoveOp::Type result) {
  // Load the callee in t9, as above.
  loadPtr(Address(fun.base, fun.offset), t9);
  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);
  call(t9);
  callWithABIPost(stackAdjust, result);
}
2121 
2122 // ===============================================================
2123 // Move
2124 
// Box |src| into |dest|. An already-boxed value is moved directly;
// a typed GPR payload is boxed with its type's tag; a float32 is widened
// to double (via ScratchDoubleReg) before being boxed as a double.
void MacroAssembler::moveValue(const TypedOrValueRegister& src,
                               const ValueOperand& dest) {
  if (src.hasValue()) {
    moveValue(src.valueReg(), dest);
    return;
  }

  MIRType type = src.type();
  AnyRegister reg = src.typedReg();

  if (!IsFloatingPointType(type)) {
    boxNonDouble(ValueTypeFromMIRType(type), reg.gpr(), dest);
    return;
  }

  FloatRegister scratch = ScratchDoubleReg;
  FloatRegister freg = reg.fpu();
  if (type == MIRType::Float32) {
    convertFloat32ToDouble(freg, scratch);
    freg = scratch;
  }
  boxDouble(freg, dest, scratch);
}
2148 
// Move a boxed Value between value registers; no-op when they are the same.
void MacroAssembler::moveValue(const ValueOperand& src,
                               const ValueOperand& dest) {
  if (src == dest) {
    return;
  }
  movePtr(src.valueReg(), dest.valueReg());
}
2156 
// Materialize the constant Value |src| into |dest|. GC things use a
// patchable load plus a data relocation so the GC can update the embedded
// pointer.
void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
  if (!src.isGCThing()) {
    ma_li(dest.valueReg(), ImmWord(src.asRawBits()));
    return;
  }

  writeDataRelocation(src);
  movWithPatch(ImmWord(src.asRawBits()), dest.valueReg());
}
2166 
2167 // ===============================================================
2168 // Branch functions
2169 
// Branch to |label| if the Value at |address| is (Equal) or is not
// (NotEqual) a GC cell in the nursery. Delegates to the shared impl.
void MacroAssembler::branchValueIsNurseryCell(Condition cond,
                                              const Address& address,
                                              Register temp, Label* label) {
  branchValueIsNurseryCellImpl(cond, address, temp, label);
}
2175 
// Register-operand overload of branchValueIsNurseryCell; see the Address
// overload above.
void MacroAssembler::branchValueIsNurseryCell(Condition cond,
                                              ValueOperand value, Register temp,
                                              Label* label) {
  branchValueIsNurseryCellImpl(cond, value, temp, label);
}
2181 
// Shared implementation: non-GC-thing values can never be nursery cells, so
// they short-circuit. Otherwise unbox the cell pointer and test the chunk's
// store-buffer pointer (read via the chunk trailer): non-null means the
// chunk is in the nursery.
template <typename T>
void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
                                                  const T& value, Register temp,
                                                  Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
  Label done;
  branchTestGCThing(Assembler::NotEqual, value,
                    cond == Assembler::Equal ? &done : label);

  // temp may be InvalidReg, use scratch2 instead.
  SecondScratchRegisterScope scratch2(*this);

  unboxGCThingForGCBarrier(value, scratch2);
  orPtr(Imm32(gc::ChunkMask), scratch2);
  loadPtr(Address(scratch2, gc::ChunkStoreBufferOffsetFromLastByte), scratch2);
  branchPtr(InvertCondition(cond), scratch2, ImmWord(0), label);

  bind(&done);
}
2201 
// Branch to |label| when |lhs| compares Equal/NotEqual to the constant
// Value |rhs|. The constant is materialized into the scratch register for a
// full 64-bit compare.
void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
                                     const Value& rhs, Label* label) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  ScratchRegisterScope scratch(*this);
  MOZ_ASSERT(lhs.valueReg() != scratch);
  moveValue(rhs, ValueOperand(scratch));
  ma_b(lhs.valueReg(), scratch, label, cond);
}
2210 
2211 // ========================================================================
2212 // Memory access primitives.
// Store |value| (of MIR type |valueType|) to |dest|, whose slot holds
// |slotType|. Doubles are stored unboxed; int32/boolean payloads can be
// written as plain 32-bit stores when the slot already has the same type
// (the tag half stays valid); everything else is stored as a full boxed
// Value.
template <typename T>
void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
                                       MIRType valueType, const T& dest,
                                       MIRType slotType) {
  if (valueType == MIRType::Double) {
    boxDouble(value.reg().typedReg().fpu(), dest);
    return;
  }

  // For known integers and booleans, we can just store the unboxed value if
  // the slot has the same type.
  if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) &&
      slotType == valueType) {
    if (value.constant()) {
      Value val = value.value();
      if (valueType == MIRType::Int32) {
        store32(Imm32(val.toInt32()), dest);
      } else {
        store32(Imm32(val.toBoolean() ? 1 : 0), dest);
      }
    } else {
      store32(value.reg().typedReg().gpr(), dest);
    }
    return;
  }

  if (value.constant()) {
    storeValue(value.value(), dest);
  } else {
    storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
               dest);
  }
}
2246 
2247 template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
2248                                                 MIRType valueType,
2249                                                 const Address& dest,
2250                                                 MIRType slotType);
2251 template void MacroAssembler::storeUnboxedValue(
2252     const ConstantOrRegister& value, MIRType valueType,
2253     const BaseObjectElementIndex& dest, MIRType slotType);
2254 
// Push |reg| onto the stack as a boxed double Value.  adjustFrame() keeps
// framePushed in sync with the manual stack-pointer bump.
void MacroAssembler::PushBoxed(FloatRegister reg) {
  subFromStackPtr(Imm32(sizeof(double)));
  boxDouble(reg, Address(getStackPointer(), 0));
  adjustFrame(sizeof(double));
}
2260 
// 32-bit wasm bounds check with the limit already in a register: branch to
// |label| when |index| <cond> |boundsCheckLimit|.
void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
                                       Register boundsCheckLimit,
                                       Label* label) {
  ma_b(index, boundsCheckLimit, label, cond);
}
2266 
wasmBoundsCheck32(Condition cond,Register index,Address boundsCheckLimit,Label * label)2267 void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
2268                                        Address boundsCheckLimit, Label* label) {
2269   SecondScratchRegisterScope scratch2(*this);
2270   load32(boundsCheckLimit, SecondScratchReg);
2271   ma_b(index, SecondScratchReg, label, cond);
2272 }
2273 
// 64-bit wasm bounds check (register limit): not yet implemented on MIPS64.
void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
                                       Register64 boundsCheckLimit,
                                       Label* label) {
  MOZ_CRASH("IMPLEMENTME");
}
2279 
// 64-bit wasm bounds check (memory limit): not yet implemented on MIPS64.
void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
                                       Address boundsCheckLimit, Label* label) {
  MOZ_CRASH("IMPLEMENTME");
}
2284 
// Truncate |input| to uint32 via a 64-bit trunc.l.d.  If any of the high 32
// bits of the 64-bit result are set, the value was outside uint32 range, so
// branch to |oolEntry| for out-of-line handling.
void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
                                                Register output,
                                                bool isSaturating,
                                                Label* oolEntry) {
  as_truncld(ScratchDoubleReg, input);
  moveFromDouble(ScratchDoubleReg, output);
  // Extract the high 32 bits of the truncation result.
  ma_dsrl(ScratchRegister, output, Imm32(32));
  // sll with shift 0 sign-extends the low 32 bits into |output|.
  as_sll(output, output, 0);
  ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
}
2295 
// Truncate float32 |input| to uint32 via a 64-bit trunc.l.s.  Nonzero high
// 32 bits in the result mean the value was outside uint32 range; branch to
// |oolEntry| in that case.
void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
                                                 Register output,
                                                 bool isSaturating,
                                                 Label* oolEntry) {
  as_truncls(ScratchDoubleReg, input);
  moveFromDouble(ScratchDoubleReg, output);
  // Extract the high 32 bits of the truncation result.
  ma_dsrl(ScratchRegister, output, Imm32(32));
  // sll with shift 0 sign-extends the low 32 bits into |output|.
  as_sll(output, output, 0);
  ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
}
2306 
// Aligned 64-bit wasm load: no unaligned temp register needed.
void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
                                 Register memoryBase, Register ptr,
                                 Register ptrScratch, Register64 output) {
  wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
}
2312 
// Unaligned 64-bit wasm load: |tmp| is used by the unaligned-access sequence.
void MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
                                          Register memoryBase, Register ptr,
                                          Register ptrScratch,
                                          Register64 output, Register tmp) {
  wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, tmp);
}
2319 
// Aligned 64-bit wasm store: no unaligned temp register needed.
void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
                                  Register64 value, Register memoryBase,
                                  Register ptr, Register ptrScratch) {
  wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
}
2325 
// Unaligned 64-bit wasm store: |tmp| is used by the unaligned-access
// sequence.
void MacroAssembler::wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
                                           Register64 value,
                                           Register memoryBase, Register ptr,
                                           Register ptrScratch, Register tmp) {
  wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, tmp);
}
2332 
// Truncate |input| to int64.  trunc.l.d sets the FCSR invalid-operation
// (CauseV) flag for NaN or out-of-range inputs; extract that bit and branch
// to |oolEntry| when it is set.
void MacroAssembler::wasmTruncateDoubleToInt64(
    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
    Label* oolRejoin, FloatRegister tempDouble) {
  // This platform variant takes no FP temp.
  MOZ_ASSERT(tempDouble.isInvalid());

  as_truncld(ScratchDoubleReg, input);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromDouble(ScratchDoubleReg, output.reg);
  // Extract the CauseV (invalid-operation) bit from FCSR.
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
  ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);

  if (isSaturating) {
    bind(oolRejoin);
  }
}
2348 
// Truncate |input| to uint64.  Values below 2^63 are handled by a signed
// trunc.l.d directly; larger values are rebased by subtracting 2^63 before
// truncating, then offset back by INT64_MIN.  Invalid conversions (FCSR
// CauseV) and results that collapsed to 0 branch to |oolEntry|.
void MacroAssembler::wasmTruncateDoubleToUInt64(
    FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
    Label* oolRejoin, FloatRegister tempDouble) {
  // This platform variant takes no FP temp.
  MOZ_ASSERT(tempDouble.isInvalid());
  Register output = output_.reg;

  Label done;

  as_truncld(ScratchDoubleReg, input);
  // ma_li INT64_MAX
  ma_li(SecondScratchReg, Imm32(-1));
  ma_dext(SecondScratchReg, SecondScratchReg, Imm32(0), Imm32(63));
  moveFromDouble(ScratchDoubleReg, output);
  // For numbers in  -1.[ : ]INT64_MAX range do nothing more
  ma_b(output, SecondScratchReg, &done, Assembler::Below, ShortJump);

  // Input >= 2^63 (or invalid): subtract 2^63 and truncate the remainder.
  loadConstantDouble(double(INT64_MAX + 1ULL), ScratchDoubleReg);
  // ma_li INT64_MIN (INT64_MAX + 1 wraps to INT64_MIN)
  ma_daddu(SecondScratchReg, Imm32(1));
  as_subd(ScratchDoubleReg, input, ScratchDoubleReg);
  as_truncld(ScratchDoubleReg, ScratchDoubleReg);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromDouble(ScratchDoubleReg, output);
  // Extract the CauseV (invalid-operation) bit from FCSR.
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
  // Add back the 2^63 bias.
  ma_daddu(output, SecondScratchReg);

  // Guard against negative values that result in 0 due the precision loss.
  as_sltiu(SecondScratchReg, output, 1);
  ma_or(ScratchRegister, SecondScratchReg);

  ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);

  bind(&done);

  if (isSaturating) {
    bind(oolRejoin);
  }
}
2387 
// Truncate float32 |input| to int64.  trunc.l.s sets the FCSR CauseV
// (invalid-operation) flag for NaN or out-of-range inputs; extract that bit
// and branch to |oolEntry| when set.
void MacroAssembler::wasmTruncateFloat32ToInt64(
    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
    Label* oolRejoin, FloatRegister tempFloat) {
  // This platform variant takes no FP temp.
  MOZ_ASSERT(tempFloat.isInvalid());

  as_truncls(ScratchDoubleReg, input);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromDouble(ScratchDoubleReg, output.reg);
  // Extract the CauseV (invalid-operation) bit from FCSR.
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
  ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);

  if (isSaturating) {
    bind(oolRejoin);
  }
}
2403 
// Truncate float32 |input| to uint64.  Same rebasing scheme as the double
// variant: values below 2^63 truncate directly; larger ones subtract 2^63
// first and add back INT64_MIN afterwards.  Invalid conversions (FCSR
// CauseV) and results collapsing to 0 branch to |oolEntry|.
void MacroAssembler::wasmTruncateFloat32ToUInt64(
    FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
    Label* oolRejoin, FloatRegister tempFloat) {
  // This platform variant takes no FP temp.
  MOZ_ASSERT(tempFloat.isInvalid());
  Register output = output_.reg;

  Label done;

  as_truncls(ScratchDoubleReg, input);
  // ma_li INT64_MAX
  ma_li(SecondScratchReg, Imm32(-1));
  ma_dext(SecondScratchReg, SecondScratchReg, Imm32(0), Imm32(63));
  moveFromDouble(ScratchDoubleReg, output);
  // For numbers in  -1.[ : ]INT64_MAX range do nothing more
  ma_b(output, SecondScratchReg, &done, Assembler::Below, ShortJump);

  // Input >= 2^63 (or invalid): subtract 2^63 and truncate the remainder.
  loadConstantFloat32(float(INT64_MAX + 1ULL), ScratchFloat32Reg);
  // ma_li INT64_MIN (INT64_MAX + 1 wraps to INT64_MIN)
  ma_daddu(SecondScratchReg, Imm32(1));
  as_subs(ScratchFloat32Reg, input, ScratchFloat32Reg);
  as_truncls(ScratchDoubleReg, ScratchFloat32Reg);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromDouble(ScratchDoubleReg, output);
  // Extract the CauseV (invalid-operation) bit from FCSR.
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
  // Add back the 2^63 bias.
  ma_daddu(output, SecondScratchReg);

  // Guard against negative values that result in 0 due the precision loss.
  as_sltiu(SecondScratchReg, output, 1);
  ma_or(ScratchRegister, SecondScratchReg);

  ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);

  bind(&done);

  if (isSaturating) {
    bind(oolRejoin);
  }
}
2442 
wasmLoadI64Impl(const wasm::MemoryAccessDesc & access,Register memoryBase,Register ptr,Register ptrScratch,Register64 output,Register tmp)2443 void MacroAssemblerMIPS64Compat::wasmLoadI64Impl(
2444     const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
2445     Register ptrScratch, Register64 output, Register tmp) {
2446   uint32_t offset = access.offset();
2447   MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
2448   MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
2449 
2450   MOZ_ASSERT(!access.isZeroExtendSimd128Load());
2451   MOZ_ASSERT(!access.isSplatSimd128Load());
2452   MOZ_ASSERT(!access.isWidenSimd128Load());
2453 
2454   // Maybe add the offset.
2455   if (offset) {
2456     asMasm().addPtr(Imm32(offset), ptrScratch);
2457     ptr = ptrScratch;
2458   }
2459 
2460   unsigned byteSize = access.byteSize();
2461   bool isSigned;
2462 
2463   switch (access.type()) {
2464     case Scalar::Int8:
2465       isSigned = true;
2466       break;
2467     case Scalar::Uint8:
2468       isSigned = false;
2469       break;
2470     case Scalar::Int16:
2471       isSigned = true;
2472       break;
2473     case Scalar::Uint16:
2474       isSigned = false;
2475       break;
2476     case Scalar::Int32:
2477       isSigned = true;
2478       break;
2479     case Scalar::Uint32:
2480       isSigned = false;
2481       break;
2482     case Scalar::Int64:
2483       isSigned = true;
2484       break;
2485     default:
2486       MOZ_CRASH("unexpected array type");
2487   }
2488 
2489   BaseIndex address(memoryBase, ptr, TimesOne);
2490   if (IsUnaligned(access)) {
2491     MOZ_ASSERT(tmp != InvalidReg);
2492     asMasm().ma_load_unaligned(access, output.reg, address, tmp,
2493                                static_cast<LoadStoreSize>(8 * byteSize),
2494                                isSigned ? SignExtend : ZeroExtend);
2495     return;
2496   }
2497 
2498   asMasm().memoryBarrierBefore(access.sync());
2499   asMasm().ma_load(output.reg, address,
2500                    static_cast<LoadStoreSize>(8 * byteSize),
2501                    isSigned ? SignExtend : ZeroExtend);
2502   asMasm().append(access, asMasm().size() - 4);
2503   asMasm().memoryBarrierAfter(access.sync());
2504 }
2505 
wasmStoreI64Impl(const wasm::MemoryAccessDesc & access,Register64 value,Register memoryBase,Register ptr,Register ptrScratch,Register tmp)2506 void MacroAssemblerMIPS64Compat::wasmStoreI64Impl(
2507     const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
2508     Register ptr, Register ptrScratch, Register tmp) {
2509   uint32_t offset = access.offset();
2510   MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
2511   MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
2512 
2513   // Maybe add the offset.
2514   if (offset) {
2515     asMasm().addPtr(Imm32(offset), ptrScratch);
2516     ptr = ptrScratch;
2517   }
2518 
2519   unsigned byteSize = access.byteSize();
2520   bool isSigned;
2521   switch (access.type()) {
2522     case Scalar::Int8:
2523       isSigned = true;
2524       break;
2525     case Scalar::Uint8:
2526       isSigned = false;
2527       break;
2528     case Scalar::Int16:
2529       isSigned = true;
2530       break;
2531     case Scalar::Uint16:
2532       isSigned = false;
2533       break;
2534     case Scalar::Int32:
2535       isSigned = true;
2536       break;
2537     case Scalar::Uint32:
2538       isSigned = false;
2539       break;
2540     case Scalar::Int64:
2541       isSigned = true;
2542       break;
2543     default:
2544       MOZ_CRASH("unexpected array type");
2545   }
2546 
2547   BaseIndex address(memoryBase, ptr, TimesOne);
2548 
2549   if (IsUnaligned(access)) {
2550     MOZ_ASSERT(tmp != InvalidReg);
2551     asMasm().ma_store_unaligned(access, value.reg, address, tmp,
2552                                 static_cast<LoadStoreSize>(8 * byteSize),
2553                                 isSigned ? SignExtend : ZeroExtend);
2554     return;
2555   }
2556 
2557   asMasm().memoryBarrierBefore(access.sync());
2558   asMasm().ma_store(value.reg, address,
2559                     static_cast<LoadStoreSize>(8 * byteSize),
2560                     isSigned ? SignExtend : ZeroExtend);
2561   asMasm().append(access, asMasm().size() - 4);
2562   asMasm().memoryBarrierAfter(access.sync());
2563 }
2564 
// Emit a 64-bit compare-exchange loop over LLD/SCD.  |output| receives the
// value observed in memory; the store of |replace| is attempted only when
// the observed value equals |expect|.  |access|, when non-null, records wasm
// trap metadata at the LLD instruction.
template <typename T>
static void CompareExchange64(MacroAssembler& masm,
                              const wasm::MemoryAccessDesc* access,
                              const Synchronization& sync, const T& mem,
                              Register64 expect, Register64 replace,
                              Register64 output) {
  MOZ_ASSERT(expect != output && replace != output);
  masm.computeEffectiveAddress(mem, SecondScratchReg);

  Label tryAgain;
  Label exit;

  masm.memoryBarrierBefore(sync);

  masm.bind(&tryAgain);

  if (access) {
    // The faulting instruction is the LLD emitted next.
    masm.append(*access, masm.size());
  }
  masm.as_lld(output.reg, SecondScratchReg, 0);

  // Observed value differs from |expect|: leave memory untouched.
  // NOTE(review): this path also skips the trailing barrier -- confirm
  // intended.
  masm.ma_b(output.reg, expect.reg, &exit, Assembler::NotEqual, ShortJump);
  // SCD overwrites its source register with the success flag, so store from
  // a copy of |replace|.
  masm.movePtr(replace.reg, ScratchRegister);
  masm.as_scd(ScratchRegister, SecondScratchReg, 0);
  // Flag == 0 means the store-conditional failed; retry the whole sequence.
  masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::Zero,
            ShortJump);

  masm.memoryBarrierAfter(sync);

  masm.bind(&exit);
}
2596 
// Wasm 64-bit compare-exchange at an Address, with trap metadata.
void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
                                           const Address& mem,
                                           Register64 expect,
                                           Register64 replace,
                                           Register64 output) {
  CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
                    output);
}
2605 
// Wasm 64-bit compare-exchange at a BaseIndex, with trap metadata.
void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
                                           const BaseIndex& mem,
                                           Register64 expect,
                                           Register64 replace,
                                           Register64 output) {
  CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
                    output);
}
2614 
// JS 64-bit compare-exchange at an Address (no wasm trap metadata).
void MacroAssembler::compareExchange64(const Synchronization& sync,
                                       const Address& mem, Register64 expect,
                                       Register64 replace, Register64 output) {
  CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
}
2620 
// JS 64-bit compare-exchange at a BaseIndex (no wasm trap metadata).
void MacroAssembler::compareExchange64(const Synchronization& sync,
                                       const BaseIndex& mem, Register64 expect,
                                       Register64 replace, Register64 output) {
  CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
}
2626 
// Emit a 64-bit atomic exchange loop over LLD/SCD: |output| receives the old
// memory value and |value| is stored unconditionally.  |access|, when
// non-null, records wasm trap metadata at the LLD instruction.
template <typename T>
static void AtomicExchange64(MacroAssembler& masm,
                             const wasm::MemoryAccessDesc* access,
                             const Synchronization& sync, const T& mem,
                             Register64 value, Register64 output) {
  MOZ_ASSERT(value != output);
  masm.computeEffectiveAddress(mem, SecondScratchReg);

  Label tryAgain;

  masm.memoryBarrierBefore(sync);

  masm.bind(&tryAgain);

  if (access) {
    // The faulting instruction is the LLD emitted next.
    masm.append(*access, masm.size());
  }

  masm.as_lld(output.reg, SecondScratchReg, 0);
  // SCD overwrites its source register with the success flag, so store from
  // a copy of |value|.
  masm.movePtr(value.reg, ScratchRegister);
  masm.as_scd(ScratchRegister, SecondScratchReg, 0);
  // Flag == 0 means the store-conditional failed; retry.
  masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::Zero,
            ShortJump);

  masm.memoryBarrierAfter(sync);
}
2653 
// Wasm wrapper: atomic exchange with trap metadata taken from |access|.
template <typename T>
static void WasmAtomicExchange64(MacroAssembler& masm,
                                 const wasm::MemoryAccessDesc& access,
                                 const T& mem, Register64 value,
                                 Register64 output) {
  AtomicExchange64(masm, &access, access.sync(), mem, value, output);
}
2661 
// Wasm 64-bit atomic exchange at an Address.
void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
                                          const Address& mem, Register64 src,
                                          Register64 output) {
  WasmAtomicExchange64(*this, access, mem, src, output);
}
2667 
// Wasm 64-bit atomic exchange at a BaseIndex.
void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
                                          const BaseIndex& mem, Register64 src,
                                          Register64 output) {
  WasmAtomicExchange64(*this, access, mem, src, output);
}
2673 
// JS 64-bit atomic exchange at an Address (no wasm trap metadata).
void MacroAssembler::atomicExchange64(const Synchronization& sync,
                                      const Address& mem, Register64 value,
                                      Register64 output) {
  AtomicExchange64(*this, nullptr, sync, mem, value, output);
}
2679 
// JS 64-bit atomic exchange at a BaseIndex (no wasm trap metadata).
void MacroAssembler::atomicExchange64(const Synchronization& sync,
                                      const BaseIndex& mem, Register64 value,
                                      Register64 output) {
  AtomicExchange64(*this, nullptr, sync, mem, value, output);
}
2685 
// Emit a 64-bit atomic read-modify-write loop over LLD/SCD.  |output|
// receives the old memory value; |temp| holds the combined value that is
// store-conditionally written back.  |access|, when non-null, records wasm
// trap metadata at the LLD instruction.
template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm,
                            const wasm::MemoryAccessDesc* access,
                            const Synchronization& sync, AtomicOp op,
                            Register64 value, const T& mem, Register64 temp,
                            Register64 output) {
  MOZ_ASSERT(value != output);
  MOZ_ASSERT(value != temp);
  masm.computeEffectiveAddress(mem, SecondScratchReg);

  Label tryAgain;

  masm.memoryBarrierBefore(sync);

  masm.bind(&tryAgain);
  if (access) {
    // The faulting instruction is the LLD emitted next.
    masm.append(*access, masm.size());
  }

  masm.as_lld(output.reg, SecondScratchReg, 0);

  // temp = output <op> value.
  switch (op) {
    case AtomicFetchAddOp:
      masm.as_daddu(temp.reg, output.reg, value.reg);
      break;
    case AtomicFetchSubOp:
      masm.as_dsubu(temp.reg, output.reg, value.reg);
      break;
    case AtomicFetchAndOp:
      masm.as_and(temp.reg, output.reg, value.reg);
      break;
    case AtomicFetchOrOp:
      masm.as_or(temp.reg, output.reg, value.reg);
      break;
    case AtomicFetchXorOp:
      masm.as_xor(temp.reg, output.reg, value.reg);
      break;
    default:
      MOZ_CRASH();
  }

  // SCD overwrites |temp| with the success flag; flag == 0 means failure,
  // so retry the whole sequence.
  masm.as_scd(temp.reg, SecondScratchReg, 0);
  masm.ma_b(temp.reg, temp.reg, &tryAgain, Assembler::Zero, ShortJump);

  masm.memoryBarrierAfter(sync);
}
2732 
// Wasm 64-bit atomic fetch-op at an Address, with trap metadata.
void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
                                         AtomicOp op, Register64 value,
                                         const Address& mem, Register64 temp,
                                         Register64 output) {
  AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
}
2739 
// Wasm 64-bit atomic fetch-op at a BaseIndex, with trap metadata.
void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
                                         AtomicOp op, Register64 value,
                                         const BaseIndex& mem, Register64 temp,
                                         Register64 output) {
  AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
}
2746 
// JS 64-bit atomic fetch-op at an Address (no wasm trap metadata).
void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
                                     Register64 value, const Address& mem,
                                     Register64 temp, Register64 output) {
  AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
2752 
// JS 64-bit atomic fetch-op at a BaseIndex (no wasm trap metadata).
void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
                                     Register64 value, const BaseIndex& mem,
                                     Register64 temp, Register64 output) {
  AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
2758 
// 64-bit atomic op whose fetched result is not needed: pass |temp| as the
// output so the old value is simply discarded.
void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
                                      Register64 value, const Address& mem,
                                      Register64 temp) {
  AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
}
2764 
// BaseIndex variant of atomicEffectOp64: result discarded into |temp|.
void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
                                      Register64 value, const BaseIndex& mem,
                                      Register64 temp) {
  AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
}
2770 
2771 // ========================================================================
2772 // Convert floating point.
2773 
// Convert a signed 64-bit integer to double: move to an FP register, then
// cvt.d.l.
void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
  as_dmtc1(src.reg, dest);
  as_cvtdl(dest, dest);
}
2778 
// Convert a signed 64-bit integer to float32: move to an FP register, then
// cvt.s.l.
void MacroAssembler::convertInt64ToFloat32(Register64 src, FloatRegister dest) {
  as_dmtc1(src.reg, dest);
  as_cvtsl(dest, dest);
}
2783 
// MIPS64 converts uint64 to double without an extra integer temp register.
bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
2785 
// Convert an unsigned 64-bit integer to double.  |temp| must be invalid
// (see convertUInt64ToDoubleNeedsTemp); the platform-specific helper does
// the work.
void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
                                           Register temp) {
  MOZ_ASSERT(temp == Register::Invalid());
  MacroAssemblerSpecific::convertUInt64ToDouble(src.reg, dest);
}
2791 
// Convert an unsigned 64-bit integer to float32.  cvt.s.l treats its operand
// as signed, so inputs with the top bit set are first halved with a sticky
// low bit, converted, and then doubled.
void MacroAssembler::convertUInt64ToFloat32(Register64 src_, FloatRegister dest,
                                            Register temp) {
  // No integer temp is needed on this platform.
  MOZ_ASSERT(temp == Register::Invalid());

  Register src = src_.reg;
  Label positive, done;
  // Top bit clear: the signed conversion is already correct.
  ma_b(src, src, &positive, NotSigned, ShortJump);

  MOZ_ASSERT(src != ScratchRegister);
  MOZ_ASSERT(src != SecondScratchReg);

  // scratch = (src >> 1) | (src & 1): halve the value, keeping the
  // shifted-out bit sticky so the final rounding stays correct.
  ma_and(ScratchRegister, src, Imm32(1));
  ma_dsrl(SecondScratchReg, src, Imm32(1));
  ma_or(ScratchRegister, SecondScratchReg);
  as_dmtc1(ScratchRegister, dest);
  as_cvtsl(dest, dest);
  // Double the result to undo the halving.
  addFloat32(dest, dest);
  ma_b(&done, ShortJump);

  bind(&positive);
  as_dmtc1(src, dest);
  as_cvtsl(dest, dest);

  bind(&done);
}
2817 
2818 //}}} check_macroassembler_style
2819