1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2  * vim: set ts=8 sts=4 et sw=4 tw=99:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #include "jit/mips64/MacroAssembler-mips64.h"
8 
9 #include "mozilla/DebugOnly.h"
10 #include "mozilla/MathAlgorithms.h"
11 
12 #include "jit/Bailouts.h"
13 #include "jit/BaselineFrame.h"
14 #include "jit/JitFrames.h"
15 #include "jit/MacroAssembler.h"
16 #include "jit/mips64/Simulator-mips64.h"
17 #include "jit/MoveEmitter.h"
18 #include "jit/SharedICRegisters.h"
19 
20 #include "jit/MacroAssembler-inl.h"
21 
22 using namespace js;
23 using namespace jit;
24 
25 using mozilla::Abs;
26 
27 static_assert(sizeof(intptr_t) == 8, "Not 32-bit clean.");
28 
void
MacroAssemblerMIPS64Compat::convertBoolToInt32(Register src, Register dest)
{
    // Note that C++ bool is only 1 byte, so zero extend it to clear the
    // higher-order bits.
    ma_and(dest, src, Imm32(0xff));
}
36 
void
MacroAssemblerMIPS64Compat::convertInt32ToDouble(Register src, FloatRegister dest)
{
    // Move the 32-bit integer into the FPU, then convert word -> double.
    as_mtc1(src, dest);
    as_cvtdw(dest, dest);
}
43 
void
MacroAssemblerMIPS64Compat::convertInt32ToDouble(const Address& src, FloatRegister dest)
{
    // Load the 32-bit value directly into the FPU, then convert in place.
    ma_ls(dest, src);
    as_cvtdw(dest, dest);
}
50 
void
MacroAssemblerMIPS64Compat::convertInt32ToDouble(const BaseIndex& src, FloatRegister dest)
{
    // Fold base + scaled index into ScratchRegister, then reuse the
    // Address-based overload for the remaining constant offset.
    computeScaledAddress(src, ScratchRegister);
    convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
}
57 
void
MacroAssemblerMIPS64Compat::convertUInt32ToDouble(Register src, FloatRegister dest)
{
    // We use SecondScratchDoubleReg because MacroAssembler::loadFromTypedArray
    // calls with ScratchDoubleReg as dest.
    MOZ_ASSERT(dest != SecondScratchDoubleReg);

    // Subtract INT32_MIN to get a positive number
    // (equivalently: flip the sign bit, mapping [0, 2^32) onto a range the
    // signed word->double conversion can represent exactly).
    ma_subu(ScratchRegister, src, Imm32(INT32_MIN));

    // Convert value
    as_mtc1(ScratchRegister, dest);
    as_cvtdw(dest, dest);

    // Add unsigned value of INT32_MIN
    // (2147483648.0 == 2^31, undoing the bias applied above; exact in double).
    ma_lid(SecondScratchDoubleReg, 2147483648.0);
    as_addd(dest, dest, SecondScratchDoubleReg);
}
76 
void
MacroAssemblerMIPS64Compat::convertUInt64ToDouble(Register64 src, Register temp, FloatRegister dest)
{
    // Convert an unsigned 64-bit integer to double.  The hardware only has a
    // signed long -> double conversion, so values with the top bit set need
    // special handling.
    Label positive, done;
    ma_b(src.reg, src.reg, &positive, NotSigned, ShortJump);

    MOZ_ASSERT(src.reg != ScratchRegister);
    MOZ_ASSERT(src.reg != SecondScratchReg);

    // Top bit set: halve the value (logical shift right by one) so it fits in
    // a signed 64-bit range, OR-ing the dropped low bit back in so the final
    // rounding is still correct ("round to odd"), then convert and double it.
    ma_and(ScratchRegister, src.reg, Imm32(1));
    ma_dsrl(SecondScratchReg, src.reg, Imm32(1));
    ma_or(ScratchRegister, SecondScratchReg);
    as_dmtc1(ScratchRegister, dest);
    as_cvtdl(dest, dest);
    addDouble(dest, dest);
    ma_b(&done, ShortJump);

    // Non-negative values convert directly with the signed long conversion.
    bind(&positive);
    as_dmtc1(src.reg, dest);
    as_cvtdl(dest, dest);

    bind(&done);
}
100 
void
MacroAssemblerMIPS64Compat::convertUInt32ToFloat32(Register src, FloatRegister dest)
{
    Label positive, done;
    ma_b(src, src, &positive, NotSigned, ShortJump);

    // We cannot do the same as convertUInt32ToDouble because float32 doesn't
    // have enough precision.  Instead convert through double (exact for all
    // uint32 values) and round once to float32.
    convertUInt32ToDouble(src, dest);
    convertDoubleToFloat32(dest, dest);
    ma_b(&done, ShortJump);

    // Values without the sign bit set convert directly as signed int32.
    bind(&positive);
    convertInt32ToFloat32(src, dest);

    bind(&done);
}
118 
void
MacroAssemblerMIPS64Compat::convertDoubleToFloat32(FloatRegister src, FloatRegister dest)
{
    // Single rounding step: double -> single precision.
    as_cvtsd(dest, src);
}
124 
125 // Convert the floating point value to an integer, if it did not fit, then it
126 // was clamped to INT32_MIN/INT32_MAX, and we can test it.
127 // NOTE: if the value really was supposed to be INT32_MAX / INT32_MIN then it
128 // will be wrong.
129 void
branchTruncateDouble(FloatRegister src,Register dest,Label * fail)130 MacroAssemblerMIPS64Compat::branchTruncateDouble(FloatRegister src, Register dest,
131                                                  Label* fail)
132 {
133     Label test, success;
134     as_truncwd(ScratchDoubleReg, src);
135     as_mfc1(dest, ScratchDoubleReg);
136 
137     ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
138     ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
139 }
140 
141 // Checks whether a double is representable as a 32-bit integer. If so, the
142 // integer is written to the output register. Otherwise, a bailout is taken to
143 // the given snapshot. This function overwrites the scratch float register.
void
MacroAssemblerMIPS64Compat::convertDoubleToInt32(FloatRegister src, Register dest,
                                                 Label* fail, bool negativeZeroCheck)
{
    // Convert double to int, then convert back and check if we have the
    // same number.  A mismatch (or NaN, which always compares unordered)
    // means the double was not exactly representable as int32.
    as_cvtwd(ScratchDoubleReg, src);
    as_mfc1(dest, ScratchDoubleReg);
    as_cvtdw(ScratchDoubleReg, ScratchDoubleReg);
    ma_bc1d(src, ScratchDoubleReg, fail, Assembler::DoubleNotEqualOrUnordered);

    if (negativeZeroCheck) {
        Label notZero;
        ma_b(dest, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
        // Test and bail for -0.0, when integer result is 0
        // Move the top word of the double into the output reg, if it is
        // non-zero, then the original value was -0.0.  (The high word of
        // -0.0 is 0x80000000, which equals INT32_MIN as an int32.)
        moveFromDoubleHi(src, dest);
        ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
        bind(&notZero);
    }
}
166 
167 // Checks whether a float32 is representable as a 32-bit integer. If so, the
168 // integer is written to the output register. Otherwise, a bailout is taken to
169 // the given snapshot. This function overwrites the scratch float register.
void
MacroAssemblerMIPS64Compat::convertFloat32ToInt32(FloatRegister src, Register dest,
                                                  Label* fail, bool negativeZeroCheck)
{
    // Converting the floating point value to an integer and then converting it
    // back to a float32 would not work, as float to int32 conversions are
    // clamping (e.g. float(INT32_MAX + 1) would get converted into INT32_MAX
    // and then back to float(INT32_MAX + 1)).  If this ever happens, we just
    // bail out.
    as_cvtws(ScratchFloat32Reg, src);
    as_mfc1(dest, ScratchFloat32Reg);
    as_cvtsw(ScratchFloat32Reg, ScratchFloat32Reg);
    ma_bc1s(src, ScratchFloat32Reg, fail, Assembler::DoubleNotEqualOrUnordered);

    // Bail out in the clamped cases.
    ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);

    if (negativeZeroCheck) {
        Label notZero;
        ma_b(dest, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
        // Test and bail for -0.0, when integer result is 0
        // Move the top word of the double into the output reg,
        // if it is non-zero, then the original value was -0.0
        // NOTE(review): src is a float32, yet this reads the HIGH 32 bits of
        // the FPR via moveFromDoubleHi; the sign bit of -0.0f lives in the
        // low 32 bits.  Compare with convertDoubleToInt32 above — confirm
        // this detects float32 -0.0 correctly on this register layout.
        moveFromDoubleHi(src, dest);
        ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
        bind(&notZero);
    }
}
198 
void
MacroAssemblerMIPS64Compat::convertFloat32ToDouble(FloatRegister src, FloatRegister dest)
{
    // Widening conversion; always exact.
    as_cvtds(dest, src);
}
204 
205 void
branchTruncateFloat32(FloatRegister src,Register dest,Label * fail)206 MacroAssemblerMIPS64Compat::branchTruncateFloat32(FloatRegister src, Register dest,
207                                                   Label* fail)
208 {
209     Label test, success;
210     as_truncws(ScratchFloat32Reg, src);
211     as_mfc1(dest, ScratchFloat32Reg);
212 
213     ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
214 }
215 
void
MacroAssemblerMIPS64Compat::convertInt32ToFloat32(Register src, FloatRegister dest)
{
    // Move the 32-bit integer into the FPU, then convert word -> single.
    as_mtc1(src, dest);
    as_cvtsw(dest, dest);
}
222 
void
MacroAssemblerMIPS64Compat::convertInt32ToFloat32(const Address& src, FloatRegister dest)
{
    // Load the 32-bit value directly into the FPU, then convert in place.
    ma_ls(dest, src);
    as_cvtsw(dest, dest);
}
229 
// Double-precision arithmetic helpers.  Each computes dest = dest OP src
// (negateDouble negates in place).
void
MacroAssemblerMIPS64Compat::addDouble(FloatRegister src, FloatRegister dest)
{
    as_addd(dest, dest, src);
}

void
MacroAssemblerMIPS64Compat::subDouble(FloatRegister src, FloatRegister dest)
{
    as_subd(dest, dest, src);
}

void
MacroAssemblerMIPS64Compat::mulDouble(FloatRegister src, FloatRegister dest)
{
    as_muld(dest, dest, src);
}

void
MacroAssemblerMIPS64Compat::divDouble(FloatRegister src, FloatRegister dest)
{
    as_divd(dest, dest, src);
}

void
MacroAssemblerMIPS64Compat::negateDouble(FloatRegister reg)
{
    as_negd(reg, reg);
}
259 
// Increment the 64-bit counter at an absolute address (load, add 1, store).
// Not atomic; clobbers both scratch registers.
void
MacroAssemblerMIPS64Compat::inc64(AbsoluteAddress dest)
{
    ma_li(ScratchRegister, ImmWord(uintptr_t(dest.addr)));
    as_ld(SecondScratchReg, ScratchRegister, 0);
    as_daddiu(SecondScratchReg, SecondScratchReg, 1);
    as_sd(SecondScratchReg, ScratchRegister, 0);
}

// 64-bit register-to-register move (rd <- rs).
void
MacroAssemblerMIPS64Compat::movq(Register rs, Register rd)
{
    ma_move(rd, rs);
}
274 
// Load a to-be-patched immediate into dest and record the patch position in
// |label| so the real value can be written in later.
void
MacroAssemblerMIPS64::ma_li(Register dest, CodeOffset* label)
{
    BufferOffset bo = m_buffer.nextOffset();
    ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
    label->bind(bo.getOffset());
}
282 
// Load a 64-bit immediate using the shortest instruction sequence available.
// Cases, from cheapest to most general:
//   - signed 16-bit:  single addiu from $zero
//   - unsigned 16-bit: single ori from $zero
//   - bits only in [16,31]: single lui
//   - fits in 32 bits: lui + ori
//   - otherwise: build the value 16 bits at a time with lui/ori/dsll,
//     skipping ori for any all-zero 16-bit field.
void
MacroAssemblerMIPS64::ma_li(Register dest, ImmWord imm)
{
    if ((int64_t)imm.value >= INT16_MIN  && (int64_t)imm.value <= INT16_MAX) {
        as_addiu(dest, zero, imm.value);
    } else if (imm.value <= UINT16_MAX) {
        as_ori(dest, zero, Imm16::Lower(Imm32(imm.value)).encode());
    } else if (0 == (imm.value & 0xffff) && 0 == (imm.value >> 32)) {
        as_lui(dest, Imm16::Upper(Imm32(imm.value)).encode());
    } else if (imm.value <= UINT32_MAX) {
        as_lui(dest, Imm16::Upper(Imm32(imm.value)).encode());
        as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
    } else {
        // Seed dest with bits [63:32] (one or two instructions), then shift
        // in bits [31:16] and [15:0].
        if (imm.value >> 48) {
            as_lui(dest, Imm16::Upper(Imm32(imm.value >> 32)).encode());
            if ((imm.value >> 32) & 0xffff)
              as_ori(dest, dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
        } else {
            as_ori(dest, zero, Imm16::Lower(Imm32(imm.value >> 32)).encode());
        }
        as_dsll(dest, dest, 16);
        if ((imm.value >> 16) & 0xffff)
          as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
        as_dsll(dest, dest, 16);
        if (imm.value & 0xffff)
          as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
    }
}
311 
312 // This method generates lui, dsll and ori instruction block that can be modified
313 // by UpdateLoad64Value, either during compilation (eg. Assembler::bind), or
314 // during execution (eg. jit::PatchJump).
// Pointer-typed convenience wrapper; see the ImmWord overload below.
void
MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmPtr imm)
{
    return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
}
320 
// Emit a FIXED-LENGTH sequence loading |imm| into dest, suitable for later
// in-place patching (patchers rely on the exact instruction count):
//   Li64: 6 instructions covering all 64 bits;
//   otherwise (Li48): 4 instructions covering a 48-bit value, using a
//   rotate to place the two loaded 16-bit fields — assumes bits [63:48]
//   are not needed (true for canonical user-space pointers here).
void
MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmWord imm, LiFlags flags)
{
    if (Li64 == flags) {
        m_buffer.ensureSpace(6 * sizeof(uint32_t));
        as_lui(dest, Imm16::Upper(Imm32(imm.value >> 32)).encode());
        as_ori(dest, dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
        as_dsll(dest, dest, 16);
        as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
        as_dsll(dest, dest, 16);
        as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
    } else {
        m_buffer.ensureSpace(4 * sizeof(uint32_t));
        as_lui(dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
        as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
        as_drotr32(dest, dest, 48);
        as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
    }
}
340 
341 // Shifts
// 64-bit shift/rotate by immediate.  MIPS encodes only a 5-bit shift amount,
// so amounts of 32-63 use the "...32" instruction variants (which add 32 to
// the encoded amount).
void
MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Imm32 shift)
{
    if (31 < shift.value)
      as_dsll32(rd, rt, shift.value);
    else
      as_dsll(rd, rt, shift.value);
}

void
MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Imm32 shift)
{
    if (31 < shift.value)
      as_dsrl32(rd, rt, shift.value);
    else
      as_dsrl(rd, rt, shift.value);
}

void
MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Imm32 shift)
{
    if (31 < shift.value)
      as_dsra32(rd, rt, shift.value);
    else
      as_dsra(rd, rt, shift.value);
}

void
MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Imm32 shift)
{
    if (31 < shift.value)
      as_drotr32(rd, rt, shift.value);
    else
      as_drotr(rd, rt, shift.value);
}

// Rotate left, implemented as a right-rotate by the complementary amount.
void
MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Imm32 shift)
{
    uint32_t s =  64 - shift.value;

    if (31 < s)
      as_drotr32(rd, rt, s);
    else
      as_drotr(rd, rt, s);
}
388 
// 64-bit shift/rotate by a register amount (variable-shift instructions).
void
MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Register shift)
{
    as_dsllv(rd, rt, shift);
}

void
MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Register shift)
{
    as_dsrlv(rd, rt, shift);
}

void
MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Register shift)
{
    as_dsrav(rd, rt, shift);
}

void
MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Register shift)
{
    as_drotrv(rd, rt, shift);
}

// Rotate left by register: negate the amount (mod 64 in hardware) and
// rotate right.  Clobbers ScratchRegister.
void
MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Register shift)
{
    ma_negu(ScratchRegister, shift);
    as_drotrv(rd, rt, ScratchRegister);
}
419 
// Insert |size| bits of rs into rt at bit position |pos|, picking the DINS
// variant whose encodable field ranges cover the request (DINSU handles
// positions of 32 and above).
void
MacroAssemblerMIPS64::ma_dins(Register rt, Register rs, Imm32 pos, Imm32 size)
{
    if (pos.value >= 0 && pos.value < 32) {
        // NOTE(review): dinsm is chosen whenever size >= 2, even when
        // pos + size <= 32 where plain dins would also apply — confirm the
        // assembler's dinsm encoding accepts those field combinations.
        if (size.value >= 2)
          as_dinsm(rt, rs, pos.value, size.value);
        else
          as_dins(rt, rs, pos.value, size.value);
    } else {
        as_dinsu(rt, rs, pos.value, size.value);
    }
}
432 
// Extract |size| bits starting at bit |pos| from rs into rt.  DEXT handles
// sizes up to 32, DEXTM sizes of 33-64, and DEXTU positions of 32 and above.
void
MacroAssemblerMIPS64::ma_dext(Register rt, Register rs, Imm32 pos, Imm32 size)
{
    if (pos.value >= 0 && pos.value < 32) {
        if (size.value > 32)
          as_dextm(rt, rs, pos.value, size.value);
        else
          as_dext(rt, rs, pos.value, size.value);
    } else {
        as_dextu(rt, rs, pos.value, size.value);
    }
}
445 
446 // Arithmetic-based ops.
447 
448 // Add.
// 64-bit addition helpers.  The immediate form falls back to materializing
// the constant in ScratchRegister when it does not fit in the signed 16-bit
// field of daddiu.
void
MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs, Imm32 imm)
{
    if (Imm16::IsInSignedRange(imm.value)) {
        as_daddiu(rd, rs, imm.value);
    } else {
        ma_li(ScratchRegister, imm);
        as_daddu(rd, rs, ScratchRegister);
    }
}

// rd += rs
void
MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs)
{
    as_daddu(rd, rd, rs);
}

// rd += imm
void
MacroAssemblerMIPS64::ma_daddu(Register rd, Imm32 imm)
{
    ma_daddu(rd, rd, imm);
}
471 
// 32-bit add with overflow check: compute the sum both as a (sign-extended)
// 32-bit add and as a full 64-bit add; if they disagree, the 32-bit add
// overflowed and we branch to |overflow|.  Clobbers ScratchRegister.
void
MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
{
    as_addu(rd, rs, rt);
    as_daddu(ScratchRegister, rs, rt);
    ma_b(rd, ScratchRegister, overflow, Assembler::NotEqual);
}
479 
// Immediate variant of the 32-/64-bit comparison overflow check above.
void
MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow)
{
    // Check for signed range because of as_daddiu
    if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) {
        as_addiu(rd, rs, imm.value);
        as_daddiu(ScratchRegister, rs, imm.value);
        ma_b(rd, ScratchRegister, overflow, Assembler::NotEqual);
    } else {
        // Immediate doesn't fit: materialize it and use the register form.
        ma_li(ScratchRegister, imm);
        ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
    }
}
493 
494 // Subtract.
// 64-bit subtraction of an immediate: prefer a single daddiu with the
// negated immediate when it fits in 16 signed bits.
// NOTE(review): -imm.value is signed-overflow UB when imm.value == INT32_MIN;
// assumes callers never pass that value — confirm.
void
MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs, Imm32 imm)
{
    if (Imm16::IsInSignedRange(-imm.value)) {
        as_daddiu(rd, rs, -imm.value);
    } else {
        ma_li(ScratchRegister, imm);
        as_dsubu(rd, rs, ScratchRegister);
    }
}

// rd -= imm
void
MacroAssemblerMIPS64::ma_dsubu(Register rd, Imm32 imm)
{
    ma_dsubu(rd, rd, imm);
}
511 
// 32-bit subtract with overflow check, mirroring ma_addTestOverflow: the
// 32-bit and 64-bit differences disagree exactly when the 32-bit subtract
// overflowed.  Clobbers ScratchRegister.
void
MacroAssemblerMIPS64::ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
{
    as_subu(rd, rs, rt);
    as_dsubu(ScratchRegister, rs, rt);
    ma_b(rd, ScratchRegister, overflow, Assembler::NotEqual);
}
519 
// 64-bit multiply rs by an immediate; result lands in HI/LO as per dmult.
// Clobbers ScratchRegister.
void
MacroAssemblerMIPS64::ma_dmult(Register rs, Imm32 imm)
{
    ma_li(ScratchRegister, imm);
    as_dmult(rs, ScratchRegister);
}
526 
527 // Memory.
528 
// Load of 1/2/4/8 bytes from |address| into dest, with zero or sign
// extension for the sub-doubleword sizes.  Offsets outside the signed
// 16-bit load-instruction range are folded into ScratchRegister first.
void
MacroAssemblerMIPS64::ma_load(Register dest, Address address,
                              LoadStoreSize size, LoadStoreExtension extension)
{
    int16_t encodedOffset;
    Register base;
    if (!Imm16::IsInSignedRange(address.offset)) {
        // Offset too large: base = address.base + offset, offset = 0.
        ma_li(ScratchRegister, Imm32(address.offset));
        as_daddu(ScratchRegister, address.base, ScratchRegister);
        base = ScratchRegister;
        encodedOffset = Imm16(0).encode();
    } else {
        encodedOffset = Imm16(address.offset).encode();
        base = address.base;
    }

    switch (size) {
      case SizeByte:
        if (ZeroExtend == extension)
            as_lbu(dest, base, encodedOffset);
        else
            as_lb(dest, base, encodedOffset);
        break;
      case SizeHalfWord:
        if (ZeroExtend == extension)
            as_lhu(dest, base, encodedOffset);
        else
            as_lh(dest, base, encodedOffset);
        break;
      case SizeWord:
        if (ZeroExtend == extension)
            as_lwu(dest, base, encodedOffset);
        else
            as_lw(dest, base, encodedOffset);
        break;
      case SizeDouble:
        // Full 64-bit load; no extension distinction.
        as_ld(dest, base, encodedOffset);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_load");
    }
}
571 
// Store of 1/2/4/8 bytes of |data| to |address|.  The extension parameter is
// accepted for symmetry with ma_load but is irrelevant for stores.  Large
// offsets are folded into ScratchRegister as in ma_load.
void
MacroAssemblerMIPS64::ma_store(Register data, Address address, LoadStoreSize size,
                               LoadStoreExtension extension)
{
    int16_t encodedOffset;
    Register base;
    if (!Imm16::IsInSignedRange(address.offset)) {
        // Offset too large: base = address.base + offset, offset = 0.
        ma_li(ScratchRegister, Imm32(address.offset));
        as_daddu(ScratchRegister, address.base, ScratchRegister);
        base = ScratchRegister;
        encodedOffset = Imm16(0).encode();
    } else {
        encodedOffset = Imm16(address.offset).encode();
        base = address.base;
    }

    switch (size) {
      case SizeByte:
        as_sb(data, base, encodedOffset);
        break;
      case SizeHalfWord:
        as_sh(data, base, encodedOffset);
        break;
      case SizeWord:
        as_sw(data, base, encodedOffset);
        break;
      case SizeDouble:
        as_sd(data, base, encodedOffset);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_store");
    }
}
605 
// dest = base + (index << scale).  Uses ScratchRegister for the shifted
// index; the BaseIndex offset is NOT applied here (callers add it).
void
MacroAssemblerMIPS64Compat::computeScaledAddress(const BaseIndex& address, Register dest)
{
    int32_t shift = Imm32::ShiftOf(address.scale).value;
    if (shift) {
        ma_dsll(ScratchRegister, address.index, Imm32(shift));
        as_daddu(dest, address.base, ScratchRegister);
    } else {
        as_daddu(dest, address.base, address.index);
    }
}
617 
// Stack push/pop of one full 64-bit GPR (ld/sd move a doubleword).
// Pop a 64-bit value from the stack into r.
void
MacroAssemblerMIPS64::ma_pop(Register r)
{
    as_ld(r, StackPointer, 0);
    as_daddiu(StackPointer, StackPointer, sizeof(intptr_t));
}

// Push a 64-bit register onto the stack.
void
MacroAssemblerMIPS64::ma_push(Register r)
{
    if (r == sp) {
        // Pushing sp requires one more instruction.
        ma_move(ScratchRegister, sp);
        r = ScratchRegister;
    }

    as_daddiu(StackPointer, StackPointer, (int32_t)-sizeof(intptr_t));
    as_sd(r, StackPointer, 0);
}
638 
639 // Branches when done from within mips-specific code.
// Compare lhs with a 64-bit immediate and branch.  Comparisons against zero
// can fold to an unconditional branch (Always, and unsigned >= 0 which is
// always true), a no-op (unsigned < 0, always false), or a single-register
// branch; other immediates are materialized into ScratchRegister.
void
MacroAssemblerMIPS64::ma_b(Register lhs, ImmWord imm, Label* label, Condition c, JumpKind jumpKind)
{
    MOZ_ASSERT(c != Overflow);
    if (imm.value == 0) {
        if (c == Always || c == AboveOrEqual)
            ma_b(label, jumpKind);
        else if (c == Below)
            ; // This condition is always false. No branch required.
        else
            branchWithCode(getBranchCode(lhs, c), label, jumpKind);
    } else {
        MOZ_ASSERT(lhs != ScratchRegister);
        ma_li(ScratchRegister, imm);
        ma_b(lhs, ScratchRegister, label, c, jumpKind);
    }
}
657 
// Compare lhs against the 64-bit value loaded from |addr| and branch.
void
MacroAssemblerMIPS64::ma_b(Register lhs, Address addr, Label* label, Condition c, JumpKind jumpKind)
{
    MOZ_ASSERT(lhs != ScratchRegister);
    ma_load(ScratchRegister, addr, SizeDouble);
    ma_b(lhs, ScratchRegister, label, c, jumpKind);
}
665 
// Compare the 64-bit value at |addr| against an immediate and branch.
// Uses SecondScratchReg for the load (ScratchRegister is needed by the
// immediate comparison).
void
MacroAssemblerMIPS64::ma_b(Address addr, Imm32 imm, Label* label, Condition c, JumpKind jumpKind)
{
    ma_load(SecondScratchReg, addr, SizeDouble);
    ma_b(SecondScratchReg, imm, label, c, jumpKind);
}
672 
// Same as above for a GC-pointer immediate.
void
MacroAssemblerMIPS64::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c, JumpKind jumpKind)
{
    ma_load(SecondScratchReg, addr, SizeDouble);
    ma_b(SecondScratchReg, imm, label, c, jumpKind);
}
679 
// Branch-and-link (call) to |label|.  Bound labels get an immediate long
// jump; unbound labels get a fixed-size placeholder that is linked into the
// label's chain and patched at bind time.
void
MacroAssemblerMIPS64::ma_bal(Label* label, DelaySlotFill delaySlotFill)
{
    if (label->bound()) {
        // Generate the long jump for calls because return address has to be
        // the address after the reserved block.
        addLongJump(nextOffset());
        ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
        as_jalr(ScratchRegister);
        if (delaySlotFill == FillDelaySlot)
            as_nop();
        return;
    }

    // Second word holds a pointer to the next branch in label's chain.
    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

    // Make the whole branch continuous in the buffer. The '6'
    // instructions are written below (including the delay slot).
    m_buffer.ensureSpace(6 * sizeof(uint32_t));

    BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
    writeInst(nextInChain);
    if (!oom())
        label->use(bo.getOffset());
    // Leave space for long jump.
    as_nop();
    as_nop();
    as_nop();
    if (delaySlotFill == FillDelaySlot)
        as_nop();
}
712 
// Emit the branch instruction |code| targeting |label|.  Bound labels are
// resolved immediately (short PC-relative branch when in range, otherwise a
// long absolute jump, inverted around the jump for conditional branches);
// unbound labels get a fixed-size placeholder linked into the label's chain
// for later patching.
void
MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label, JumpKind jumpKind)
{
    // bgezal (branch-and-link) must go through ma_bal, not here.
    MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
    // beq $zero,$zero is the encoding of an unconditional branch.
    InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));

    if (label->bound()) {
        int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();

        // Prefer the short form whenever the 16-bit branch offset reaches.
        if (BOffImm16::IsInRange(offset))
            jumpKind = ShortJump;

        if (jumpKind == ShortJump) {
            MOZ_ASSERT(BOffImm16::IsInRange(offset));
            code.setBOffImm16(BOffImm16(offset));
            writeInst(code.encode());
            as_nop();
            return;
        }

        if (code.encode() == inst_beq.encode()) {
            // Handle long jump
            addLongJump(nextOffset());
            ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
            as_jr(ScratchRegister);
            as_nop();
            return;
        }

        // Handle long conditional branch: invert the condition to skip over
        // the 7-instruction long-jump sequence emitted below.
        writeInst(invertBranch(code, BOffImm16(7 * sizeof(uint32_t))).encode());
        // No need for a "nop" here because we can clobber scratch.
        addLongJump(nextOffset());
        ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
        as_jr(ScratchRegister);
        as_nop();
        return;
    }

    // Generate open jump and link it to a label.

    // Second word holds a pointer to the next branch in label's chain.
    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

    if (jumpKind == ShortJump) {
        // Make the whole branch continuous in the buffer.
        m_buffer.ensureSpace(2 * sizeof(uint32_t));

        // Indicate that this is short jump with offset 4.
        code.setBOffImm16(BOffImm16(4));
        BufferOffset bo = writeInst(code.encode());
        writeInst(nextInChain);
        if (!oom())
            label->use(bo.getOffset());
        return;
    }

    bool conditional = code.encode() != inst_beq.encode();

    // Make the whole branch continuous in the buffer. The '7'
    // instructions are written below (including the conditional nop).
    m_buffer.ensureSpace(7 * sizeof(uint32_t));

    BufferOffset bo = writeInst(code.encode());
    writeInst(nextInChain);
    if (!oom())
        label->use(bo.getOffset());
    // Leave space for potential long jump.
    as_nop();
    as_nop();
    as_nop();
    as_nop();
    if (conditional)
        as_nop();
}
789 
// rd = (rs <c> imm) ? 1 : 0, with the 64-bit immediate materialized in
// ScratchRegister.
void
MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmWord imm, Condition c)
{
    ma_li(ScratchRegister, imm);
    ma_cmp_set(rd, rs, ScratchRegister, c);
}

// Pointer-immediate variant of the above.
void
MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmPtr imm, Condition c)
{
    ma_li(ScratchRegister, ImmWord(uintptr_t(imm.value)));
    ma_cmp_set(rd, rs, ScratchRegister, c);
}
803 
804 // fp instructions
// Load a double-precision constant: materialize its 64-bit pattern in
// ScratchRegister and move it into the FPR.
void
MacroAssemblerMIPS64::ma_lid(FloatRegister dest, double value)
{
    ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));

    ma_li(ScratchRegister, imm);
    moveToDouble(ScratchRegister, dest);
}
813 
// Raw 64-bit bit moves between an FPR and a boxed Value register.
void
MacroAssemblerMIPS64::ma_mv(FloatRegister src, ValueOperand dest)
{
    as_dmfc1(dest.valueReg(), src);
}

void
MacroAssemblerMIPS64::ma_mv(ValueOperand src, FloatRegister dest)
{
    as_dmtc1(src.valueReg(), dest);
}
825 
// Load a single-precision float from |address|, computing the effective
// address in ScratchRegister when the offset exceeds the signed 16-bit
// instruction field.
void
MacroAssemblerMIPS64::ma_ls(FloatRegister ft, Address address)
{
    if (Imm16::IsInSignedRange(address.offset)) {
        as_ls(ft, address.base, address.offset);
    } else {
        // The base must not be the scratch register we are about to clobber.
        MOZ_ASSERT(address.base != ScratchRegister);
        ma_li(ScratchRegister, Imm32(address.offset));
        as_daddu(ScratchRegister, address.base, ScratchRegister);
        as_ls(ft, ScratchRegister, 0);
    }
}
838 
839 void
ma_ld(FloatRegister ft,Address address)840 MacroAssemblerMIPS64::ma_ld(FloatRegister ft, Address address)
841 {
842     if (Imm16::IsInSignedRange(address.offset)) {
843         as_ld(ft, address.base, address.offset);
844     } else {
845         ma_li(ScratchRegister, Imm32(address.offset));
846         as_daddu(ScratchRegister, address.base, ScratchRegister);
847         as_ld(ft, ScratchRegister, 0);
848     }
849 }
850 
851 void
ma_sd(FloatRegister ft,Address address)852 MacroAssemblerMIPS64::ma_sd(FloatRegister ft, Address address)
853 {
854     if (Imm16::IsInSignedRange(address.offset)) {
855         as_sd(ft, address.base, address.offset);
856     } else {
857         ma_li(ScratchRegister, Imm32(address.offset));
858         as_daddu(ScratchRegister, address.base, ScratchRegister);
859         as_sd(ft, ScratchRegister, 0);
860     }
861 }
862 
863 void
ma_ss(FloatRegister ft,Address address)864 MacroAssemblerMIPS64::ma_ss(FloatRegister ft, Address address)
865 {
866     if (Imm16::IsInSignedRange(address.offset)) {
867         as_ss(ft, address.base, address.offset);
868     } else {
869         ma_li(ScratchRegister, Imm32(address.offset));
870         as_daddu(ScratchRegister, address.base, ScratchRegister);
871         as_ss(ft, ScratchRegister, 0);
872     }
873 }
874 
void
MacroAssemblerMIPS64::ma_pop(FloatRegister fs)
{
    // Pop a double off the stack: load from SP, then bump SP up.
    ma_ld(fs, Address(StackPointer, 0));
    as_daddiu(StackPointer, StackPointer, sizeof(double));
}
881 
void
MacroAssemblerMIPS64::ma_push(FloatRegister fs)
{
    // Push a double onto the stack: bump SP down, then store at SP.
    as_daddiu(StackPointer, StackPointer, (int32_t)-sizeof(double));
    ma_sd(fs, Address(StackPointer, 0));
}
888 
bool
MacroAssemblerMIPS64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr)
{
    // Push a minimal fake exit frame (descriptor + return address) so that
    // out-of-line code appears to have a proper IonJS caller frame.
    uint32_t descriptor = MakeFrameDescriptor(asMasm().framePushed(), JitFrame_IonJS);

    asMasm().Push(Imm32(descriptor)); // descriptor_
    asMasm().Push(ImmPtr(fakeReturnAddr));

    return true;
}
899 
void
MacroAssemblerMIPS64Compat::add32(Register src, Register dest)
{
    // 32-bit add: dest += src.
    as_addu(dest, dest, src);
}
905 
void
MacroAssemblerMIPS64Compat::add32(Imm32 imm, Register dest)
{
    // 32-bit add with immediate: dest += imm.
    ma_addu(dest, dest, imm);
}
911 
912 void
913 
add32(Imm32 imm,const Address & dest)914 MacroAssemblerMIPS64Compat::add32(Imm32 imm, const Address& dest)
915 {
916     load32(dest, SecondScratchReg);
917     ma_addu(SecondScratchReg, imm);
918     store32(SecondScratchReg, dest);
919 }
920 
void
MacroAssemblerMIPS64Compat::addPtr(Register src, Register dest)
{
    // 64-bit pointer add: dest += src.
    ma_daddu(dest, src);
}
926 
void
MacroAssemblerMIPS64Compat::addPtr(const Address& src, Register dest)
{
    // dest += *src, staged through ScratchRegister.
    loadPtr(src, ScratchRegister);
    ma_daddu(dest, ScratchRegister);
}
933 
void
MacroAssemblerMIPS64Compat::subPtr(Register src, Register dest)
{
    // 64-bit pointer subtract: dest -= src.
    as_dsubu(dest, dest, src);
}
939 
void
MacroAssemblerMIPS64Compat::move32(Imm32 imm, Register dest)
{
    // Load a 32-bit immediate into dest.
    ma_li(dest, imm);
}
945 
void
MacroAssemblerMIPS64Compat::move32(Register src, Register dest)
{
    // Register-to-register move.
    ma_move(dest, src);
}
951 
void
MacroAssemblerMIPS64Compat::movePtr(Register src, Register dest)
{
    // Register-to-register pointer move.
    ma_move(dest, src);
}
void
MacroAssemblerMIPS64Compat::movePtr(ImmWord imm, Register dest)
{
    // Load a 64-bit immediate into dest.
    ma_li(dest, imm);
}
962 
void
MacroAssemblerMIPS64Compat::movePtr(ImmGCPtr imm, Register dest)
{
    // Load a GC-thing pointer immediate into dest.
    ma_li(dest, imm);
}
968 
void
MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest)
{
    // Load an arbitrary pointer immediate, reusing the ImmWord path.
    movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
void
MacroAssemblerMIPS64Compat::movePtr(wasm::SymbolicAddress imm, Register dest)
{
    // Record a link so the symbolic address can be resolved later, then
    // emit a patchable immediate load (placeholder value -1).
    append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm));
    ma_liPatchable(dest, ImmWord(-1));
}
980 
void
MacroAssemblerMIPS64Compat::load8ZeroExtend(const Address& address, Register dest)
{
    // Load one byte and zero-extend it into dest.
    ma_load(dest, address, SizeByte, ZeroExtend);
}
986 
void
MacroAssemblerMIPS64Compat::load8ZeroExtend(const BaseIndex& src, Register dest)
{
    // Load one byte (base+index addressing) and zero-extend it into dest.
    ma_load(dest, src, SizeByte, ZeroExtend);
}
992 
void
MacroAssemblerMIPS64Compat::load8SignExtend(const Address& address, Register dest)
{
    // Load one byte and sign-extend it into dest.
    ma_load(dest, address, SizeByte, SignExtend);
}
998 
void
MacroAssemblerMIPS64Compat::load8SignExtend(const BaseIndex& src, Register dest)
{
    // Load one byte (base+index addressing) and sign-extend it into dest.
    ma_load(dest, src, SizeByte, SignExtend);
}
1004 
void
MacroAssemblerMIPS64Compat::load16ZeroExtend(const Address& address, Register dest)
{
    // Load a halfword and zero-extend it into dest.
    ma_load(dest, address, SizeHalfWord, ZeroExtend);
}
1010 
void
MacroAssemblerMIPS64Compat::load16ZeroExtend(const BaseIndex& src, Register dest)
{
    // Load a halfword (base+index addressing) and zero-extend it into dest.
    ma_load(dest, src, SizeHalfWord, ZeroExtend);
}
1016 
void
MacroAssemblerMIPS64Compat::load16SignExtend(const Address& address, Register dest)
{
    // Load a halfword and sign-extend it into dest.
    ma_load(dest, address, SizeHalfWord, SignExtend);
}
1022 
void
MacroAssemblerMIPS64Compat::load16SignExtend(const BaseIndex& src, Register dest)
{
    // Load a halfword (base+index addressing) and sign-extend it into dest.
    ma_load(dest, src, SizeHalfWord, SignExtend);
}
1028 
void
MacroAssemblerMIPS64Compat::load32(const Address& address, Register dest)
{
    // Load a 32-bit word into dest.
    ma_load(dest, address, SizeWord);
}
1034 
void
MacroAssemblerMIPS64Compat::load32(const BaseIndex& address, Register dest)
{
    // Load a 32-bit word (base+index addressing) into dest.
    ma_load(dest, address, SizeWord);
}
1040 
void
MacroAssemblerMIPS64Compat::load32(AbsoluteAddress address, Register dest)
{
    // Materialize the absolute address in ScratchRegister, then load.
    movePtr(ImmPtr(address.addr), ScratchRegister);
    load32(Address(ScratchRegister, 0), dest);
}
1047 
void
MacroAssemblerMIPS64Compat::load32(wasm::SymbolicAddress address, Register dest)
{
    // Materialize the (patchable) symbolic address, then load 32 bits.
    movePtr(address, ScratchRegister);
    load32(Address(ScratchRegister, 0), dest);
}
1054 
void
MacroAssemblerMIPS64Compat::loadPtr(const Address& address, Register dest)
{
    // Load a full 64-bit word (SizeDouble = 8 bytes) into dest.
    ma_load(dest, address, SizeDouble);
}
1060 
void
MacroAssemblerMIPS64Compat::loadPtr(const BaseIndex& src, Register dest)
{
    // Load a full 64-bit word (base+index addressing) into dest.
    ma_load(dest, src, SizeDouble);
}
1066 
void
MacroAssemblerMIPS64Compat::loadPtr(AbsoluteAddress address, Register dest)
{
    // Materialize the absolute address in ScratchRegister, then load.
    movePtr(ImmPtr(address.addr), ScratchRegister);
    loadPtr(Address(ScratchRegister, 0), dest);
}
1073 
void
MacroAssemblerMIPS64Compat::loadPtr(wasm::SymbolicAddress address, Register dest)
{
    // Materialize the (patchable) symbolic address, then load the pointer.
    movePtr(address, ScratchRegister);
    loadPtr(Address(ScratchRegister, 0), dest);
}
1080 
void
MacroAssemblerMIPS64Compat::loadPrivate(const Address& address, Register dest)
{
    // Load a boxed private value and shift left by one to recover the raw
    // pointer (private pointers are stored shifted right by one bit).
    loadPtr(address, dest);
    ma_dsll(dest, dest, Imm32(1));
}
1087 
void
MacroAssemblerMIPS64Compat::loadDouble(const Address& address, FloatRegister dest)
{
    // Load a double from memory into an FPR.
    ma_ld(dest, address);
}
1093 
void
MacroAssemblerMIPS64Compat::loadDouble(const BaseIndex& src, FloatRegister dest)
{
    // Compute base + scaled index into SecondScratchReg, then load.
    computeScaledAddress(src, SecondScratchReg);
    ma_ld(dest, Address(SecondScratchReg, src.offset));
}
1100 
void
MacroAssemblerMIPS64Compat::loadFloatAsDouble(const Address& address, FloatRegister dest)
{
    // Load a single-precision float and widen it to double in place.
    ma_ls(dest, address);
    as_cvtds(dest, dest);
}
1107 
void
MacroAssemblerMIPS64Compat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
{
    // Load a single-precision float (base+index) and widen to double.
    loadFloat32(src, dest);
    as_cvtds(dest, dest);
}
1114 
void
MacroAssemblerMIPS64Compat::loadFloat32(const Address& address, FloatRegister dest)
{
    // Load a single-precision float into an FPR.
    ma_ls(dest, address);
}
1120 
void
MacroAssemblerMIPS64Compat::loadFloat32(const BaseIndex& src, FloatRegister dest)
{
    // Compute base + scaled index into SecondScratchReg, then load.
    computeScaledAddress(src, SecondScratchReg);
    ma_ls(dest, Address(SecondScratchReg, src.offset));
}
1127 
void
MacroAssemblerMIPS64Compat::store8(Imm32 imm, const Address& address)
{
    // Stage the immediate in SecondScratchReg, then store one byte.
    ma_li(SecondScratchReg, imm);
    ma_store(SecondScratchReg, address, SizeByte);
}
1134 
void
MacroAssemblerMIPS64Compat::store8(Register src, const Address& address)
{
    // Store the low byte of src.
    ma_store(src, address, SizeByte);
}
1140 
void
MacroAssemblerMIPS64Compat::store8(Imm32 imm, const BaseIndex& dest)
{
    // Store an immediate byte (base+index addressing).
    ma_store(imm, dest, SizeByte);
}
1146 
void
MacroAssemblerMIPS64Compat::store8(Register src, const BaseIndex& dest)
{
    // Store the low byte of src (base+index addressing).
    ma_store(src, dest, SizeByte);
}
1152 
void
MacroAssemblerMIPS64Compat::store16(Imm32 imm, const Address& address)
{
    // Stage the immediate in SecondScratchReg, then store a halfword.
    ma_li(SecondScratchReg, imm);
    ma_store(SecondScratchReg, address, SizeHalfWord);
}
1159 
void
MacroAssemblerMIPS64Compat::store16(Register src, const Address& address)
{
    // Store the low halfword of src.
    ma_store(src, address, SizeHalfWord);
}
1165 
void
MacroAssemblerMIPS64Compat::store16(Imm32 imm, const BaseIndex& dest)
{
    // Store an immediate halfword (base+index addressing).
    ma_store(imm, dest, SizeHalfWord);
}
1171 
void
MacroAssemblerMIPS64Compat::store16(Register src, const BaseIndex& address)
{
    // Store the low halfword of src (base+index addressing).
    ma_store(src, address, SizeHalfWord);
}
1177 
void
MacroAssemblerMIPS64Compat::store32(Register src, AbsoluteAddress address)
{
    // Materialize the absolute address in ScratchRegister, then store.
    movePtr(ImmPtr(address.addr), ScratchRegister);
    store32(src, Address(ScratchRegister, 0));
}
1184 
void
MacroAssemblerMIPS64Compat::store32(Register src, const Address& address)
{
    // Store the low 32 bits of src.
    ma_store(src, address, SizeWord);
}
1190 
void
MacroAssemblerMIPS64Compat::store32(Imm32 src, const Address& address)
{
    // Stage the immediate in SecondScratchReg, then store 32 bits.
    move32(src, SecondScratchReg);
    ma_store(SecondScratchReg, address, SizeWord);
}
1197 
void
MacroAssemblerMIPS64Compat::store32(Imm32 imm, const BaseIndex& dest)
{
    // Store an immediate word (base+index addressing).
    ma_store(imm, dest, SizeWord);
}
1203 
void
MacroAssemblerMIPS64Compat::store32(Register src, const BaseIndex& dest)
{
    // Store the low 32 bits of src (base+index addressing).
    ma_store(src, dest, SizeWord);
}
1209 
// Store a 64-bit immediate (SizeDouble = 8 bytes) to an Address or BaseIndex,
// staged through SecondScratchReg.
template <typename T>
void
MacroAssemblerMIPS64Compat::storePtr(ImmWord imm, T address)
{
    ma_li(SecondScratchReg, imm);
    ma_store(SecondScratchReg, address, SizeDouble);
}

template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmWord imm, Address address);
template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(ImmWord imm, BaseIndex address);
1220 
// Store a pointer immediate by reusing the ImmWord path.
template <typename T>
void
MacroAssemblerMIPS64Compat::storePtr(ImmPtr imm, T address)
{
    storePtr(ImmWord(uintptr_t(imm.value)), address);
}

template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmPtr imm, Address address);
template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(ImmPtr imm, BaseIndex address);
1230 
// Store a GC-thing pointer immediate by reusing the ImmWord path.
template <typename T>
void
MacroAssemblerMIPS64Compat::storePtr(ImmGCPtr imm, T address)
{
    storePtr(ImmWord(uintptr_t(imm.value)), address);
}

template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmGCPtr imm, Address address);
template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(ImmGCPtr imm, BaseIndex address);
1240 
void
MacroAssemblerMIPS64Compat::storePtr(Register src, const Address& address)
{
    // Store the full 64-bit register.
    ma_store(src, address, SizeDouble);
}
1246 
void
MacroAssemblerMIPS64Compat::storePtr(Register src, const BaseIndex& address)
{
    // Store the full 64-bit register (base+index addressing).
    ma_store(src, address, SizeDouble);
}
1252 
void
MacroAssemblerMIPS64Compat::storePtr(Register src, AbsoluteAddress dest)
{
    // Materialize the absolute address in ScratchRegister, then store.
    movePtr(ImmPtr(dest.addr), ScratchRegister);
    storePtr(src, Address(ScratchRegister, 0));
}
1259 
void
MacroAssemblerMIPS64Compat::clampIntToUint8(Register reg)
{
    // Clamp a signed int in reg to the range [0, 255], in place.
    // look at (reg >> 8) if it is 0, then src shouldn't be clamped
    // if it is <0, then we want to clamp to 0,
    // otherwise, we wish to clamp to 255
    Label done;
    // Arithmetic shift right by 8 preserves the sign, so the scratch value
    // is zero iff reg already fits in a byte.
    ma_move(ScratchRegister, reg);
    asMasm().rshiftPtrArithmetic(Imm32(8), ScratchRegister);
    ma_b(ScratchRegister, ScratchRegister, &done, Assembler::Zero, ShortJump);
    {
        Label negative;
        // Negative input -> clamp to 0; otherwise (> 255) -> clamp to 255.
        ma_b(ScratchRegister, ScratchRegister, &negative, Assembler::Signed, ShortJump);
        {
            ma_li(reg, Imm32(255));
            ma_b(&done, ShortJump);
        }
        bind(&negative);
        {
            ma_move(reg, zero);
        }
    }
    bind(&done);
}
1284 
// Note: this function clobbers the input register.
void
MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
{
    // Clamp a double to [0, 255] with round-half-to-even semantics,
    // producing an integer in output.
    MOZ_ASSERT(input != ScratchDoubleReg);
    Label positive, done;

    // <= 0 or NaN --> 0
    zeroDouble(ScratchDoubleReg);
    branchDouble(DoubleGreaterThan, input, ScratchDoubleReg, &positive);
    {
        move32(Imm32(0), output);
        jump(&done);
    }

    bind(&positive);

    // Add 0.5 and truncate.
    loadConstantDouble(0.5, ScratchDoubleReg);
    addDouble(ScratchDoubleReg, input);

    Label outOfRange;

    // Truncation failure or a result above 255 both clamp to 255.
    branchTruncateDouble(input, output, &outOfRange);
    branch32(Assembler::Above, output, Imm32(255), &outOfRange);
    {
        // Check if we had a tie.
        convertInt32ToDouble(output, ScratchDoubleReg);
        branchDouble(DoubleNotEqual, input, ScratchDoubleReg, &done);

        // It was a tie. Mask out the ones bit to get an even value.
        // See also js_TypedArray_uint8_clamp_double.
        and32(Imm32(~1), output);
        jump(&done);
    }

    // > 255 --> 255
    bind(&outOfRange);
    {
        move32(Imm32(255), output);
    }

    bind(&done);
}
1329 
void
MacroAssemblerMIPS64Compat::subPtr(Imm32 imm, const Register dest)
{
    // 64-bit subtract with immediate: dest -= imm.
    ma_dsubu(dest, dest, imm);
}
1335 
void
MacroAssemblerMIPS64Compat::subPtr(const Address& addr, const Register dest)
{
    // dest -= *addr, staged through SecondScratchReg.
    loadPtr(addr, SecondScratchReg);
    subPtr(SecondScratchReg, dest);
}
1342 
void
MacroAssemblerMIPS64Compat::subPtr(Register src, const Address& dest)
{
    // Read-modify-write: *dest -= src, staged through SecondScratchReg.
    loadPtr(dest, SecondScratchReg);
    subPtr(src, SecondScratchReg);
    storePtr(SecondScratchReg, dest);
}
1350 
void
MacroAssemblerMIPS64Compat::addPtr(Imm32 imm, const Register dest)
{
    // 64-bit add with immediate: dest += imm.
    ma_daddu(dest, imm);
}
1356 
void
MacroAssemblerMIPS64Compat::addPtr(Imm32 imm, const Address& dest)
{
    // Read-modify-write: *dest += imm, staged through ScratchRegister.
    loadPtr(dest, ScratchRegister);
    addPtr(imm, ScratchRegister);
    storePtr(ScratchRegister, dest);
}
1364 
void
MacroAssemblerMIPS64Compat::branchDouble(DoubleCondition cond, FloatRegister lhs,
                                         FloatRegister rhs, Label* label)
{
    // Compare two doubles and branch on the FP condition.
    ma_bc1d(lhs, rhs, label, cond);
}
1371 
void
MacroAssemblerMIPS64Compat::branchFloat(DoubleCondition cond, FloatRegister lhs,
                                        FloatRegister rhs, Label* label)
{
    // Compare two singles and branch on the FP condition.
    ma_bc1s(lhs, rhs, label, cond);
}
1378 
void
MacroAssemblerMIPS64Compat::branchTestGCThing(Condition cond, const Address& address, Label* label)
{
    // GC-thing tags form the upper range, so Equal maps to an unsigned
    // >= comparison against the lowest GC-thing tag.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
         (cond == Equal) ? AboveOrEqual : Below);
}
void
MacroAssemblerMIPS64Compat::branchTestGCThing(Condition cond, const BaseIndex& src, Label* label)
{
    // Same range check as the Address overload, with base+index addressing.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
         (cond == Equal) ? AboveOrEqual : Below);
}
1395 
void
MacroAssemblerMIPS64Compat::branchTestPrimitive(Condition cond, const ValueOperand& value,
                                                Label* label)
{
    // Split off the tag, then delegate to the tag-register overload.
    splitTag(value, SecondScratchReg);
    branchTestPrimitive(cond, SecondScratchReg, label);
}
void
MacroAssemblerMIPS64Compat::branchTestPrimitive(Condition cond, Register tag, Label* label)
{
    // Primitive tags sit below the exclusive upper bound, so Equal maps to
    // an unsigned < comparison.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET), label,
         (cond == Equal) ? Below : AboveOrEqual);
}
1410 
void
MacroAssemblerMIPS64Compat::branchTestInt32(Condition cond, const ValueOperand& value, Label* label)
{
    // Branch if the value's tag equals (or differs from) JSVAL_TAG_INT32.
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    splitTag(value, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
}
1418 
void
MacroAssemblerMIPS64Compat::branchTestInt32(Condition cond, Register tag, Label* label)
{
    // Branch on an already-extracted tag against JSVAL_TAG_INT32.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
}
1425 
void
MacroAssemblerMIPS64Compat::branchTestInt32(Condition cond, const Address& address, Label* label)
{
    // Extract the tag from memory, then compare against JSVAL_TAG_INT32.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
}
1433 
void
MacroAssemblerMIPS64Compat::branchTestInt32(Condition cond, const BaseIndex& src, Label* label)
{
    // Extract the tag (base+index), then compare against JSVAL_TAG_INT32.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
}
1441 
1442 void
branchTestBoolean(Condition cond,const ValueOperand & value,Label * label)1443 MacroAssemblerMIPS64Compat:: branchTestBoolean(Condition cond, const ValueOperand& value,
1444                                                Label* label)
1445 {
1446     MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
1447     splitTag(value, SecondScratchReg);
1448     ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
1449 }
1450 
1451 void
branchTestBoolean(Condition cond,Register tag,Label * label)1452 MacroAssemblerMIPS64Compat:: branchTestBoolean(Condition cond, Register tag, Label* label)
1453 {
1454     MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
1455     ma_b(tag, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
1456 }
1457 
void
MacroAssemblerMIPS64Compat::branchTestBoolean(Condition cond, const Address& address, Label* label)
{
    // Extract the tag from memory, then compare against JSVAL_TAG_BOOLEAN.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
}
1465 
void
MacroAssemblerMIPS64Compat::branchTestBoolean(Condition cond, const BaseIndex& src, Label* label)
{
    // Extract the tag (base+index), then compare against JSVAL_TAG_BOOLEAN.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
}
1473 
1474 void
branchTestDouble(Condition cond,const ValueOperand & value,Label * label)1475 MacroAssemblerMIPS64Compat::branchTestDouble(Condition cond, const ValueOperand& value, Label* label)
1476 {
1477     MOZ_ASSERT(cond == Assembler::Equal || cond == NotEqual);
1478     splitTag(value, SecondScratchReg);
1479     branchTestDouble(cond, SecondScratchReg, label);
1480 }
1481 
1482 void
branchTestDouble(Condition cond,Register tag,Label * label)1483 MacroAssemblerMIPS64Compat::branchTestDouble(Condition cond, Register tag, Label* label)
1484 {
1485     MOZ_ASSERT(cond == Assembler::Equal || cond == NotEqual);
1486     Condition actual = (cond == Equal) ? BelowOrEqual : Above;
1487     ma_b(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE), label, actual);
1488 }
1489 
void
MacroAssemblerMIPS64Compat::branchTestDouble(Condition cond, const Address& address, Label* label)
{
    // Extract the tag from memory, then delegate to the tag-register overload.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    branchTestDouble(cond, SecondScratchReg, label);
}
1497 
void
MacroAssemblerMIPS64Compat::branchTestDouble(Condition cond, const BaseIndex& src, Label* label)
{
    // Extract the tag (base+index), then delegate to the tag-register overload.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    branchTestDouble(cond, SecondScratchReg, label);
}
1505 
void
MacroAssemblerMIPS64Compat::branchTestNull(Condition cond, const ValueOperand& value, Label* label)
{
    // Branch if the value's tag equals (or differs from) JSVAL_TAG_NULL.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    splitTag(value, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), label, cond);
}
1513 
void
MacroAssemblerMIPS64Compat::branchTestNull(Condition cond, Register tag, Label* label)
{
    // Branch on an already-extracted tag against JSVAL_TAG_NULL.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
}
1520 
void
MacroAssemblerMIPS64Compat::branchTestNull(Condition cond, const BaseIndex& src, Label* label)
{
    // Extract the tag (base+index), then compare against JSVAL_TAG_NULL.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), label, cond);
}
1528 
1529 void
branchTestNull(Condition cond,const Address & address,Label * label)1530 MacroAssemblerMIPS64Compat::branchTestNull(Condition cond, const Address& address, Label* label) {
1531     MOZ_ASSERT(cond == Equal || cond == NotEqual);
1532     extractTag(address, SecondScratchReg);
1533     ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), label, cond);
1534 }
1535 
void
MacroAssemblerMIPS64Compat::testNullSet(Condition cond, const ValueOperand& value, Register dest)
{
    // Set dest to the boolean result of comparing the value's tag with
    // JSVAL_TAG_NULL.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    splitTag(value, SecondScratchReg);
    ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond);
}
1543 
void
MacroAssemblerMIPS64Compat::branchTestObject(Condition cond, const ValueOperand& value, Label* label)
{
    // Split off the tag, then delegate; cond is validated by the callee.
    splitTag(value, SecondScratchReg);
    branchTestObject(cond, SecondScratchReg, label);
}
1550 
void
MacroAssemblerMIPS64Compat::branchTestObject(Condition cond, Register tag, Label* label)
{
    // Branch on an already-extracted tag against JSVAL_TAG_OBJECT.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
}
1557 
void
MacroAssemblerMIPS64Compat::branchTestObject(Condition cond, const BaseIndex& src, Label* label)
{
    // Extract the tag (base+index), then compare against JSVAL_TAG_OBJECT.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), label, cond);
}
1565 
void
MacroAssemblerMIPS64Compat::branchTestObject(Condition cond, const Address& address, Label* label)
{
    // Extract the tag from memory, then compare against JSVAL_TAG_OBJECT.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), label, cond);
}
1573 
void
MacroAssemblerMIPS64Compat::testObjectSet(Condition cond, const ValueOperand& value, Register dest)
{
    // Set dest to the boolean result of comparing the value's tag with
    // JSVAL_TAG_OBJECT.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    splitTag(value, SecondScratchReg);
    ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond);
}
1581 
void
MacroAssemblerMIPS64Compat::branchTestString(Condition cond, const ValueOperand& value, Label* label)
{
    // Split off the tag, then delegate; cond is validated by the callee.
    splitTag(value, SecondScratchReg);
    branchTestString(cond, SecondScratchReg, label);
}
1588 
void
MacroAssemblerMIPS64Compat::branchTestString(Condition cond, Register tag, Label* label)
{
    // Branch on an already-extracted tag against JSVAL_TAG_STRING.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
}
1595 
void
MacroAssemblerMIPS64Compat::branchTestString(Condition cond, const BaseIndex& src, Label* label)
{
    // Extract the tag (base+index), then compare against JSVAL_TAG_STRING.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_STRING), label, cond);
}
1603 
void
MacroAssemblerMIPS64Compat::branchTestSymbol(Condition cond, const ValueOperand& value, Label* label)
{
    // Split off the tag, then delegate; cond is validated by the callee.
    splitTag(value, SecondScratchReg);
    branchTestSymbol(cond, SecondScratchReg, label);
}
1610 
void
MacroAssemblerMIPS64Compat::branchTestSymbol(Condition cond, const Register& tag, Label* label)
{
    // Branch on an already-extracted tag against JSVAL_TAG_SYMBOL.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
}
1617 
void
MacroAssemblerMIPS64Compat::branchTestSymbol(Condition cond, const BaseIndex& src, Label* label)
{
    // Extract the tag (base+index), then compare against JSVAL_TAG_SYMBOL.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
}
1625 
void
MacroAssemblerMIPS64Compat::branchTestUndefined(Condition cond, const ValueOperand& value,
                                                Label* label)
{
    // Branch if the value's tag equals (or differs from) JSVAL_TAG_UNDEFINED.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    splitTag(value, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
}
1634 
void
MacroAssemblerMIPS64Compat::branchTestUndefined(Condition cond, Register tag, Label* label)
{
    // Branch on an already-extracted tag against JSVAL_TAG_UNDEFINED.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
}
1641 
void
MacroAssemblerMIPS64Compat::branchTestUndefined(Condition cond, const BaseIndex& src, Label* label)
{
    // Extract the tag (base+index), then compare against JSVAL_TAG_UNDEFINED.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
}
1649 
void
MacroAssemblerMIPS64Compat::branchTestUndefined(Condition cond, const Address& address, Label* label)
{
    // Extract the tag from memory, then compare against JSVAL_TAG_UNDEFINED.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
}
1657 
void
MacroAssemblerMIPS64Compat::testUndefinedSet(Condition cond, const ValueOperand& value, Register dest)
{
    // Set dest to the boolean result of comparing the value's tag with
    // JSVAL_TAG_UNDEFINED.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    splitTag(value, SecondScratchReg);
    ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond);
}
1665 
void
MacroAssemblerMIPS64Compat::branchTestNumber(Condition cond, const ValueOperand& value, Label* label)
{
    // Split off the tag, then delegate; cond is validated by the callee.
    splitTag(value, SecondScratchReg);
    branchTestNumber(cond, SecondScratchReg, label);
}
1672 
void
MacroAssemblerMIPS64Compat::branchTestNumber(Condition cond, Register tag, Label* label)
{
    // Number tags (doubles and int32) occupy the low range up to the
    // inclusive upper bound, so Equal maps to an unsigned <= comparison.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET), label,
         cond == Equal ? BelowOrEqual : Above);
}
1680 
void
MacroAssemblerMIPS64Compat::branchTestMagic(Condition cond, const ValueOperand& value, Label* label)
{
    // Split off the tag, then delegate; cond is validated by the callee.
    splitTag(value, SecondScratchReg);
    branchTestMagic(cond, SecondScratchReg, label);
}
1687 
void
MacroAssemblerMIPS64Compat::branchTestMagic(Condition cond, Register tag, Label* label)
{
    // Branch on an already-extracted tag against JSVAL_TAG_MAGIC.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
}
1694 
// Branch on whether the Value at |address| is magic. Clobbers
// SecondScratchReg with the extracted tag.
void
MacroAssemblerMIPS64Compat::branchTestMagic(Condition cond, const Address& address, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_MAGIC), label, cond);
}
1702 
// Branch on whether the Value at base-index address |src| is magic.
// Clobbers SecondScratchReg with the extracted tag.
void
MacroAssemblerMIPS64Compat::branchTestMagic(Condition cond, const BaseIndex& src, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_MAGIC), label, cond);
}
1710 
// Branch if the boxed |value| is bitwise equal (or unequal) to the constant
// Value |v|. Materializes |v| into ScratchRegister (moveValue also emits a
// data relocation when |v| is a GC pointer), then compares whole 64-bit words.
void
MacroAssemblerMIPS64Compat::branchTestValue(Condition cond, const ValueOperand& value,
                                            const Value& v, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    moveValue(v, ScratchRegister);
    ma_b(value.valueReg(), ScratchRegister, label, cond);
}
1719 
// Branch if the Value stored at |valaddr| is bitwise equal (or unequal) to
// the boxed |value|. Loads the memory operand into ScratchRegister.
void
MacroAssemblerMIPS64Compat::branchTestValue(Condition cond, const Address& valaddr,
                                            const ValueOperand& value, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    loadPtr(Address(valaddr.base, valaddr.offset), ScratchRegister);
    ma_b(value.valueReg(), ScratchRegister, label, cond);
}
1728 
// unboxing code

// Extract the payload of a non-double Value: the low JSVAL_TAG_SHIFT bits,
// zero-extended into |dest| (dext clears the tag bits above the payload).
void
MacroAssemblerMIPS64Compat::unboxNonDouble(const ValueOperand& operand, Register dest)
{
    ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT));
}
1735 
// Load the Value at |src| and strip its tag, leaving the zero-extended
// payload in |dest|.
void
MacroAssemblerMIPS64Compat::unboxNonDouble(const Address& src, Register dest)
{
    loadPtr(Address(src.base, src.offset), dest);
    ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
}
1742 
// Load the Value at base-index address |src| and strip its tag. Clobbers
// SecondScratchReg with the computed address.
void
MacroAssemblerMIPS64Compat::unboxNonDouble(const BaseIndex& src, Register dest)
{
    computeScaledAddress(src, SecondScratchReg);
    loadPtr(Address(SecondScratchReg, src.offset), dest);
    ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
}
1750 
// Extract an int32 payload: shift left then arithmetic shift right by 32
// discards the tag and sign-extends the low 32 bits into |dest|.
void
MacroAssemblerMIPS64Compat::unboxInt32(const ValueOperand& operand, Register dest)
{
    ma_dsll(dest, operand.valueReg(), Imm32(32));
    ma_dsra(dest, dest, Imm32(32));
}
1757 
// Unbox an int32 from either a register or a memory Operand.
// Register case sign-extends in place (see ValueOperand overload);
// memory case defers to the Address overload. FP operands are invalid.
void
MacroAssemblerMIPS64Compat::unboxInt32(const Operand& operand, Register dest)
{
    switch(operand.getTag()) {
    case Operand::REG:
        ma_dsll(dest, operand.toReg(), Imm32(32));
        ma_dsra(dest, dest, Imm32(32));
        break;
    case Operand::MEM:
        unboxInt32(operand.toAddress(), dest);
        break;
    case Operand::FREG:
    default:
        MOZ_CRASH("unexpected operand kind");
        break;
    }
}
1775 
// Load only the 32-bit payload word of the boxed int32 at |src|.
// NOTE(review): loading 32 bits at offset 0 assumes the payload occupies the
// low-addressed word (little-endian layout) — confirm for big-endian targets.
void
MacroAssemblerMIPS64Compat::unboxInt32(const Address& src, Register dest)
{
    load32(Address(src.base, src.offset), dest);
}
1781 
// Load the 32-bit int payload at base-index address |src|. Clobbers
// SecondScratchReg with the computed address.
void
MacroAssemblerMIPS64Compat::unboxInt32(const BaseIndex& src, Register dest)
{
    computeScaledAddress(src, SecondScratchReg);
    load32(Address(SecondScratchReg, src.offset), dest);
}
1788 
// Extract a boolean payload: zero-extend the low 32 bits into |dest|.
void
MacroAssemblerMIPS64Compat::unboxBoolean(const ValueOperand& operand, Register dest)
{
    ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(32));
}
1794 
// Unbox a boolean from either a register or a memory Operand; FP operands
// are invalid. Register case zero-extends the low 32 bits.
void
MacroAssemblerMIPS64Compat::unboxBoolean(const Operand& operand, Register dest)
{
    switch(operand.getTag()) {
    case Operand::REG:
        ma_dext(dest, operand.toReg(), Imm32(0), Imm32(32));
        break;
    case Operand::MEM:
        unboxBoolean(operand.toAddress(), dest);
        break;
    case Operand::FREG:
    default:
        MOZ_CRASH("unexpected operand kind");
        break;
    }
}
1811 
// Load the 32-bit boolean payload at |src|, zero-extended.
// NOTE(review): as with unboxInt32(Address), this assumes the payload is the
// low-addressed word — confirm for big-endian targets.
void
MacroAssemblerMIPS64Compat::unboxBoolean(const Address& src, Register dest)
{
    ma_load(dest, Address(src.base, src.offset), SizeWord, ZeroExtend);
}
1817 
// Load the 32-bit boolean payload at base-index address |src|, zero-extended.
// Clobbers SecondScratchReg with the computed address.
void
MacroAssemblerMIPS64Compat::unboxBoolean(const BaseIndex& src, Register dest)
{
    computeScaledAddress(src, SecondScratchReg);
    ma_load(dest, Address(SecondScratchReg, src.offset), SizeWord, ZeroExtend);
}
1824 
// Doubles are stored unboxed in the Value bits: move the 64-bit pattern
// directly into the FP register.
void
MacroAssemblerMIPS64Compat::unboxDouble(const ValueOperand& operand, FloatRegister dest)
{
    as_dmtc1(operand.valueReg(), dest);
}
1830 
// Load the double stored (unboxed) at |src| into |dest|.
void
MacroAssemblerMIPS64Compat::unboxDouble(const Address& src, FloatRegister dest)
{
    ma_ld(dest, Address(src.base, src.offset));
}
1836 
// Strings use the generic non-double payload extraction.
void
MacroAssemblerMIPS64Compat::unboxString(const ValueOperand& operand, Register dest)
{
    unboxNonDouble(operand, dest);
}
1842 
// Unbox a string pointer from a register or memory Operand; FP operands are
// invalid. Register case extracts the low JSVAL_TAG_SHIFT payload bits.
void
MacroAssemblerMIPS64Compat::unboxString(const Operand& operand, Register dest)
{
    switch(operand.getTag()) {
    case Operand::REG:
        ma_dext(dest, operand.toReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT));
        break;
    case Operand::MEM:
        unboxNonDouble(operand.toAddress(), dest);
        break;
    case Operand::FREG:
    default:
        MOZ_CRASH("unexpected operand kind");
        break;
    }
}
1859 
// Unbox the string Value stored at |src| via the generic payload extraction.
void
MacroAssemblerMIPS64Compat::unboxString(const Address& src, Register dest)
{
    unboxNonDouble(src, dest);
}
1865 
// Unbox a symbol pointer from a register or memory Operand; FP operands are
// invalid. Same payload extraction as strings/objects.
void
MacroAssemblerMIPS64Compat::unboxSymbol(const Operand& operand, Register dest)
{
    switch(operand.getTag()) {
    case Operand::REG:
        ma_dext(dest, operand.toReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT));
        break;
    case Operand::MEM:
        unboxNonDouble(operand.toAddress(), dest);
        break;
    case Operand::FREG:
    default:
        MOZ_CRASH("unexpected operand kind");
        break;
    }
}
1882 
// Unbox the symbol Value stored at |src| via the generic payload extraction.
void
MacroAssemblerMIPS64Compat::unboxSymbol(const Address& src, Register dest)
{
    unboxNonDouble(src, dest);
}
1888 
// Objects use the generic non-double payload extraction.
void
MacroAssemblerMIPS64Compat::unboxObject(const ValueOperand& src, Register dest)
{
    unboxNonDouble(src, dest);
}
1894 
// Unbox an object pointer from a register or memory Operand; FP operands are
// invalid.
void
MacroAssemblerMIPS64Compat::unboxObject(const Operand& src, Register dest)
{
    switch(src.getTag()) {
    case Operand::REG:
        ma_dext(dest, src.toReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT));
        break;
    case Operand::MEM:
        unboxNonDouble(src.toAddress(), dest);
        break;
    case Operand::FREG:
    default:
        MOZ_CRASH("unexpected operand kind");
        break;
    }
}
1911 
// Unbox the object Value stored at |src| via the generic payload extraction.
void
MacroAssemblerMIPS64Compat::unboxObject(const Address& src, Register dest)
{
    unboxNonDouble(src, dest);
}
1917 
// Unbox |src| into the appropriate register class. For an FP destination the
// value may be either an int32 (converted to double) or a double (moved
// directly); for a GPR destination the plain payload is extracted.
void
MacroAssemblerMIPS64Compat::unboxValue(const ValueOperand& src, AnyRegister dest)
{
    if (dest.isFloat()) {
        Label notInt32, end;
        branchTestInt32(Assembler::NotEqual, src, &notInt32);
        // Int32 case: convert the (sign-extended) payload to double.
        convertInt32ToDouble(src.valueReg(), dest.fpu());
        ma_b(&end, ShortJump);
        bind(&notInt32);
        unboxDouble(src, dest.fpu());
        bind(&end);
    } else {
        unboxNonDouble(src, dest.gpr());
    }
}
1933 
// Recover a private pointer stored in |src| by shifting the raw bits right
// by one. NOTE(review): verify the shift direction against the boxing
// convention used for private values on this platform (other ports shift the
// opposite way when the pointer is stored pre-shifted).
void
MacroAssemblerMIPS64Compat::unboxPrivate(const ValueOperand& src, Register dest)
{
    ma_dsrl(dest, src.valueReg(), Imm32(1));
}
1939 
// Doubles are boxed as their raw bit pattern: move the FP bits into the
// value register unchanged.
void
MacroAssemblerMIPS64Compat::boxDouble(FloatRegister src, const ValueOperand& dest)
{
    as_dmfc1(dest.valueReg(), src);
}
1945 
// Box the payload in |src| with tag |type| into |dest|. |src| must differ
// from the destination register because boxValue writes dest before it is
// done reading src (see the assert).
void
MacroAssemblerMIPS64Compat::boxNonDouble(JSValueType type, Register src,
                                         const ValueOperand& dest)
{
    MOZ_ASSERT(src != dest.valueReg());
    boxValue(type, src, dest.valueReg());
}
1953 
// Convert a boxed boolean to a double: extract 0/1 into ScratchRegister,
// then convert to FP.
void
MacroAssemblerMIPS64Compat::boolValueToDouble(const ValueOperand& operand, FloatRegister dest)
{
    convertBoolToInt32(operand.valueReg(), ScratchRegister);
    convertInt32ToDouble(ScratchRegister, dest);
}
1960 
// Convert a boxed int32 to a double. The conversion only reads the low 32
// bits, so no explicit unboxing is required first.
void
MacroAssemblerMIPS64Compat::int32ValueToDouble(const ValueOperand& operand,
                                               FloatRegister dest)
{
    convertInt32ToDouble(operand.valueReg(), dest);
}
1967 
// Convert a boxed boolean to a float32 via an intermediate 0/1 integer in
// ScratchRegister.
void
MacroAssemblerMIPS64Compat::boolValueToFloat32(const ValueOperand& operand,
                                               FloatRegister dest)
{
    convertBoolToInt32(operand.valueReg(), ScratchRegister);
    convertInt32ToFloat32(ScratchRegister, dest);
}
1976 
// Convert a boxed int32 to a float32; the conversion reads only the low
// 32 bits of the value register.
void
MacroAssemblerMIPS64Compat::int32ValueToFloat32(const ValueOperand& operand,
                                                FloatRegister dest)
{
    convertInt32ToFloat32(operand.valueReg(), dest);
}
1983 
// Materialize the float constant |f| into |dest|.
void
MacroAssemblerMIPS64Compat::loadConstantFloat32(float f, FloatRegister dest)
{
    ma_lis(dest, f);
}
1989 
// Load the Value at |src| into |dest| as a double: int32 values are
// converted, doubles are loaded as-is. Clobbers both ScratchRegister (raw
// value bits) and SecondScratchReg (tag, then payload reload).
void
MacroAssemblerMIPS64Compat::loadInt32OrDouble(const Address& src, FloatRegister dest)
{
    Label notInt32, end;
    // If it's an int, convert it to double.
    loadPtr(Address(src.base, src.offset), ScratchRegister);
    ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
    branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
    // Reload so the conversion sees the (sign-extended) payload.
    loadPtr(Address(src.base, src.offset), SecondScratchReg);
    convertInt32ToDouble(SecondScratchReg, dest);
    ma_b(&end, ShortJump);

    // Not an int, just load as double.
    bind(&notInt32);
    ma_ld(dest, src);
    bind(&end);
}
2007 
// Base-index variant of loadInt32OrDouble. The scaled address must be
// recomputed on each path because SecondScratchReg — the only register
// holding it — is reused for the tag and the payload.
void
MacroAssemblerMIPS64Compat::loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest)
{
    Label notInt32, end;

    // If it's an int, convert it to double.
    computeScaledAddress(addr, SecondScratchReg);
    // Since we only have one scratch, we need to stomp over it with the tag.
    loadPtr(Address(SecondScratchReg, 0), ScratchRegister);
    ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
    branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);

    // Int path: recompute the address, reload the payload, convert.
    computeScaledAddress(addr, SecondScratchReg);
    loadPtr(Address(SecondScratchReg, 0), SecondScratchReg);
    convertInt32ToDouble(SecondScratchReg, dest);
    ma_b(&end, ShortJump);

    // Not an int, just load as double.
    bind(&notInt32);
    // First, recompute the address that had been stored in the scratch
    // register, since it was overwritten loading in the type.
    computeScaledAddress(addr, SecondScratchReg);
    loadDouble(Address(SecondScratchReg, 0), dest);
    bind(&end);
}
2033 
// Materialize the double constant |dp| into |dest|.
void
MacroAssemblerMIPS64Compat::loadConstantDouble(double dp, FloatRegister dest)
{
    ma_lid(dest, dp);
}
2039 
// Branch if the int32 payload of |value| is truthy (|b| == true: non-zero)
// or falsy (|b| == false: zero). Clobbers ScratchRegister with the payload.
void
MacroAssemblerMIPS64Compat::branchTestInt32Truthy(bool b, const ValueOperand& value, Label* label)
{
    ma_dext(ScratchRegister, value.valueReg(), Imm32(0), Imm32(32));
    ma_b(ScratchRegister, ScratchRegister, label, b ? NonZero : Zero);
}
2046 
// A string is truthy iff its length is non-zero. Unboxes the string pointer
// and loads its length field, clobbering SecondScratchReg.
void
MacroAssemblerMIPS64Compat::branchTestStringTruthy(bool b, const ValueOperand& value, Label* label)
{
    unboxString(value, SecondScratchReg);
    load32(Address(SecondScratchReg, JSString::offsetOfLength()), SecondScratchReg);
    ma_b(SecondScratchReg, Imm32(0), label, b ? NotEqual : Equal);
}
2054 
// A double is falsy iff it is zero or NaN, so the falsy test uses
// EqualOrUnordered against 0.0. Clobbers ScratchDoubleReg.
void
MacroAssemblerMIPS64Compat::branchTestDoubleTruthy(bool b, FloatRegister value, Label* label)
{
    ma_lid(ScratchDoubleReg, 0.0);
    DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
    ma_bc1d(value, ScratchDoubleReg, label, cond);
}
2062 
// Branch on the truthiness of a boxed boolean. Clobbers SecondScratchReg
// with the unboxed 0/1 payload.
void
MacroAssemblerMIPS64Compat::branchTestBooleanTruthy(bool b, const ValueOperand& operand,
                                                    Label* label)
{
    unboxBoolean(operand, SecondScratchReg);
    ma_b(SecondScratchReg, SecondScratchReg, label, b ? NonZero : Zero);
}
2070 
// Load the Value at |address| and return the object pointer payload in
// |scratch| (the low JSVAL_TAG_SHIFT bits, zero-extended).
Register
MacroAssemblerMIPS64Compat::extractObject(const Address& address, Register scratch)
{
    loadPtr(Address(address.base, address.offset), scratch);
    ma_dext(scratch, scratch, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
    return scratch;
}
2078 
// Load the Value at |address| and return its tag in |scratch|: the top
// (64 - JSVAL_TAG_SHIFT) bits, shifted down to bit 0.
Register
MacroAssemblerMIPS64Compat::extractTag(const Address& address, Register scratch)
{
    loadPtr(Address(address.base, address.offset), scratch);
    ma_dext(scratch, scratch, Imm32(JSVAL_TAG_SHIFT), Imm32(64 - JSVAL_TAG_SHIFT));
    return scratch;
}
2086 
// Base-index variant: compute the scaled address into |scratch|, then reuse
// |scratch| for the tag via the Address overload.
Register
MacroAssemblerMIPS64Compat::extractTag(const BaseIndex& address, Register scratch)
{
    computeScaledAddress(address, scratch);
    return extractTag(Address(scratch, address.offset), scratch);
}
2093 
// Store |value| (of statically-known MIRType |valueType|) into the slot at
// |dest| whose declared type is |slotType|.
//  - Doubles are stored unboxed.
//  - Int32/boolean values matching the slot type are stored as raw 32-bit
//    words (constant or register).
//  - Everything else is stored as a fully boxed Value.
template <typename T>
void
MacroAssemblerMIPS64Compat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest,
                                              MIRType slotType)
{
    if (valueType == MIRType_Double) {
        storeDouble(value.reg().typedReg().fpu(), dest);
        return;
    }

    // For known integers and booleans, we can just store the unboxed value if
    // the slot has the same type.
    if ((valueType == MIRType_Int32 || valueType == MIRType_Boolean) && slotType == valueType) {
        if (value.constant()) {
            Value val = value.value();
            if (valueType == MIRType_Int32)
                store32(Imm32(val.toInt32()), dest);
            else
                store32(Imm32(val.toBoolean() ? 1 : 0), dest);
        } else {
            store32(value.reg().typedReg().gpr(), dest);
        }
        return;
    }

    if (value.constant())
        storeValue(value.value(), dest);
    else
        storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest);
}

// Explicit instantiations for the two addressing modes used by callers.
template void
MacroAssemblerMIPS64Compat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const Address& dest,
                                              MIRType slotType);

template void
MacroAssemblerMIPS64Compat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const BaseIndex& dest,
                                              MIRType slotType);
2132 
// Materialize the constant Value |val| into |dest|. Always emitted as a
// patchable full-width load, with a data relocation recorded so the GC can
// trace/update embedded pointers.
void
MacroAssemblerMIPS64Compat::moveValue(const Value& val, Register dest)
{
    jsval_layout jv = JSVAL_TO_IMPL(val);
    writeDataRelocation(val);
    movWithPatch(ImmWord(jv.asBits), dest);
}
2140 
// ValueOperand convenience wrapper around the Register overload.
void
MacroAssemblerMIPS64Compat::moveValue(const Value& val, const ValueOperand& dest)
{
    moveValue(val, dest.valueReg());
}
2146 
/* There are 3 paths through backedge jump. They are listed here in the order
 * in which instructions are executed.
 *  - The short jump is simple:
 *     b offset            # Jumps directly to target.
 *     lui at, addr1_hl    # In delay slot. Don't care about 'at' here.
 *
 *  - The long jump to loop header:
 *      b label1
 *      lui at, addr1_hl   # In delay slot. We use the value in 'at' later.
 *    label1:
 *      ori at, addr1_lh
 *      drotr32 at, at, 48
 *      ori at, addr1_ll
 *      jr at
 *      lui at, addr2_hl   # In delay slot. Don't care about 'at' here.
 *
 *  - The long jump to interrupt loop:
 *      b label2
 *      ...
 *      jr at
 *    label2:
 *      lui at, addr2_hl   # In delay slot. Don't care about 'at' here.
 *      ori at, addr2_lh
 *      drotr32 at, at, 48
 *      ori at, addr2_ll
 *      jr at
 *      nop                # In delay slot.
 *
 * The backedge is done this way to avoid patching lui+ori pair while it is
 * being executed. Look also at jit::PatchBackedge().
 */
CodeOffsetJump
MacroAssemblerMIPS64Compat::backedgeJump(RepatchLabel* label, Label* documentation)
{
    // Only one branch per label.
    MOZ_ASSERT(!label->used());
    uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
    BufferOffset bo = nextOffset();
    label->use(bo.getOffset());

    // Backedges are short jumps when bound, but can become long when patched.
    m_buffer.ensureSpace(16 * sizeof(uint32_t));
    if (label->bound()) {
        int32_t offset = label->offset() - bo.getOffset();
        MOZ_ASSERT(BOffImm16::IsInRange(offset));
        as_b(BOffImm16(offset));
    } else {
        // Jump to "label1" by default to jump to the loop header.
        as_b(BOffImm16(2 * sizeof(uint32_t)));
    }
    // No need for nop here. We can safely put next instruction in delay slot.
    // First patchable target: the loop header address.
    ma_liPatchable(ScratchRegister, ImmWord(dest));
    MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 5 * sizeof(uint32_t));
    as_jr(ScratchRegister);
    // No need for nop here. We can safely put next instruction in delay slot.
    // Second patchable target: the interrupt-check address.
    ma_liPatchable(ScratchRegister, ImmWord(dest));
    as_jr(ScratchRegister);
    as_nop();
    // The layout above must match the offsets PatchBackedge() expects.
    MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 12 * sizeof(uint32_t));
    return CodeOffsetJump(bo.getOffset());
}
2208 
// Emit a patchable absolute jump to |label| (or to INVALID_OFFSET if the
// label is not yet bound). The long-jump site is recorded so the linker can
// fix up the absolute address later.
CodeOffsetJump
MacroAssemblerMIPS64Compat::jumpWithPatch(RepatchLabel* label, Label* documentation)
{
    // Only one branch per label.
    MOZ_ASSERT(!label->used());
    uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;

    BufferOffset bo = nextOffset();
    label->use(bo.getOffset());
    addLongJump(bo);
    ma_liPatchable(ScratchRegister, ImmWord(dest));
    as_jr(ScratchRegister);
    as_nop();  // Branch delay slot.
    return CodeOffsetJump(bo.getOffset());
}
2224 
/////////////////////////////////////////////////////////////////
// X86/X64-common/ARM/MIPS interface.
/////////////////////////////////////////////////////////////////

// Store the boxed |val| to the memory location described by Operand |dst|.
void
MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, Operand dst)
{
    storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
}
2233 
// Store the boxed |val| at base-index address |dest|. Clobbers
// SecondScratchReg with the computed address.
void
MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, const BaseIndex& dest)
{
    computeScaledAddress(dest, SecondScratchReg);
    storeValue(val, Address(SecondScratchReg, dest.offset));
}
2240 
// Box the payload in |reg| with tag |type| and store it at base-index
// address |dest|. Clobbers ScratchRegister (address) and, when the offset
// does not fit in a signed 16-bit immediate, SecondScratchReg too.
void
MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg, BaseIndex dest)
{
    computeScaledAddress(dest, ScratchRegister);

    int32_t offset = dest.offset;
    if (!Imm16::IsInSignedRange(offset)) {
        // Fold an out-of-range offset into the base address so the final
        // store can use a zero displacement.
        ma_li(SecondScratchReg, Imm32(offset));
        as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
        offset = 0;
    }

    storeValue(type, reg, Address(ScratchRegister, offset));
}
2255 
// Store the full 64-bit boxed value to |dest|.
void
MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, const Address& dest)
{
    storePtr(val.valueReg(), Address(dest.base, dest.offset));
}
2261 
// Box the payload in |reg| with tag |type| and store it at |dest|.
// Builds the boxed word in SecondScratchReg: tag shifted into the high bits,
// then the payload inserted into the low JSVAL_TAG_SHIFT bits.
// NOTE(review): |reg| must not be SecondScratchReg either — it is read by
// ma_dins after SecondScratchReg has been clobbered; only dest.base is
// asserted here.
void
MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg, Address dest)
{
    MOZ_ASSERT(dest.base != SecondScratchReg);

    ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
    ma_dsll(SecondScratchReg, SecondScratchReg, Imm32(JSVAL_TAG_SHIFT));
    ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
    storePtr(SecondScratchReg, Address(dest.base, dest.offset));
}
2272 
// Store the constant Value |val| at |dest|. GC-traceable ("markable")
// values are emitted as a patchable load with a data relocation so the GC
// can update the embedded pointer; plain values use an ordinary immediate
// load. Clobbers SecondScratchReg.
void
MacroAssemblerMIPS64Compat::storeValue(const Value& val, Address dest)
{
    jsval_layout jv = JSVAL_TO_IMPL(val);
    if (val.isMarkable()) {
        writeDataRelocation(val);
        movWithPatch(ImmWord(jv.asBits), SecondScratchReg);
    } else {
        ma_li(SecondScratchReg, ImmWord(jv.asBits));
    }
    storePtr(SecondScratchReg, Address(dest.base, dest.offset));
}
2285 
// Store the constant Value |val| at base-index address |dest|. Clobbers
// ScratchRegister (address) and SecondScratchReg (via the Address overload,
// plus offset materialization when out of Imm16 range).
void
MacroAssemblerMIPS64Compat::storeValue(const Value& val, BaseIndex dest)
{
    computeScaledAddress(dest, ScratchRegister);

    int32_t offset = dest.offset;
    if (!Imm16::IsInSignedRange(offset)) {
        // Fold an out-of-range offset into the base address.
        ma_li(SecondScratchReg, Imm32(offset));
        as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
        offset = 0;
    }
    storeValue(val, Address(ScratchRegister, offset));
}
2299 
// Load the boxed Value at base-index address |addr| into |val|. Clobbers
// SecondScratchReg with the computed address.
void
MacroAssemblerMIPS64Compat::loadValue(const BaseIndex& addr, ValueOperand val)
{
    computeScaledAddress(addr, SecondScratchReg);
    loadValue(Address(SecondScratchReg, addr.offset), val);
}
2306 
// Load the full 64-bit boxed value at |src| into |val|.
void
MacroAssemblerMIPS64Compat::loadValue(Address src, ValueOperand val)
{
    loadPtr(Address(src.base, src.offset), val.valueReg());
}
2312 
// Box the payload already in |payload| with tag |type|, producing the boxed
// value in |dest|: move the payload into place (if needed), then insert the
// tag into the top bits with dins. Clobbers ScratchRegister.
void
MacroAssemblerMIPS64Compat::tagValue(JSValueType type, Register payload, ValueOperand dest)
{
    MOZ_ASSERT(dest.valueReg() != ScratchRegister);
    if (payload != dest.valueReg())
      ma_move(dest.valueReg(), payload);
    ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type)));
    ma_dins(dest.valueReg(), ScratchRegister, Imm32(JSVAL_TAG_SHIFT), Imm32(64 - JSVAL_TAG_SHIFT));
}
2322 
// Push the boxed |val| onto the stack: make room, then store at the new sp.
void
MacroAssemblerMIPS64Compat::pushValue(ValueOperand val)
{
    // Allocate stack slots for Value. One for each.
    subPtr(Imm32(sizeof(Value)), StackPointer);
    // Store Value
    storeValue(val, Address(StackPointer, 0));
}
2331 
// Push the Value stored at |addr| onto the stack. Clobbers ScratchRegister.
void
MacroAssemblerMIPS64Compat::pushValue(const Address& addr)
{
    // Load the value before adjusting the stack: addr.base may be sp itself,
    // in which case moving sp first would invalidate the address.
    loadPtr(Address(addr.base, addr.offset), ScratchRegister);
    ma_dsubu(StackPointer, StackPointer, Imm32(sizeof(Value)));
    storePtr(ScratchRegister, Address(StackPointer, 0));
}
2340 
// Pop the boxed Value on top of the stack into |val|.
void
MacroAssemblerMIPS64Compat::popValue(ValueOperand val)
{
    as_ld(val.valueReg(), StackPointer, 0);
    as_daddiu(StackPointer, StackPointer, sizeof(Value));
}
2347 
// Emit a break instruction (code 0) to trap into the debugger.
void
MacroAssemblerMIPS64Compat::breakpoint()
{
    as_break(0);
}
2353 
// Ensure |source| ends up as a double in |dest|: doubles are unboxed
// directly, int32s are converted, and any other type branches to |failure|.
// Clobbers ScratchRegister (int32 payload) and whatever splitTagForTest
// uses for the tag.
void
MacroAssemblerMIPS64Compat::ensureDouble(const ValueOperand& source, FloatRegister dest,
                                         Label* failure)
{
    Label isDouble, done;
    Register tag = splitTagForTest(source);
    branchTestDouble(Assembler::Equal, tag, &isDouble);
    branchTestInt32(Assembler::NotEqual, tag, failure);

    // Int32 case: convert the payload.
    unboxInt32(source, ScratchRegister);
    convertInt32ToDouble(ScratchRegister, dest);
    jump(&done);

    bind(&isDouble);
    unboxDouble(source, dest);

    bind(&done);
}
2372 
// Set |dest| to the boolean result of comparing the pointer at |lhs| with
// the immediate pointer |rhs|. Clobbers both scratch registers.
void
MacroAssemblerMIPS64Compat::cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs,
                                      Register dest)
{
    loadPtr(lhs, ScratchRegister);
    movePtr(rhs, SecondScratchReg);
    cmpPtrSet(cond, ScratchRegister, SecondScratchReg, dest);
}
2381 
// Set |dest| to the boolean result of comparing |lhs| with the pointer
// loaded from |rhs|. Clobbers ScratchRegister.
void
MacroAssemblerMIPS64Compat::cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs,
                                      Register dest)
{
    loadPtr(rhs, ScratchRegister);
    cmpPtrSet(cond, lhs, ScratchRegister, dest);
}
2389 
// Set |dest| to the boolean result of a 32-bit compare of |lhs| with the
// word loaded from |rhs|. Clobbers ScratchRegister.
void
MacroAssemblerMIPS64Compat::cmp32Set(Assembler::Condition cond, Register lhs, Address rhs,
                                     Register dest)
{
    load32(rhs, ScratchRegister);
    cmp32Set(cond, lhs, ScratchRegister, dest);
}
2397 
// Debug-only runtime check that sp is ABI-aligned; traps with
// BREAK_STACK_UNALIGNED if not. Compiles to nothing in release builds.
void
MacroAssemblerMIPS64Compat::checkStackAlignment()
{
#ifdef DEBUG
    Label aligned;
    as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
    ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
    as_break(BREAK_STACK_UNALIGNED);
    bind(&aligned);
#endif
}
2409 
// Pad the frame so framePushed() is ABI-aligned before pushing IC call
// arguments; the padding amount is recorded in |aic| so
// restoreFrameAlignmentForICArguments can undo it.
void
MacroAssembler::alignFrameForICArguments(AfterICSaveLive& aic)
{
    if (framePushed() % ABIStackAlignment != 0) {
        aic.alignmentPadding = ABIStackAlignment - (framePushed() % ABIStackAlignment);
        reserveStack(aic.alignmentPadding);
    } else {
        aic.alignmentPadding = 0;
    }
    MOZ_ASSERT(framePushed() % ABIStackAlignment == 0);
    checkStackAlignment();
}
2422 
// Release the padding reserved by alignFrameForICArguments, if any.
void
MacroAssembler::restoreFrameAlignmentForICArguments(AfterICSaveLive& aic)
{
    if (aic.alignmentPadding != 0)
        freeStack(aic.alignmentPadding);
}
2429 
// Exception-handling tail: reserve a ResumeFromException record on the
// stack, call the C++ |handler| with its address, then dispatch on the
// resume kind the handler filled in. Each arm restores the registers that
// kind requires and jumps/returns accordingly.
void
MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(void* handler)
{
    // Reserve space for exception information, rounded up to the ABI
    // stack alignment.
    int size = (sizeof(ResumeFromException) + ABIStackAlignment) & ~(ABIStackAlignment - 1);
    subPtr(Imm32(size), StackPointer);
    ma_move(a0, StackPointer); // Use a0 since it is a first function argument

    // Call the handler.
    asMasm().setupUnalignedABICall(a1);
    asMasm().passABIArg(a0);
    asMasm().callWithABI(handler);

    Label entryFrame;
    Label catch_;
    Label finally;
    Label return_;
    Label bailout;

    // Dispatch on the resume kind written by the handler.
    // Already clobbered a0, so use it...
    load32(Address(StackPointer, offsetof(ResumeFromException, kind)), a0);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);

    breakpoint(); // Invalid kind.

    // No exception handler. Load the error value, load the new stack pointer
    // and return from the entry frame.
    bind(&entryFrame);
    moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
    loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);

    // We're going to be returning by the ion calling convention
    ma_pop(ra);
    as_jr(ra);
    as_nop();  // Branch delay slot.

    // If we found a catch handler, this must be a baseline frame. Restore
    // state and jump to the catch block.
    bind(&catch_);
    loadPtr(Address(StackPointer, offsetof(ResumeFromException, target)), a0);
    loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
    loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
    jump(a0);

    // If we found a finally block, this must be a baseline frame. Push
    // two values expected by JSOP_RETSUB: BooleanValue(true) and the
    // exception.
    bind(&finally);
    ValueOperand exception = ValueOperand(a1);
    loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);

    loadPtr(Address(sp, offsetof(ResumeFromException, target)), a0);
    loadPtr(Address(sp, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
    loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);

    pushValue(BooleanValue(true));
    pushValue(exception);
    jump(a0);

    // Only used in debug mode. Return BaselineFrame->returnValue() to the
    // caller.
    bind(&return_);
    loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
    loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
    loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
              JSReturnOperand);
    // Pop the baseline frame: reset sp to the frame pointer, then restore
    // the caller's frame pointer.
    ma_move(StackPointer, BaselineFrameReg);
    pop(BaselineFrameReg);

    // If profiling is enabled, then update the lastProfilingFrame to refer to caller
    // frame before returning.
    {
        Label skipProfilingInstrumentation;
        // Test if profiler enabled.
        AbsoluteAddress addressOfEnabled(GetJitContext()->runtime->spsProfiler().addressOfEnabled());
        branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
        profilerExitFrame();
        bind(&skipProfilingInstrumentation);
    }

    ret();

    // If we are bailing out to baseline to handle an exception, jump to
    // the bailout tail stub.
    bind(&bailout);
    loadPtr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), a2);
    ma_li(ReturnReg, Imm32(BAILOUT_RETURN_OK));
    loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
    jump(a1);
}
2524 
// Atomically compare-and-exchange an element of a typed int array.
//
// Dispatches on |arrayType| to the compare-exchange primitive of the right
// width and signedness. For sub-word (8/16-bit) element types the
// valueTemp/offsetTemp/maskTemp registers are consumed by the underlying
// masked LL/SC sequence. On success the previous element value lands in
// |output| (a GPR, except for Uint32 — see below).
template<typename T>
void
MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
                                                           Register oldval, Register newval,
                                                           Register temp, Register valueTemp,
                                                           Register offsetTemp, Register maskTemp,
                                                           AnyRegister output)
{
    switch (arrayType) {
      case Scalar::Int8:
        compareExchange8SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Uint8:
        compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Int16:
        compareExchange16SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Uint16:
        compareExchange16ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Int32:
        compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays.  See bug 1077305.
        // The old value is produced in |temp| and then converted, since a
        // uint32 may not be representable as an int32.
        MOZ_ASSERT(output.isFloat());
        compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, temp);
        convertUInt32ToDouble(temp, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}
2560 
// Explicit instantiations for the two addressing modes used by callers.
template void
MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
                                                           Register oldval, Register newval, Register temp,
                                                           Register valueTemp, Register offsetTemp, Register maskTemp,
                                                           AnyRegister output);
template void
MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                                           Register oldval, Register newval, Register temp,
                                                           Register valueTemp, Register offsetTemp, Register maskTemp,
                                                           AnyRegister output);
2571 
// Atomically exchange |value| with an element of a typed int array.
//
// Mirrors compareExchangeToTypedIntArray: dispatches on |arrayType| to the
// exchange primitive of the right width/signedness, with valueTemp/
// offsetTemp/maskTemp consumed by the masked LL/SC sequence for sub-word
// types. The previous element value is produced in |output|.
template<typename T>
void
MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
                                                          Register value, Register temp, Register valueTemp,
                                                          Register offsetTemp, Register maskTemp,
                                                          AnyRegister output)
{
    switch (arrayType) {
      case Scalar::Int8:
        atomicExchange8SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Uint8:
        atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Int16:
        atomicExchange16SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Uint16:
        atomicExchange16ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Int32:
        atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays.  See bug 1077305.
        // Exchange into |temp| first, then convert, since a uint32 may not
        // be representable as an int32.
        MOZ_ASSERT(output.isFloat());
        atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, temp);
        convertUInt32ToDouble(temp, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}
2606 
// Explicit instantiations for the two addressing modes used by callers.
template void
MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
                                                          Register value, Register temp, Register valueTemp,
                                                          Register offsetTemp, Register maskTemp,
                                                          AnyRegister output);
template void
MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                                          Register value, Register temp, Register valueTemp,
                                                          Register offsetTemp, Register maskTemp,
                                                          AnyRegister output);
2617 
2618 CodeOffset
toggledJump(Label * label)2619 MacroAssemblerMIPS64Compat::toggledJump(Label* label)
2620 {
2621     CodeOffset ret(nextOffset().getOffset());
2622     ma_b(label);
2623     return ret;
2624 }
2625 
// Emit a toggleable call to |target| and return the offset of the sequence.
//
// The sequence is a patchable load of the target address into
// ScratchRegister followed by either jalr+nop (enabled) or two nops
// (disabled). Because both variants occupy the same number of instructions,
// the call can later be switched on/off by patching in place; the assert
// checks the emitted size matches ToggledCallSize.
CodeOffset
MacroAssemblerMIPS64Compat::toggledCall(JitCode* target, bool enabled)
{
    BufferOffset bo = nextOffset();
    CodeOffset offset(bo.getOffset());
    // Register the jump target for relocation when the code moves.
    addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
    ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
    if (enabled) {
        as_jalr(ScratchRegister);
        as_nop();  // branch delay slot
    } else {
        as_nop();
        as_nop();
    }
    MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() == ToggledCallSize(nullptr));
    return offset;
}
2643 
// Branch to |label| if |ptr| does (Equal) or does not (NotEqual) point into
// the GC nursery.
//
// Computes ptr - nursery.start() via an addition of the negated start, then
// does a single unsigned comparison against the nursery size: one branch
// covers both range ends. |temp| is unused on this platform (SecondScratchReg
// serves instead), but kept for interface parity — TODO confirm.
void
MacroAssemblerMIPS64Compat::branchPtrInNurseryRange(Condition cond, Register ptr, Register temp,
                                                    Label* label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    MOZ_ASSERT(ptr != temp);
    MOZ_ASSERT(ptr != SecondScratchReg);

    const Nursery& nursery = GetJitContext()->runtime->gcNursery();
    movePtr(ImmWord(-ptrdiff_t(nursery.start())), SecondScratchReg);
    addPtr(ptr, SecondScratchReg);
    // Below/AboveOrEqual are unsigned, so any pointer before the nursery
    // start wraps to a large value and correctly fails the range check.
    branchPtr(cond == Assembler::Equal ? Assembler::Below : Assembler::AboveOrEqual,
              SecondScratchReg, Imm32(nursery.nurserySize()), label);
}
2658 
// Branch to |label| if |value| is (Equal) or is not (NotEqual) an object
// whose pointer lies in the GC nursery.
//
// Works on the full 64-bit boxed representation: subtracting the boxed
// "nursery start as JSObject Value" from |value| yields a small offset only
// when the tag bits match (it is an object) AND the payload is within the
// nursery; a single unsigned compare against the nursery size then decides
// both questions at once.
void
MacroAssemblerMIPS64Compat::branchValueIsNurseryObject(Condition cond, ValueOperand value,
                                                       Register temp, Label* label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

    // 'Value' representing the start of the nursery tagged as a JSObject
    const Nursery& nursery = GetJitContext()->runtime->gcNursery();
    Value start = ObjectValue(*reinterpret_cast<JSObject *>(nursery.start()));

    movePtr(ImmWord(-ptrdiff_t(start.asRawBits())), SecondScratchReg);
    addPtr(value.valueReg(), SecondScratchReg);
    branchPtr(cond == Assembler::Equal ? Assembler::Below : Assembler::AboveOrEqual,
              SecondScratchReg, Imm32(nursery.nurserySize()), label);
}
2674 
// Record |framePtr| as the last profiling frame of the current profiling
// activation, and clear the last profiling call site. |scratch| is clobbered
// (used to hold the activation pointer).
void
MacroAssemblerMIPS64Compat::profilerEnterFrame(Register framePtr, Register scratch)
{
    AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
    loadPtr(activation, scratch);
    storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
    storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
}
2683 
// Jump to the shared profiler exit-frame tail stub, which performs the
// profiler bookkeeping for leaving a JIT frame.
void
MacroAssemblerMIPS64Compat::profilerExitFrame()
{
    branch(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
}
2689 
2690 //{{{ check_macroassembler_style
2691 // ===============================================================
2692 // Stack manipulation functions.
2693 
2694 void
PushRegsInMask(LiveRegisterSet set)2695 MacroAssembler::PushRegsInMask(LiveRegisterSet set)
2696 {
2697     int32_t diff = set.gprs().size() * sizeof(intptr_t) +
2698         set.fpus().getPushSizeInBytes();
2699     const int32_t reserved = diff;
2700 
2701     reserveStack(reserved);
2702     for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
2703         diff -= sizeof(intptr_t);
2704         storePtr(*iter, Address(StackPointer, diff));
2705     }
2706     for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
2707         diff -= sizeof(double);
2708         storeDouble(*iter, Address(StackPointer, diff));
2709     }
2710     MOZ_ASSERT(diff == 0);
2711 }
2712 
// Pop the registers pushed by PushRegsInMask(set), skipping the reload of
// any register in |ignore| (its pushed value is discarded). Must walk the
// sets in the same order as the push so the offsets line up; the stack is
// freed in one step at the end.
void
MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
{
    int32_t diff = set.gprs().size() * sizeof(intptr_t) +
        set.fpus().getPushSizeInBytes();
    const int32_t reserved = diff;

    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
        diff -= sizeof(intptr_t);
        if (!ignore.has(*iter))
          loadPtr(Address(StackPointer, diff), *iter);
    }
    for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
        diff -= sizeof(double);
        if (!ignore.has(*iter))
          loadDouble(Address(StackPointer, diff), *iter);
    }
    MOZ_ASSERT(diff == 0);
    freeStack(reserved);
}
2733 
2734 void
reserveStack(uint32_t amount)2735 MacroAssembler::reserveStack(uint32_t amount)
2736 {
2737     if (amount)
2738         subPtr(Imm32(amount), StackPointer);
2739     adjustFrame(amount);
2740 }
2741 
2742 // ===============================================================
2743 // ABI function calls.
2744 
2745 void
setupUnalignedABICall(Register scratch)2746 MacroAssembler::setupUnalignedABICall(Register scratch)
2747 {
2748     setupABICall();
2749     dynamicAlignment_ = true;
2750 
2751     ma_move(scratch, StackPointer);
2752 
2753     // Force sp to be aligned
2754     subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
2755     ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
2756     storePtr(scratch, Address(StackPointer, 0));
2757 }
2758 
2759 void
callWithABIPre(uint32_t * stackAdjust,bool callFromAsmJS)2760 MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
2761 {
2762     MOZ_ASSERT(inCall_);
2763     uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
2764 
2765     // Reserve place for $ra.
2766     stackForCall += sizeof(intptr_t);
2767 
2768     if (dynamicAlignment_) {
2769         stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
2770     } else {
2771         uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
2772         stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
2773                                              ABIStackAlignment);
2774     }
2775 
2776     *stackAdjust = stackForCall;
2777     reserveStack(stackForCall);
2778 
2779     // Save $ra because call is going to clobber it. Restore it in
2780     // callWithABIPost. NOTE: This is needed for calls from SharedIC.
2781     // Maybe we can do this differently.
2782     storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
2783 
2784     // Position all arguments.
2785     {
2786         enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
2787         if (!enoughMemory_)
2788             return;
2789 
2790         MoveEmitter emitter(*this);
2791         emitter.emit(moveResolver_);
2792         emitter.finish();
2793     }
2794 
2795     assertStackAlignment(ABIStackAlignment);
2796 }
2797 
// Undo the stack setup performed by callWithABIPre after the call returns:
// restore $ra from its saved slot, then either restore the pre-alignment sp
// (dynamic alignment) or free the reserved stack.
void
MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
    // Restore ra value (as stored in callWithABIPre()).
    loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);

    if (dynamicAlignment_) {
        // Restore sp value from stack (as stored in setupUnalignedABICall()).
        loadPtr(Address(StackPointer, stackAdjust), StackPointer);
        // Use adjustFrame instead of freeStack because we already restored sp.
        adjustFrame(-stackAdjust);
    } else {
        freeStack(stackAdjust);
    }

#ifdef DEBUG
    MOZ_ASSERT(inCall_);
    inCall_ = false;
#endif
}
2818 
// Perform an ABI call to the function address in |fun|. The callee is moved
// to t9 first because the MIPS ABI expects indirect calls through t9, and
// |fun| itself may be an argument register that the move emitter in
// callWithABIPre is about to clobber.
void
MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
{
    // Load the callee in t9, no instruction between the lw and call
    // should clobber it. Note that we can't use fun.base because it may
    // be one of the IntArg registers clobbered before the call.
    ma_move(t9, fun);
    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(t9);
    callWithABIPost(stackAdjust, result);
}
2831 
2832 void
callWithABINoProfiler(const Address & fun,MoveOp::Type result)2833 MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
2834 {
2835     // Load the callee in t9, as above.
2836     loadPtr(Address(fun.base, fun.offset), t9);
2837     uint32_t stackAdjust;
2838     callWithABIPre(&stackAdjust);
2839     call(t9);
2840     callWithABIPost(stackAdjust, result);
2841 }
2842 
2843 //}}} check_macroassembler_style
2844