1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/mips32/MacroAssembler-mips32.h"
8
9 #include "mozilla/DebugOnly.h"
10 #include "mozilla/MathAlgorithms.h"
11
12 #include "jit/Bailouts.h"
13 #include "jit/BaselineFrame.h"
14 #include "jit/JitFrames.h"
15 #include "jit/MacroAssembler.h"
16 #include "jit/mips32/Simulator-mips32.h"
17 #include "jit/MoveEmitter.h"
18 #include "jit/SharedICRegisters.h"
19
20 #include "jit/MacroAssembler-inl.h"
21
22 using namespace js;
23 using namespace jit;
24
25 using mozilla::Abs;
26
// Byte offsets of the payload and type-tag words inside a nunbox32 Value.
static const int32_t PAYLOAD_OFFSET = NUNBOX32_PAYLOAD_OFFSET;
static const int32_t TAG_OFFSET = NUNBOX32_TYPE_OFFSET;

// This file implements the 32-bit MIPS backend; pointers must be 4 bytes.
static_assert(sizeof(intptr_t) == 4, "Not 64-bit clean.");
31
void
MacroAssemblerMIPSCompat::convertBoolToInt32(Register src, Register dest)
{
    // Note that C++ bool is only 1 byte, so zero extend it to clear the
    // higher-order bits.
    ma_and(dest, src, Imm32(0xff));
}
39
void
MacroAssemblerMIPSCompat::convertInt32ToDouble(Register src, FloatRegister dest)
{
    // Move the 32-bit word into the FPU, then convert word -> double.
    as_mtc1(src, dest);
    as_cvtdw(dest, dest);
}
46
void
MacroAssemblerMIPSCompat::convertInt32ToDouble(const Address& src, FloatRegister dest)
{
    // Load the word directly into the FPU, then convert word -> double.
    ma_ls(dest, src);
    as_cvtdw(dest, dest);
}
53
void
MacroAssemblerMIPSCompat::convertInt32ToDouble(const BaseIndex& src, FloatRegister dest)
{
    // Fold base+index*scale into ScratchRegister, then reuse the Address path.
    computeScaledAddress(src, ScratchRegister);
    convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
}
60
void
MacroAssemblerMIPSCompat::convertUInt32ToDouble(Register src, FloatRegister dest)
{
    // We use SecondScratchDoubleReg because MacroAssembler::loadFromTypedArray
    // calls with ScratchDoubleReg as dest.
    MOZ_ASSERT(dest != SecondScratchDoubleReg);

    // Subtract INT32_MIN to get a positive number
    ma_subu(ScratchRegister, src, Imm32(INT32_MIN));

    // Convert value
    as_mtc1(ScratchRegister, dest);
    as_cvtdw(dest, dest);

    // Add unsigned value of INT32_MIN (2^31) back in, exactly representable
    // as a double, restoring the original unsigned value.
    ma_lid(SecondScratchDoubleReg, 2147483648.0);
    as_addd(dest, dest, SecondScratchDoubleReg);
}
79
void
MacroAssemblerMIPSCompat::mul64(Imm64 imm, const Register64& dest)
{
    // 64x64 -> 64 multiply on a 32-bit machine, built from 32x32 pieces:
    // LOW32 = LOW(LOW(dest) * LOW(imm));
    // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
    // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
    // + HIGH(LOW(dest) * LOW(imm)) [carry]

    // HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
    ma_li(ScratchRegister, Imm32(imm.value & LOW_32_MASK));
    as_multu(dest.high, ScratchRegister);
    as_mflo(dest.high);

    // mfhi:mflo = LOW(dest) * LOW(imm);
    as_multu(dest.low, ScratchRegister);

    // HIGH(dest) += mfhi;
    as_mfhi(ScratchRegister);
    as_addu(dest.high, dest.high, ScratchRegister);

    if (((imm.value >> 32) & LOW_32_MASK) == 5) {
        // Optimized case for Math.random(): multiply by 5 is (x << 2) + x,
        // so no second multu is needed and mflo still holds the low product.

        // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
        as_sll(ScratchRegister, dest.low, 2);
        as_addu(ScratchRegister, ScratchRegister, dest.low);
        as_addu(dest.high, dest.high, ScratchRegister);

        // LOW(dest) = mflo;
        as_mflo(dest.low);
    } else {
        // tmp = mflo; save it before the next multu clobbers hi/lo.
        as_mflo(SecondScratchReg);

        // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
        ma_li(ScratchRegister, Imm32((imm.value >> 32) & LOW_32_MASK));
        as_multu(dest.low, ScratchRegister);
        as_mflo(ScratchRegister);
        as_addu(dest.high, dest.high, ScratchRegister);

        // LOW(dest) = tmp;
        ma_move(dest.low, SecondScratchReg);
    }
}
124
// 2^32 as a double; scales the high 32-bit word when assembling a uint64.
static const double TO_DOUBLE_HIGH_SCALE = 0x100000000;
126
void
MacroAssemblerMIPSCompat::convertUInt64ToDouble(Register64 src, Register temp, FloatRegister dest)
{
    // dest = double(high) * 2^32 + double(low).
    convertUInt32ToDouble(src.high, dest);
    loadConstantDouble(TO_DOUBLE_HIGH_SCALE, ScratchDoubleReg);
    mulDouble(ScratchDoubleReg, dest);
    convertUInt32ToDouble(src.low, ScratchDoubleReg);
    addDouble(ScratchDoubleReg, dest);
}
136
void
MacroAssemblerMIPSCompat::convertUInt32ToFloat32(Register src, FloatRegister dest)
{
    Label positive, done;
    // Values with the sign bit clear fit in int32 and take the fast path.
    ma_b(src, src, &positive, NotSigned, ShortJump);

    // We cannot do the same as convertUInt32ToDouble because float32 doesn't
    // have enough precision.
    convertUInt32ToDouble(src, dest);
    convertDoubleToFloat32(dest, dest);
    ma_b(&done, ShortJump);

    bind(&positive);
    convertInt32ToFloat32(src, dest);

    bind(&done);
}
154
void
MacroAssemblerMIPSCompat::convertDoubleToFloat32(FloatRegister src, FloatRegister dest)
{
    // Single FPU instruction: cvt.s.d.
    as_cvtsd(dest, src);
}
160
161 // Convert the floating point value to an integer, if it did not fit, then it
162 // was clamped to INT32_MIN/INT32_MAX, and we can test it.
163 // NOTE: if the value really was supposed to be INT32_MAX / INT32_MIN then it
164 // will be wrong.
165 void
branchTruncateDouble(FloatRegister src,Register dest,Label * fail)166 MacroAssemblerMIPSCompat::branchTruncateDouble(FloatRegister src, Register dest,
167 Label* fail)
168 {
169 Label test, success;
170 as_truncwd(ScratchDoubleReg, src);
171 as_mfc1(dest, ScratchDoubleReg);
172
173 ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
174 ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
175 }
176
177 // Checks whether a double is representable as a 32-bit integer. If so, the
178 // integer is written to the output register. Otherwise, a bailout is taken to
179 // the given snapshot. This function overwrites the scratch float register.
180 void
convertDoubleToInt32(FloatRegister src,Register dest,Label * fail,bool negativeZeroCheck)181 MacroAssemblerMIPSCompat::convertDoubleToInt32(FloatRegister src, Register dest,
182 Label* fail, bool negativeZeroCheck)
183 {
184 // Convert double to int, then convert back and check if we have the
185 // same number.
186 as_cvtwd(ScratchDoubleReg, src);
187 as_mfc1(dest, ScratchDoubleReg);
188 as_cvtdw(ScratchDoubleReg, ScratchDoubleReg);
189 ma_bc1d(src, ScratchDoubleReg, fail, Assembler::DoubleNotEqualOrUnordered);
190
191 if (negativeZeroCheck) {
192 Label notZero;
193 ma_b(dest, Imm32(0), ¬Zero, Assembler::NotEqual, ShortJump);
194 // Test and bail for -0.0, when integer result is 0
195 // Move the top word of the double into the output reg, if it is
196 // non-zero, then the original value was -0.0
197 moveFromDoubleHi(src, dest);
198 ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
199 bind(¬Zero);
200 }
201 }
202
203 // Checks whether a float32 is representable as a 32-bit integer. If so, the
204 // integer is written to the output register. Otherwise, a bailout is taken to
205 // the given snapshot. This function overwrites the scratch float register.
206 void
convertFloat32ToInt32(FloatRegister src,Register dest,Label * fail,bool negativeZeroCheck)207 MacroAssemblerMIPSCompat::convertFloat32ToInt32(FloatRegister src, Register dest,
208 Label* fail, bool negativeZeroCheck)
209 {
210 // Converting the floating point value to an integer and then converting it
211 // back to a float32 would not work, as float to int32 conversions are
212 // clamping (e.g. float(INT32_MAX + 1) would get converted into INT32_MAX
213 // and then back to float(INT32_MAX + 1)). If this ever happens, we just
214 // bail out.
215 as_cvtws(ScratchFloat32Reg, src);
216 as_mfc1(dest, ScratchFloat32Reg);
217 as_cvtsw(ScratchFloat32Reg, ScratchFloat32Reg);
218 ma_bc1s(src, ScratchFloat32Reg, fail, Assembler::DoubleNotEqualOrUnordered);
219
220 // Bail out in the clamped cases.
221 ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
222
223 if (negativeZeroCheck) {
224 Label notZero;
225 ma_b(dest, Imm32(0), ¬Zero, Assembler::NotEqual, ShortJump);
226 // Test and bail for -0.0, when integer result is 0
227 // Move the top word of the double into the output reg,
228 // if it is non-zero, then the original value was -0.0
229 moveFromDoubleHi(src, dest);
230 ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
231 bind(¬Zero);
232 }
233 }
234
void
MacroAssemblerMIPSCompat::convertFloat32ToDouble(FloatRegister src, FloatRegister dest)
{
    // Single FPU instruction: cvt.d.s.
    as_cvtds(dest, src);
}
240
241 void
branchTruncateFloat32(FloatRegister src,Register dest,Label * fail)242 MacroAssemblerMIPSCompat::branchTruncateFloat32(FloatRegister src, Register dest,
243 Label* fail)
244 {
245 Label test, success;
246 as_truncws(ScratchFloat32Reg, src);
247 as_mfc1(dest, ScratchFloat32Reg);
248
249 ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
250 }
251
void
MacroAssemblerMIPSCompat::convertInt32ToFloat32(Register src, FloatRegister dest)
{
    // Move the 32-bit word into the FPU, then convert word -> single.
    as_mtc1(src, dest);
    as_cvtsw(dest, dest);
}
258
void
MacroAssemblerMIPSCompat::convertInt32ToFloat32(const Address& src, FloatRegister dest)
{
    // Load the word directly into the FPU, then convert word -> single.
    ma_ls(dest, src);
    as_cvtsw(dest, dest);
}
265
void
MacroAssemblerMIPSCompat::addDouble(FloatRegister src, FloatRegister dest)
{
    // dest += src (double precision).
    as_addd(dest, dest, src);
}
271
void
MacroAssemblerMIPSCompat::subDouble(FloatRegister src, FloatRegister dest)
{
    // dest -= src (double precision).
    as_subd(dest, dest, src);
}
277
void
MacroAssemblerMIPSCompat::mulDouble(FloatRegister src, FloatRegister dest)
{
    // dest *= src (double precision).
    as_muld(dest, dest, src);
}
283
void
MacroAssemblerMIPSCompat::divDouble(FloatRegister src, FloatRegister dest)
{
    // dest /= src (double precision).
    as_divd(dest, dest, src);
}
289
void
MacroAssemblerMIPSCompat::negateDouble(FloatRegister reg)
{
    // reg = -reg (double precision), in place.
    as_negd(reg, reg);
}
295
void
MacroAssemblerMIPSCompat::inc64(AbsoluteAddress dest)
{
    // 64-bit increment of the word pair at a fixed address: low word at +0,
    // high word at +4, with the carry computed via sltiu.
    ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
    as_lw(SecondScratchReg, ScratchRegister, 0);

    // low += 1; store it back.
    as_addiu(SecondScratchReg, SecondScratchReg, 1);
    as_sw(SecondScratchReg, ScratchRegister, 0);

    // carry = (newLow < 1), i.e. 1 iff the low word wrapped to 0.
    as_sltiu(SecondScratchReg, SecondScratchReg, 1);
    as_lw(ScratchRegister, ScratchRegister, 4);

    // high += carry.
    as_addu(SecondScratchReg, ScratchRegister, SecondScratchReg);

    // The high-word load clobbered ScratchRegister; rematerialize the address.
    ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
    as_sw(SecondScratchReg, ScratchRegister, 4);
}
313
void
MacroAssemblerMIPS::ma_li(Register dest, CodeOffset* label)
{
    // Emit a patchable lui/ori pair and record its offset in |label| so the
    // real immediate can be patched in later.
    BufferOffset bo = m_buffer.nextOffset();
    ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
    label->bind(bo.getOffset());
}
321
void
MacroAssemblerMIPS::ma_li(Register dest, ImmWord imm)
{
    // On mips32 a word is 32 bits; defer to the Imm32 overload.
    ma_li(dest, Imm32(uint32_t(imm.value)));
}
327
// This method generates lui and ori instruction pair that can be modified by
// UpdateLuiOriValue, either during compilation (eg. Assembler::bind), or
// during execution (eg. jit::PatchJump).
void
MacroAssemblerMIPS::ma_liPatchable(Register dest, Imm32 imm)
{
    // Reserve space so the two instructions stay contiguous in the buffer.
    m_buffer.ensureSpace(2 * sizeof(uint32_t));
    as_lui(dest, Imm16::Upper(imm).encode());
    as_ori(dest, dest, Imm16::Lower(imm).encode());
}
338
void
MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmPtr imm)
{
    // Pointers are 32-bit here; defer to the ImmWord overload.
    ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
}
344
void
MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmWord imm)
{
    // On mips32 a word is 32 bits; defer to the Imm32 overload.
    ma_liPatchable(dest, Imm32(int32_t(imm.value)));
}
350
// Arithmetic-based ops.

// Add.
void
MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
{
    // Signed overflow occurs iff the operands have the same sign and the
    // result's sign differs from them. Both checks use the xor sign bit.
    Label goodAddition;
    as_addu(rd, rs, rt);

    as_xor(ScratchRegister, rs, rt); // If different sign, no overflow
    ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);

    // If different sign, then overflow
    as_xor(ScratchRegister, rs, rd);
    ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);

    bind(&goodAddition);
}
369
void
MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow)
{
    // Check for signed range because of as_addiu
    // Check for unsigned range because of as_xori
    if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) {
        Label goodAddition;
        as_addiu(rd, rs, imm.value);

        // If different sign, no overflow
        as_xori(ScratchRegister, rs, imm.value);
        ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);

        // If different sign, then overflow
        as_xor(ScratchRegister, rs, rd);
        ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);

        bind(&goodAddition);
    } else {
        // Immediate doesn't fit in 16 bits; materialize it and use the
        // register-register form.
        ma_li(ScratchRegister, imm);
        ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
    }
}
393
// Subtract.
void
MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
{
    // Signed overflow occurs iff the operands have different signs and the
    // result's sign differs from rs. (ma_b here only needs ScratchRegister.)
    Label goodSubtraction;
    as_subu(rd, rs, rt);

    as_xor(ScratchRegister, rs, rt); // If same sign, no overflow
    ma_b(ScratchRegister, Imm32(0), &goodSubtraction, Assembler::GreaterThanOrEqual, ShortJump);

    // If different sign, then overflow
    as_xor(ScratchRegister, rs, rd);
    ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);

    bind(&goodSubtraction);
}
412
// Memory.

void
MacroAssemblerMIPS::ma_load(Register dest, Address address,
                            LoadStoreSize size, LoadStoreExtension extension)
{
    // Load instructions take a signed 16-bit offset. If the offset doesn't
    // fit, fold it into ScratchRegister and load with a zero offset.
    int16_t encodedOffset;
    Register base;
    if (!Imm16::IsInSignedRange(address.offset)) {
        ma_li(ScratchRegister, Imm32(address.offset));
        as_addu(ScratchRegister, address.base, ScratchRegister);
        base = ScratchRegister;
        encodedOffset = Imm16(0).encode();
    } else {
        encodedOffset = Imm16(address.offset).encode();
        base = address.base;
    }

    switch (size) {
      case SizeByte:
        if (ZeroExtend == extension)
            as_lbu(dest, base, encodedOffset);
        else
            as_lb(dest, base, encodedOffset);
        break;
      case SizeHalfWord:
        if (ZeroExtend == extension)
            as_lhu(dest, base, encodedOffset);
        else
            as_lh(dest, base, encodedOffset);
        break;
      case SizeWord:
        as_lw(dest, base, encodedOffset);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_load");
    }
}
451
void
MacroAssemblerMIPS::ma_store(Register data, Address address, LoadStoreSize size,
                             LoadStoreExtension extension)
{
    // Store instructions take a signed 16-bit offset. If the offset doesn't
    // fit, fold it into ScratchRegister and store with a zero offset.
    int16_t encodedOffset;
    Register base;
    if (!Imm16::IsInSignedRange(address.offset)) {
        ma_li(ScratchRegister, Imm32(address.offset));
        as_addu(ScratchRegister, address.base, ScratchRegister);
        base = ScratchRegister;
        encodedOffset = Imm16(0).encode();
    } else {
        encodedOffset = Imm16(address.offset).encode();
        base = address.base;
    }

    switch (size) {
      case SizeByte:
        as_sb(data, base, encodedOffset);
        break;
      case SizeHalfWord:
        as_sh(data, base, encodedOffset);
        break;
      case SizeWord:
        as_sw(data, base, encodedOffset);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_store");
    }
}
482
void
MacroAssemblerMIPSCompat::computeScaledAddress(const BaseIndex& address, Register dest)
{
    // dest = base + (index << scale); the offset is NOT applied here.
    int32_t shift = Imm32::ShiftOf(address.scale).value;
    if (shift) {
        ma_sll(ScratchRegister, address.index, Imm32(shift));
        as_addu(dest, address.base, ScratchRegister);
    } else {
        as_addu(dest, address.base, address.index);
    }
}
494
// Shortcut for when we know we're transferring 32 bits of data.
void
MacroAssemblerMIPS::ma_lw(Register data, Address address)
{
    ma_load(data, address, SizeWord);
}
501
void
MacroAssemblerMIPS::ma_sw(Register data, Address address)
{
    // 32-bit store shortcut.
    ma_store(data, address, SizeWord);
}
507
void
MacroAssemblerMIPS::ma_sw(Imm32 imm, Address address)
{
    // Materialize the immediate in ScratchRegister, so the base must not
    // alias it.
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, imm);

    if (Imm16::IsInSignedRange(address.offset)) {
        as_sw(ScratchRegister, address.base, address.offset);
    } else {
        // Offset doesn't fit in 16 bits; fold it into SecondScratchReg.
        MOZ_ASSERT(address.base != SecondScratchReg);

        ma_li(SecondScratchReg, Imm32(address.offset));
        as_addu(SecondScratchReg, address.base, SecondScratchReg);
        as_sw(ScratchRegister, SecondScratchReg, 0);
    }
}
524
void
MacroAssemblerMIPS::ma_sw(Register data, BaseIndex& address)
{
    // 32-bit store shortcut for base+index addressing.
    ma_store(data, address, SizeWord);
}
530
void
MacroAssemblerMIPS::ma_pop(Register r)
{
    // Load the value at sp, then release one word of stack.
    as_lw(r, StackPointer, 0);
    as_addiu(StackPointer, StackPointer, sizeof(intptr_t));
}
537
void
MacroAssemblerMIPS::ma_push(Register r)
{
    if (r == sp) {
        // Pushing sp requires one more instruction: copy the pre-decrement
        // value so the stored word is the original sp.
        ma_move(ScratchRegister, sp);
        r = ScratchRegister;
    }

    // Reserve one word of stack, then store the value at the new sp.
    as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
    as_sw(r, StackPointer, 0);
}
550
// Branches when done from within mips-specific code.
void
MacroAssemblerMIPS::ma_b(Register lhs, Address addr, Label* label, Condition c, JumpKind jumpKind)
{
    // The loaded value goes through ScratchRegister, so lhs must not alias it.
    MOZ_ASSERT(lhs != ScratchRegister);
    ma_lw(ScratchRegister, addr);
    ma_b(lhs, ScratchRegister, label, c, jumpKind);
}
559
void
MacroAssemblerMIPS::ma_b(Address addr, Imm32 imm, Label* label, Condition c, JumpKind jumpKind)
{
    // Load through SecondScratchReg; the imm compare may use ScratchRegister.
    ma_lw(SecondScratchReg, addr);
    ma_b(SecondScratchReg, imm, label, c, jumpKind);
}
566
void
MacroAssemblerMIPS::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c, JumpKind jumpKind)
{
    // Load through SecondScratchReg; the imm compare may use ScratchRegister.
    ma_lw(SecondScratchReg, addr);
    ma_b(SecondScratchReg, imm, label, c, jumpKind);
}
573
void
MacroAssemblerMIPS::ma_bal(Label* label, DelaySlotFill delaySlotFill)
{
    if (label->bound()) {
        // Generate the long jump for calls because return address has to be
        // the address after the reserved block.
        addLongJump(nextOffset());
        ma_liPatchable(ScratchRegister, Imm32(label->offset()));
        as_jalr(ScratchRegister);
        if (delaySlotFill == FillDelaySlot)
            as_nop();
        return;
    }

    // Label not yet bound: emit an open call branch and thread it onto the
    // label's chain of uses.

    // Second word holds a pointer to the next branch in label's chain.
    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

    // Make the whole branch continuous in the buffer.
    m_buffer.ensureSpace(4 * sizeof(uint32_t));

    BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
    writeInst(nextInChain);
    if (!oom())
        label->use(bo.getOffset());
    // Leave space for long jump.
    as_nop();
    if (delaySlotFill == FillDelaySlot)
        as_nop();
}
603
void
MacroAssemblerMIPS::branchWithCode(InstImm code, Label* label, JumpKind jumpKind)
{
    // Calls (bgezal) must go through ma_bal, not here.
    MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
    // beq $zero, $zero is the encoding of an unconditional branch.
    InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));

    if (label->bound()) {
        int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();

        // Prefer the compact form whenever the target is within b-range.
        if (BOffImm16::IsInRange(offset))
            jumpKind = ShortJump;

        if (jumpKind == ShortJump) {
            MOZ_ASSERT(BOffImm16::IsInRange(offset));
            code.setBOffImm16(BOffImm16(offset));
            writeInst(code.encode());
            as_nop();  // Delay slot.
            return;
        }

        if (code.encode() == inst_beq.encode()) {
            // Handle long jump: unconditional, so emit a patchable absolute
            // jump directly.
            addLongJump(nextOffset());
            ma_liPatchable(ScratchRegister, Imm32(label->offset()));
            as_jr(ScratchRegister);
            as_nop();  // Delay slot.
            return;
        }

        // Handle long conditional branch: invert the condition to skip over
        // the 5-instruction absolute-jump sequence that follows.
        writeInst(invertBranch(code, BOffImm16(5 * sizeof(uint32_t))).encode());
        // No need for a "nop" here because we can clobber scratch.
        addLongJump(nextOffset());
        ma_liPatchable(ScratchRegister, Imm32(label->offset()));
        as_jr(ScratchRegister);
        as_nop();  // Delay slot.
        return;
    }

    // Generate open jump and link it to a label.

    // Second word holds a pointer to the next branch in label's chain.
    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

    if (jumpKind == ShortJump) {
        // Make the whole branch continuous in the buffer.
        m_buffer.ensureSpace(2 * sizeof(uint32_t));

        // Indicate that this is short jump with offset 4.
        code.setBOffImm16(BOffImm16(4));
        BufferOffset bo = writeInst(code.encode());
        writeInst(nextInChain);
        if (!oom())
            label->use(bo.getOffset());
        return;
    }

    bool conditional = code.encode() != inst_beq.encode();

    // Make the whole branch continuous in the buffer.
    m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));

    BufferOffset bo = writeInst(code.encode());
    writeInst(nextInChain);
    if (!oom())
        label->use(bo.getOffset());
    // Leave space for potential long jump (one extra slot for the inverted
    // branch when conditional).
    as_nop();
    as_nop();
    if (conditional)
        as_nop();
}
676
void
MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Address addr, Label* fail /* none */, Condition c)
{
    // rd = (rs <c> *addr) ? 1 : 0, loading the memory operand first.
    ma_lw(ScratchRegister, addr);
    ma_cmp_set(rd, rs, ScratchRegister, c);
}
683
void
MacroAssemblerMIPS::ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c)
{
    // dst = (*lhs <c> rhs) ? 1 : 0, loading the memory operand first.
    ma_lw(ScratchRegister, lhs);
    ma_cmp_set(dst, ScratchRegister, rhs, c);
}
690
// fp instructions
void
MacroAssemblerMIPS::ma_lid(FloatRegister dest, double value)
{
    // Reinterpret the double's bits as two 32-bit halves. The lo/hi field
    // order here matches the target's word layout for doubles — presumably
    // little-endian; verify if porting.
    struct DoubleStruct {
        uint32_t lo;
        uint32_t hi;
    };
    DoubleStruct intStruct = mozilla::BitwiseCast<DoubleStruct>(value);

    // put hi part of 64 bit value into the odd register
    if (intStruct.hi == 0) {
        moveToDoubleHi(zero, dest);
    } else {
        ma_li(ScratchRegister, Imm32(intStruct.hi));
        moveToDoubleHi(ScratchRegister, dest);
    }

    // put low part of 64 bit value into the even register
    if (intStruct.lo == 0) {
        moveToDoubleLo(zero, dest);
    } else {
        ma_li(ScratchRegister, Imm32(intStruct.lo));
        moveToDoubleLo(ScratchRegister, dest);
    }
}
717
void
MacroAssemblerMIPS::ma_mv(FloatRegister src, ValueOperand dest)
{
    // Split a boxed double: low word -> payload reg, high word -> type reg.
    moveFromDoubleLo(src, dest.payloadReg());
    moveFromDoubleHi(src, dest.typeReg());
}
724
void
MacroAssemblerMIPS::ma_mv(ValueOperand src, FloatRegister dest)
{
    // Reassemble a double from a boxed Value: payload -> low, type -> high.
    moveToDoubleLo(src.payloadReg(), dest);
    moveToDoubleHi(src.typeReg(), dest);
}
731
void
MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address)
{
    // Single-precision load; fall back to a computed base when the offset
    // doesn't fit in the signed 16-bit field.
    if (Imm16::IsInSignedRange(address.offset)) {
        as_ls(ft, address.base, address.offset);
    } else {
        MOZ_ASSERT(address.base != ScratchRegister);
        ma_li(ScratchRegister, Imm32(address.offset));
        as_addu(ScratchRegister, address.base, ScratchRegister);
        as_ls(ft, ScratchRegister, 0);
    }
}
744
void
MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address)
{
    // Use single precision load instructions so we don't have to worry about
    // alignment: the two 4-byte halves land in ft and its odd pair register.

    int32_t off2 = address.offset + TAG_OFFSET;
    if (Imm16::IsInSignedRange(address.offset) && Imm16::IsInSignedRange(off2)) {
        as_ls(ft, address.base, address.offset);
        as_ls(getOddPair(ft), address.base, off2);
    } else {
        // Offsets don't fit in 16 bits; fold the base offset into scratch.
        ma_li(ScratchRegister, Imm32(address.offset));
        as_addu(ScratchRegister, address.base, ScratchRegister);
        as_ls(ft, ScratchRegister, PAYLOAD_OFFSET);
        as_ls(getOddPair(ft), ScratchRegister, TAG_OFFSET);
    }
}
762
void
MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address)
{
    // Store a double as two single-precision stores (ft and its odd pair),
    // mirroring ma_ld, so alignment is never an issue.
    int32_t off2 = address.offset + TAG_OFFSET;
    if (Imm16::IsInSignedRange(address.offset) && Imm16::IsInSignedRange(off2)) {
        as_ss(ft, address.base, address.offset);
        as_ss(getOddPair(ft), address.base, off2);
    } else {
        // Offsets don't fit in 16 bits; fold the base offset into scratch.
        ma_li(ScratchRegister, Imm32(address.offset));
        as_addu(ScratchRegister, address.base, ScratchRegister);
        as_ss(ft, ScratchRegister, PAYLOAD_OFFSET);
        as_ss(getOddPair(ft), ScratchRegister, TAG_OFFSET);
    }
}
777
void
MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address)
{
    // Single-precision store; fall back to a computed base when the offset
    // doesn't fit in the signed 16-bit field.
    if (Imm16::IsInSignedRange(address.offset)) {
        as_ss(ft, address.base, address.offset);
    } else {
        ma_li(ScratchRegister, Imm32(address.offset));
        as_addu(ScratchRegister, address.base, ScratchRegister);
        as_ss(ft, ScratchRegister, 0);
    }
}
789
void
MacroAssemblerMIPS::ma_pop(FloatRegister fs)
{
    // Load the full double overlay from the stack, then release 8 bytes.
    ma_ld(fs.doubleOverlay(0), Address(StackPointer, 0));
    as_addiu(StackPointer, StackPointer, sizeof(double));
}
796
void
MacroAssemblerMIPS::ma_push(FloatRegister fs)
{
    // Reserve 8 bytes of stack, then store the full double overlay.
    as_addiu(StackPointer, StackPointer, -sizeof(double));
    ma_sd(fs.doubleOverlay(0), Address(StackPointer, 0));
}
803
bool
MacroAssemblerMIPSCompat::buildOOLFakeExitFrame(void* fakeReturnAddr)
{
    // Push a minimal IonJS exit-frame header (descriptor + return address)
    // so out-of-line code looks like it was called from JIT code.
    uint32_t descriptor = MakeFrameDescriptor(asMasm().framePushed(), JitFrame_IonJS);

    asMasm().Push(Imm32(descriptor)); // descriptor_
    asMasm().Push(ImmPtr(fakeReturnAddr));

    return true;
}
814
void
MacroAssemblerMIPSCompat::add32(Register src, Register dest)
{
    // dest += src (32-bit, no overflow trap).
    as_addu(dest, dest, src);
}
820
void
MacroAssemblerMIPSCompat::add32(Imm32 imm, Register dest)
{
    // dest += imm (32-bit, no overflow trap).
    ma_addu(dest, dest, imm);
}
826
void
MacroAssemblerMIPSCompat::add32(Imm32 imm, const Address& dest)
{
    // Read-modify-write: *dest += imm, staged through SecondScratchReg.
    load32(dest, SecondScratchReg);
    ma_addu(SecondScratchReg, imm);
    store32(SecondScratchReg, dest);
}
835
void
MacroAssemblerMIPSCompat::addPtr(Register src, Register dest)
{
    // dest += src (pointer-width == 32-bit here).
    ma_addu(dest, src);
}
841
void
MacroAssemblerMIPSCompat::addPtr(const Address& src, Register dest)
{
    // dest += *src, staged through ScratchRegister.
    loadPtr(src, ScratchRegister);
    ma_addu(dest, ScratchRegister);
}
848
void
MacroAssemblerMIPSCompat::subPtr(Register src, Register dest)
{
    // dest -= src (pointer-width == 32-bit here).
    as_subu(dest, dest, src);
}
854
void
MacroAssemblerMIPSCompat::move32(Imm32 imm, Register dest)
{
    // Materialize a 32-bit immediate.
    ma_li(dest, imm);
}
860
void
MacroAssemblerMIPSCompat::move32(Register src, Register dest)
{
    // Register-to-register copy.
    ma_move(dest, src);
}
866
void
MacroAssemblerMIPSCompat::movePtr(Register src, Register dest)
{
    // Register-to-register copy (pointer-width == 32-bit here).
    ma_move(dest, src);
}
void
MacroAssemblerMIPSCompat::movePtr(ImmWord imm, Register dest)
{
    // Materialize a word-sized immediate.
    ma_li(dest, imm);
}
877
void
MacroAssemblerMIPSCompat::movePtr(ImmGCPtr imm, Register dest)
{
    // Materialize a GC-thing pointer (traced/patched by the GC machinery).
    ma_li(dest, imm);
}
883
void
MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest)
{
    // Pointers are word-sized; defer to the ImmWord overload.
    movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
void
MacroAssemblerMIPSCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
{
    // Record the link so the real address can be patched in later; emit a
    // patchable lui/ori pair with a -1 placeholder.
    append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm));
    ma_liPatchable(dest, ImmWord(-1));
}
895
void
MacroAssemblerMIPSCompat::load8ZeroExtend(const Address& address, Register dest)
{
    // lbu: unsigned byte load.
    ma_load(dest, address, SizeByte, ZeroExtend);
}
901
void
MacroAssemblerMIPSCompat::load8ZeroExtend(const BaseIndex& src, Register dest)
{
    // lbu: unsigned byte load (base+index form).
    ma_load(dest, src, SizeByte, ZeroExtend);
}
907
void
MacroAssemblerMIPSCompat::load8SignExtend(const Address& address, Register dest)
{
    // lb: signed byte load.
    ma_load(dest, address, SizeByte, SignExtend);
}
913
void
MacroAssemblerMIPSCompat::load8SignExtend(const BaseIndex& src, Register dest)
{
    // lb: signed byte load (base+index form).
    ma_load(dest, src, SizeByte, SignExtend);
}
919
void
MacroAssemblerMIPSCompat::load16ZeroExtend(const Address& address, Register dest)
{
    // lhu: unsigned halfword load.
    ma_load(dest, address, SizeHalfWord, ZeroExtend);
}
925
void
MacroAssemblerMIPSCompat::load16ZeroExtend(const BaseIndex& src, Register dest)
{
    // lhu: unsigned halfword load (base+index form).
    ma_load(dest, src, SizeHalfWord, ZeroExtend);
}
931
void
MacroAssemblerMIPSCompat::load16SignExtend(const Address& address, Register dest)
{
    // lh: signed halfword load.
    ma_load(dest, address, SizeHalfWord, SignExtend);
}
937
void
MacroAssemblerMIPSCompat::load16SignExtend(const BaseIndex& src, Register dest)
{
    // lh: signed halfword load (base+index form).
    ma_load(dest, src, SizeHalfWord, SignExtend);
}
943
void
MacroAssemblerMIPSCompat::load32(const Address& address, Register dest)
{
    // lw: 32-bit word load.
    ma_load(dest, address, SizeWord);
}
949
void
MacroAssemblerMIPSCompat::load32(const BaseIndex& address, Register dest)
{
    // lw: 32-bit word load (base+index form).
    ma_load(dest, address, SizeWord);
}
955
void
MacroAssemblerMIPSCompat::load32(AbsoluteAddress address, Register dest)
{
    // Materialize the absolute address in ScratchRegister, then load.
    movePtr(ImmPtr(address.addr), ScratchRegister);
    load32(Address(ScratchRegister, 0), dest);
}
962
963 void
load32(wasm::SymbolicAddress address,Register dest)964 MacroAssemblerMIPSCompat::load32(wasm::SymbolicAddress address, Register dest)
965 {
966 movePtr(address, ScratchRegister);
967 load32(Address(ScratchRegister, 0), dest);
968 }
969
970 void
loadPtr(const Address & address,Register dest)971 MacroAssemblerMIPSCompat::loadPtr(const Address& address, Register dest)
972 {
973 ma_load(dest, address, SizeWord);
974 }
975
976 void
loadPtr(const BaseIndex & src,Register dest)977 MacroAssemblerMIPSCompat::loadPtr(const BaseIndex& src, Register dest)
978 {
979 ma_load(dest, src, SizeWord);
980 }
981
982 void
loadPtr(AbsoluteAddress address,Register dest)983 MacroAssemblerMIPSCompat::loadPtr(AbsoluteAddress address, Register dest)
984 {
985 movePtr(ImmPtr(address.addr), ScratchRegister);
986 loadPtr(Address(ScratchRegister, 0), dest);
987 }
988
989 void
loadPtr(wasm::SymbolicAddress address,Register dest)990 MacroAssemblerMIPSCompat::loadPtr(wasm::SymbolicAddress address, Register dest)
991 {
992 movePtr(address, ScratchRegister);
993 loadPtr(Address(ScratchRegister, 0), dest);
994 }
995
void
MacroAssemblerMIPSCompat::loadPrivate(const Address& address, Register dest)
{
    // A private value stores its pointer entirely in the payload word of the
    // nunbox32 pair; only that word is loaded.
    ma_lw(dest, Address(address.base, address.offset + PAYLOAD_OFFSET));
}

void
MacroAssemblerMIPSCompat::loadDouble(const Address& address, FloatRegister dest)
{
    ma_ld(dest, address);
}

void
MacroAssemblerMIPSCompat::loadDouble(const BaseIndex& src, FloatRegister dest)
{
    // Fold base + index*scale into SecondScratchReg, then load relative to it.
    computeScaledAddress(src, SecondScratchReg);
    ma_ld(dest, Address(SecondScratchReg, src.offset));
}

void
MacroAssemblerMIPSCompat::loadFloatAsDouble(const Address& address, FloatRegister dest)
{
    // Load a single-precision value and widen it to double in place.
    ma_ls(dest, address);
    as_cvtds(dest, dest);
}

void
MacroAssemblerMIPSCompat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
{
    loadFloat32(src, dest);
    as_cvtds(dest, dest);
}

void
MacroAssemblerMIPSCompat::loadFloat32(const Address& address, FloatRegister dest)
{
    ma_ls(dest, address);
}

void
MacroAssemblerMIPSCompat::loadFloat32(const BaseIndex& src, FloatRegister dest)
{
    computeScaledAddress(src, SecondScratchReg);
    ma_ls(dest, Address(SecondScratchReg, src.offset));
}
1041
// Memory-store helpers. Immediate stores to a plain Address first materialize
// the immediate in SecondScratchReg; the BaseIndex forms pass the immediate
// through to ma_store(), which handles materialization itself.

void
MacroAssemblerMIPSCompat::store8(Imm32 imm, const Address& address)
{
    ma_li(SecondScratchReg, imm);
    ma_store(SecondScratchReg, address, SizeByte);
}

void
MacroAssemblerMIPSCompat::store8(Register src, const Address& address)
{
    ma_store(src, address, SizeByte);
}

void
MacroAssemblerMIPSCompat::store8(Imm32 imm, const BaseIndex& dest)
{
    ma_store(imm, dest, SizeByte);
}

void
MacroAssemblerMIPSCompat::store8(Register src, const BaseIndex& dest)
{
    ma_store(src, dest, SizeByte);
}

void
MacroAssemblerMIPSCompat::store16(Imm32 imm, const Address& address)
{
    ma_li(SecondScratchReg, imm);
    ma_store(SecondScratchReg, address, SizeHalfWord);
}

void
MacroAssemblerMIPSCompat::store16(Register src, const Address& address)
{
    ma_store(src, address, SizeHalfWord);
}

void
MacroAssemblerMIPSCompat::store16(Imm32 imm, const BaseIndex& dest)
{
    ma_store(imm, dest, SizeHalfWord);
}

void
MacroAssemblerMIPSCompat::store16(Register src, const BaseIndex& address)
{
    ma_store(src, address, SizeHalfWord);
}

void
MacroAssemblerMIPSCompat::store32(Register src, AbsoluteAddress address)
{
    // Materialize the absolute address in ScratchRegister, then store
    // relative to it.
    movePtr(ImmPtr(address.addr), ScratchRegister);
    store32(src, Address(ScratchRegister, 0));
}

void
MacroAssemblerMIPSCompat::store32(Register src, const Address& address)
{
    ma_store(src, address, SizeWord);
}

void
MacroAssemblerMIPSCompat::store32(Imm32 src, const Address& address)
{
    move32(src, SecondScratchReg);
    ma_store(SecondScratchReg, address, SizeWord);
}

void
MacroAssemblerMIPSCompat::store32(Imm32 imm, const BaseIndex& dest)
{
    ma_store(imm, dest, SizeWord);
}

void
MacroAssemblerMIPSCompat::store32(Register src, const BaseIndex& dest)
{
    ma_store(src, dest, SizeWord);
}
1123
// Pointer-sized stores. On MIPS32 a pointer is one word, so all of these
// reduce to SizeWord stores. The immediate forms are templated over the
// destination operand type (Address or BaseIndex) and explicitly
// instantiated for both below.

template <typename T>
void
MacroAssemblerMIPSCompat::storePtr(ImmWord imm, T address)
{
    ma_li(SecondScratchReg, imm);
    ma_store(SecondScratchReg, address, SizeWord);
}

template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmWord imm, Address address);
template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmWord imm, BaseIndex address);

template <typename T>
void
MacroAssemblerMIPSCompat::storePtr(ImmPtr imm, T address)
{
    // An ImmPtr is just an ImmWord-sized bit pattern on this platform.
    storePtr(ImmWord(uintptr_t(imm.value)), address);
}

template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmPtr imm, Address address);
template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmPtr imm, BaseIndex address);

template <typename T>
void
MacroAssemblerMIPSCompat::storePtr(ImmGCPtr imm, T address)
{
    storePtr(ImmWord(uintptr_t(imm.value)), address);
}

template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmGCPtr imm, Address address);
template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmGCPtr imm, BaseIndex address);

void
MacroAssemblerMIPSCompat::storePtr(Register src, const Address& address)
{
    ma_store(src, address, SizeWord);
}

void
MacroAssemblerMIPSCompat::storePtr(Register src, const BaseIndex& address)
{
    ma_store(src, address, SizeWord);
}

void
MacroAssemblerMIPSCompat::storePtr(Register src, AbsoluteAddress dest)
{
    // Materialize the absolute address in ScratchRegister, then store
    // relative to it.
    movePtr(ImmPtr(dest.addr), ScratchRegister);
    storePtr(src, Address(ScratchRegister, 0));
}
1173
// Clamp the int32 in |reg| into the uint8 range [0, 255], in place.
void
MacroAssemblerMIPSCompat::clampIntToUint8(Register reg)
{
    // look at (reg >> 8) if it is 0, then src shouldn't be clamped
    // if it is <0, then we want to clamp to 0,
    // otherwise, we wish to clamp to 255
    Label done;
    // Arithmetic shift keeps the sign, so ScratchRegister is zero exactly
    // when 0 <= reg <= 255.
    ma_move(ScratchRegister, reg);
    asMasm().rshiftPtrArithmetic(Imm32(8), ScratchRegister);
    ma_b(ScratchRegister, ScratchRegister, &done, Assembler::Zero, ShortJump);
    {
        Label negative;
        // Negative input -> clamp to 0; otherwise it's > 255 -> clamp to 255.
        ma_b(ScratchRegister, ScratchRegister, &negative, Assembler::Signed, ShortJump);
        {
            ma_li(reg, Imm32(255));
            ma_b(&done, ShortJump);
        }
        bind(&negative);
        {
            ma_move(reg, zero);
        }
    }
    bind(&done);
}
1198
// Clamp |input| to the uint8 range [0, 255] with round-half-to-even
// semantics, writing the result to |output|. NaN clamps to 0.
// Note: this function clobbers the input register.
void
MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
{
    MOZ_ASSERT(input != ScratchDoubleReg);
    Label positive, done;

    // <= 0 or NaN --> 0
    zeroDouble(ScratchDoubleReg);
    branchDouble(DoubleGreaterThan, input, ScratchDoubleReg, &positive);
    {
        move32(Imm32(0), output);
        jump(&done);
    }

    bind(&positive);

    // Add 0.5 and truncate.
    loadConstantDouble(0.5, ScratchDoubleReg);
    addDouble(ScratchDoubleReg, input);

    Label outOfRange;

    branchTruncateDouble(input, output, &outOfRange);
    branch32(Assembler::Above, output, Imm32(255), &outOfRange);
    {
        // Check if we had a tie (the input was exactly halfway between two
        // integers before the +0.5 adjustment).
        convertInt32ToDouble(output, ScratchDoubleReg);
        branchDouble(DoubleNotEqual, input, ScratchDoubleReg, &done);

        // It was a tie. Mask out the ones bit to get an even value.
        // See also js_TypedArray_uint8_clamp_double.
        and32(Imm32(~1), output);
        jump(&done);
    }

    // > 255 --> 255
    bind(&outOfRange);
    {
        move32(Imm32(255), output);
    }

    bind(&done);
}
1243
// Pointer-sized arithmetic helpers. The memory-operand forms read into a
// scratch register, operate, and (for the in-memory destination forms)
// write the result back.

void
MacroAssemblerMIPSCompat::subPtr(Imm32 imm, const Register dest)
{
    ma_subu(dest, dest, imm);
}

void
MacroAssemblerMIPSCompat::subPtr(const Address& addr, const Register dest)
{
    // dest -= *addr
    loadPtr(addr, SecondScratchReg);
    subPtr(SecondScratchReg, dest);
}

void
MacroAssemblerMIPSCompat::subPtr(Register src, const Address& dest)
{
    // *dest -= src (read-modify-write through SecondScratchReg).
    loadPtr(dest, SecondScratchReg);
    subPtr(src, SecondScratchReg);
    storePtr(SecondScratchReg, dest);
}

void
MacroAssemblerMIPSCompat::addPtr(Imm32 imm, const Register dest)
{
    ma_addu(dest, imm);
}

void
MacroAssemblerMIPSCompat::addPtr(Imm32 imm, const Address& dest)
{
    // *dest += imm (read-modify-write through ScratchRegister).
    loadPtr(dest, ScratchRegister);
    addPtr(imm, ScratchRegister);
    storePtr(ScratchRegister, dest);
}
1278
// Floating-point compare-and-branch: delegate to the FPU branch macros
// (bc1d for doubles, bc1s for singles).
void
MacroAssemblerMIPSCompat::branchDouble(DoubleCondition cond, FloatRegister lhs,
                                       FloatRegister rhs, Label* label)
{
    ma_bc1d(lhs, rhs, label, cond);
}

void
MacroAssemblerMIPSCompat::branchFloat(DoubleCondition cond, FloatRegister lhs,
                                      FloatRegister rhs, Label* label)
{
    ma_bc1s(lhs, rhs, label, cond);
}

// higher level tag testing code

// Rebase an Operand onto the payload / type word of the nunbox32 pair it
// addresses.
Operand
MacroAssemblerMIPSCompat::ToPayload(Operand base)
{
    return Operand(Register::FromCode(base.base()), base.disp() + PAYLOAD_OFFSET);
}

Operand
MacroAssemblerMIPSCompat::ToType(Operand base)
{
    return Operand(Register::FromCode(base.base()), base.disp() + TAG_OFFSET);
}
1305
// Tag tests. Each overload either already has the tag in a register or
// extracts it into SecondScratchReg first, then branches on a comparison
// against the relevant JSVAL tag (or tag-range boundary).

void
MacroAssemblerMIPSCompat::branchTestGCThing(Condition cond, const Address& address, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    // GC-thing tags occupy the upper (unsigned) range of tag values, so
    // membership is a single unsigned comparison against the range start.
    ma_b(SecondScratchReg, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
         (cond == Equal) ? AboveOrEqual : Below);
}
void
MacroAssemblerMIPSCompat::branchTestGCThing(Condition cond, const BaseIndex& src, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
         (cond == Equal) ? AboveOrEqual : Below);
}

void
MacroAssemblerMIPSCompat::branchTestPrimitive(Condition cond, const ValueOperand& value,
                                              Label* label)
{
    branchTestPrimitive(cond, value.typeReg(), label);
}
void
MacroAssemblerMIPSCompat::branchTestPrimitive(Condition cond, Register tag, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    // Primitive tags sit strictly below the non-primitive range.
    ma_b(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET), label,
         (cond == Equal) ? Below : AboveOrEqual);
}

void
MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const ValueOperand& value, Label* label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    ma_b(value.typeReg(), ImmType(JSVAL_TYPE_INT32), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, Register tag, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const Address& address, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const BaseIndex& src, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
}
1366
// Branch iff the value's tag is (or is not) the boolean tag.
void
MacroAssemblerMIPSCompat:: branchTestBoolean(Condition cond, const ValueOperand& value,
                                             Label* label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    ma_b(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
}

void
MacroAssemblerMIPSCompat:: branchTestBoolean(Condition cond, Register tag, Label* label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    ma_b(tag, ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestBoolean(Condition cond, const Address& address, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
}
1389
1390 void
branchTestBoolean(Condition cond,const BaseIndex & src,Label * label)1391 MacroAssemblerMIPSCompat::branchTestBoolean(Condition cond, const BaseIndex& src, Label* label)
1392 {
1393 MOZ_ASSERT(cond == Equal || cond == NotEqual);
1394 extractTag(src, SecondScratchReg);
1395 ma_b(SecondScratchReg, ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
1396 }
1397
// Branch iff the value is (or is not) a double. In nunbox32 a value is a
// double exactly when its tag word is Below JSVAL_TAG_CLEAR, so the
// Equal/NotEqual request is mapped onto an unsigned range comparison.
void
MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const ValueOperand& value, Label* label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual;
    ma_b(value.typeReg(), ImmTag(JSVAL_TAG_CLEAR), label, actual);
}

void
MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, Register tag, Label* label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == NotEqual);
    Condition actual = (cond == Equal) ? Below : AboveOrEqual;
    ma_b(tag, ImmTag(JSVAL_TAG_CLEAR), label, actual);
}
1413
1414 void
branchTestDouble(Condition cond,const Address & address,Label * label)1415 MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const Address& address, Label* label)
1416 {
1417 MOZ_ASSERT(cond == Equal || cond == NotEqual);
1418 extractTag(address, SecondScratchReg);
1419 ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_CLEAR), label, cond);
1420 }
1421
void
MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const BaseIndex& src, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    // Double iff tag is Below JSVAL_TAG_CLEAR (see the ValueOperand overload).
    Condition actual = (cond == Equal) ? Below : AboveOrEqual;
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_CLEAR), label, actual);
}

// Branch iff the value's tag is (or is not) the null tag.
void
MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const ValueOperand& value, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(value.typeReg(), ImmType(JSVAL_TYPE_NULL), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestNull(Condition cond, Register tag, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const BaseIndex& src, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const Address& address, Label* label) {
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), label, cond);
}

// Set |dest| to the boolean result of the null-tag comparison instead of
// branching.
void
MacroAssemblerMIPSCompat::testNullSet(Condition cond, const ValueOperand& value, Register dest)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_NULL), cond);
}
1466
// Branch iff the value's tag is (or is not) the object tag.
void
MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const ValueOperand& value, Label* label)
{
    branchTestObject(cond, value.typeReg(), label);
}

void
MacroAssemblerMIPSCompat::branchTestObject(Condition cond, Register tag, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const BaseIndex& src, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const Address& address, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), label, cond);
}

// Set |dest| to the boolean result of the object-tag comparison instead of
// branching.
void
MacroAssemblerMIPSCompat::testObjectSet(Condition cond, const ValueOperand& value, Register dest)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_OBJECT), cond);
}

// Branch iff the value's tag is (or is not) the string tag.
void
MacroAssemblerMIPSCompat::branchTestString(Condition cond, const ValueOperand& value, Label* label)
{
    branchTestString(cond, value.typeReg(), label);
}

void
MacroAssemblerMIPSCompat::branchTestString(Condition cond, Register tag, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestString(Condition cond, const BaseIndex& src, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_STRING), label, cond);
}

// Branch iff the value's tag is (or is not) the symbol tag.
void
MacroAssemblerMIPSCompat::branchTestSymbol(Condition cond, const ValueOperand& value, Label* label)
{
    branchTestSymbol(cond, value.typeReg(), label);
}

void
MacroAssemblerMIPSCompat::branchTestSymbol(Condition cond, const Register& tag, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestSymbol(Condition cond, const BaseIndex& src, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
}
1544
// Branch iff the value's tag is (or is not) the undefined tag.
void
MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const ValueOperand& value,
                                              Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, Register tag, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const BaseIndex& src, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const Address& address, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
}

// Set |dest| to the boolean result of the undefined-tag comparison instead
// of branching.
void
MacroAssemblerMIPSCompat::testUndefinedSet(Condition cond, const ValueOperand& value, Register dest)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED), cond);
}

// Branch iff the value is (or is not) a number (int32 or double). Number
// tags occupy the low end of the tag space, so membership is an unsigned
// comparison against the inclusive upper bound.
void
MacroAssemblerMIPSCompat::branchTestNumber(Condition cond, const ValueOperand& value, Label* label)
{
    branchTestNumber(cond, value.typeReg(), label);
}

void
MacroAssemblerMIPSCompat::branchTestNumber(Condition cond, Register tag, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET), label,
         cond == Equal ? BelowOrEqual : Above);
}

// Branch iff the value's tag is (or is not) the magic tag.
void
MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const ValueOperand& value, Label* label)
{
    branchTestMagic(cond, value.typeReg(), label);
}

void
MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, Register tag, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const Address& address, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_MAGIC), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const BaseIndex& src, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_MAGIC), label, cond);
}
1625
// Branch iff |value| equals (or does not equal) the constant Value |v|.
// Both the payload and the type word must match for equality; either word
// differing suffices for inequality.
void
MacroAssemblerMIPSCompat::branchTestValue(Condition cond, const ValueOperand& value,
                                          const Value& v, Label* label)
{
    // Materialize v's payload word in ScratchRegister.
    moveData(v, ScratchRegister);

    if (cond == Equal) {
        // Jump to |label| only if payload AND type both match.
        Label done;
        ma_b(value.payloadReg(), ScratchRegister, &done, NotEqual, ShortJump);
        {
            ma_b(value.typeReg(), Imm32(getType(v)), label, Equal);
        }
        bind(&done);
    } else {
        MOZ_ASSERT(cond == NotEqual);
        // Jump to |label| if either the payload or the type differs.
        ma_b(value.payloadReg(), ScratchRegister, label, NotEqual);

        ma_b(value.typeReg(), Imm32(getType(v)), label, NotEqual);
    }
}

// Compare the in-memory value at |valaddr| against the register pair
// |value|, branching per-word. NOTE(review): for cond == Equal this
// branches to |label| if EITHER word matches rather than requiring both;
// callers appear to use the NotEqual form — confirm before relying on the
// Equal path.
void
MacroAssemblerMIPSCompat::branchTestValue(Condition cond, const Address& valaddr,
                                          const ValueOperand& value, Label* label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);

    // Load tag.
    ma_lw(ScratchRegister, Address(valaddr.base, valaddr.offset + TAG_OFFSET));
    branchPtr(cond, ScratchRegister, value.typeReg(), label);

    // Load payload
    ma_lw(ScratchRegister, Address(valaddr.base, valaddr.offset + PAYLOAD_OFFSET));
    branchPtr(cond, ScratchRegister, value.payloadReg(), label);
}
1661
// unboxing code
//
// In nunbox32, every non-double value keeps its useful data entirely in the
// 32-bit payload word, so unboxing is just a register move (ValueOperand
// source) or a single word load of the payload (memory source).

void
MacroAssemblerMIPSCompat::unboxNonDouble(const ValueOperand& operand, Register dest)
{
    if (operand.payloadReg() != dest)
        ma_move(dest, operand.payloadReg());
}

void
MacroAssemblerMIPSCompat::unboxNonDouble(const Address& src, Register dest)
{
    ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

void
MacroAssemblerMIPSCompat::unboxNonDouble(const BaseIndex& src, Register dest)
{
    computeScaledAddress(src, SecondScratchReg);
    ma_lw(dest, Address(SecondScratchReg, src.offset + PAYLOAD_OFFSET));
}

void
MacroAssemblerMIPSCompat::unboxInt32(const ValueOperand& operand, Register dest)
{
    ma_move(dest, operand.payloadReg());
}

void
MacroAssemblerMIPSCompat::unboxInt32(const Address& src, Register dest)
{
    ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

void
MacroAssemblerMIPSCompat::unboxBoolean(const ValueOperand& operand, Register dest)
{
    ma_move(dest, operand.payloadReg());
}

void
MacroAssemblerMIPSCompat::unboxBoolean(const Address& src, Register dest)
{
    ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

// Doubles span both words: payload holds the low half, type/tag the high.
void
MacroAssemblerMIPSCompat::unboxDouble(const ValueOperand& operand, FloatRegister dest)
{
    moveToDoubleLo(operand.payloadReg(), dest);
    moveToDoubleHi(operand.typeReg(), dest);
}

void
MacroAssemblerMIPSCompat::unboxDouble(const Address& src, FloatRegister dest)
{
    ma_lw(ScratchRegister, Address(src.base, src.offset + PAYLOAD_OFFSET));
    moveToDoubleLo(ScratchRegister, dest);
    ma_lw(ScratchRegister, Address(src.base, src.offset + TAG_OFFSET));
    moveToDoubleHi(ScratchRegister, dest);
}

void
MacroAssemblerMIPSCompat::unboxString(const ValueOperand& operand, Register dest)
{
    ma_move(dest, operand.payloadReg());
}

void
MacroAssemblerMIPSCompat::unboxString(const Address& src, Register dest)
{
    ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

void
MacroAssemblerMIPSCompat::unboxObject(const ValueOperand& src, Register dest)
{
    ma_move(dest, src.payloadReg());
}

void
MacroAssemblerMIPSCompat::unboxObject(const Address& src, Register dest)
{
    ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}
1746
// Unbox a value of statically-unknown type into |dest|. For a float
// destination, int32 payloads are converted to double at runtime and
// doubles are reassembled from both words; for a GPR destination the
// payload word is simply moved.
void
MacroAssemblerMIPSCompat::unboxValue(const ValueOperand& src, AnyRegister dest)
{
    if (dest.isFloat()) {
        Label notInt32, end;
        branchTestInt32(Assembler::NotEqual, src, &notInt32);
        convertInt32ToDouble(src.payloadReg(), dest.fpu());
        ma_b(&end, ShortJump);
        bind(&notInt32);
        unboxDouble(src, dest.fpu());
        bind(&end);
    } else if (src.payloadReg() != dest.gpr()) {
        ma_move(dest.gpr(), src.payloadReg());
    }
}
1762
1763 void
unboxPrivate(const ValueOperand & src,Register dest)1764 MacroAssemblerMIPSCompat::unboxPrivate(const ValueOperand& src, Register dest)
1765 {
1766 ma_move(dest, src.payloadReg());
1767 }
1768
1769 void
boxDouble(FloatRegister src,const ValueOperand & dest)1770 MacroAssemblerMIPSCompat::boxDouble(FloatRegister src, const ValueOperand& dest)
1771 {
1772 moveFromDoubleLo(src, dest.payloadReg());
1773 moveFromDoubleHi(src, dest.typeReg());
1774 }
1775
1776 void
boxNonDouble(JSValueType type,Register src,const ValueOperand & dest)1777 MacroAssemblerMIPSCompat::boxNonDouble(JSValueType type, Register src,
1778 const ValueOperand& dest)
1779 {
1780 if (src != dest.payloadReg())
1781 ma_move(dest.payloadReg(), src);
1782 ma_li(dest.typeReg(), ImmType(type));
1783 }
1784
1785 void
boolValueToDouble(const ValueOperand & operand,FloatRegister dest)1786 MacroAssemblerMIPSCompat::boolValueToDouble(const ValueOperand& operand, FloatRegister dest)
1787 {
1788 convertBoolToInt32(operand.payloadReg(), ScratchRegister);
1789 convertInt32ToDouble(ScratchRegister, dest);
1790 }
1791
void
MacroAssemblerMIPSCompat::int32ValueToDouble(const ValueOperand& operand,
                                             FloatRegister dest)
{
    // The int32 lives in the payload register; convert it directly.
    convertInt32ToDouble(operand.payloadReg(), dest);
}
1798
void
MacroAssemblerMIPSCompat::boolValueToFloat32(const ValueOperand& operand,
                                             FloatRegister dest)
{
    // Normalize the boolean payload to 0/1 in a scratch GPR, then convert.
    convertBoolToInt32(operand.payloadReg(), ScratchRegister);
    convertInt32ToFloat32(ScratchRegister, dest);
}
1807
void
MacroAssemblerMIPSCompat::int32ValueToFloat32(const ValueOperand& operand,
                                              FloatRegister dest)
{
    // The int32 lives in the payload register; convert it directly.
    convertInt32ToFloat32(operand.payloadReg(), dest);
}
1814
void
MacroAssemblerMIPSCompat::loadConstantFloat32(float f, FloatRegister dest)
{
    // Materialize a float immediate into the FPU register.
    ma_lis(dest, f);
}
1820
1821 void
loadInt32OrDouble(const Address & src,FloatRegister dest)1822 MacroAssemblerMIPSCompat::loadInt32OrDouble(const Address& src, FloatRegister dest)
1823 {
1824 Label notInt32, end;
1825 // If it's an int, convert it to double.
1826 ma_lw(SecondScratchReg, Address(src.base, src.offset + TAG_OFFSET));
1827 branchTestInt32(Assembler::NotEqual, SecondScratchReg, ¬Int32);
1828 ma_lw(SecondScratchReg, Address(src.base, src.offset + PAYLOAD_OFFSET));
1829 convertInt32ToDouble(SecondScratchReg, dest);
1830 ma_b(&end, ShortJump);
1831
1832 // Not an int, just load as double.
1833 bind(¬Int32);
1834 ma_ld(dest, src);
1835 bind(&end);
1836 }
1837
1838 void
loadInt32OrDouble(Register base,Register index,FloatRegister dest,int32_t shift)1839 MacroAssemblerMIPSCompat::loadInt32OrDouble(Register base, Register index,
1840 FloatRegister dest, int32_t shift)
1841 {
1842 Label notInt32, end;
1843
1844 // If it's an int, convert it to double.
1845
1846 computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
1847 // Since we only have one scratch, we need to stomp over it with the tag.
1848 load32(Address(SecondScratchReg, TAG_OFFSET), SecondScratchReg);
1849 branchTestInt32(Assembler::NotEqual, SecondScratchReg, ¬Int32);
1850
1851 computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
1852 load32(Address(SecondScratchReg, PAYLOAD_OFFSET), SecondScratchReg);
1853 convertInt32ToDouble(SecondScratchReg, dest);
1854 ma_b(&end, ShortJump);
1855
1856 // Not an int, just load as double.
1857 bind(¬Int32);
1858 // First, recompute the offset that had been stored in the scratch register
1859 // since the scratch register was overwritten loading in the type.
1860 computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
1861 loadDouble(Address(SecondScratchReg, 0), dest);
1862 bind(&end);
1863 }
1864
void
MacroAssemblerMIPSCompat::loadConstantDouble(double dp, FloatRegister dest)
{
    // Materialize a double immediate into the FPU register.
    ma_lid(dest, dp);
}
1870
void
MacroAssemblerMIPSCompat::branchTestInt32Truthy(bool b, const ValueOperand& value, Label* label)
{
    // and reg,reg copies the payload into the scratch register; an int32 is
    // truthy iff it is non-zero.
    as_and(ScratchRegister, value.payloadReg(), value.payloadReg());
    ma_b(ScratchRegister, ScratchRegister, label, b ? NonZero : Zero);
}
1877
void
MacroAssemblerMIPSCompat::branchTestStringTruthy(bool b, const ValueOperand& value, Label* label)
{
    // A string is truthy iff its length is non-zero.
    Register string = value.payloadReg();
    ma_lw(SecondScratchReg, Address(string, JSString::offsetOfLength()));
    ma_b(SecondScratchReg, Imm32(0), label, b ? NotEqual : Equal);
}
1885
void
MacroAssemblerMIPSCompat::branchTestDoubleTruthy(bool b, FloatRegister value, Label* label)
{
    // Compare against 0.0. NaN is falsy, hence EqualOrUnordered on the
    // "not truthy" side.
    ma_lid(ScratchDoubleReg, 0.0);
    DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
    ma_bc1d(value, ScratchDoubleReg, label, cond);
}
1893
void
MacroAssemblerMIPSCompat::branchTestBooleanTruthy(bool b, const ValueOperand& operand,
                                                  Label* label)
{
    // Boolean payload is 0 or 1; branch on it directly.
    ma_b(operand.payloadReg(), operand.payloadReg(), label, b ? NonZero : Zero);
}
1900
void
MacroAssemblerMIPSCompat::branchTest64(Condition cond, Register64 lhs, Register64 rhs,
                                       Register temp, Label* label)
{
    // Only the self-test form (lhs == rhs, cond == Zero) is supported: OR the
    // two 32-bit halves together and branch if the whole 64-bit value is zero.
    if (cond == Assembler::Zero) {
        MOZ_ASSERT(lhs.low == rhs.low);
        MOZ_ASSERT(lhs.high == rhs.high);
        as_or(ScratchRegister, lhs.low, lhs.high);
        branchTestPtr(cond, ScratchRegister, ScratchRegister, label);
    } else {
        MOZ_CRASH("Unsupported condition");
    }
}
1914
Register
MacroAssemblerMIPSCompat::extractObject(const Address& address, Register scratch)
{
    // Load the payload word (the JSObject pointer) into scratch and hand it
    // back so callers can use the result in-place.
    ma_lw(scratch, Address(address.base, address.offset + PAYLOAD_OFFSET));
    return scratch;
}
1921
Register
MacroAssemblerMIPSCompat::extractTag(const Address& address, Register scratch)
{
    // Load the tag word of the boxed Value into scratch and return it.
    ma_lw(scratch, Address(address.base, address.offset + TAG_OFFSET));
    return scratch;
}
1928
Register
MacroAssemblerMIPSCompat::extractTag(const BaseIndex& address, Register scratch)
{
    // Resolve base+scaled-index into scratch, then reuse the Address form.
    computeScaledAddress(address, scratch);
    return extractTag(Address(scratch, address.offset), scratch);
}
1935
1936
1937 uint32_t
getType(const Value & val)1938 MacroAssemblerMIPSCompat::getType(const Value& val)
1939 {
1940 jsval_layout jv = JSVAL_TO_IMPL(val);
1941 return jv.s.tag;
1942 }
1943
// Store an unboxed value (constant or register) of type |valueType| into a
// slot that holds values of type |slotType|. Doubles overwrite both words;
// otherwise the tag word is rewritten only when the types differ, and the
// payload word is always written.
template <typename T>
void
MacroAssemblerMIPSCompat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest,
                                            MIRType slotType)
{
    if (valueType == MIRType_Double) {
        storeDouble(value.reg().typedReg().fpu(), dest);
        return;
    }

    // Store the type tag if needed.
    if (valueType != slotType)
        storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);

    // Store the payload.
    if (value.constant())
        storePayload(value.value(), dest);
    else
        storePayload(value.reg().typedReg().gpr(), dest);
}

// Explicit instantiations for the two addressing modes used by callers.
template void
MacroAssemblerMIPSCompat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const Address& dest,
                                            MIRType slotType);

template void
MacroAssemblerMIPSCompat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const BaseIndex& dest,
                                            MIRType slotType);
1972
void
MacroAssemblerMIPSCompat::moveData(const Value& val, Register data)
{
    // Materialize the payload word of a constant Value. GC things must go
    // through ImmGCPtr so the GC can trace/patch the embedded pointer.
    jsval_layout jv = JSVAL_TO_IMPL(val);
    if (val.isMarkable())
        ma_li(data, ImmGCPtr(reinterpret_cast<gc::Cell*>(val.toGCThing())));
    else
        ma_li(data, Imm32(jv.s.payload.i32));
}
1982
void
MacroAssemblerMIPSCompat::moveValue(const Value& val, Register type, Register data)
{
    // Materialize a constant Value as a tag/payload register pair.
    MOZ_ASSERT(type != data);
    ma_li(type, Imm32(getType(val)));
    moveData(val, data);
}
void
MacroAssemblerMIPSCompat::moveValue(const Value& val, const ValueOperand& dest)
{
    // Forward to the two-register form.
    moveValue(val, dest.typeReg(), dest.payloadReg());
}
1995
/* There are 3 paths through backedge jump. They are listed here in the order
1997 * in which instructions are executed.
1998 * - The short jump is simple:
1999 * b offset # Jumps directly to target.
2000 * lui at, addr1_hi # In delay slot. Don't care about 'at' here.
2001 *
2002 * - The long jump to loop header:
2003 * b label1
2004 * lui at, addr1_hi # In delay slot. We use the value in 'at' later.
2005 * label1:
2006 * ori at, addr1_lo
2007 * jr at
2008 * lui at, addr2_hi # In delay slot. Don't care about 'at' here.
2009 *
2010 * - The long jump to interrupt loop:
2011 * b label2
2012 * lui at, addr1_hi # In delay slot. Don't care about 'at' here.
2013 * label2:
2014 * lui at, addr2_hi
2015 * ori at, addr2_lo
2016 * jr at
2017 * nop # In delay slot.
2018 *
2019 * The backedge is done this way to avoid patching lui+ori pair while it is
2020 * being executed. Look also at jit::PatchBackedge().
2021 */
CodeOffsetJump
MacroAssemblerMIPSCompat::backedgeJump(RepatchLabel* label, Label* documentation)
{
    // Emit the 8-instruction backedge stub described in the comment above.
    // The exact instruction offsets are load-bearing: PatchBackedge() relies
    // on them to retarget the jump without racing running code.

    // Only one branch per label.
    MOZ_ASSERT(!label->used());
    uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
    BufferOffset bo = nextOffset();
    label->use(bo.getOffset());

    // Backedges are short jumps when bound, but can become long when patched.
    m_buffer.ensureSpace(8 * sizeof(uint32_t));
    if (label->bound()) {
        int32_t offset = label->offset() - bo.getOffset();
        MOZ_ASSERT(BOffImm16::IsInRange(offset));
        as_b(BOffImm16(offset));
    } else {
        // Jump to "label1" by default to jump to the loop header.
        as_b(BOffImm16(2 * sizeof(uint32_t)));
    }
    // No need for nop here. We can safely put next instruction in delay slot.
    ma_liPatchable(ScratchRegister, Imm32(dest));
    MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 3 * sizeof(uint32_t));
    as_jr(ScratchRegister);
    // No need for nop here. We can safely put next instruction in delay slot.
    ma_liPatchable(ScratchRegister, Imm32(dest));
    as_jr(ScratchRegister);
    as_nop();
    MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 8 * sizeof(uint32_t));
    return CodeOffsetJump(bo.getOffset());
}
2052
CodeOffsetJump
MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel* label, Label* documentation)
{
    // Emit a patchable absolute jump: lui/ori to load the target, jr, and a
    // delay-slot nop. The lui/ori pair is rewritten when the label is bound.

    // Only one branch per label.
    MOZ_ASSERT(!label->used());
    uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;

    BufferOffset bo = nextOffset();
    label->use(bo.getOffset());
    addLongJump(bo);
    ma_liPatchable(ScratchRegister, Imm32(dest));
    as_jr(ScratchRegister);
    as_nop();
    return CodeOffsetJump(bo.getOffset());
}
2068
2069 /////////////////////////////////////////////////////////////////
2070 // X86/X64-common/ARM/MIPS interface.
2071 /////////////////////////////////////////////////////////////////
2072 void
storeValue(ValueOperand val,Operand dst)2073 MacroAssemblerMIPSCompat::storeValue(ValueOperand val, Operand dst)
2074 {
2075 storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
2076 }
2077
void
MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const BaseIndex& dest)
{
    // Resolve base+scaled-index first, then store via the Address overload.
    computeScaledAddress(dest, SecondScratchReg);
    storeValue(val, Address(SecondScratchReg, dest.offset));
}
2084
void
MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, BaseIndex dest)
{
    computeScaledAddress(dest, ScratchRegister);

    // Make sure that ma_sw doesn't clobber ScratchRegister: if the offset
    // doesn't fit in a signed 16-bit immediate, fold it into the base address
    // up front using the second scratch register.
    int32_t offset = dest.offset;
    if (!Imm16::IsInSignedRange(offset)) {
        ma_li(SecondScratchReg, Imm32(offset));
        as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
        offset = 0;
    }

    storeValue(type, reg, Address(ScratchRegister, offset));
}
2100
void
MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const Address& dest)
{
    // Store both halves of the boxed Value: payload word then tag word.
    ma_sw(val.payloadReg(), Address(dest.base, dest.offset + PAYLOAD_OFFSET));
    ma_sw(val.typeReg(), Address(dest.base, dest.offset + TAG_OFFSET));
}
2107
void
MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, Address dest)
{
    // SecondScratchReg is used for the tag, so the base must not alias it.
    MOZ_ASSERT(dest.base != SecondScratchReg);

    ma_sw(reg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
    ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
}
2117
void
MacroAssemblerMIPSCompat::storeValue(const Value& val, Address dest)
{
    // SecondScratchReg carries first the tag, then the payload, so the base
    // must not alias it.
    MOZ_ASSERT(dest.base != SecondScratchReg);

    ma_li(SecondScratchReg, Imm32(getType(val)));
    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
    moveData(val, SecondScratchReg);
    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
}
2128
void
MacroAssemblerMIPSCompat::storeValue(const Value& val, BaseIndex dest)
{
    computeScaledAddress(dest, ScratchRegister);

    // Make sure that ma_sw doesn't clobber ScratchRegister: fold an
    // out-of-range offset into the base address up front.
    int32_t offset = dest.offset;
    if (!Imm16::IsInSignedRange(offset)) {
        ma_li(SecondScratchReg, Imm32(offset));
        as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
        offset = 0;
    }
    storeValue(val, Address(ScratchRegister, offset));
}
2143
void
MacroAssemblerMIPSCompat::loadValue(const BaseIndex& addr, ValueOperand val)
{
    // Resolve base+scaled-index first, then load via the Address overload.
    computeScaledAddress(addr, SecondScratchReg);
    loadValue(Address(SecondScratchReg, addr.offset), val);
}
2150
void
MacroAssemblerMIPSCompat::loadValue(Address src, ValueOperand val)
{
    // Ensure that loading the payload does not erase the pointer to the
    // Value in memory: if the base register is the payload register, load
    // the tag first so the base survives until the second load.
    if (src.base != val.payloadReg()) {
        ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
        ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
    } else {
        ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
        ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
    }
}
2164
void
MacroAssemblerMIPSCompat::tagValue(JSValueType type, Register payload, ValueOperand dest)
{
    // Build a boxed Value from a raw payload register plus a type tag.
    // Writing the tag first would clobber the payload if they alias.
    MOZ_ASSERT(payload != dest.typeReg());
    ma_li(dest.typeReg(), ImmType(type));
    if (payload != dest.payloadReg())
        ma_move(dest.payloadReg(), payload);
}
2173
void
MacroAssemblerMIPSCompat::pushValue(ValueOperand val)
{
    // Allocate stack slots for type and payload. One for each.
    subPtr(Imm32(sizeof(Value)), StackPointer);
    // Store type and payload.
    storeValue(val, Address(StackPointer, 0));
}
2182
void
MacroAssemblerMIPSCompat::pushValue(const Address& addr)
{
    // Push a Value straight from memory, bouncing each word through the
    // scratch register.
    // Allocate stack slots for type and payload. One for each.
    ma_subu(StackPointer, StackPointer, Imm32(sizeof(Value)));
    // Store type and payload.
    ma_lw(ScratchRegister, Address(addr.base, addr.offset + TAG_OFFSET));
    ma_sw(ScratchRegister, Address(StackPointer, TAG_OFFSET));
    ma_lw(ScratchRegister, Address(addr.base, addr.offset + PAYLOAD_OFFSET));
    ma_sw(ScratchRegister, Address(StackPointer, PAYLOAD_OFFSET));
}
2194
void
MacroAssemblerMIPSCompat::popValue(ValueOperand val)
{
    // Load payload and type.
    as_lw(val.payloadReg(), StackPointer, PAYLOAD_OFFSET);
    as_lw(val.typeReg(), StackPointer, TAG_OFFSET);
    // Free stack.
    as_addiu(StackPointer, StackPointer, sizeof(Value));
}
2204
void
MacroAssemblerMIPSCompat::storePayload(const Value& val, Address dest)
{
    // Materialize the constant's payload word in scratch, then store it.
    moveData(val, SecondScratchReg);
    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
}
2211
2212 void
storePayload(Register src,Address dest)2213 MacroAssemblerMIPSCompat::storePayload(Register src, Address dest)
2214 {
2215 ma_sw(src, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
2216 return;
2217 }
2218
2219 void
storePayload(const Value & val,const BaseIndex & dest)2220 MacroAssemblerMIPSCompat::storePayload(const Value& val, const BaseIndex& dest)
2221 {
2222 MOZ_ASSERT(dest.offset == 0);
2223
2224 computeScaledAddress(dest, SecondScratchReg);
2225
2226 moveData(val, ScratchRegister);
2227
2228 as_sw(ScratchRegister, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
2229 }
2230
2231 void
storePayload(Register src,const BaseIndex & dest)2232 MacroAssemblerMIPSCompat::storePayload(Register src, const BaseIndex& dest)
2233 {
2234 MOZ_ASSERT(dest.offset == 0);
2235
2236 computeScaledAddress(dest, SecondScratchReg);
2237 as_sw(src, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
2238 }
2239
void
MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Address dest)
{
    // Materialize the tag in scratch and store it into the tag word.
    ma_li(SecondScratchReg, tag);
    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
}
2246
void
MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, const BaseIndex& dest)
{
    // BaseIndex variant; the scaled address lands in SecondScratchReg while
    // the tag itself goes through ScratchRegister.
    MOZ_ASSERT(dest.offset == 0);

    computeScaledAddress(dest, SecondScratchReg);
    ma_li(ScratchRegister, tag);
    as_sw(ScratchRegister, SecondScratchReg, TAG_OFFSET);
}
2256
void
MacroAssemblerMIPSCompat::breakpoint()
{
    // Emit a MIPS break instruction (code 0) to trap execution.
    as_break(0);
}
2262
void
MacroAssemblerMIPSCompat::ensureDouble(const ValueOperand& source, FloatRegister dest)
{
}
void
MacroAssemblerMIPSCompat::ensureDouble(const ValueOperand& source, FloatRegister dest,
                                       Label* failure)
{
    // Coerce |source| into a double in |dest|: doubles are unboxed directly,
    // int32s are converted, anything else branches to |failure|.
    Label isDouble, done;
    branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
    branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);

    convertInt32ToDouble(source.payloadReg(), dest);
    jump(&done);

    bind(&isDouble);
    unboxDouble(source, dest);

    bind(&done);
}
2279
void
MacroAssemblerMIPSCompat::checkStackAlignment()
{
#ifdef DEBUG
    // Debug-only runtime check: trap with BREAK_STACK_UNALIGNED if sp is not
    // ABI-aligned.
    Label aligned;
    as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
    ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
    as_break(BREAK_STACK_UNALIGNED);
    bind(&aligned);
#endif
}
2291
void
MacroAssemblerMIPSCompat::alignStackPointer()
{
    // Align sp down to the ABI boundary, saving the original sp in the slot
    // reserved just below so restoreStackPointer() can undo the alignment.
    movePtr(StackPointer, SecondScratchReg);
    subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
    asMasm().andPtr(Imm32(~(ABIStackAlignment - 1)), StackPointer);
    storePtr(SecondScratchReg, Address(StackPointer, 0));
}
2300
void
MacroAssemblerMIPSCompat::restoreStackPointer()
{
    // Undo alignStackPointer(): the pre-alignment sp was stored at [sp].
    loadPtr(Address(StackPointer, 0), StackPointer);
}
2306
void
MacroAssembler::alignFrameForICArguments(AfterICSaveLive& aic)
{
    // Pad the frame so framePushed() is ABI-aligned before an IC call; the
    // padding amount is remembered in |aic| for the matching restore.
    if (framePushed() % ABIStackAlignment != 0) {
        aic.alignmentPadding = ABIStackAlignment - (framePushed() % ABIStackAlignment);
        reserveStack(aic.alignmentPadding);
    } else {
        aic.alignmentPadding = 0;
    }
    MOZ_ASSERT(framePushed() % ABIStackAlignment == 0);
    checkStackAlignment();
}
2319
void
MacroAssembler::restoreFrameAlignmentForICArguments(AfterICSaveLive& aic)
{
    // Release the padding added by alignFrameForICArguments(), if any.
    if (aic.alignmentPadding != 0)
        freeStack(aic.alignmentPadding);
}
2326
// Exception-handling tail: calls the C++ |handler| with a stack-allocated
// ResumeFromException, then dispatches on the resume kind it fills in
// (return from entry frame, catch, finally, forced return, or bailout).
void
MacroAssemblerMIPSCompat::handleFailureWithHandlerTail(void* handler)
{
    // Reserve space for exception information.
    int size = (sizeof(ResumeFromException) + ABIStackAlignment) & ~(ABIStackAlignment - 1);
    subPtr(Imm32(size), StackPointer);
    ma_move(a0, StackPointer); // Use a0 since it is a first function argument

    // Call the handler.
    asMasm().setupUnalignedABICall(a1);
    asMasm().passABIArg(a0);
    asMasm().callWithABI(handler);

    Label entryFrame;
    Label catch_;
    Label finally;
    Label return_;
    Label bailout;

    // Already clobbered a0, so use it...
    load32(Address(StackPointer, offsetof(ResumeFromException, kind)), a0);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);

    breakpoint(); // Invalid kind.

    // No exception handler. Load the error value, load the new stack pointer
    // and return from the entry frame.
    bind(&entryFrame);
    moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
    loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);

    // We're going to be returning by the ion calling convention
    ma_pop(ra);
    as_jr(ra);
    as_nop();

    // If we found a catch handler, this must be a baseline frame. Restore
    // state and jump to the catch block.
    bind(&catch_);
    loadPtr(Address(StackPointer, offsetof(ResumeFromException, target)), a0);
    loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
    loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
    jump(a0);

    // If we found a finally block, this must be a baseline frame. Push
    // two values expected by JSOP_RETSUB: BooleanValue(true) and the
    // exception.
    bind(&finally);
    ValueOperand exception = ValueOperand(a1, a2);
    loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);

    loadPtr(Address(sp, offsetof(ResumeFromException, target)), a0);
    loadPtr(Address(sp, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
    loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);

    pushValue(BooleanValue(true));
    pushValue(exception);
    jump(a0);

    // Only used in debug mode. Return BaselineFrame->returnValue() to the
    // caller.
    bind(&return_);
    loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
    loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
    loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
              JSReturnOperand);
    ma_move(StackPointer, BaselineFrameReg);
    pop(BaselineFrameReg);

    // If profiling is enabled, then update the lastProfilingFrame to refer to caller
    // frame before returning.
    {
        Label skipProfilingInstrumentation;
        // Test if profiler enabled.
        AbsoluteAddress addressOfEnabled(GetJitContext()->runtime->spsProfiler().addressOfEnabled());
        branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
        profilerExitFrame();
        bind(&skipProfilingInstrumentation);
    }

    ret();

    // If we are bailing out to baseline to handle an exception, jump to
    // the bailout tail stub.
    bind(&bailout);
    loadPtr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), a2);
    ma_li(ReturnReg, Imm32(BAILOUT_RETURN_OK));
    loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
    jump(a1);
}
2421
// Dispatch a compare-and-exchange on a typed-array element by element type,
// selecting the properly sized/extended primitive. Uint32 results are
// converted to double in the float output register (see bug 1077305).
template<typename T>
void
MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
                                                         Register oldval, Register newval,
                                                         Register temp, Register valueTemp,
                                                         Register offsetTemp, Register maskTemp,
                                                         AnyRegister output)
{
    switch (arrayType) {
      case Scalar::Int8:
        compareExchange8SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Uint8:
        compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Int16:
        compareExchange16SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Uint16:
        compareExchange16ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Int32:
        compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays. See bug 1077305.
        MOZ_ASSERT(output.isFloat());
        compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, temp);
        convertUInt32ToDouble(temp, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}

// Explicit instantiations for the two addressing modes used by callers.
template void
MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
                                                         Register oldval, Register newval, Register temp,
                                                         Register valueTemp, Register offsetTemp, Register maskTemp,
                                                         AnyRegister output);
template void
MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                                         Register oldval, Register newval, Register temp,
                                                         Register valueTemp, Register offsetTemp, Register maskTemp,
                                                         AnyRegister output);
2468
// Dispatch an atomic exchange on a typed-array element by element type,
// selecting the properly sized/extended primitive. Uint32 results are
// converted to double in the float output register (see bug 1077305).
template<typename T>
void
MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
                                                        Register value, Register temp, Register valueTemp,
                                                        Register offsetTemp, Register maskTemp,
                                                        AnyRegister output)
{
    switch (arrayType) {
      case Scalar::Int8:
        atomicExchange8SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Uint8:
        atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Int16:
        atomicExchange16SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Uint16:
        atomicExchange16ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Int32:
        atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays. See bug 1077305.
        MOZ_ASSERT(output.isFloat());
        atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, temp);
        convertUInt32ToDouble(temp, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}

// Explicit instantiations for the two addressing modes used by callers.
template void
MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
                                                        Register value, Register temp, Register valueTemp,
                                                        Register offsetTemp, Register maskTemp,
                                                        AnyRegister output);
template void
MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                                        Register value, Register temp, Register valueTemp,
                                                        Register offsetTemp, Register maskTemp,
                                                        AnyRegister output);
2514
CodeOffset
MacroAssemblerMIPSCompat::toggledJump(Label* label)
{
    // Emit an unconditional branch and return its offset so the jump can be
    // toggled (patched) later.
    CodeOffset ret(nextOffset().getOffset());
    ma_b(label);
    return ret;
}
2522
CodeOffset
MacroAssemblerMIPSCompat::toggledCall(JitCode* target, bool enabled)
{
    // Emit a fixed-size patchable call site: lui/ori loading the target,
    // followed by either jalr+nop (enabled) or nop+nop (disabled). The total
    // size must equal ToggledCallSize() so toggling can rewrite in place.
    BufferOffset bo = nextOffset();
    CodeOffset offset(bo.getOffset());
    addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
    ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
    if (enabled) {
        as_jalr(ScratchRegister);
        as_nop();
    } else {
        as_nop();
        as_nop();
    }
    MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() == ToggledCallSize(nullptr));
    return offset;
}
2540
void
MacroAssemblerMIPSCompat::branchPtrInNurseryRange(Condition cond, Register ptr, Register temp,
                                                  Label* label)
{
    // Branch if |ptr| is (Equal) / is not (NotEqual) inside the nursery.
    // Computing ptr - start and comparing unsigned against the nursery size
    // performs the range check with a single comparison.
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    MOZ_ASSERT(ptr != temp);
    MOZ_ASSERT(ptr != SecondScratchReg);

    const Nursery& nursery = GetJitContext()->runtime->gcNursery();
    movePtr(ImmWord(-ptrdiff_t(nursery.start())), SecondScratchReg);
    addPtr(ptr, SecondScratchReg);
    branchPtr(cond == Assembler::Equal ? Assembler::Below : Assembler::AboveOrEqual,
              SecondScratchReg, Imm32(nursery.nurserySize()), label);
}
2555
void
MacroAssemblerMIPSCompat::branchValueIsNurseryObject(Condition cond, ValueOperand value,
                                                     Register temp, Label* label)
{
    // Branch if |value| is (Equal) / is not (NotEqual) an object allocated in
    // the nursery. Non-objects short-circuit: for Equal they can never match
    // (fall through to done); for NotEqual they match immediately.
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

    Label done;

    branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ? &done : label);
    branchPtrInNurseryRange(cond, value.payloadReg(), temp, label);

    bind(&done);
}
2569
void
MacroAssemblerMIPSCompat::profilerEnterFrame(Register framePtr, Register scratch)
{
    // Record |framePtr| as the activation's last profiling frame and clear
    // the last profiling call site.
    AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
    loadPtr(activation, scratch);
    storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
    storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
}
2578
void
MacroAssemblerMIPSCompat::profilerExitFrame()
{
    // Jump to the shared profiler exit-frame tail stub.
    branch(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
}
2584
2585 //{{{ check_macroassembler_style
2586 // ===============================================================
2587 // Stack manipulation functions.
2588
void
MacroAssembler::PushRegsInMask(LiveRegisterSet set)
{
    // Spill every register in |set|: GPRs first, then doubles into an
    // 8-byte-aligned area below them.
    int32_t diffF = set.fpus().getPushSizeInBytes();
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);

    reserveStack(diffG);
    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
        diffG -= sizeof(intptr_t);
        storePtr(*iter, Address(StackPointer, diffG));
    }
    MOZ_ASSERT(diffG == 0);

    // Double values have to be aligned. We reserve extra space so that we can
    // start writing from the first aligned location.
    // We reserve a whole extra double so that the buffer has even size.
    ma_and(SecondScratchReg, sp, Imm32(~(ABIStackAlignment - 1)));
    reserveStack(diffF + sizeof(double));

    for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
        // Only even-numbered FPU registers hold full doubles on MIPS32.
        if ((*iter).code() % 2 == 0)
            as_sd(*iter, SecondScratchReg, -diffF);
        diffF -= sizeof(double);
    }
    MOZ_ASSERT(diffF == 0);
}
2615
void
MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
{
    // Inverse of PushRegsInMask: reload every register in |set| except those
    // in |ignore|, then free the reserved stack. The stack layout (sizes,
    // alignment, iteration order) must mirror PushRegsInMask exactly; even
    // ignored registers have their slots skipped, not removed.
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);
    int32_t diffF = set.fpus().getPushSizeInBytes();
    const int32_t reservedG = diffG;
    const int32_t reservedF = diffF;

    // Read the buffer from the first aligned location: recompute the same
    // aligned base PushRegsInMask used, i.e. the pre-reservation sp rounded
    // down to ABI alignment.
    ma_addu(SecondScratchReg, sp, Imm32(reservedF + sizeof(double)));
    ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(ABIStackAlignment - 1)));

    for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
        if (!ignore.has(*iter) && ((*iter).code() % 2 == 0))
            // Use assembly l.d because we have aligned the stack.
            as_ld(*iter, SecondScratchReg, -diffF);
        diffF -= sizeof(double);
    }
    // Free the FP area including the extra alignment double reserved on push.
    freeStack(reservedF + sizeof(double));
    MOZ_ASSERT(diffF == 0);

    // GPRs: same countdown scheme as the push; ignored registers keep their
    // current value but still consume a slot.
    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
        diffG -= sizeof(intptr_t);
        if (!ignore.has(*iter))
            loadPtr(Address(StackPointer, diffG), *iter);
    }
    freeStack(reservedG);
    MOZ_ASSERT(diffG == 0);
}
2645
void
MacroAssembler::reserveStack(uint32_t amount)
{
    // Grow the stack by |amount| bytes. The sp adjustment is skipped for a
    // zero amount, but adjustFrame is always called so the tracked frame
    // size stays consistent with the matching freeStack/adjustFrame calls.
    if (amount)
        subPtr(Imm32(amount), StackPointer);
    adjustFrame(amount);
}
2653
2654 // ===============================================================
2655 // ABI function calls.
2656
void
MacroAssembler::setupUnalignedABICall(Register scratch)
{
    // Prepare an ABI call from a context whose stack alignment is unknown:
    // save the current sp into |scratch|, align sp dynamically, and stash the
    // old sp on the stack so callWithABIPost can restore it.
    setupABICall();
    dynamicAlignment_ = true;

    ma_move(scratch, StackPointer);

    // Force sp to be aligned: make room for the saved sp word, then round
    // sp down to ABIStackAlignment and store the original value at [sp].
    subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
    ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
    storePtr(scratch, Address(StackPointer, 0));
}
2670
void
MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
{
    // Finish ABI-call setup: compute and reserve the outgoing stack area
    // (argument slots + a save slot for $ra, padded to ABIStackAlignment),
    // then emit the moves that place each argument in its ABI location.
    // The total reservation is returned through |stackAdjust| so that
    // callWithABIPost can undo it.
    MOZ_ASSERT(inCall_);
    uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();

    // Reserve place for $ra.
    stackForCall += sizeof(intptr_t);

    if (dynamicAlignment_) {
        // sp was already aligned in setupUnalignedABICall, so only the
        // reservation itself needs padding.
        stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
    } else {
        // Account for everything already pushed since the last aligned
        // point, including the AsmJS frame header when called from asm.js.
        uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
        stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
                                             ABIStackAlignment);
    }

    *stackAdjust = stackForCall;
    reserveStack(stackForCall);

    // Save $ra because call is going to clobber it. Restore it in
    // callWithABIPost. NOTE: This is needed for calls from SharedIC.
    // Maybe we can do this differently.
    storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));

    // Position all arguments.
    {
        enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
        if (!enoughMemory_)
            return;  // OOM: leave the assembler in its failed state.

        MoveEmitter emitter(*this);
        emitter.emit(moveResolver_);
        emitter.finish();
    }

    assertStackAlignment(ABIStackAlignment);
}
2709
void
MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
    // Tear down after an ABI call: reload the saved $ra, release the stack
    // reserved by callWithABIPre, and clear the in-call flag. |stackAdjust|
    // must be the value produced by callWithABIPre. |result| is unused on
    // this platform's teardown path.
    // Restore ra value (as stored in callWithABIPre()).
    loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);

    if (dynamicAlignment_) {
        // Restore sp value from stack (as stored in setupUnalignedABICall()).
        loadPtr(Address(StackPointer, stackAdjust), StackPointer);
        // Use adjustFrame instead of freeStack because we already restored sp.
        adjustFrame(-stackAdjust);
    } else {
        freeStack(stackAdjust);
    }

#ifdef DEBUG
    MOZ_ASSERT(inCall_);
    inCall_ = false;
#endif
}
2730
void
MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
{
    // Perform a full ABI call to the function whose address is in |fun|.
    // Load the callee in t9, no instruction between the lw and call
    // should clobber it. Note that we can't use fun.base because it may
    // be one of the IntArg registers clobbered before the call.
    // NOTE(review): t9 is presumably required because MIPS o32 PIC callees
    // expect their own address in $t9 — confirm against the platform ABI.
    ma_move(t9, fun);
    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(t9);
    callWithABIPost(stackAdjust, result);
}
2743
void
MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
{
    // Perform a full ABI call to the function pointer stored at |fun|.
    // Load the callee in t9, as above. The load happens before
    // callWithABIPre so the argument moves cannot clobber fun.base.
    loadPtr(Address(fun.base, fun.offset), t9);
    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(t9);
    callWithABIPost(stackAdjust, result);
}
2754
2755 //}}} check_macroassembler_style
2756