1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/mips32/MacroAssembler-mips32.h"
8
9 #include "mozilla/DebugOnly.h"
10 #include "mozilla/EndianUtils.h"
11 #include "mozilla/MathAlgorithms.h"
12
13 #include "jit/Bailouts.h"
14 #include "jit/BaselineFrame.h"
15 #include "jit/JitFrames.h"
16 #include "jit/MacroAssembler.h"
17 #include "jit/mips32/Simulator-mips32.h"
18 #include "jit/MoveEmitter.h"
19 #include "jit/SharedICRegisters.h"
20
21 #include "jit/MacroAssembler-inl.h"
22
23 using namespace js;
24 using namespace jit;
25
26 using mozilla::Abs;
27
28 static const int32_t PAYLOAD_OFFSET = NUNBOX32_PAYLOAD_OFFSET;
29 static const int32_t TAG_OFFSET = NUNBOX32_TYPE_OFFSET;
30
31 static_assert(sizeof(intptr_t) == 4, "Not 64-bit clean.");
32
// Normalize a C++ bool in `src` to a clean 0/1 int32 in `dest`.
void MacroAssemblerMIPSCompat::convertBoolToInt32(Register src, Register dest) {
  // Note that C++ bool is only 1 byte, so zero extend it to clear the
  // higher-order bits.
  ma_and(dest, src, Imm32(0xff));
}
38
// Convert a signed int32 in a GPR to a double: move to FPU, then cvt.d.w.
void MacroAssemblerMIPSCompat::convertInt32ToDouble(Register src,
                                                    FloatRegister dest) {
  as_mtc1(src, dest);
  as_cvtdw(dest, dest);
}
44
// Convert a signed int32 loaded from memory to a double. The word is loaded
// directly into the FPU register before converting in place.
void MacroAssemblerMIPSCompat::convertInt32ToDouble(const Address& src,
                                                    FloatRegister dest) {
  ma_ls(dest, src);
  as_cvtdw(dest, dest);
}
50
// Convert a signed int32 at base+index<<scale+offset to a double.
void MacroAssemblerMIPSCompat::convertInt32ToDouble(const BaseIndex& src,
                                                    FloatRegister dest) {
  // ScratchRegister <- base + (index << scale); the offset is folded into
  // the Address form below.
  computeScaledAddress(src, ScratchRegister);
  convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
}
56
// Convert an unsigned 32-bit integer to a double. Values without the sign
// bit set fit in a signed int32 and take the cheap conversion path; values
// in [2^31, 2^32) are assembled bit-by-bit into an IEEE-754 double.
void MacroAssemblerMIPSCompat::convertUInt32ToDouble(Register src,
                                                     FloatRegister dest) {
  Label positive, done;
  ma_b(src, src, &positive, NotSigned, ShortJump);

  // kExponentShift is the exponent field's shift within the HIGH word of the
  // double (hence the -32); kExponent is the biased exponent for 2^31.
  const uint32_t kExponentShift =
      mozilla::FloatingPoint<double>::kExponentShift - 32;
  const uint32_t kExponent =
      (31 + mozilla::FloatingPoint<double>::kExponentBias);

  // High word: top bits of src below the implicit leading 1, OR'ed with the
  // exponent field.
  ma_ext(SecondScratchReg, src, 31 - kExponentShift, kExponentShift);
  ma_li(ScratchRegister, Imm32(kExponent << kExponentShift));
  ma_or(SecondScratchReg, ScratchRegister);
  // Low word: the remaining low-order mantissa bits of src.
  ma_sll(ScratchRegister, src, Imm32(kExponentShift + 1));
  moveToDoubleHi(SecondScratchReg, dest);
  moveToDoubleLo(ScratchRegister, dest);

  ma_b(&done, ShortJump);

  bind(&positive);
  // Sign bit clear: src is a valid signed int32.
  convertInt32ToDouble(src, dest);

  bind(&done);
}
81
// Convert an unsigned 32-bit integer to a float32. For values with the sign
// bit set, first build the exact double (using the same bit assembly as
// convertUInt32ToDouble — note the double's exponent constants) and then
// round it down to float32, so only a single rounding step occurs.
void MacroAssemblerMIPSCompat::convertUInt32ToFloat32(Register src,
                                                      FloatRegister dest) {
  Label positive, done;
  ma_b(src, src, &positive, NotSigned, ShortJump);

  const uint32_t kExponentShift =
      mozilla::FloatingPoint<double>::kExponentShift - 32;
  const uint32_t kExponent =
      (31 + mozilla::FloatingPoint<double>::kExponentBias);

  // Assemble the exact double value of src in dest's double alias.
  ma_ext(SecondScratchReg, src, 31 - kExponentShift, kExponentShift);
  ma_li(ScratchRegister, Imm32(kExponent << kExponentShift));
  ma_or(SecondScratchReg, ScratchRegister);
  ma_sll(ScratchRegister, src, Imm32(kExponentShift + 1));
  FloatRegister destDouble = dest.asDouble();
  moveToDoubleHi(SecondScratchReg, destDouble);
  moveToDoubleLo(ScratchRegister, destDouble);

  // Single rounding: exact double -> float32.
  convertDoubleToFloat32(destDouble, dest);

  ma_b(&done, ShortJump);

  bind(&positive);
  // Sign bit clear: src is a valid signed int32.
  convertInt32ToFloat32(src, dest);

  bind(&done);
}
109
// Narrow a double to float32 (cvt.s.d).
void MacroAssemblerMIPSCompat::convertDoubleToFloat32(FloatRegister src,
                                                      FloatRegister dest) {
  as_cvtsd(dest, src);
}
114
// Checks whether a double is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerMIPSCompat::convertDoubleToInt32(FloatRegister src,
                                                    Register dest, Label* fail,
                                                    bool negativeZeroCheck) {
  if (negativeZeroCheck) {
    // -0.0 has raw bits hi=0x80000000 (== INT32_MIN), lo=0. XOR'ing the high
    // word with INT32_MIN and OR'ing in the low word yields 0 only for -0.0.
    moveFromDoubleHi(src, dest);
    moveFromDoubleLo(src, SecondScratchReg);
    ma_xor(dest, Imm32(INT32_MIN));
    ma_or(dest, SecondScratchReg);
    ma_b(dest, Imm32(0), fail, Assembler::Equal);
  }

  // Truncate the double to int32; if the result is inexact (FCSR cause I
  // bit set), fail.
  as_truncwd(ScratchFloat32Reg, src);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, dest);
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseI, 1);
  ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
}
136
// Checks whether a float32 is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerMIPSCompat::convertFloat32ToInt32(FloatRegister src,
                                                     Register dest, Label* fail,
                                                     bool negativeZeroCheck) {
  if (negativeZeroCheck) {
    // -0.0f's raw bit pattern is 0x80000000 == INT32_MIN; compare the raw
    // bits directly.
    moveFromFloat32(src, dest);
    ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
  }

  // Truncate to int32 and fail if FCSR reports the result was inexact.
  as_truncws(ScratchFloat32Reg, src);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, dest);
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseI, 1);
  ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
}
154
// Widen a float32 to double (cvt.d.s); always exact.
void MacroAssemblerMIPSCompat::convertFloat32ToDouble(FloatRegister src,
                                                      FloatRegister dest) {
  as_cvtds(dest, src);
}
159
// Convert a signed int32 in a GPR to a float32: move to FPU, then cvt.s.w.
void MacroAssemblerMIPSCompat::convertInt32ToFloat32(Register src,
                                                     FloatRegister dest) {
  as_mtc1(src, dest);
  as_cvtsw(dest, dest);
}
165
// Convert a signed int32 loaded from memory to a float32, in place.
void MacroAssemblerMIPSCompat::convertInt32ToFloat32(const Address& src,
                                                     FloatRegister dest) {
  ma_ls(dest, src);
  as_cvtsw(dest, dest);
}
171
// Load the (as yet unknown) address of `label` into `dest` as a patchable
// immediate; the CodeLabel records where to patch once the target is known.
void MacroAssemblerMIPS::ma_li(Register dest, CodeLabel* label) {
  BufferOffset bo = m_buffer.nextOffset();
  ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
  label->patchAt()->bind(bo.getOffset());
  label->setLinkMode(CodeLabel::MoveImmediate);
}
178
// On MIPS32 a word is 32 bits, so an ImmWord load is just an Imm32 load.
void MacroAssemblerMIPS::ma_li(Register dest, ImmWord imm) {
  ma_li(dest, Imm32(uint32_t(imm.value)));
}
182
// Patchable immediate load of a pointer; delegates to the ImmWord form.
void MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmPtr imm) {
  ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
}
186
// Patchable 32-bit immediate load (fixed-length lui/ori sequence so it can
// be patched later — delegates to the Imm32 overload).
void MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmWord imm) {
  ma_liPatchable(dest, Imm32(int32_t(imm.value)));
}
190
// Arithmetic-based ops.

// Add.
// rd = rs + rt, branching to `overflow` on signed 32-bit overflow.
// Sign trick: overflow is only possible when the operands have the same
// sign, and it occurred iff the result's sign differs from rs's.
template <typename L>
void MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs,
                                            Register rt, L overflow) {
  Label goodAddition;
  as_addu(rd, rs, rt);

  // rs ^ rt has its sign bit set (i.e. is negative) iff the operands'
  // signs differ — in that case overflow is impossible.
  as_xor(ScratchRegister, rs, rt);  // If different sign, no overflow
  ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan,
       ShortJump);

  // Same-sign operands: overflow iff the result's sign flipped w.r.t. rs.
  as_xor(ScratchRegister, rs, rd);
  ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);

  bind(&goodAddition);
}

template void MacroAssemblerMIPS::ma_addTestOverflow<Label*>(Register rd,
                                                             Register rs,
                                                             Register rt,
                                                             Label* overflow);
template void MacroAssemblerMIPS::ma_addTestOverflow<wasm::OldTrapDesc>(
    Register rd, Register rs, Register rt, wasm::OldTrapDesc overflow);
217
// rd = rs + imm, branching to `overflow` on signed 32-bit overflow.
// Fast path only when imm fits both as_addiu's signed 16-bit immediate and
// as_xori's unsigned 16-bit immediate; otherwise materialize imm and reuse
// the register-register form.
template <typename L>
void MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm,
                                            L overflow) {
  // Check for signed range because of as_addiu
  // Check for unsigned range because of as_xori
  if (Imm16::IsInSignedRange(imm.value) &&
      Imm16::IsInUnsignedRange(imm.value)) {
    Label goodAddition;
    as_addiu(rd, rs, imm.value);

    // If the signs of rs and imm differ, overflow is impossible.
    as_xori(ScratchRegister, rs, imm.value);
    ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan,
         ShortJump);

    // Same-sign operands: overflow iff the result's sign flipped w.r.t. rs.
    as_xor(ScratchRegister, rs, rd);
    ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);

    bind(&goodAddition);
  } else {
    ma_li(ScratchRegister, imm);
    ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
  }
}

template void MacroAssemblerMIPS::ma_addTestOverflow<Label*>(Register rd,
                                                             Register rs,
                                                             Imm32 imm,
                                                             Label* overflow);
template void MacroAssemblerMIPS::ma_addTestOverflow<wasm::OldTrapDesc>(
    Register rd, Register rs, Imm32 imm, wasm::OldTrapDesc overflow);
250
// Subtract.
// rd = rs - rt, branching to `overflow` on signed 32-bit overflow.
// Overflow is only possible when the operands have different signs, and it
// occurred iff the result's sign differs from rs's.
void MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs,
                                            Register rt, Label* overflow) {
  Label goodSubtraction;
  // NOTE(review): the original comment said "use second scratch", but only
  // ScratchRegister is used here; presumably ma_b against Imm32(0) does not
  // clobber it — confirm against ma_b's implementation.
  as_subu(rd, rs, rt);

  // rs ^ rt is non-negative iff the operands have the same sign — then no
  // overflow is possible.
  as_xor(ScratchRegister, rs, rt);  // If same sign, no overflow
  ma_b(ScratchRegister, Imm32(0), &goodSubtraction,
       Assembler::GreaterThanOrEqual, ShortJump);

  // Different-sign operands: overflow iff the result's sign differs from rs.
  as_xor(ScratchRegister, rs, rd);
  ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);

  bind(&goodSubtraction);
}
269
270 // Memory.
271
ma_load(Register dest,Address address,LoadStoreSize size,LoadStoreExtension extension)272 void MacroAssemblerMIPS::ma_load(Register dest, Address address,
273 LoadStoreSize size,
274 LoadStoreExtension extension) {
275 int16_t encodedOffset;
276 Register base;
277
278 if (isLoongson() && ZeroExtend != extension &&
279 !Imm16::IsInSignedRange(address.offset)) {
280 ma_li(ScratchRegister, Imm32(address.offset));
281 base = address.base;
282
283 switch (size) {
284 case SizeByte:
285 as_gslbx(dest, base, ScratchRegister, 0);
286 break;
287 case SizeHalfWord:
288 as_gslhx(dest, base, ScratchRegister, 0);
289 break;
290 case SizeWord:
291 as_gslwx(dest, base, ScratchRegister, 0);
292 break;
293 case SizeDouble:
294 as_gsldx(dest, base, ScratchRegister, 0);
295 break;
296 default:
297 MOZ_CRASH("Invalid argument for ma_load");
298 }
299 return;
300 }
301
302 if (!Imm16::IsInSignedRange(address.offset)) {
303 ma_li(ScratchRegister, Imm32(address.offset));
304 as_addu(ScratchRegister, address.base, ScratchRegister);
305 base = ScratchRegister;
306 encodedOffset = Imm16(0).encode();
307 } else {
308 encodedOffset = Imm16(address.offset).encode();
309 base = address.base;
310 }
311
312 switch (size) {
313 case SizeByte:
314 if (ZeroExtend == extension)
315 as_lbu(dest, base, encodedOffset);
316 else
317 as_lb(dest, base, encodedOffset);
318 break;
319 case SizeHalfWord:
320 if (ZeroExtend == extension)
321 as_lhu(dest, base, encodedOffset);
322 else
323 as_lh(dest, base, encodedOffset);
324 break;
325 case SizeWord:
326 as_lw(dest, base, encodedOffset);
327 break;
328 default:
329 MOZ_CRASH("Invalid argument for ma_load");
330 }
331 }
332
ma_store(Register data,Address address,LoadStoreSize size,LoadStoreExtension extension)333 void MacroAssemblerMIPS::ma_store(Register data, Address address,
334 LoadStoreSize size,
335 LoadStoreExtension extension) {
336 int16_t encodedOffset;
337 Register base;
338
339 if (isLoongson() && !Imm16::IsInSignedRange(address.offset)) {
340 ma_li(ScratchRegister, Imm32(address.offset));
341 base = address.base;
342
343 switch (size) {
344 case SizeByte:
345 as_gssbx(data, base, ScratchRegister, 0);
346 break;
347 case SizeHalfWord:
348 as_gsshx(data, base, ScratchRegister, 0);
349 break;
350 case SizeWord:
351 as_gsswx(data, base, ScratchRegister, 0);
352 break;
353 case SizeDouble:
354 as_gssdx(data, base, ScratchRegister, 0);
355 break;
356 default:
357 MOZ_CRASH("Invalid argument for ma_store");
358 }
359 return;
360 }
361
362 if (!Imm16::IsInSignedRange(address.offset)) {
363 ma_li(ScratchRegister, Imm32(address.offset));
364 as_addu(ScratchRegister, address.base, ScratchRegister);
365 base = ScratchRegister;
366 encodedOffset = Imm16(0).encode();
367 } else {
368 encodedOffset = Imm16(address.offset).encode();
369 base = address.base;
370 }
371
372 switch (size) {
373 case SizeByte:
374 as_sb(data, base, encodedOffset);
375 break;
376 case SizeHalfWord:
377 as_sh(data, base, encodedOffset);
378 break;
379 case SizeWord:
380 as_sw(data, base, encodedOffset);
381 break;
382 default:
383 MOZ_CRASH("Invalid argument for ma_store");
384 }
385 }
386
computeScaledAddress(const BaseIndex & address,Register dest)387 void MacroAssemblerMIPSCompat::computeScaledAddress(const BaseIndex& address,
388 Register dest) {
389 int32_t shift = Imm32::ShiftOf(address.scale).value;
390 if (shift) {
391 ma_sll(ScratchRegister, address.index, Imm32(shift));
392 as_addu(dest, address.base, ScratchRegister);
393 } else {
394 as_addu(dest, address.base, address.index);
395 }
396 }
397
// Shortcut for when we know we're transferring 32 bits of data.
void MacroAssemblerMIPS::ma_lw(Register data, Address address) {
  ma_load(data, address, SizeWord);
}
402
// Store a 32-bit word from `data` to `address`.
void MacroAssemblerMIPS::ma_sw(Register data, Address address) {
  ma_store(data, address, SizeWord);
}
406
ma_sw(Imm32 imm,Address address)407 void MacroAssemblerMIPS::ma_sw(Imm32 imm, Address address) {
408 MOZ_ASSERT(address.base != ScratchRegister);
409 ma_li(ScratchRegister, imm);
410
411 if (Imm16::IsInSignedRange(address.offset)) {
412 as_sw(ScratchRegister, address.base, address.offset);
413 } else {
414 MOZ_ASSERT(address.base != SecondScratchReg);
415
416 ma_li(SecondScratchReg, Imm32(address.offset));
417 as_addu(SecondScratchReg, address.base, SecondScratchReg);
418 as_sw(ScratchRegister, SecondScratchReg, 0);
419 }
420 }
421
// Store a 32-bit word from `data` to a base+index*scale+offset address.
void MacroAssemblerMIPS::ma_sw(Register data, BaseIndex& address) {
  ma_store(data, address, SizeWord);
}
425
// Pop one word from the stack into `r` and release its slot.
void MacroAssemblerMIPS::ma_pop(Register r) {
  as_lw(r, StackPointer, 0);
  as_addiu(StackPointer, StackPointer, sizeof(intptr_t));
}
430
ma_push(Register r)431 void MacroAssemblerMIPS::ma_push(Register r) {
432 if (r == sp) {
433 // Pushing sp requires one more instruction.
434 ma_move(ScratchRegister, sp);
435 r = ScratchRegister;
436 }
437
438 as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
439 as_sw(r, StackPointer, 0);
440 }
441
// Branches when done from within mips-specific code.
// Compare `lhs` against the word loaded from `addr` and branch on `c`.
void MacroAssemblerMIPS::ma_b(Register lhs, Address addr, Label* label,
                              Condition c, JumpKind jumpKind) {
  // The loaded word lands in ScratchRegister, so lhs must not alias it.
  MOZ_ASSERT(lhs != ScratchRegister);
  ma_lw(ScratchRegister, addr);
  ma_b(lhs, ScratchRegister, label, c, jumpKind);
}
449
// Compare the word at `addr` against an immediate and branch on `c`.
// SecondScratchReg holds the loaded word; presumably the register/Imm32
// ma_b overload needs ScratchRegister to materialize the immediate.
void MacroAssemblerMIPS::ma_b(Address addr, Imm32 imm, Label* label,
                              Condition c, JumpKind jumpKind) {
  ma_lw(SecondScratchReg, addr);
  ma_b(SecondScratchReg, imm, label, c, jumpKind);
}
455
// Compare the word at `addr` against a GC-pointer immediate and branch on
// `c`. Mirrors the Imm32 overload above, via SecondScratchReg.
void MacroAssemblerMIPS::ma_b(Address addr, ImmGCPtr imm, Label* label,
                              Condition c, JumpKind jumpKind) {
  ma_lw(SecondScratchReg, addr);
  ma_b(SecondScratchReg, imm, label, c, jumpKind);
}
461
// Branch-and-link (call) to `label`. A bound target always takes the
// long-jump form so the return address lands after the reserved block.
void MacroAssemblerMIPS::ma_bal(Label* label, DelaySlotFill delaySlotFill) {
  spew("branch .Llabel %p\n", label);
  if (label->bound()) {
    // Generate the long jump for calls because return address has to be
    // the address after the reserved block.
    addLongJump(nextOffset(), BufferOffset(label->offset()));
    ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
    as_jalr(ScratchRegister);
    if (delaySlotFill == FillDelaySlot) as_nop();
    return;
  }

  // Unbound label: emit an open branch and thread it onto the label's chain.
  // Second word holds a pointer to the next branch in label's chain.
  uint32_t nextInChain =
      label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

  // Make the whole branch continuous in the buffer.
  m_buffer.ensureSpace(4 * sizeof(uint32_t));

  spew("bal .Llabel %p\n", label);
  BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
  writeInst(nextInChain);
  if (!oom()) label->use(bo.getOffset());
  // Leave space for long jump.
  as_nop();
  if (delaySlotFill == FillDelaySlot) as_nop();
}
489
// Emit branch instruction `code` targeting `label`, choosing between a short
// (16-bit PC-relative) branch and a long li+jr sequence as needed, and
// threading unbound branches onto the label's patch chain.
void MacroAssemblerMIPS::branchWithCode(InstImm code, Label* label,
                                        JumpKind jumpKind) {
  spew("branch .Llabel %p", label);
  // bgezal (branch-and-link) must go through ma_bal, not here.
  MOZ_ASSERT(code.encode() !=
             InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
  InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));

  if (label->bound()) {
    int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();

    // Prefer the short form whenever the offset fits in 16 bits.
    if (BOffImm16::IsInRange(offset)) jumpKind = ShortJump;

    if (jumpKind == ShortJump) {
      MOZ_ASSERT(BOffImm16::IsInRange(offset));
      code.setBOffImm16(BOffImm16(offset));
#ifdef JS_JITSPEW
      decodeBranchInstAndSpew(code);
#endif
      writeInst(code.encode());
      as_nop();  // branch delay slot
      return;
    }

    if (code.encode() == inst_beq.encode()) {
      // Unconditional branch: handle long jump with li+jr.
      addLongJump(nextOffset(), BufferOffset(label->offset()));
      ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
      as_jr(ScratchRegister);
      as_nop();
      return;
    }

    // Handle long conditional branch: invert the condition to hop over the
    // 5-instruction li+jr+nop sequence that follows.
    spew("invert branch .Llabel %p", label);
    InstImm code_r = invertBranch(code, BOffImm16(5 * sizeof(uint32_t)));
#ifdef JS_JITSPEW
    decodeBranchInstAndSpew(code_r);
#endif
    writeInst(code_r.encode());

    // No need for a "nop" here because we can clobber scratch.
    addLongJump(nextOffset(), BufferOffset(label->offset()));
    ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
    as_jr(ScratchRegister);
    as_nop();
    return;
  }

  // Generate open jump and link it to a label.

  // Second word holds a pointer to the next branch in label's chain.
  uint32_t nextInChain =
      label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

  if (jumpKind == ShortJump) {
    // Make the whole branch continuous in the buffer.
    m_buffer.ensureSpace(2 * sizeof(uint32_t));

    // Indicate that this is short jump with offset 4.
    code.setBOffImm16(BOffImm16(4));
#ifdef JS_JITSPEW
    decodeBranchInstAndSpew(code);
#endif
    BufferOffset bo = writeInst(code.encode());
    writeInst(nextInChain);
    if (!oom()) label->use(bo.getOffset());
    return;
  }

  bool conditional = code.encode() != inst_beq.encode();

  // Make the whole branch continuous in the buffer. A conditional branch
  // needs one extra slot for the inverted-branch hop used by the long form.
  m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));

#ifdef JS_JITSPEW
  decodeBranchInstAndSpew(code);
#endif
  BufferOffset bo = writeInst(code.encode());
  writeInst(nextInChain);
  if (!oom()) label->use(bo.getOffset());
  // Leave space for potential long jump.
  as_nop();
  as_nop();
  if (conditional) as_nop();
}
575
// dest <- 0/1 result of the 64-bit comparison (lhs `cond` val) on 32-bit
// MIPS. Comparisons against zero take cheap special-case sequences.
void MacroAssemblerMIPSCompat::cmp64Set(Condition cond, Register64 lhs,
                                        Imm64 val, Register dest) {
  if (val.value == 0) {
    switch (cond) {
      case Assembler::Equal:
      case Assembler::BelowOrEqual:
        // ==0 and unsigned <=0 both mean "all 64 bits clear".
        as_or(dest, lhs.high, lhs.low);
        as_sltiu(dest, dest, 1);
        break;
      case Assembler::NotEqual:
      case Assembler::Above:
        // !=0 and unsigned >0 both mean "some bit set".
        as_or(dest, lhs.high, lhs.low);
        as_sltu(dest, zero, dest);
        break;
      case Assembler::LessThan:
      case Assembler::GreaterThanOrEqual:
        // Signed comparison with 0 is just the sign bit of the high word.
        as_slt(dest, lhs.high, zero);
        if (cond == Assembler::GreaterThanOrEqual) as_xori(dest, dest, 1);
        break;
      case Assembler::GreaterThan:
      case Assembler::LessThanOrEqual:
        // lhs > 0 iff sign(high)==0 and some bit is set, computed as
        // (high >> 31) <u (high | low).
        as_or(SecondScratchReg, lhs.high, lhs.low);
        as_sra(ScratchRegister, lhs.high, 31);
        as_sltu(dest, ScratchRegister, SecondScratchReg);
        if (cond == Assembler::LessThanOrEqual) as_xori(dest, dest, 1);
        break;
      case Assembler::Below:
      case Assembler::AboveOrEqual:
        // Unsigned <0 is always false; unsigned >=0 is always true.
        as_ori(dest, zero, cond == Assembler::AboveOrEqual ? 1 : 0);
        break;
      default:
        MOZ_CRASH("Condition code not supported");
        break;
    }
    return;
  }

  // General case: ma_cmp64 leaves a value in dest and tells us which sense
  // to apply to it.
  Condition c = ma_cmp64(cond, lhs, val, dest);

  switch (cond) {
    // For Equal/NotEqual cond ma_cmp64 dest holds non boolean result.
    case Assembler::Equal:
      as_sltiu(dest, dest, 1);
      break;
    case Assembler::NotEqual:
      as_sltu(dest, zero, dest);
      break;
    default:
      // Zero means dest holds the inverse predicate; flip it to a boolean.
      if (c == Assembler::Zero) as_xori(dest, dest, 1);
      break;
  }
}
628
cmp64Set(Condition cond,Register64 lhs,Register64 rhs,Register dest)629 void MacroAssemblerMIPSCompat::cmp64Set(Condition cond, Register64 lhs,
630 Register64 rhs, Register dest) {
631 Condition c = ma_cmp64(cond, lhs, rhs, dest);
632
633 switch (cond) {
634 // For Equal/NotEqual cond ma_cmp64 dest holds non boolean result.
635 case Assembler::Equal:
636 as_sltiu(dest, dest, 1);
637 break;
638 case Assembler::NotEqual:
639 as_sltu(dest, zero, dest);
640 break;
641 default:
642 if (c == Assembler::Zero) as_xori(dest, dest, 1);
643 break;
644 }
645 }
646
// Emit the core of a 64-bit register-register comparison. On return, `cond`
// holds iff `dest` satisfies the returned assembler condition: Zero/NonZero
// zero-test for Equal/NotEqual (dest is an OR of XORs, not a boolean), or a
// 0/1 boolean for the ordered comparisons (Zero means "invert dest").
Assembler::Condition MacroAssemblerMIPSCompat::ma_cmp64(Condition cond,
                                                        Register64 lhs,
                                                        Register64 rhs,
                                                        Register dest) {
  switch (cond) {
    case Assembler::Equal:
    case Assembler::NotEqual:
      // dest <- (lhs.high ^ rhs.high) | (lhs.low ^ rhs.low): zero iff equal.
      as_xor(SecondScratchReg, lhs.high, rhs.high);
      as_xor(ScratchRegister, lhs.low, rhs.low);
      as_or(dest, SecondScratchReg, ScratchRegister);
      return (cond == Assembler::Equal) ? Assembler::Zero : Assembler::NonZero;
      break;
    case Assembler::LessThan:
    case Assembler::GreaterThanOrEqual:
      // dest <- (lhs < rhs) signed:
      //   (lhs.high < rhs.high) || (!(rhs.high < lhs.high) && lhs.low <u rhs.low)
      as_slt(SecondScratchReg, rhs.high, lhs.high);
      as_sltu(ScratchRegister, lhs.low, rhs.low);
      as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
      as_slt(ScratchRegister, lhs.high, rhs.high);
      as_or(dest, ScratchRegister, SecondScratchReg);
      return (cond == Assembler::GreaterThanOrEqual) ? Assembler::Zero
                                                     : Assembler::NonZero;
      break;
    case Assembler::GreaterThan:
    case Assembler::LessThanOrEqual:
      // dest <- (rhs < lhs) signed, mirror image of the case above.
      as_slt(SecondScratchReg, lhs.high, rhs.high);
      as_sltu(ScratchRegister, rhs.low, lhs.low);
      as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
      as_slt(ScratchRegister, rhs.high, lhs.high);
      as_or(dest, ScratchRegister, SecondScratchReg);
      return (cond == Assembler::LessThanOrEqual) ? Assembler::Zero
                                                  : Assembler::NonZero;
      break;
    case Assembler::Below:
    case Assembler::AboveOrEqual:
      // dest <- (lhs <u rhs): like LessThan but with unsigned high compares.
      as_sltu(SecondScratchReg, rhs.high, lhs.high);
      as_sltu(ScratchRegister, lhs.low, rhs.low);
      as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
      as_sltu(ScratchRegister, lhs.high, rhs.high);
      as_or(dest, ScratchRegister, SecondScratchReg);
      return (cond == Assembler::AboveOrEqual) ? Assembler::Zero
                                               : Assembler::NonZero;
      break;
    case Assembler::Above:
    case Assembler::BelowOrEqual:
      // dest <- (rhs <u lhs), mirror image of the case above.
      as_sltu(SecondScratchReg, lhs.high, rhs.high);
      as_sltu(ScratchRegister, rhs.low, lhs.low);
      as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
      as_sltu(ScratchRegister, rhs.high, lhs.high);
      as_or(dest, ScratchRegister, SecondScratchReg);
      return (cond == Assembler::BelowOrEqual) ? Assembler::Zero
                                               : Assembler::NonZero;
      break;
    default:
      MOZ_CRASH("Condition code not supported");
      break;
  }
}
704
// Emit the core of a 64-bit register-immediate comparison (val != 0; callers
// special-case zero). Contract matches the register-register overload: the
// returned condition applied to `dest` yields the truth of `cond`.
//
// The ordered cases compute a tri-state high-word compare
//   t = (vhi <op> hi) - (hi <op> vhi)  in {-1, 0, +1}
// and then dest <- t < lowCompare, which resolves to the low-word compare
// exactly when the high words are equal.
Assembler::Condition MacroAssemblerMIPSCompat::ma_cmp64(Condition cond,
                                                        Register64 lhs,
                                                        Imm64 val,
                                                        Register dest) {
  MOZ_ASSERT(val.value != 0);

  switch (cond) {
    case Assembler::Equal:
    case Assembler::NotEqual:
      // dest <- (hi ^ vhi) | (lo ^ vlo): zero iff equal.
      ma_xor(SecondScratchReg, lhs.high, val.hi());
      ma_xor(ScratchRegister, lhs.low, val.low());
      as_or(dest, SecondScratchReg, ScratchRegister);
      return (cond == Assembler::Equal) ? Assembler::Zero : Assembler::NonZero;
      break;
    case Assembler::LessThan:
    case Assembler::GreaterThanOrEqual:
      // dest <- (lhs < val) signed.
      ma_li(SecondScratchReg, val.hi());
      as_slt(ScratchRegister, lhs.high, SecondScratchReg);   // hi < vhi
      as_slt(SecondScratchReg, SecondScratchReg, lhs.high);  // vhi < hi
      as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);  // tri-state
      ma_li(ScratchRegister, val.low());
      as_sltu(ScratchRegister, lhs.low, ScratchRegister);    // lo <u vlo
      as_slt(dest, SecondScratchReg, ScratchRegister);
      return (cond == Assembler::GreaterThanOrEqual) ? Assembler::Zero
                                                     : Assembler::NonZero;
      break;
    case Assembler::GreaterThan:
    case Assembler::LessThanOrEqual:
      // dest <- (val < lhs) signed, mirror of the case above.
      ma_li(SecondScratchReg, val.hi());
      as_slt(ScratchRegister, SecondScratchReg, lhs.high);   // vhi < hi
      as_slt(SecondScratchReg, lhs.high, SecondScratchReg);  // hi < vhi
      as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);  // tri-state
      ma_li(ScratchRegister, val.low());
      as_sltu(ScratchRegister, ScratchRegister, lhs.low);    // vlo <u lo
      as_slt(dest, SecondScratchReg, ScratchRegister);
      return (cond == Assembler::LessThanOrEqual) ? Assembler::Zero
                                                  : Assembler::NonZero;
      break;
    case Assembler::Below:
    case Assembler::AboveOrEqual:
      // dest <- (lhs <u val): unsigned high-word compares.
      ma_li(SecondScratchReg, val.hi());
      as_sltu(ScratchRegister, lhs.high, SecondScratchReg);   // hi <u vhi
      as_sltu(SecondScratchReg, SecondScratchReg, lhs.high);  // vhi <u hi
      as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);  // tri-state
      ma_li(ScratchRegister, val.low());
      as_sltu(ScratchRegister, lhs.low, ScratchRegister);     // lo <u vlo
      as_slt(dest, SecondScratchReg, ScratchRegister);
      return (cond == Assembler::AboveOrEqual) ? Assembler::Zero
                                               : Assembler::NonZero;
      break;
    case Assembler::Above:
    case Assembler::BelowOrEqual:
      // dest <- (val <u lhs), mirror of the case above.
      ma_li(SecondScratchReg, val.hi());
      as_sltu(ScratchRegister, SecondScratchReg, lhs.high);   // vhi <u hi
      as_sltu(SecondScratchReg, lhs.high, SecondScratchReg);  // hi <u vhi
      as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);  // tri-state
      ma_li(ScratchRegister, val.low());
      as_sltu(ScratchRegister, ScratchRegister, lhs.low);     // vlo <u lo
      as_slt(dest, SecondScratchReg, ScratchRegister);
      return (cond == Assembler::BelowOrEqual) ? Assembler::Zero
                                               : Assembler::NonZero;
      break;
    default:
      MOZ_CRASH("Condition code not supported");
      break;
  }
}
772
773 // fp instructions
ma_lid(FloatRegister dest,double value)774 void MacroAssemblerMIPS::ma_lid(FloatRegister dest, double value) {
775 struct DoubleStruct {
776 uint32_t lo;
777 uint32_t hi;
778 };
779 DoubleStruct intStruct = mozilla::BitwiseCast<DoubleStruct>(value);
780 #if MOZ_BIG_ENDIAN
781 mozilla::Swap(intStruct.hi, intStruct.lo);
782 #endif
783
784 // put hi part of 64 bit value into the odd register
785 if (intStruct.hi == 0) {
786 moveToDoubleHi(zero, dest);
787 } else {
788 ma_li(ScratchRegister, Imm32(intStruct.hi));
789 moveToDoubleHi(ScratchRegister, dest);
790 }
791
792 // put low part of 64 bit value into the even register
793 if (intStruct.lo == 0) {
794 moveToDoubleLo(zero, dest);
795 } else {
796 ma_li(ScratchRegister, Imm32(intStruct.lo));
797 moveToDoubleLo(ScratchRegister, dest);
798 }
799 }
800
// Move a double's raw 64 bits into a nunbox ValueOperand: low word to the
// payload register, high word to the type register.
void MacroAssemblerMIPS::ma_mv(FloatRegister src, ValueOperand dest) {
  moveFromDoubleLo(src, dest.payloadReg());
  moveFromDoubleHi(src, dest.typeReg());
}
805
// Move a nunbox ValueOperand's raw 64 bits into a double register: payload
// to the low word, type to the high word.
void MacroAssemblerMIPS::ma_mv(ValueOperand src, FloatRegister dest) {
  moveToDoubleLo(src.payloadReg(), dest);
  moveToDoubleHi(src.typeReg(), dest);
}
810
ma_ls(FloatRegister ft,Address address)811 void MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address) {
812 if (Imm16::IsInSignedRange(address.offset)) {
813 as_lwc1(ft, address.base, address.offset);
814 } else {
815 MOZ_ASSERT(address.base != ScratchRegister);
816 ma_li(ScratchRegister, Imm32(address.offset));
817 if (isLoongson()) {
818 as_gslsx(ft, address.base, ScratchRegister, 0);
819 } else {
820 as_addu(ScratchRegister, address.base, ScratchRegister);
821 as_lwc1(ft, ScratchRegister, 0);
822 }
823 }
824 }
825
// Load a double from `address` into `ft`, handling offsets that do not fit
// ldc1's signed 16-bit immediate (via ScratchRegister, or Loongson's
// indexed form).
void MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_ldc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gsldx(ft, address.base, ScratchRegister, 0);
    } else {
      as_addu(ScratchRegister, address.base, ScratchRegister);
      as_ldc1(ft, ScratchRegister, 0);
    }
  }
}
840
ma_sd(FloatRegister ft,Address address)841 void MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address) {
842 if (Imm16::IsInSignedRange(address.offset)) {
843 as_sdc1(ft, address.base, address.offset);
844 } else {
845 MOZ_ASSERT(address.base != ScratchRegister);
846 ma_li(ScratchRegister, Imm32(address.offset));
847 if (isLoongson()) {
848 as_gssdx(ft, address.base, ScratchRegister, 0);
849 } else {
850 as_addu(ScratchRegister, address.base, ScratchRegister);
851 as_sdc1(ft, ScratchRegister, 0);
852 }
853 }
854 }
855
// Store the float32 in `ft` to `address`, handling offsets that do not fit
// swc1's signed 16-bit immediate (via ScratchRegister, or Loongson's
// indexed form).
void MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_swc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gsssx(ft, address.base, ScratchRegister, 0);
    } else {
      as_addu(ScratchRegister, address.base, ScratchRegister);
      as_swc1(ft, ScratchRegister, 0);
    }
  }
}
870
// Load a double as two word-sized lwc1's into the even/odd register pair,
// for addresses only guaranteed to be word (not double-word) aligned.
void MacroAssemblerMIPS::ma_ldc1WordAligned(FloatRegister ft, Register base,
                                            int32_t off) {
  // Both word offsets must fit the lwc1 immediate field.
  MOZ_ASSERT(Imm16::IsInSignedRange(off + PAYLOAD_OFFSET) &&
             Imm16::IsInSignedRange(off + TAG_OFFSET));

  as_lwc1(ft, base, off + PAYLOAD_OFFSET);
  as_lwc1(getOddPair(ft), base, off + TAG_OFFSET);
}
879
// Store a double as two word-sized swc1's from the even/odd register pair,
// for addresses only guaranteed to be word (not double-word) aligned.
void MacroAssemblerMIPS::ma_sdc1WordAligned(FloatRegister ft, Register base,
                                            int32_t off) {
  // Both word offsets must fit the swc1 immediate field.
  MOZ_ASSERT(Imm16::IsInSignedRange(off + PAYLOAD_OFFSET) &&
             Imm16::IsInSignedRange(off + TAG_OFFSET));

  as_swc1(ft, base, off + PAYLOAD_OFFSET);
  as_swc1(getOddPair(ft), base, off + TAG_OFFSET);
}
888
// Pop a float register off the stack: load it (doubles are reloaded as two
// word-aligned 32-bit halves), then release f.size() bytes of stack.
void MacroAssemblerMIPS::ma_pop(FloatRegister f) {
  if (f.isDouble())
    ma_ldc1WordAligned(f, StackPointer, 0);
  else
    as_lwc1(f, StackPointer, 0);

  as_addiu(StackPointer, StackPointer, f.size());
}

// Push a float register: reserve f.size() bytes first, then store (doubles
// are written as two word-aligned 32-bit halves).
void MacroAssemblerMIPS::ma_push(FloatRegister f) {
  as_addiu(StackPointer, StackPointer, -f.size());

  if (f.isDouble())
    ma_sdc1WordAligned(f, StackPointer, 0);
  else
    as_swc1(f, StackPointer, 0);
}
906
// Build a minimal fake exit frame for out-of-line code: push a frame
// descriptor for the current frame depth, then the fake return address.
// Always reports success.
bool MacroAssemblerMIPSCompat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
  uint32_t descriptor = MakeFrameDescriptor(
      asMasm().framePushed(), JitFrame_IonJS, ExitFrameLayout::Size());

  asMasm().Push(Imm32(descriptor));  // descriptor_
  asMasm().Push(ImmPtr(fakeReturnAddr));

  return true;
}
916
// Register/immediate moves.  On MIPS32 a pointer is 32 bits wide (see the
// static_assert at the top of this file), so the Ptr variants reduce to the
// same ma_li / ma_move primitives as the 32-bit ones.

void MacroAssemblerMIPSCompat::move32(Imm32 imm, Register dest) {
  ma_li(dest, imm);
}

void MacroAssemblerMIPSCompat::move32(Register src, Register dest) {
  ma_move(dest, src);
}

void MacroAssemblerMIPSCompat::movePtr(Register src, Register dest) {
  ma_move(dest, src);
}
void MacroAssemblerMIPSCompat::movePtr(ImmWord imm, Register dest) {
  ma_li(dest, imm);
}

void MacroAssemblerMIPSCompat::movePtr(ImmGCPtr imm, Register dest) {
  ma_li(dest, imm);
}

void MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest) {
  movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
// Load a wasm symbolic address: record a SymbolicAccess at the current code
// offset so the linker can patch it, and emit a patchable immediate load
// with a -1 placeholder.
void MacroAssemblerMIPSCompat::movePtr(wasm::SymbolicAddress imm,
                                       Register dest) {
  append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
  ma_liPatchable(dest, ImmWord(-1));
}
944
// Memory loads.  Each wrapper delegates to ma_load with the appropriate
// width (SizeByte/SizeHalfWord/SizeWord) and extension mode.  The
// AbsoluteAddress / SymbolicAddress variants first materialize the address
// in ScratchRegister and then load through it.

void MacroAssemblerMIPSCompat::load8ZeroExtend(const Address& address,
                                               Register dest) {
  ma_load(dest, address, SizeByte, ZeroExtend);
}

void MacroAssemblerMIPSCompat::load8ZeroExtend(const BaseIndex& src,
                                               Register dest) {
  ma_load(dest, src, SizeByte, ZeroExtend);
}

void MacroAssemblerMIPSCompat::load8SignExtend(const Address& address,
                                               Register dest) {
  ma_load(dest, address, SizeByte, SignExtend);
}

void MacroAssemblerMIPSCompat::load8SignExtend(const BaseIndex& src,
                                               Register dest) {
  ma_load(dest, src, SizeByte, SignExtend);
}

void MacroAssemblerMIPSCompat::load16ZeroExtend(const Address& address,
                                                Register dest) {
  ma_load(dest, address, SizeHalfWord, ZeroExtend);
}

void MacroAssemblerMIPSCompat::load16ZeroExtend(const BaseIndex& src,
                                                Register dest) {
  ma_load(dest, src, SizeHalfWord, ZeroExtend);
}

void MacroAssemblerMIPSCompat::load16SignExtend(const Address& address,
                                                Register dest) {
  ma_load(dest, address, SizeHalfWord, SignExtend);
}

void MacroAssemblerMIPSCompat::load16SignExtend(const BaseIndex& src,
                                                Register dest) {
  ma_load(dest, src, SizeHalfWord, SignExtend);
}

void MacroAssemblerMIPSCompat::load32(const Address& address, Register dest) {
  ma_load(dest, address, SizeWord);
}

void MacroAssemblerMIPSCompat::load32(const BaseIndex& address, Register dest) {
  ma_load(dest, address, SizeWord);
}

void MacroAssemblerMIPSCompat::load32(AbsoluteAddress address, Register dest) {
  movePtr(ImmPtr(address.addr), ScratchRegister);
  load32(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerMIPSCompat::load32(wasm::SymbolicAddress address,
                                      Register dest) {
  movePtr(address, ScratchRegister);
  load32(Address(ScratchRegister, 0), dest);
}

// On MIPS32 loadPtr is a 32-bit load.
void MacroAssemblerMIPSCompat::loadPtr(const Address& address, Register dest) {
  ma_load(dest, address, SizeWord);
}

void MacroAssemblerMIPSCompat::loadPtr(const BaseIndex& src, Register dest) {
  ma_load(dest, src, SizeWord);
}

void MacroAssemblerMIPSCompat::loadPtr(AbsoluteAddress address, Register dest) {
  movePtr(ImmPtr(address.addr), ScratchRegister);
  loadPtr(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerMIPSCompat::loadPtr(wasm::SymbolicAddress address,
                                       Register dest) {
  movePtr(address, ScratchRegister);
  loadPtr(Address(ScratchRegister, 0), dest);
}

// Load the payload word of a Value; for private values the payload word
// holds the raw pointer.
void MacroAssemblerMIPSCompat::loadPrivate(const Address& address,
                                           Register dest) {
  ma_lw(dest, Address(address.base, address.offset + PAYLOAD_OFFSET));
}
1027
loadUnalignedDouble(const wasm::MemoryAccessDesc & access,const BaseIndex & src,Register temp,FloatRegister dest)1028 void MacroAssemblerMIPSCompat::loadUnalignedDouble(
1029 const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
1030 FloatRegister dest) {
1031 MOZ_ASSERT(MOZ_LITTLE_ENDIAN, "Wasm-only; wasm is disabled on big-endian.");
1032 computeScaledAddress(src, SecondScratchReg);
1033
1034 uint32_t framePushed = asMasm().framePushed();
1035 BufferOffset load;
1036 if (Imm16::IsInSignedRange(src.offset) &&
1037 Imm16::IsInSignedRange(src.offset + 7)) {
1038 load = as_lwl(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET + 3);
1039 as_lwr(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET);
1040 append(access, load.getOffset(), framePushed);
1041 moveToDoubleLo(temp, dest);
1042 load = as_lwl(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET + 3);
1043 as_lwr(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET);
1044 append(access, load.getOffset(), framePushed);
1045 moveToDoubleHi(temp, dest);
1046 } else {
1047 ma_li(ScratchRegister, Imm32(src.offset));
1048 as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
1049 load = as_lwl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
1050 as_lwr(temp, ScratchRegister, INT64LOW_OFFSET);
1051 append(access, load.getOffset(), framePushed);
1052 moveToDoubleLo(temp, dest);
1053 load = as_lwl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
1054 as_lwr(temp, ScratchRegister, INT64HIGH_OFFSET);
1055 append(access, load.getOffset(), framePushed);
1056 moveToDoubleHi(temp, dest);
1057 }
1058 }
1059
loadUnalignedFloat32(const wasm::MemoryAccessDesc & access,const BaseIndex & src,Register temp,FloatRegister dest)1060 void MacroAssemblerMIPSCompat::loadUnalignedFloat32(
1061 const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
1062 FloatRegister dest) {
1063 MOZ_ASSERT(MOZ_LITTLE_ENDIAN, "Wasm-only; wasm is disabled on big-endian.");
1064 computeScaledAddress(src, SecondScratchReg);
1065 BufferOffset load;
1066 if (Imm16::IsInSignedRange(src.offset) &&
1067 Imm16::IsInSignedRange(src.offset + 3)) {
1068 load = as_lwl(temp, SecondScratchReg, src.offset + 3);
1069 as_lwr(temp, SecondScratchReg, src.offset);
1070 } else {
1071 ma_li(ScratchRegister, Imm32(src.offset));
1072 as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
1073 load = as_lwl(temp, ScratchRegister, 3);
1074 as_lwr(temp, ScratchRegister, 0);
1075 }
1076 append(access, load.getOffset(), asMasm().framePushed());
1077 moveToFloat32(temp, dest);
1078 }
1079
// Memory stores.  Each wrapper delegates to ma_store with the appropriate
// width.  Immediate-value variants stage the value in SecondScratchReg (or
// pass the immediate straight through for BaseIndex destinations); the
// AbsoluteAddress variant materializes the address in ScratchRegister.

void MacroAssemblerMIPSCompat::store8(Imm32 imm, const Address& address) {
  ma_li(SecondScratchReg, imm);
  ma_store(SecondScratchReg, address, SizeByte);
}

void MacroAssemblerMIPSCompat::store8(Register src, const Address& address) {
  ma_store(src, address, SizeByte);
}

void MacroAssemblerMIPSCompat::store8(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeByte);
}

void MacroAssemblerMIPSCompat::store8(Register src, const BaseIndex& dest) {
  ma_store(src, dest, SizeByte);
}

void MacroAssemblerMIPSCompat::store16(Imm32 imm, const Address& address) {
  ma_li(SecondScratchReg, imm);
  ma_store(SecondScratchReg, address, SizeHalfWord);
}

void MacroAssemblerMIPSCompat::store16(Register src, const Address& address) {
  ma_store(src, address, SizeHalfWord);
}

void MacroAssemblerMIPSCompat::store16(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeHalfWord);
}

void MacroAssemblerMIPSCompat::store16(Register src, const BaseIndex& address) {
  ma_store(src, address, SizeHalfWord);
}

void MacroAssemblerMIPSCompat::store32(Register src, AbsoluteAddress address) {
  movePtr(ImmPtr(address.addr), ScratchRegister);
  store32(src, Address(ScratchRegister, 0));
}

void MacroAssemblerMIPSCompat::store32(Register src, const Address& address) {
  ma_store(src, address, SizeWord);
}

void MacroAssemblerMIPSCompat::store32(Imm32 src, const Address& address) {
  move32(src, SecondScratchReg);
  ma_store(SecondScratchReg, address, SizeWord);
}

void MacroAssemblerMIPSCompat::store32(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeWord);
}

void MacroAssemblerMIPSCompat::store32(Register src, const BaseIndex& dest) {
  ma_store(src, dest, SizeWord);
}
1135
// Pointer-width stores (32-bit on MIPS32).  The immediate-taking overloads
// are templated over the destination kind (Address or BaseIndex) and
// explicitly instantiated for both below.

template <typename T>
void MacroAssemblerMIPSCompat::storePtr(ImmWord imm, T address) {
  ma_li(SecondScratchReg, imm);
  ma_store(SecondScratchReg, address, SizeWord);
}

template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmWord imm,
                                                          Address address);
template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmWord imm,
                                                            BaseIndex address);

template <typename T>
void MacroAssemblerMIPSCompat::storePtr(ImmPtr imm, T address) {
  storePtr(ImmWord(uintptr_t(imm.value)), address);
}

template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmPtr imm,
                                                          Address address);
template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmPtr imm,
                                                            BaseIndex address);

template <typename T>
void MacroAssemblerMIPSCompat::storePtr(ImmGCPtr imm, T address) {
  // movePtr on an ImmGCPtr also records the GC-pointer use for tracing.
  movePtr(imm, SecondScratchReg);
  storePtr(SecondScratchReg, address);
}

template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmGCPtr imm,
                                                          Address address);
template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmGCPtr imm,
                                                            BaseIndex address);

void MacroAssemblerMIPSCompat::storePtr(Register src, const Address& address) {
  ma_store(src, address, SizeWord);
}

void MacroAssemblerMIPSCompat::storePtr(Register src,
                                        const BaseIndex& address) {
  ma_store(src, address, SizeWord);
}

void MacroAssemblerMIPSCompat::storePtr(Register src, AbsoluteAddress dest) {
  movePtr(ImmPtr(dest.addr), ScratchRegister);
  storePtr(src, Address(ScratchRegister, 0));
}
1181
storeUnalignedFloat32(const wasm::MemoryAccessDesc & access,FloatRegister src,Register temp,const BaseIndex & dest)1182 void MacroAssemblerMIPSCompat::storeUnalignedFloat32(
1183 const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
1184 const BaseIndex& dest) {
1185 MOZ_ASSERT(MOZ_LITTLE_ENDIAN, "Wasm-only; wasm is disabled on big-endian.");
1186 computeScaledAddress(dest, SecondScratchReg);
1187 moveFromFloat32(src, temp);
1188
1189 BufferOffset store;
1190 if (Imm16::IsInSignedRange(dest.offset) &&
1191 Imm16::IsInSignedRange(dest.offset + 3)) {
1192 store = as_swl(temp, SecondScratchReg, dest.offset + 3);
1193 as_swr(temp, SecondScratchReg, dest.offset);
1194 } else {
1195 ma_li(ScratchRegister, Imm32(dest.offset));
1196 as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
1197 store = as_swl(temp, ScratchRegister, 3);
1198 as_swr(temp, ScratchRegister, 0);
1199 }
1200 append(access, store.getOffset(), asMasm().framePushed());
1201 }
1202
storeUnalignedDouble(const wasm::MemoryAccessDesc & access,FloatRegister src,Register temp,const BaseIndex & dest)1203 void MacroAssemblerMIPSCompat::storeUnalignedDouble(
1204 const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
1205 const BaseIndex& dest) {
1206 MOZ_ASSERT(MOZ_LITTLE_ENDIAN, "Wasm-only; wasm is disabled on big-endian.");
1207 computeScaledAddress(dest, SecondScratchReg);
1208
1209 uint32_t framePushed = asMasm().framePushed();
1210 BufferOffset store;
1211 if (Imm16::IsInSignedRange(dest.offset) &&
1212 Imm16::IsInSignedRange(dest.offset + 7)) {
1213 moveFromDoubleHi(src, temp);
1214 store = as_swl(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET + 3);
1215 as_swr(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET);
1216 moveFromDoubleLo(src, temp);
1217 as_swl(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET + 3);
1218 as_swr(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET);
1219
1220 } else {
1221 ma_li(ScratchRegister, Imm32(dest.offset));
1222 as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
1223 moveFromDoubleHi(src, temp);
1224 store = as_swl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
1225 as_swr(temp, ScratchRegister, INT64HIGH_OFFSET);
1226 moveFromDoubleLo(src, temp);
1227 as_swl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
1228 as_swr(temp, ScratchRegister, INT64LOW_OFFSET);
1229 }
1230 append(access, store.getOffset(), framePushed);
1231 }
1232
// Clamp a double to the uint8 range [0, 255] (e.g. for Uint8ClampedArray
// stores), using conditional moves instead of branches.
void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
  // Round the input to an integer word in ScratchDoubleReg.
  as_roundwd(ScratchDoubleReg, input);
  ma_li(ScratchRegister, Imm32(255));
  // output = rounded integer bits.
  as_mfc1(output, ScratchDoubleReg);
  // ScratchDoubleReg = 0.0, reused as the comparison operand below.
  zeroDouble(ScratchDoubleReg);
  // SecondScratchReg = (output <u 255), i.e. 0 when output >= 255.
  as_sltiu(SecondScratchReg, output, 255);
  // FP condition flag = (0.0 < input).
  as_colt(DoubleFloat, ScratchDoubleReg, input);
  // if res > 255; res = 255;
  as_movz(output, ScratchRegister, SecondScratchReg);
  // if !(input > 0); res = 0;
  as_movf(output, zero);
}
1245
1246 // higher level tag testing code
// Nunbox32 operand helpers: shift a base Operand to address the payload or
// the type-tag word of a boxed Value.
Operand MacroAssemblerMIPSCompat::ToPayload(Operand base) {
  return Operand(Register::FromCode(base.base()), base.disp() + PAYLOAD_OFFSET);
}

Operand MacroAssemblerMIPSCompat::ToType(Operand base) {
  return Operand(Register::FromCode(base.base()), base.disp() + TAG_OFFSET);
}
1254
// Tag tests that materialize the result as 0/1 in `dest` by comparing the
// Value's type register against the expected tag.  Only Equal/NotEqual make
// sense here.

void MacroAssemblerMIPSCompat::testNullSet(Condition cond,
                                           const ValueOperand& value,
                                           Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_NULL), cond);
}

void MacroAssemblerMIPSCompat::testObjectSet(Condition cond,
                                             const ValueOperand& value,
                                             Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_OBJECT), cond);
}

void MacroAssemblerMIPSCompat::testUndefinedSet(Condition cond,
                                                const ValueOperand& value,
                                                Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED), cond);
}
1275
1276 // unboxing code
// Unboxing: under nunbox32 the payload and type tag live in separate 32-bit
// words, so unboxing a non-double type is just extracting the payload word.
// No type check is performed — callers must have established the type.

void MacroAssemblerMIPSCompat::unboxNonDouble(const ValueOperand& operand,
                                              Register dest, JSValueType) {
  if (operand.payloadReg() != dest) ma_move(dest, operand.payloadReg());
}

void MacroAssemblerMIPSCompat::unboxNonDouble(const Address& src, Register dest,
                                              JSValueType) {
  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

void MacroAssemblerMIPSCompat::unboxNonDouble(const BaseIndex& src,
                                              Register dest, JSValueType) {
  computeScaledAddress(src, SecondScratchReg);
  ma_lw(dest, Address(SecondScratchReg, src.offset + PAYLOAD_OFFSET));
}

void MacroAssemblerMIPSCompat::unboxInt32(const ValueOperand& operand,
                                          Register dest) {
  ma_move(dest, operand.payloadReg());
}

void MacroAssemblerMIPSCompat::unboxInt32(const Address& src, Register dest) {
  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

void MacroAssemblerMIPSCompat::unboxBoolean(const ValueOperand& operand,
                                            Register dest) {
  ma_move(dest, operand.payloadReg());
}

void MacroAssemblerMIPSCompat::unboxBoolean(const Address& src, Register dest) {
  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

// A boxed double uses both words: payload is the low half, tag word the
// high half.
void MacroAssemblerMIPSCompat::unboxDouble(const ValueOperand& operand,
                                           FloatRegister dest) {
  moveToDoubleLo(operand.payloadReg(), dest);
  moveToDoubleHi(operand.typeReg(), dest);
}

void MacroAssemblerMIPSCompat::unboxDouble(const Address& src,
                                           FloatRegister dest) {
  ma_lw(ScratchRegister, Address(src.base, src.offset + PAYLOAD_OFFSET));
  moveToDoubleLo(ScratchRegister, dest);
  ma_lw(ScratchRegister, Address(src.base, src.offset + TAG_OFFSET));
  moveToDoubleHi(ScratchRegister, dest);
}

void MacroAssemblerMIPSCompat::unboxString(const ValueOperand& operand,
                                           Register dest) {
  ma_move(dest, operand.payloadReg());
}

void MacroAssemblerMIPSCompat::unboxString(const Address& src, Register dest) {
  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

void MacroAssemblerMIPSCompat::unboxObject(const ValueOperand& src,
                                           Register dest) {
  ma_move(dest, src.payloadReg());
}

void MacroAssemblerMIPSCompat::unboxObject(const Address& src, Register dest) {
  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}
1342
// Unbox into an AnyRegister.  For a float destination the value may be
// either an int32 (convert to double) or a double (move the raw bits);
// for a GPR destination only the payload word is transferred.
void MacroAssemblerMIPSCompat::unboxValue(const ValueOperand& src,
                                          AnyRegister dest, JSValueType) {
  if (dest.isFloat()) {
    Label notInt32, end;
    asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
    convertInt32ToDouble(src.payloadReg(), dest.fpu());
    ma_b(&end, ShortJump);
    bind(&notInt32);
    unboxDouble(src, dest.fpu());
    bind(&end);
  } else if (src.payloadReg() != dest.gpr()) {
    ma_move(dest.gpr(), src.payloadReg());
  }
}
1357
void MacroAssemblerMIPSCompat::unboxPrivate(const ValueOperand& src,
                                            Register dest) {
  ma_move(dest, src.payloadReg());
}

// Boxing: a double's raw bits fill both words of the ValueOperand; any other
// type gets its payload moved and the type tag materialized.  The unused
// FloatRegister parameter is a scratch slot required by the shared
// interface.
void MacroAssemblerMIPSCompat::boxDouble(FloatRegister src,
                                         const ValueOperand& dest,
                                         FloatRegister) {
  moveFromDoubleLo(src, dest.payloadReg());
  moveFromDoubleHi(src, dest.typeReg());
}

void MacroAssemblerMIPSCompat::boxNonDouble(JSValueType type, Register src,
                                            const ValueOperand& dest) {
  if (src != dest.payloadReg()) ma_move(dest.payloadReg(), src);
  ma_li(dest.typeReg(), ImmType(type));
}

// Conversions from boxed bool/int32 payloads to FP values.  Bools are
// normalized through convertBoolToInt32 first.

void MacroAssemblerMIPSCompat::boolValueToDouble(const ValueOperand& operand,
                                                 FloatRegister dest) {
  convertBoolToInt32(operand.payloadReg(), ScratchRegister);
  convertInt32ToDouble(ScratchRegister, dest);
}

void MacroAssemblerMIPSCompat::int32ValueToDouble(const ValueOperand& operand,
                                                  FloatRegister dest) {
  convertInt32ToDouble(operand.payloadReg(), dest);
}

void MacroAssemblerMIPSCompat::boolValueToFloat32(const ValueOperand& operand,
                                                  FloatRegister dest) {
  convertBoolToInt32(operand.payloadReg(), ScratchRegister);
  convertInt32ToFloat32(ScratchRegister, dest);
}

void MacroAssemblerMIPSCompat::int32ValueToFloat32(const ValueOperand& operand,
                                                   FloatRegister dest) {
  convertInt32ToFloat32(operand.payloadReg(), dest);
}

// Materialize a float constant into `dest`.
void MacroAssemblerMIPSCompat::loadConstantFloat32(float f,
                                                   FloatRegister dest) {
  ma_lis(dest, f);
}
1402
// Load a Value that is known to be numeric: if the tag says int32, load the
// payload and convert it to double; otherwise load the raw double.
void MacroAssemblerMIPSCompat::loadInt32OrDouble(const Address& src,
                                                 FloatRegister dest) {
  Label notInt32, end;
  // If it's an int, convert it to double.
  ma_lw(SecondScratchReg, Address(src.base, src.offset + TAG_OFFSET));
  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
  ma_lw(SecondScratchReg, Address(src.base, src.offset + PAYLOAD_OFFSET));
  convertInt32ToDouble(SecondScratchReg, dest);
  ma_b(&end, ShortJump);

  // Not an int, just load as double.
  bind(&notInt32);
  ma_ld(dest, src);
  bind(&end);
}
1418
// BaseIndex variant of loadInt32OrDouble.  Only SecondScratchReg is
// available, so the scaled address must be recomputed each time the scratch
// is clobbered by a loaded word.
void MacroAssemblerMIPSCompat::loadInt32OrDouble(Register base, Register index,
                                                 FloatRegister dest,
                                                 int32_t shift) {
  Label notInt32, end;

  // If it's an int, convert it to double.

  computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)),
                       SecondScratchReg);
  // Since we only have one scratch, we need to stomp over it with the tag.
  load32(Address(SecondScratchReg, TAG_OFFSET), SecondScratchReg);
  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);

  computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)),
                       SecondScratchReg);
  load32(Address(SecondScratchReg, PAYLOAD_OFFSET), SecondScratchReg);
  convertInt32ToDouble(SecondScratchReg, dest);
  ma_b(&end, ShortJump);

  // Not an int, just load as double.
  bind(&notInt32);
  // First, recompute the offset that had been stored in the scratch register
  // since the scratch register was overwritten loading in the type.
  computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)),
                       SecondScratchReg);
  loadDouble(Address(SecondScratchReg, 0), dest);
  bind(&end);
}
1447
// Materialize a double constant into `dest`.
void MacroAssemblerMIPSCompat::loadConstantDouble(double dp,
                                                  FloatRegister dest) {
  ma_lid(dest, dp);
}

// Extraction helpers: load one word of a boxed Value into `scratch` and
// return it.  extractObject reads the payload; extractTag reads the tag.

Register MacroAssemblerMIPSCompat::extractObject(const Address& address,
                                                 Register scratch) {
  ma_lw(scratch, Address(address.base, address.offset + PAYLOAD_OFFSET));
  return scratch;
}

Register MacroAssemblerMIPSCompat::extractTag(const Address& address,
                                              Register scratch) {
  ma_lw(scratch, Address(address.base, address.offset + TAG_OFFSET));
  return scratch;
}

Register MacroAssemblerMIPSCompat::extractTag(const BaseIndex& address,
                                              Register scratch) {
  computeScaledAddress(address, scratch);
  return extractTag(Address(scratch, address.offset), scratch);
}

// Compile-time half of a Value: its nunbox type tag.
uint32_t MacroAssemblerMIPSCompat::getType(const Value& val) {
  return val.toNunboxTag();
}

// Load the payload of a compile-time Value into `data`, going through
// ImmGCPtr for GC things so the pointer is traced.
void MacroAssemblerMIPSCompat::moveData(const Value& val, Register data) {
  if (val.isGCThing())
    ma_li(data, ImmGCPtr(val.toGCThing()));
  else
    ma_li(data, Imm32(val.toNunboxPayload()));
}
1481
/* There are 3 paths through the backedge jump. They are listed here in the order
1483 * in which instructions are executed.
1484 * - The short jump is simple:
1485 * b offset # Jumps directly to target.
1486 * lui at, addr1_hi # In delay slot. Don't care about 'at' here.
1487 *
1488 * - The long jump to loop header:
1489 * b label1
1490 * lui at, addr1_hi # In delay slot. We use the value in 'at' later.
1491 * label1:
1492 * ori at, addr1_lo
1493 * jr at
1494 * lui at, addr2_hi # In delay slot. Don't care about 'at' here.
1495 *
1496 * - The long jump to interrupt loop:
1497 * b label2
1498 * lui at, addr1_hi # In delay slot. Don't care about 'at' here.
1499 * label2:
1500 * lui at, addr2_hi
1501 * ori at, addr2_lo
1502 * jr at
1503 * nop # In delay slot.
1504 *
1505 * The backedge is done this way to avoid patching lui+ori pair while it is
1506 * being executed. Look also at jit::PatchBackedge().
1507 */
// Emit a patchable backedge jump.  The exact 8-word layout (documented in
// the comment block above this function) is relied on by jit::PatchBackedge,
// so the instruction sequence and the offset asserts must not change.
CodeOffsetJump MacroAssemblerMIPSCompat::backedgeJump(RepatchLabel* label,
                                                      Label* documentation) {
  // Only one branch per label.
  MOZ_ASSERT(!label->used());

  BufferOffset bo = nextOffset();
  label->use(bo.getOffset());

  // Backedges are short jumps when bound, but can become long when patched.
  m_buffer.ensureSpace(8 * sizeof(uint32_t));
  // Jump to "label1" by default to jump to the loop header.
  as_b(BOffImm16(2 * sizeof(uint32_t)));
  // No need for nop here. We can safely put next instruction in delay slot.
  ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
  MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 3 * sizeof(uint32_t));
  as_jr(ScratchRegister);
  // No need for nop here. We can safely put next instruction in delay slot.
  ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
  as_jr(ScratchRegister);
  as_nop();
  MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 8 * sizeof(uint32_t));
  return CodeOffsetJump(bo.getOffset());
}
1531
// Emit a patchable long jump: a patchable immediate load of the (initially
// invalid) target into ScratchRegister followed by jr + delay-slot nop.
CodeOffsetJump MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel* label,
                                                       Label* documentation) {
  // Only one branch per label.
  MOZ_ASSERT(!label->used());

  BufferOffset bo = nextOffset();
  label->use(bo.getOffset());
  ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
  as_jr(ScratchRegister);
  as_nop();
  return CodeOffsetJump(bo.getOffset());
}
1544
1545 /////////////////////////////////////////////////////////////////
1546 // X86/X64-common/ARM/MIPS interface.
1547 /////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
// X86/X64-common/ARM/MIPS interface.
/////////////////////////////////////////////////////////////////
// storeValue: write a boxed Value (payload + tag words).  The Operand and
// BaseIndex forms reduce the destination to a plain Address first.
void MacroAssemblerMIPSCompat::storeValue(ValueOperand val, Operand dst) {
  storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
}

void MacroAssemblerMIPSCompat::storeValue(ValueOperand val,
                                          const BaseIndex& dest) {
  computeScaledAddress(dest, SecondScratchReg);
  storeValue(val, Address(SecondScratchReg, dest.offset));
}

void MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg,
                                          BaseIndex dest) {
  computeScaledAddress(dest, ScratchRegister);

  // Make sure that ma_sw doesn't clobber ScratchRegister
  int32_t offset = dest.offset;
  if (!Imm16::IsInSignedRange(offset)) {
    // Fold an out-of-range offset into the base so the Address form below
    // sees a zero offset.
    ma_li(SecondScratchReg, Imm32(offset));
    as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
    offset = 0;
  }

  storeValue(type, reg, Address(ScratchRegister, offset));
}
1572
// Address forms of storeValue: write payload then tag as two 32-bit words.

void MacroAssemblerMIPSCompat::storeValue(ValueOperand val,
                                          const Address& dest) {
  ma_sw(val.payloadReg(), Address(dest.base, dest.offset + PAYLOAD_OFFSET));
  ma_sw(val.typeReg(), Address(dest.base, dest.offset + TAG_OFFSET));
}

void MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg,
                                          Address dest) {
  // SecondScratchReg is used to stage the tag below.
  MOZ_ASSERT(dest.base != SecondScratchReg);

  ma_sw(reg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
  ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
}

void MacroAssemblerMIPSCompat::storeValue(const Value& val, Address dest) {
  // SecondScratchReg stages both the tag and the payload.
  MOZ_ASSERT(dest.base != SecondScratchReg);

  ma_li(SecondScratchReg, Imm32(getType(val)));
  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
  moveData(val, SecondScratchReg);
  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
}
1596
// Store a compile-time Value to a BaseIndex destination.
void MacroAssemblerMIPSCompat::storeValue(const Value& val, BaseIndex dest) {
  computeScaledAddress(dest, ScratchRegister);

  // Make sure that ma_sw doesn't clobber ScratchRegister
  int32_t offset = dest.offset;
  if (!Imm16::IsInSignedRange(offset)) {
    // Fold an out-of-range offset into the base address.
    ma_li(SecondScratchReg, Imm32(offset));
    as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
    offset = 0;
  }
  storeValue(val, Address(ScratchRegister, offset));
}
1609
// Load a boxed Value into a ValueOperand (payload + tag registers).

void MacroAssemblerMIPSCompat::loadValue(const BaseIndex& addr,
                                         ValueOperand val) {
  computeScaledAddress(addr, SecondScratchReg);
  loadValue(Address(SecondScratchReg, addr.offset), val);
}

void MacroAssemblerMIPSCompat::loadValue(Address src, ValueOperand val) {
  // Ensure that loading the payload does not erase the pointer to the
  // Value in memory.
  if (src.base != val.payloadReg()) {
    ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
    ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
  } else {
    // Base aliases the payload register: read the tag first so the base is
    // still intact when the payload overwrites it.
    ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
    ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
  }
}
1627
tagValue(JSValueType type,Register payload,ValueOperand dest)1628 void MacroAssemblerMIPSCompat::tagValue(JSValueType type, Register payload,
1629 ValueOperand dest) {
1630 MOZ_ASSERT(payload != dest.typeReg());
1631 ma_li(dest.typeReg(), ImmType(type));
1632 if (payload != dest.payloadReg()) ma_move(dest.payloadReg(), payload);
1633 }
1634
// Push the boxed Value held in |val| onto the stack.
void MacroAssemblerMIPSCompat::pushValue(ValueOperand val) {
  // Allocate stack slots for type and payload. One for each.
  asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
  // Store type and payload.
  storeValue(val, Address(StackPointer, 0));
}
1641
pushValue(const Address & addr)1642 void MacroAssemblerMIPSCompat::pushValue(const Address& addr) {
1643 // Allocate stack slots for type and payload. One for each.
1644 ma_subu(StackPointer, StackPointer, Imm32(sizeof(Value)));
1645 // If address is based on StackPointer its offset needs to be adjusted
1646 // to accommodate for previous stack allocation.
1647 int32_t offset =
1648 addr.base != StackPointer ? addr.offset : addr.offset + sizeof(Value);
1649 // Store type and payload.
1650 ma_lw(ScratchRegister, Address(addr.base, offset + TAG_OFFSET));
1651 ma_sw(ScratchRegister, Address(StackPointer, TAG_OFFSET));
1652 ma_lw(ScratchRegister, Address(addr.base, offset + PAYLOAD_OFFSET));
1653 ma_sw(ScratchRegister, Address(StackPointer, PAYLOAD_OFFSET));
1654 }
1655
// Pop a boxed Value off the stack into |val| and release its two slots.
void MacroAssemblerMIPSCompat::popValue(ValueOperand val) {
  // Load payload and type.
  as_lw(val.payloadReg(), StackPointer, PAYLOAD_OFFSET);
  as_lw(val.typeReg(), StackPointer, TAG_OFFSET);
  // Free stack.
  as_addiu(StackPointer, StackPointer, sizeof(Value));
}
1663
// Store only the payload word of the constant |val| to the Value slot at
// |dest|; the slot's tag word is left untouched. Clobbers SecondScratchReg.
void MacroAssemblerMIPSCompat::storePayload(const Value& val, Address dest) {
  moveData(val, SecondScratchReg);
  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
}
1668
storePayload(Register src,Address dest)1669 void MacroAssemblerMIPSCompat::storePayload(Register src, Address dest) {
1670 ma_sw(src, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
1671 return;
1672 }
1673
// Store only the payload word of the constant |val| to the Value slot
// addressed by |dest|. Only offset-free BaseIndex forms are supported; the
// payload displacement is applied by the store itself. Clobbers both scratch
// registers.
void MacroAssemblerMIPSCompat::storePayload(const Value& val,
                                            const BaseIndex& dest) {
  MOZ_ASSERT(dest.offset == 0);

  computeScaledAddress(dest, SecondScratchReg);

  moveData(val, ScratchRegister);

  as_sw(ScratchRegister, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
}
1684
// Store only the payload word from |src| to the Value slot addressed by
// |dest| (offset-free BaseIndex only). Clobbers SecondScratchReg.
void MacroAssemblerMIPSCompat::storePayload(Register src,
                                            const BaseIndex& dest) {
  MOZ_ASSERT(dest.offset == 0);

  computeScaledAddress(dest, SecondScratchReg);
  as_sw(src, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
}
1692
// Overwrite only the tag word of the Value slot at |dest| with |tag|; the
// payload word is left untouched. Clobbers SecondScratchReg.
void MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Address dest) {
  ma_li(SecondScratchReg, tag);
  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
}
1697
// Overwrite only the tag word of the Value slot addressed by |dest|
// (offset-free BaseIndex only). Clobbers both scratch registers.
void MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, const BaseIndex& dest) {
  MOZ_ASSERT(dest.offset == 0);

  computeScaledAddress(dest, SecondScratchReg);
  ma_li(ScratchRegister, tag);
  as_sw(ScratchRegister, SecondScratchReg, TAG_OFFSET);
}
1705
breakpoint()1706 void MacroAssemblerMIPSCompat::breakpoint() { as_break(0); }
1707
// Coerce the Value in |source| to a double in |dest|. Doubles are unboxed
// directly; int32s are converted; any other type jumps to |failure|.
void MacroAssemblerMIPSCompat::ensureDouble(const ValueOperand& source,
                                            FloatRegister dest,
                                            Label* failure) {
  Label isDouble, done;
  asMasm().branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
  asMasm().branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);

  // Int32 case: convert the payload in place.
  convertInt32ToDouble(source.payloadReg(), dest);
  jump(&done);

  bind(&isDouble);
  unboxDouble(source, dest);

  bind(&done);
}
1723
// Debug-only runtime check that sp is ABI-aligned; traps with
// BREAK_STACK_UNALIGNED if not. Compiles to nothing in release builds.
void MacroAssemblerMIPSCompat::checkStackAlignment() {
#ifdef DEBUG
  Label aligned;
  // Test the low alignment bits of sp.
  as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
  ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
  as_break(BREAK_STACK_UNALIGNED);
  bind(&aligned);
#endif
}
1733
// Round sp down to ABI alignment, saving the original sp in the newly
// reserved slot so restoreStackPointer() can undo the adjustment.
// Clobbers SecondScratchReg.
void MacroAssemblerMIPSCompat::alignStackPointer() {
  movePtr(StackPointer, SecondScratchReg);
  asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
  asMasm().andPtr(Imm32(~(ABIStackAlignment - 1)), StackPointer);
  storePtr(SecondScratchReg, Address(StackPointer, 0));
}
1740
// Undo alignStackPointer(): reload the saved pre-alignment sp from the slot
// at the current sp.
void MacroAssemblerMIPSCompat::restoreStackPointer() {
  loadPtr(Address(StackPointer, 0), StackPointer);
}
1744
// Exception-propagation tail: allocate a ResumeFromException record on the
// stack, call the C++ |handler| to fill it in, then dispatch on the
// resulting resume kind (return from entry frame, baseline catch/finally,
// forced return, bailout, or wasm unwind).
void MacroAssemblerMIPSCompat::handleFailureWithHandlerTail(
    void* handler, Label* profilerExitTail) {
  // Reserve space for exception information.
  int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
             ~(ABIStackAlignment - 1);
  asMasm().subPtr(Imm32(size), StackPointer);
  ma_move(a0, StackPointer);  // Use a0 since it is a first function argument

  // Call the handler.
  asMasm().setupUnalignedABICall(a1);
  asMasm().passABIArg(a0);
  asMasm().callWithABI(handler, MoveOp::GENERAL,
                       CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  Label entryFrame;
  Label catch_;
  Label finally;
  Label return_;
  Label bailout;
  Label wasm;

  // Already clobbered a0, so use it...
  load32(Address(StackPointer, offsetof(ResumeFromException, kind)), a0);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
                    &entryFrame);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_CATCH), &catch_);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_FINALLY), &finally);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_WASM), &wasm);

  breakpoint();  // Invalid kind.

  // No exception handler. Load the error value, load the new stack pointer
  // and return from the entry frame.
  bind(&entryFrame);
  asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);

  // We're going to be returning by the ion calling convention
  ma_pop(ra);
  as_jr(ra);
  as_nop();  // Branch delay slot.

  // If we found a catch handler, this must be a baseline frame. Restore
  // state and jump to the catch block.
  bind(&catch_);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, target)), a0);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  jump(a0);

  // If we found a finally block, this must be a baseline frame. Push
  // two values expected by JSOP_RETSUB: BooleanValue(true) and the
  // exception.
  bind(&finally);
  ValueOperand exception = ValueOperand(a1, a2);
  loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);

  loadPtr(Address(sp, offsetof(ResumeFromException, target)), a0);
  loadPtr(Address(sp, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);

  pushValue(BooleanValue(true));
  pushValue(exception);
  jump(a0);

  // Only used in debug mode. Return BaselineFrame->returnValue() to the
  // caller.
  bind(&return_);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  loadValue(
      Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
      JSReturnOperand);
  // Pop the baseline frame: sp takes the frame pointer, then restore the
  // caller's frame pointer.
  ma_move(StackPointer, BaselineFrameReg);
  pop(BaselineFrameReg);

  // If profiling is enabled, then update the lastProfilingFrame to refer to
  // caller frame before returning.
  {
    Label skipProfilingInstrumentation;
    // Test if profiler enabled.
    AbsoluteAddress addressOfEnabled(
        GetJitContext()->runtime->geckoProfiler().addressOfEnabled());
    asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
                      &skipProfilingInstrumentation);
    jump(profilerExitTail);
    bind(&skipProfilingInstrumentation);
  }

  ret();

  // If we are bailing out to baseline to handle an exception, jump to
  // the bailout tail stub.
  bind(&bailout);
  loadPtr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), a2);
  ma_li(ReturnReg, Imm32(BAILOUT_RETURN_OK));
  loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
  jump(a1);

  // If we are throwing and the innermost frame was a wasm frame, reset SP and
  // FP; SP is pointing to the unwound return address to the wasm entry, so
  // we can just ret().
  bind(&wasm);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          FramePointer);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  ret();
}
1868
toggledJump(Label * label)1869 CodeOffset MacroAssemblerMIPSCompat::toggledJump(Label* label) {
1870 CodeOffset ret(nextOffset().getOffset());
1871 ma_b(label);
1872 return ret;
1873 }
1874
// Emit a patchable call to |target| that can be toggled between a real
// jalr call and a pair of nops. The emitted sequence must have a fixed
// size (ToggledCallSize) so the toggling code can patch it in place.
CodeOffset MacroAssemblerMIPSCompat::toggledCall(JitCode* target,
                                                 bool enabled) {
  BufferOffset bo = nextOffset();
  CodeOffset offset(bo.getOffset());
  addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
  // Patchable load of the target address; always emitted so the address can
  // be relocated whether or not the call is currently enabled.
  ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
  if (enabled) {
    as_jalr(ScratchRegister);
    as_nop();
  } else {
    // Same footprint as the enabled form, but inert.
    as_nop();
    as_nop();
  }
  // Verify the fixed-size invariant the patcher relies on.
  MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
                            ToggledCallSize(nullptr));
  return offset;
}
1892
// Record |framePtr| as the last profiling frame on the current JitActivation
// and clear the last profiling call site. |scratch| is clobbered.
void MacroAssemblerMIPSCompat::profilerEnterFrame(Register framePtr,
                                                  Register scratch) {
  asMasm().loadJSContext(scratch);
  loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
  storePtr(framePtr,
           Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
  storePtr(ImmPtr(nullptr),
           Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
}
1902
// Tail-jump to the shared profiler exit-frame stub, which pops the
// profiling frame bookkeeping installed by profilerEnterFrame.
void MacroAssemblerMIPSCompat::profilerExitFrame() {
  jump(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
}
1906
subFromStackPtr(Imm32 imm32)1907 void MacroAssembler::subFromStackPtr(Imm32 imm32) {
1908 if (imm32.value) asMasm().subPtr(imm32, StackPointer);
1909 }
1910
1911 //{{{ check_macroassembler_style
1912 // ===============================================================
1913 // Stack manipulation functions.
1914
// Spill every register in |set| to the stack: GPRs first in a packed area,
// then the FPU registers in a separately aligned area so sdc1's 8-byte
// alignment requirement is met. Clobbers SecondScratchReg.
void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
  int32_t diffF = set.fpus().getPushSizeInBytes();
  int32_t diffG = set.gprs().size() * sizeof(intptr_t);

  reserveStack(diffG);
  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diffG -= sizeof(intptr_t);
    storePtr(*iter, Address(StackPointer, diffG));
  }
  MOZ_ASSERT(diffG == 0);

  if (diffF > 0) {
    // Double values have to be aligned. We reserve extra space so that we can
    // start writing from the first aligned location.
    // We reserve a whole extra double so that the buffer has even size.
    ma_and(SecondScratchReg, sp, Imm32(~(ABIStackAlignment - 1)));
    reserveStack(diffF);

    diffF -= sizeof(double);

    // Store doubles downward from the aligned anchor computed above;
    // PopRegsInMaskIgnore recomputes the same anchor when reloading.
    for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush());
         iter.more(); ++iter) {
      as_sdc1(*iter, SecondScratchReg, -diffF);
      diffF -= sizeof(double);
    }

    MOZ_ASSERT(diffF == 0);
  }
}
1944
PopRegsInMaskIgnore(LiveRegisterSet set,LiveRegisterSet ignore)1945 void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
1946 LiveRegisterSet ignore) {
1947 int32_t diffG = set.gprs().size() * sizeof(intptr_t);
1948 int32_t diffF = set.fpus().getPushSizeInBytes();
1949 const int32_t reservedG = diffG;
1950 const int32_t reservedF = diffF;
1951
1952 if (reservedF > 0) {
1953 // Read the buffer form the first aligned location.
1954 ma_addu(SecondScratchReg, sp, Imm32(reservedF));
1955 ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(ABIStackAlignment - 1)));
1956
1957 diffF -= sizeof(double);
1958
1959 LiveFloatRegisterSet fpignore(ignore.fpus().reduceSetForPush());
1960 for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush());
1961 iter.more(); ++iter) {
1962 if (!ignore.has(*iter)) as_ldc1(*iter, SecondScratchReg, -diffF);
1963 diffF -= sizeof(double);
1964 }
1965 freeStack(reservedF);
1966 MOZ_ASSERT(diffF == 0);
1967 }
1968
1969 for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
1970 diffG -= sizeof(intptr_t);
1971 if (!ignore.has(*iter)) loadPtr(Address(StackPointer, diffG), *iter);
1972 }
1973 freeStack(reservedG);
1974 MOZ_ASSERT(diffG == 0);
1975 }
1976
// Store every register in |set| below |dest| (which must be sp-relative and
// leave enough room), mirroring PushRegsInMask's layout but without moving
// the stack pointer. |scratch| anchors the aligned FPU area.
void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
                                     Register scratch) {
  int32_t diffF = set.fpus().getPushSizeInBytes();
  int32_t diffG = set.gprs().size() * sizeof(intptr_t);

  MOZ_ASSERT(dest.offset >= diffG + diffF);
  MOZ_ASSERT(dest.base == StackPointer);

  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diffG -= sizeof(intptr_t);
    dest.offset -= sizeof(intptr_t);
    storePtr(*iter, dest);
  }
  MOZ_ASSERT(diffG == 0);

  if (diffF > 0) {
    // Align the FPU store anchor so sdc1 targets 8-byte-aligned slots.
    computeEffectiveAddress(dest, scratch);
    ma_and(scratch, scratch, Imm32(~(ABIStackAlignment - 1)));

    diffF -= sizeof(double);

    for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush());
         iter.more(); ++iter) {
      as_sdc1(*iter, scratch, -diffF);
      diffF -= sizeof(double);
    }
    MOZ_ASSERT(diffF == 0);
  }
}
2006 // ===============================================================
2007 // ABI function calls.
2008
// Begin an ABI call from code whose stack alignment is unknown: force sp to
// ABI alignment and stash the original sp (via |scratch|) in the reserved
// slot so callWithABIPost can restore it.
void MacroAssembler::setupUnalignedABICall(Register scratch) {
  MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
  setupABICall();
  dynamicAlignment_ = true;

  ma_move(scratch, StackPointer);

  // Force sp to be aligned
  asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
  ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
  storePtr(scratch, Address(StackPointer, 0));
}
2021
// Finish ABI-call setup: reserve outgoing-argument stack space (plus a slot
// for $ra), keep the call site ABI-aligned, save $ra, and run the move
// resolver to put arguments in place. The reserved byte count is returned
// through |stackAdjust| for callWithABIPost.
void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
  MOZ_ASSERT(inCall_);
  uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();

  // Reserve place for $ra.
  stackForCall += sizeof(intptr_t);

  if (dynamicAlignment_) {
    // sp is already aligned (setupUnalignedABICall); just round the
    // reservation itself.
    stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
  } else {
    // Account for everything already pushed since the last known-aligned
    // point (and the wasm frame header when called from wasm).
    uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
    stackForCall += ComputeByteAlignment(
        stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
  }

  *stackAdjust = stackForCall;
  reserveStack(stackForCall);

  // Save $ra because call is going to clobber it. Restore it in
  // callWithABIPost. NOTE: This is needed for calls from SharedIC.
  // Maybe we can do this differently.
  storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));

  // Position all arguments.
  {
    enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
    if (!enoughMemory_) return;

    MoveEmitter emitter(*this);
    emitter.emit(moveResolver_);
    emitter.finish();
  }

  assertStackAlignment(ABIStackAlignment);
}
2057
// Tear down after an ABI call: restore $ra, undo the stack reservation made
// in callWithABIPre (and, for unaligned calls, the dynamic realignment), and
// clear the in-call debug flag. |result| is unused on MIPS32.
void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
                                     bool callFromWasm) {
  // Restore ra value (as stored in callWithABIPre()).
  loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);

  if (dynamicAlignment_) {
    // Restore sp value from stack (as stored in setupUnalignedABICall()).
    loadPtr(Address(StackPointer, stackAdjust), StackPointer);
    // Use adjustFrame instead of freeStack because we already restored sp.
    adjustFrame(-stackAdjust);
  } else {
    freeStack(stackAdjust);
  }

#ifdef DEBUG
  MOZ_ASSERT(inCall_);
  inCall_ = false;
#endif
}
2077
// ABI call through a function pointer held in |fun|.
void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
  // Move the callee into t9 first: |fun| may be one of the IntArg registers
  // that the argument moves in callWithABIPre clobber, and no instruction
  // between this move and the call may touch t9.
  ma_move(t9, fun);
  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);
  call(t9);
  callWithABIPost(stackAdjust, result);
}
2088
// ABI call through a function pointer stored in memory at |fun|.
void MacroAssembler::callWithABINoProfiler(const Address& fun,
                                           MoveOp::Type result) {
  // Load the callee in t9, as above.
  loadPtr(Address(fun.base, fun.offset), t9);
  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);
  call(t9);
  callWithABIPost(stackAdjust, result);
}
2098 // ===============================================================
2099 // Move instructions
2100
// Box the typed-or-value |src| into the ValueOperand |dest|: forward boxed
// values, tag raw non-float payloads, and box floats/doubles via the FPU.
void MacroAssembler::moveValue(const TypedOrValueRegister& src,
                               const ValueOperand& dest) {
  if (src.hasValue()) {
    moveValue(src.valueReg(), dest);
    return;
  }

  MIRType type = src.type();
  AnyRegister reg = src.typedReg();

  if (!IsFloatingPointType(type)) {
    // Non-float payload: write the static tag and copy the payload word.
    mov(ImmWord(MIRTypeToTag(type)), dest.typeReg());
    if (reg.gpr() != dest.payloadReg()) move32(reg.gpr(), dest.payloadReg());
    return;
  }

  // Float32 is widened to double before boxing.
  ScratchDoubleScope scratch(*this);
  FloatRegister freg = reg.fpu();
  if (type == MIRType::Float32) {
    convertFloat32ToDouble(freg, scratch);
    freg = scratch;
  }
  boxDouble(freg, dest, scratch);
}
2125
// Copy the boxed Value in |src| to |dest|, handling every possible aliasing
// between the two register pairs (including a full swap).
void MacroAssembler::moveValue(const ValueOperand& src,
                               const ValueOperand& dest) {
  Register s0 = src.typeReg();
  Register s1 = src.payloadReg();
  Register d0 = dest.typeReg();
  Register d1 = dest.payloadReg();

  // Either one or both of the source registers could be the same as a
  // destination register.
  if (s1 == d0) {
    if (s0 == d1) {
      // If both are, this is just a swap of two registers.
      ScratchRegisterScope scratch(*this);
      MOZ_ASSERT(d1 != scratch);
      MOZ_ASSERT(d0 != scratch);
      move32(d1, scratch);
      move32(d0, d1);
      move32(scratch, d0);
      return;
    }
    // If only one is, copy that source first.
    mozilla::Swap(s0, s1);
    mozilla::Swap(d0, d1);
  }

  if (s0 != d0) move32(s0, d0);
  if (s1 != d1) move32(s1, d1);
}
2154
moveValue(const Value & src,const ValueOperand & dest)2155 void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
2156 move32(Imm32(src.toNunboxTag()), dest.typeReg());
2157 if (src.isGCThing())
2158 movePtr(ImmGCPtr(src.toGCThing()), dest.payloadReg());
2159 else
2160 move32(Imm32(src.toNunboxPayload()), dest.payloadReg());
2161 }
2162
2163 // ===============================================================
2164 // Branch functions
2165
// Branch to |label| if the Value stored at |address| holds (cond == Equal) /
// does not hold (cond == NotEqual) a nursery-allocated GC cell. Only object
// and string payloads can be nursery cells. Clobbers |temp|.
void MacroAssembler::branchValueIsNurseryCell(Condition cond,
                                              const Address& address,
                                              Register temp, Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
  Label done, checkAddress;

  // Objects and strings fall through to the chunk check; any other type is
  // definitely not a nursery cell.
  branchTestObject(Assembler::Equal, address, &checkAddress);
  branchTestString(Assembler::NotEqual, address,
                   cond == Assembler::Equal ? &done : label);

  bind(&checkAddress);
  loadPtr(address, temp);
  branchPtrInNurseryChunk(cond, temp, InvalidReg, label);

  bind(&done);
}
2182
// Register-pair variant: branch to |label| if the Value in |value| holds
// (cond == Equal) / does not hold (cond == NotEqual) a nursery-allocated GC
// cell. Clobbers |temp|.
void MacroAssembler::branchValueIsNurseryCell(Condition cond,
                                              ValueOperand value, Register temp,
                                              Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

  Label done, checkAddress;
  // Only object and string payloads can point into the nursery.
  branchTestObject(Assembler::Equal, value, &checkAddress);
  branchTestString(Assembler::NotEqual, value,
                   cond == Assembler::Equal ? &done : label);

  bind(&checkAddress);
  branchPtrInNurseryChunk(cond, value.payloadReg(), temp, label);

  bind(&done);
}
2198
// Branch to |label| if the Value in |value| is (cond == Equal) / is not
// (cond == NotEqual) an object allocated in the nursery. Clobbers |temp|.
void MacroAssembler::branchValueIsNurseryObject(Condition cond,
                                                ValueOperand value,
                                                Register temp, Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
  Label done;

  // Non-objects can never be nursery objects.
  branchTestObject(Assembler::NotEqual, value,
                   cond == Assembler::Equal ? &done : label);
  branchPtrInNurseryChunk(cond, value.payloadReg(), temp, label);

  bind(&done);
}
2211
branchTestValue(Condition cond,const ValueOperand & lhs,const Value & rhs,Label * label)2212 void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
2213 const Value& rhs, Label* label) {
2214 MOZ_ASSERT(cond == Equal || cond == NotEqual);
2215 ScratchRegisterScope scratch(*this);
2216 moveData(rhs, scratch);
2217
2218 if (cond == Equal) {
2219 Label done;
2220 ma_b(lhs.payloadReg(), scratch, &done, NotEqual, ShortJump);
2221 { ma_b(lhs.typeReg(), Imm32(getType(rhs)), label, Equal); }
2222 bind(&done);
2223 } else {
2224 ma_b(lhs.payloadReg(), scratch, label, NotEqual);
2225
2226 ma_b(lhs.typeReg(), Imm32(getType(rhs)), label, NotEqual);
2227 }
2228 }
2229
2230 // ========================================================================
2231 // Memory access primitives.
2232 template <typename T>
storeUnboxedValue(const ConstantOrRegister & value,MIRType valueType,const T & dest,MIRType slotType)2233 void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
2234 MIRType valueType, const T& dest,
2235 MIRType slotType) {
2236 if (valueType == MIRType::Double) {
2237 storeDouble(value.reg().typedReg().fpu(), dest);
2238 return;
2239 }
2240
2241 // Store the type tag if needed.
2242 if (valueType != slotType)
2243 storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);
2244
2245 // Store the payload.
2246 if (value.constant())
2247 storePayload(value.value(), dest);
2248 else
2249 storePayload(value.reg().typedReg().gpr(), dest);
2250 }
2251
2252 template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
2253 MIRType valueType,
2254 const Address& dest,
2255 MIRType slotType);
2256 template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
2257 MIRType valueType,
2258 const BaseIndex& dest,
2259 MIRType slotType);
2260
// Truncate the double |input| to an unsigned 32-bit integer in |output|,
// jumping to |oolEntry| on overflow/NaN. Values above INT32_MAX are handled
// by biasing the input down by 2^31 and adding the bias back as an integer.
void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
                                                Register output,
                                                bool isSaturating,
                                                Label* oolEntry) {
  Label done;

  as_truncwd(ScratchFloat32Reg, input);
  ma_li(ScratchRegister, Imm32(INT32_MAX));
  moveFromFloat32(ScratchFloat32Reg, output);

  // For numbers in -1.[ : ]INT32_MAX range do nothing more
  ma_b(output, ScratchRegister, &done, Assembler::Below, ShortJump);

  // High range: truncate (input - 2^31) and add 2^31 (as INT32_MIN) back.
  loadConstantDouble(double(INT32_MAX + 1ULL), ScratchDoubleReg);
  ma_li(ScratchRegister, Imm32(INT32_MIN));
  as_subd(ScratchDoubleReg, input, ScratchDoubleReg);
  as_truncwd(ScratchFloat32Reg, ScratchDoubleReg);
  // Read FCSR to pick up the invalid-operation (CauseV) flag from trunc.
  as_cfc1(SecondScratchReg, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, output);
  ma_ext(SecondScratchReg, SecondScratchReg, Assembler::CauseV, 1);
  ma_addu(output, ScratchRegister);

  // Any invalid-operation flag means out-of-range input: go out of line.
  ma_b(SecondScratchReg, Imm32(0), oolEntry, Assembler::NotEqual);

  bind(&done);
}
2287
// Truncate the float |input| to an unsigned 32-bit integer in |output|,
// jumping to |oolEntry| on overflow/NaN. Same biasing scheme as the double
// variant, plus a guard for negatives that truncate to 0 after the bias.
void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
                                                 Register output,
                                                 bool isSaturating,
                                                 Label* oolEntry) {
  Label done;

  as_truncws(ScratchFloat32Reg, input);
  ma_li(ScratchRegister, Imm32(INT32_MAX));
  moveFromFloat32(ScratchFloat32Reg, output);
  // For numbers in -1.[ : ]INT32_MAX range do nothing more
  ma_b(output, ScratchRegister, &done, Assembler::Below, ShortJump);

  // High range: truncate (input - 2^31) and add 2^31 (as INT32_MIN) back.
  loadConstantFloat32(float(INT32_MAX + 1ULL), ScratchFloat32Reg);
  ma_li(ScratchRegister, Imm32(INT32_MIN));
  as_subs(ScratchFloat32Reg, input, ScratchFloat32Reg);
  as_truncws(ScratchFloat32Reg, ScratchFloat32Reg);
  // Read FCSR to pick up the invalid-operation (CauseV) flag from trunc.
  as_cfc1(SecondScratchReg, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, output);
  ma_ext(SecondScratchReg, SecondScratchReg, Assembler::CauseV, 1);
  ma_addu(output, ScratchRegister);

  // Guard against negative values that result in 0 due the precision loss.
  as_sltiu(ScratchRegister, output, 1);
  ma_or(SecondScratchReg, ScratchRegister);

  ma_b(SecondScratchReg, Imm32(0), oolEntry, Assembler::NotEqual);

  bind(&done);
}
2317
// Aligned wasm 64-bit load; delegates to the shared impl with no unaligned
// temp register.
void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
                                 Register memoryBase, Register ptr,
                                 Register ptrScratch, Register64 output) {
  wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
}
2323
// Unaligned wasm 64-bit load; |tmp| is required by the byte-wise unaligned
// access sequence in the shared impl.
void MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
                                          Register memoryBase, Register ptr,
                                          Register ptrScratch,
                                          Register64 output, Register tmp) {
  wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, tmp);
}
2330
// Aligned wasm 64-bit store; delegates to the shared impl with no unaligned
// temp register.
void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
                                  Register64 value, Register memoryBase,
                                  Register ptr, Register ptrScratch) {
  wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
}
2336
// Unaligned wasm 64-bit store; |tmp| is required by the byte-wise unaligned
// access sequence in the shared impl.
void MacroAssembler::wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
                                           Register64 value,
                                           Register memoryBase, Register ptr,
                                           Register ptrScratch, Register tmp) {
  wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, tmp);
}
2343
wasmLoadI64Impl(const wasm::MemoryAccessDesc & access,Register memoryBase,Register ptr,Register ptrScratch,Register64 output,Register tmp)2344 void MacroAssemblerMIPSCompat::wasmLoadI64Impl(
2345 const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
2346 Register ptrScratch, Register64 output, Register tmp) {
2347 uint32_t offset = access.offset();
2348 MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
2349
2350 // Maybe add the offset.
2351 if (offset) {
2352 asMasm().movePtr(ptr, ptrScratch);
2353 asMasm().addPtr(Imm32(offset), ptrScratch);
2354 ptr = ptrScratch;
2355 }
2356
2357 unsigned byteSize = access.byteSize();
2358 bool isSigned;
2359
2360 switch (access.type()) {
2361 case Scalar::Int8:
2362 isSigned = true;
2363 break;
2364 case Scalar::Uint8:
2365 isSigned = false;
2366 break;
2367 case Scalar::Int16:
2368 isSigned = true;
2369 break;
2370 case Scalar::Uint16:
2371 isSigned = false;
2372 break;
2373 case Scalar::Int32:
2374 isSigned = true;
2375 break;
2376 case Scalar::Uint32:
2377 isSigned = false;
2378 break;
2379 case Scalar::Int64:
2380 isSigned = true;
2381 break;
2382 default:
2383 MOZ_CRASH("unexpected array type");
2384 }
2385
2386 BaseIndex address(memoryBase, ptr, TimesOne);
2387 MOZ_ASSERT(INT64LOW_OFFSET == 0);
2388 if (IsUnaligned(access)) {
2389 MOZ_ASSERT(tmp != InvalidReg);
2390 if (byteSize <= 4) {
2391 asMasm().ma_load_unaligned(access, output.low, address, tmp,
2392 static_cast<LoadStoreSize>(8 * byteSize),
2393 isSigned ? SignExtend : ZeroExtend);
2394 if (!isSigned)
2395 asMasm().move32(Imm32(0), output.high);
2396 else
2397 asMasm().ma_sra(output.high, output.low, Imm32(31));
2398 } else {
2399 MOZ_ASSERT(output.low != ptr);
2400 asMasm().ma_load_unaligned(access, output.low, address, tmp, SizeWord,
2401 ZeroExtend);
2402 asMasm().ma_load_unaligned(
2403 access, output.high,
2404 BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), tmp, SizeWord,
2405 SignExtend);
2406 }
2407 return;
2408 }
2409
2410 asMasm().memoryBarrierBefore(access.sync());
2411 if (byteSize <= 4) {
2412 asMasm().ma_load(output.low, address,
2413 static_cast<LoadStoreSize>(8 * byteSize),
2414 isSigned ? SignExtend : ZeroExtend);
2415 asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
2416 if (!isSigned)
2417 asMasm().move32(Imm32(0), output.high);
2418 else
2419 asMasm().ma_sra(output.high, output.low, Imm32(31));
2420 } else {
2421 MOZ_ASSERT(output.low != ptr);
2422 asMasm().ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
2423 asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
2424 asMasm().ma_load(output.high,
2425 BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET),
2426 SizeWord);
2427 asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
2428 }
2429 asMasm().memoryBarrierAfter(access.sync());
2430 }
2431
wasmStoreI64Impl(const wasm::MemoryAccessDesc & access,Register64 value,Register memoryBase,Register ptr,Register ptrScratch,Register tmp)2432 void MacroAssemblerMIPSCompat::wasmStoreI64Impl(
2433 const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
2434 Register ptr, Register ptrScratch, Register tmp) {
2435 uint32_t offset = access.offset();
2436 MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
2437 MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
2438
2439 // Maybe add the offset.
2440 if (offset) {
2441 asMasm().addPtr(Imm32(offset), ptrScratch);
2442 ptr = ptrScratch;
2443 }
2444
2445 unsigned byteSize = access.byteSize();
2446 bool isSigned;
2447 switch (access.type()) {
2448 case Scalar::Int8:
2449 isSigned = true;
2450 break;
2451 case Scalar::Uint8:
2452 isSigned = false;
2453 break;
2454 case Scalar::Int16:
2455 isSigned = true;
2456 break;
2457 case Scalar::Uint16:
2458 isSigned = false;
2459 break;
2460 case Scalar::Int32:
2461 isSigned = true;
2462 break;
2463 case Scalar::Uint32:
2464 isSigned = false;
2465 break;
2466 case Scalar::Int64:
2467 isSigned = true;
2468 break;
2469 default:
2470 MOZ_CRASH("unexpected array type");
2471 }
2472
2473 MOZ_ASSERT(INT64LOW_OFFSET == 0);
2474 BaseIndex address(memoryBase, ptr, TimesOne);
2475 if (IsUnaligned(access)) {
2476 MOZ_ASSERT(tmp != InvalidReg);
2477 if (byteSize <= 4) {
2478 asMasm().ma_store_unaligned(access, value.low, address, tmp,
2479 static_cast<LoadStoreSize>(8 * byteSize),
2480 isSigned ? SignExtend : ZeroExtend);
2481 } else {
2482 asMasm().ma_store_unaligned(
2483 access, value.high,
2484 BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), tmp, SizeWord,
2485 SignExtend);
2486 asMasm().ma_store_unaligned(access, value.low, address, tmp, SizeWord,
2487 ZeroExtend);
2488 }
2489 return;
2490 }
2491
2492 asMasm().memoryBarrierBefore(access.sync());
2493 if (byteSize <= 4) {
2494 asMasm().ma_store(value.low, address,
2495 static_cast<LoadStoreSize>(8 * byteSize));
2496 asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
2497 } else {
2498 asMasm().ma_store(value.high,
2499 BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET),
2500 SizeWord);
2501 asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
2502 asMasm().ma_store(value.low, address, SizeWord);
2503 }
2504 asMasm().memoryBarrierAfter(access.sync());
2505 }
2506
EnterAtomic64Region(MacroAssembler & masm,Register addr,Register spinlock,Register scratch)2507 static void EnterAtomic64Region(MacroAssembler& masm, Register addr,
2508 Register spinlock, Register scratch) {
2509 masm.movePtr(wasm::SymbolicAddress::js_jit_gAtomic64Lock, spinlock);
2510 masm.as_lbu(
2511 zero, addr,
2512 7); // Force memory trap on invalid access before we enter the spinlock.
2513
2514 Label tryLock;
2515
2516 masm.memoryBarrier(MembarFull);
2517
2518 masm.bind(&tryLock);
2519
2520 masm.as_ll(scratch, spinlock, 0);
2521 masm.ma_b(scratch, scratch, &tryLock, Assembler::NonZero, ShortJump);
2522 masm.ma_li(scratch, Imm32(1));
2523 masm.as_sc(scratch, spinlock, 0);
2524 masm.ma_b(scratch, scratch, &tryLock, Assembler::Zero, ShortJump);
2525
2526 masm.memoryBarrier(MembarFull);
2527 }
2528
ExitAtomic64Region(MacroAssembler & masm,Register spinlock)2529 static void ExitAtomic64Region(MacroAssembler& masm, Register spinlock) {
2530 masm.memoryBarrier(MembarFull);
2531 masm.as_sw(zero, spinlock, 0);
2532 masm.memoryBarrier(MembarFull);
2533 }
2534
2535 template <typename T>
AtomicLoad64(MacroAssembler & masm,const T & mem,Register64 temp,Register64 output)2536 static void AtomicLoad64(MacroAssembler& masm, const T& mem, Register64 temp,
2537 Register64 output) {
2538 MOZ_ASSERT(temp.low == InvalidReg && temp.high == InvalidReg);
2539
2540 masm.computeEffectiveAddress(mem, SecondScratchReg);
2541
2542 EnterAtomic64Region(masm, /* addr= */ SecondScratchReg,
2543 /* spinlock= */ ScratchRegister,
2544 /* scratch= */ output.low);
2545
2546 masm.load64(Address(SecondScratchReg, 0), output);
2547
2548 ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
2549 }
2550
atomicLoad64(const Synchronization &,const Address & mem,Register64 temp,Register64 output)2551 void MacroAssembler::atomicLoad64(const Synchronization&, const Address& mem,
2552 Register64 temp, Register64 output) {
2553 AtomicLoad64(*this, mem, temp, output);
2554 }
2555
atomicLoad64(const Synchronization &,const BaseIndex & mem,Register64 temp,Register64 output)2556 void MacroAssembler::atomicLoad64(const Synchronization&, const BaseIndex& mem,
2557 Register64 temp, Register64 output) {
2558 AtomicLoad64(*this, mem, temp, output);
2559 }
2560
2561 template <typename T>
atomicStore64(const T & mem,Register temp,Register64 value)2562 void MacroAssemblerMIPSCompat::atomicStore64(const T& mem, Register temp,
2563 Register64 value) {
2564 computeEffectiveAddress(mem, SecondScratchReg);
2565
2566 EnterAtomic64Region(asMasm(), /* addr= */ SecondScratchReg,
2567 /* spinlock= */ ScratchRegister,
2568 /* scratch= */ temp);
2569
2570 store64(value, Address(SecondScratchReg, 0));
2571
2572 ExitAtomic64Region(asMasm(), /* spinlock= */ ScratchRegister);
2573 }
2574
2575 template void MacroAssemblerMIPSCompat::atomicStore64(const Address& mem,
2576 Register temp,
2577 Register64 value);
2578 template void MacroAssemblerMIPSCompat::atomicStore64(const BaseIndex& mem,
2579 Register temp,
2580 Register64 value);
2581
2582 template <typename T>
CompareExchange64(MacroAssembler & masm,const T & mem,Register64 expect,Register64 replace,Register64 output)2583 static void CompareExchange64(MacroAssembler& masm, const T& mem,
2584 Register64 expect, Register64 replace,
2585 Register64 output) {
2586 MOZ_ASSERT(output != expect);
2587 MOZ_ASSERT(output != replace);
2588
2589 Label exit;
2590
2591 masm.computeEffectiveAddress(mem, SecondScratchReg);
2592 Address addr(SecondScratchReg, 0);
2593
2594 EnterAtomic64Region(masm, /* addr= */ SecondScratchReg,
2595 /* spinlock= */ ScratchRegister,
2596 /* scratch= */ output.low);
2597 masm.load64(addr, output);
2598
2599 masm.ma_b(output.low, expect.low, &exit, Assembler::NotEqual, ShortJump);
2600 masm.ma_b(output.high, expect.high, &exit, Assembler::NotEqual, ShortJump);
2601 masm.store64(replace, addr);
2602 masm.bind(&exit);
2603 ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
2604 }
2605
compareExchange64(const Synchronization &,const Address & mem,Register64 expect,Register64 replace,Register64 output)2606 void MacroAssembler::compareExchange64(const Synchronization&,
2607 const Address& mem, Register64 expect,
2608 Register64 replace, Register64 output) {
2609 CompareExchange64(*this, mem, expect, replace, output);
2610 }
2611
compareExchange64(const Synchronization &,const BaseIndex & mem,Register64 expect,Register64 replace,Register64 output)2612 void MacroAssembler::compareExchange64(const Synchronization&,
2613 const BaseIndex& mem, Register64 expect,
2614 Register64 replace, Register64 output) {
2615 CompareExchange64(*this, mem, expect, replace, output);
2616 }
2617
2618 template <typename T>
AtomicExchange64(MacroAssembler & masm,const T & mem,Register64 src,Register64 output)2619 static void AtomicExchange64(MacroAssembler& masm, const T& mem, Register64 src,
2620 Register64 output) {
2621 masm.computeEffectiveAddress(mem, SecondScratchReg);
2622 Address addr(SecondScratchReg, 0);
2623
2624 EnterAtomic64Region(masm, /* addr= */ SecondScratchReg,
2625 /* spinlock= */ ScratchRegister,
2626 /* scratch= */ output.low);
2627
2628 masm.load64(addr, output);
2629 masm.store64(src, addr);
2630
2631 ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
2632 }
2633
atomicExchange64(const Synchronization &,const Address & mem,Register64 src,Register64 output)2634 void MacroAssembler::atomicExchange64(const Synchronization&,
2635 const Address& mem, Register64 src,
2636 Register64 output) {
2637 AtomicExchange64(*this, mem, src, output);
2638 }
2639
atomicExchange64(const Synchronization &,const BaseIndex & mem,Register64 src,Register64 output)2640 void MacroAssembler::atomicExchange64(const Synchronization&,
2641 const BaseIndex& mem, Register64 src,
2642 Register64 output) {
2643 AtomicExchange64(*this, mem, src, output);
2644 }
2645
// Emit a 64-bit atomic fetch-and-op under the global spinlock. Loads the
// old value into `output`, computes `old op value` into `temp` with 32-bit
// register-pair arithmetic, and stores `temp` back. `output` holds the old
// value on exit.
template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm, AtomicOp op, Register64 value,
                            const T& mem, Register64 temp, Register64 output) {
  masm.computeEffectiveAddress(mem, SecondScratchReg);

  // output.low doubles as the spinlock scratch; the load below overwrites it.
  EnterAtomic64Region(masm, /* addr= */ SecondScratchReg,
                      /* spinlock= */ ScratchRegister,
                      /* scratch= */ output.low);

  masm.load64(Address(SecondScratchReg, 0), output);

  switch (op) {
    case AtomicFetchAddOp:
      // 64-bit add: add the low words, then propagate the carry
      // (temp.low < output.low iff the low-word addition wrapped).
      masm.as_addu(temp.low, output.low, value.low);
      masm.as_sltu(temp.high, temp.low, output.low);
      masm.as_addu(temp.high, temp.high, output.high);
      masm.as_addu(temp.high, temp.high, value.high);
      break;
    case AtomicFetchSubOp:
      // 64-bit subtract: compute the borrow out of the low words first
      // (output.low < value.low), then fold it into the high-word subtract.
      masm.as_sltu(temp.high, output.low, value.low);
      masm.as_subu(temp.high, output.high, temp.high);
      masm.as_subu(temp.low, output.low, value.low);
      masm.as_subu(temp.high, temp.high, value.high);
      break;
    case AtomicFetchAndOp:
      // Bitwise ops operate independently on each 32-bit half.
      masm.as_and(temp.low, output.low, value.low);
      masm.as_and(temp.high, output.high, value.high);
      break;
    case AtomicFetchOrOp:
      masm.as_or(temp.low, output.low, value.low);
      masm.as_or(temp.high, output.high, value.high);
      break;
    case AtomicFetchXorOp:
      masm.as_xor(temp.low, output.low, value.low);
      masm.as_xor(temp.high, output.high, value.high);
      break;
    default:
      MOZ_CRASH();
  }

  masm.store64(temp, Address(SecondScratchReg, 0));

  ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
}
2690
atomicFetchOp64(const Synchronization &,AtomicOp op,Register64 value,const Address & mem,Register64 temp,Register64 output)2691 void MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op,
2692 Register64 value, const Address& mem,
2693 Register64 temp, Register64 output) {
2694 AtomicFetchOp64(*this, op, value, mem, temp, output);
2695 }
2696
atomicFetchOp64(const Synchronization &,AtomicOp op,Register64 value,const BaseIndex & mem,Register64 temp,Register64 output)2697 void MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op,
2698 Register64 value, const BaseIndex& mem,
2699 Register64 temp, Register64 output) {
2700 AtomicFetchOp64(*this, op, value, mem, temp, output);
2701 }
2702
2703 // ========================================================================
2704 // Convert floating point.
2705
// 2^32 as a double, used to scale the high word of a uint64 into place.
static const double TO_DOUBLE_HIGH_SCALE = 0x100000000;

// This platform's uint64->double conversion needs no temp register.
bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }

// Convert an unsigned 64-bit integer held in a register pair to double:
// dest = double(high) * 2^32 + double(low). The multiply by a power of two
// is exact; the final add rounds once, so values wider than 53 significant
// bits lose precision as expected for a double. Clobbers ScratchDoubleReg.
void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
                                           Register temp) {
  MOZ_ASSERT(temp == Register::Invalid());
  convertUInt32ToDouble(src.high, dest);  // dest = (double)high
  loadConstantDouble(TO_DOUBLE_HIGH_SCALE, ScratchDoubleReg);
  mulDouble(ScratchDoubleReg, dest);  // dest *= 2^32
  convertUInt32ToDouble(src.low, ScratchDoubleReg);
  addDouble(ScratchDoubleReg, dest);  // dest += (double)low
}
2719
2720 //}}} check_macroassembler_style
2721