/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips64/MacroAssembler-mips64.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"

#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/JitFrames.h"
#include "jit/MacroAssembler.h"
#include "jit/mips64/Simulator-mips64.h"
#include "jit/MoveEmitter.h"
#include "jit/SharedICRegisters.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace jit;

using mozilla::Abs;

static_assert(sizeof(intptr_t) == 8, "Not 32-bit clean.");

void MacroAssemblerMIPS64Compat::convertBoolToInt32(Register src,
                                                    Register dest) {
  // Note that C++ bool is only 1 byte, so zero extend it to clear the
  // higher-order bits.
  ma_and(dest, src, Imm32(0xff));
}

void MacroAssemblerMIPS64Compat::convertInt32ToDouble(Register src,
                                                      FloatRegister dest) {
  as_mtc1(src, dest);
  as_cvtdw(dest, dest);
}

void MacroAssemblerMIPS64Compat::convertInt32ToDouble(const Address& src,
                                                      FloatRegister dest) {
  ma_ls(dest, src);
  as_cvtdw(dest, dest);
}

void MacroAssemblerMIPS64Compat::convertInt32ToDouble(const BaseIndex& src,
                                                      FloatRegister dest) {
  computeScaledAddress(src, ScratchRegister);
  convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
}

void MacroAssemblerMIPS64Compat::convertUInt32ToDouble(Register src,
                                                       FloatRegister dest) {
  ma_dext(ScratchRegister, src, Imm32(0), Imm32(32));
  asMasm().convertInt64ToDouble(Register64(ScratchRegister), dest);
}

void MacroAssemblerMIPS64Compat::convertUInt64ToDouble(Register src,
                                                       FloatRegister dest) {
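  // If the sign bit is clear, the value converts directly as a signed 64-bit
  // integer. Otherwise halve it (keeping the low bit so rounding is not
  // affected), convert, and double the result.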
  Label positive, done;
  ma_b(src, src, &positive, NotSigned, ShortJump);

  MOZ_ASSERT(src != ScratchRegister);
  MOZ_ASSERT(src != SecondScratchReg);

  ma_and(ScratchRegister, src, Imm32(1));
  ma_dsrl(SecondScratchReg, src, Imm32(1));
  ma_or(ScratchRegister, SecondScratchReg);
  as_dmtc1(ScratchRegister, dest);
  as_cvtdl(dest, dest);
  asMasm().addDouble(dest, dest);
  ma_b(&done, ShortJump);

  bind(&positive);
  as_dmtc1(src, dest);
  as_cvtdl(dest, dest);

  bind(&done);
}

void MacroAssemblerMIPS64Compat::convertUInt32ToFloat32(Register src,
                                                        FloatRegister dest) {
  ma_dext(ScratchRegister, src, Imm32(0), Imm32(32));
  asMasm().convertInt64ToFloat32(Register64(ScratchRegister), dest);
}

void MacroAssemblerMIPS64Compat::convertDoubleToFloat32(FloatRegister src,
                                                        FloatRegister dest) {
  as_cvtsd(dest, src);
}

// Checks whether a double is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerMIPS64Compat::convertDoubleToInt32(FloatRegister src,
                                                      Register dest,
                                                      Label* fail,
                                                      bool negativeZeroCheck) {
  if (negativeZeroCheck) {
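    // -0.0 is the only double whose bit pattern, rotated left by one, equals
    // exactly 1, so a single rotate-and-compare detects the negative-zero
    // case.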
    moveFromDouble(src, dest);
    ma_drol(dest, dest, Imm32(1));
    ma_b(dest, Imm32(1), fail, Assembler::Equal);
  }

  // Truncate double to int; if the result is inexact, fail.
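  // Reading FCSR back after the truncation exposes the exception Cause bits;
  // a set Inexact (CauseI) bit means the double did not convert to an int32
  // exactly, so we branch to the failure path.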
  as_truncwd(ScratchFloat32Reg, src);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, dest);
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseI, 1);
  ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
}

// Checks whether a float32 is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerMIPS64Compat::convertFloat32ToInt32(FloatRegister src,
                                                       Register dest,
                                                       Label* fail,
                                                       bool negativeZeroCheck) {
  if (negativeZeroCheck) {
    moveFromFloat32(src, dest);
    ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
  }

  as_truncws(ScratchFloat32Reg, src);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, dest);
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseI, 1);
  ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
}

void MacroAssemblerMIPS64Compat::convertFloat32ToDouble(FloatRegister src,
                                                        FloatRegister dest) {
  as_cvtds(dest, src);
}

void MacroAssemblerMIPS64Compat::convertInt32ToFloat32(Register src,
                                                       FloatRegister dest) {
  as_mtc1(src, dest);
  as_cvtsw(dest, dest);
}

void MacroAssemblerMIPS64Compat::convertInt32ToFloat32(const Address& src,
                                                       FloatRegister dest) {
  ma_ls(dest, src);
  as_cvtsw(dest, dest);
}

void MacroAssemblerMIPS64Compat::movq(Register rs, Register rd) {
  ma_move(rd, rs);
}

void MacroAssemblerMIPS64::ma_li(Register dest, CodeLabel* label) {
  BufferOffset bo = m_buffer.nextOffset();
  ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
  label->patchAt()->bind(bo.getOffset());
  label->setLinkMode(CodeLabel::MoveImmediate);
}

void MacroAssemblerMIPS64::ma_li(Register dest, ImmWord imm) {
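  // Materialize the 64-bit immediate with the shortest sequence available:
  // values fitting in a signed or unsigned 16 bits use a single addiu/ori,
  // 32-bit values use lui (plus ori and, for unsigned values, dinsu to clear
  // the sign-extended upper half), and wider values are assembled 16 bits at
  // a time with lui/ori/dsll.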
  int64_t value = imm.value;

  if (-1 == (value >> 15) || 0 == (value >> 15)) {
    as_addiu(dest, zero, value);
    return;
  }
  if (0 == (value >> 16)) {
    as_ori(dest, zero, value);
    return;
  }

  if (-1 == (value >> 31) || 0 == (value >> 31)) {
    as_lui(dest, uint16_t(value >> 16));
  } else if (0 == (value >> 32)) {
    as_lui(dest, uint16_t(value >> 16));
    as_dinsu(dest, zero, 32, 32);
  } else if (-1 == (value >> 47) || 0 == (value >> 47)) {
    as_lui(dest, uint16_t(value >> 32));
    if (uint16_t(value >> 16)) as_ori(dest, dest, uint16_t(value >> 16));
    as_dsll(dest, dest, 16);
  } else if (0 == (value >> 48)) {
    as_lui(dest, uint16_t(value >> 32));
    as_dinsu(dest, zero, 32, 32);
    if (uint16_t(value >> 16)) as_ori(dest, dest, uint16_t(value >> 16));
    as_dsll(dest, dest, 16);
  } else {
    as_lui(dest, uint16_t(value >> 48));
    if (uint16_t(value >> 32)) as_ori(dest, dest, uint16_t(value >> 32));
    if (uint16_t(value >> 16)) {
      as_dsll(dest, dest, 16);
      as_ori(dest, dest, uint16_t(value >> 16));
      as_dsll(dest, dest, 16);
    } else {
      as_dsll32(dest, dest, 32);
    }
  }
  if (uint16_t(value)) as_ori(dest, dest, uint16_t(value));
}

// This method generates a lui, dsll and ori instruction block that can be
// modified by UpdateLoad64Value, either during compilation (e.g.
// Assembler::bind), or during execution (e.g. jit::PatchJump).
void MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmPtr imm) {
  return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
}

void MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmWord imm,
                                          LiFlags flags) {
  if (Li64 == flags) {
    m_buffer.ensureSpace(6 * sizeof(uint32_t));
    as_lui(dest, Imm16::Upper(Imm32(imm.value >> 32)).encode());
    as_ori(dest, dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
    as_dsll(dest, dest, 16);
    as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
    as_dsll(dest, dest, 16);
    as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
  } else {
    m_buffer.ensureSpace(4 * sizeof(uint32_t));
    as_lui(dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
    as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
    as_drotr32(dest, dest, 48);
    as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
  }
}

void MacroAssemblerMIPS64::ma_dnegu(Register rd, Register rs) {
  as_dsubu(rd, zero, rs);
}

// Shifts
void MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Imm32 shift) {
  if (31 < shift.value)
    as_dsll32(rd, rt, shift.value);
  else
    as_dsll(rd, rt, shift.value);
}

void MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Imm32 shift) {
  if (31 < shift.value)
    as_dsrl32(rd, rt, shift.value);
  else
    as_dsrl(rd, rt, shift.value);
}

void MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Imm32 shift) {
  if (31 < shift.value)
    as_dsra32(rd, rt, shift.value);
  else
    as_dsra(rd, rt, shift.value);
}

void MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Imm32 shift) {
  if (31 < shift.value)
    as_drotr32(rd, rt, shift.value);
  else
    as_drotr(rd, rt, shift.value);
}

void MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Imm32 shift) {
  uint32_t s = 64 - shift.value;

  if (31 < s)
    as_drotr32(rd, rt, s);
  else
    as_drotr(rd, rt, s);
}

void MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Register shift) {
  as_dsllv(rd, rt, shift);
}

void MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Register shift) {
  as_dsrlv(rd, rt, shift);
}

void MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Register shift) {
  as_dsrav(rd, rt, shift);
}

void MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Register shift) {
  as_drotrv(rd, rt, shift);
}

void MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Register shift) {
  ma_negu(ScratchRegister, shift);
  as_drotrv(rd, rt, ScratchRegister);
}

void MacroAssemblerMIPS64::ma_dins(Register rt, Register rs, Imm32 pos,
                                   Imm32 size) {
  if (pos.value >= 0 && pos.value < 32) {
    if (pos.value + size.value > 32)
      as_dinsm(rt, rs, pos.value, size.value);
    else
      as_dins(rt, rs, pos.value, size.value);
  } else {
    as_dinsu(rt, rs, pos.value, size.value);
  }
}

void MacroAssemblerMIPS64::ma_dext(Register rt, Register rs, Imm32 pos,
                                   Imm32 size) {
  if (pos.value >= 0 && pos.value < 32) {
    if (size.value > 32)
      as_dextm(rt, rs, pos.value, size.value);
    else
      as_dext(rt, rs, pos.value, size.value);
  } else {
    as_dextu(rt, rs, pos.value, size.value);
  }
}

void MacroAssemblerMIPS64::ma_dctz(Register rd, Register rs) {
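  // Count trailing zeros: rs & -rs isolates the lowest set bit, whose
  // position is 63 - clz, i.e. the trailing-zero count. When rs is zero, clz
  // returns 64 and the movn (whose condition register is then zero) leaves
  // that value in place.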
  ma_dnegu(ScratchRegister, rs);
  as_and(rd, ScratchRegister, rs);
  as_dclz(rd, rd);
  ma_dnegu(SecondScratchReg, rd);
  ma_daddu(SecondScratchReg, Imm32(0x3f));
  as_movn(rd, SecondScratchReg, ScratchRegister);
}

// Arithmetic-based ops.

// Add.
void MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs, Imm32 imm) {
  if (Imm16::IsInSignedRange(imm.value)) {
    as_daddiu(rd, rs, imm.value);
  } else {
    ma_li(ScratchRegister, imm);
    as_daddu(rd, rs, ScratchRegister);
  }
}

void MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs) {
  as_daddu(rd, rd, rs);
}

void MacroAssemblerMIPS64::ma_daddu(Register rd, Imm32 imm) {
  ma_daddu(rd, rd, imm);
}

template <typename L>
void MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs,
                                              Register rt, L overflow) {
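  // Perform the addition both as a full 64-bit sum (daddu) and as a
  // sign-extended 32-bit sum (addu); the two results differ exactly when the
  // 32-bit addition overflowed.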
  as_daddu(SecondScratchReg, rs, rt);
  as_addu(rd, rs, rt);
  ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
}

template void MacroAssemblerMIPS64::ma_addTestOverflow<Label*>(Register rd,
                                                               Register rs,
                                                               Register rt,
                                                               Label* overflow);
template void MacroAssemblerMIPS64::ma_addTestOverflow<wasm::OldTrapDesc>(
    Register rd, Register rs, Register rt, wasm::OldTrapDesc overflow);

template <typename L>
void MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs,
                                              Imm32 imm, L overflow) {
  // Check for signed range because of as_daddiu
  if (Imm16::IsInSignedRange(imm.value)) {
    as_daddiu(SecondScratchReg, rs, imm.value);
    as_addiu(rd, rs, imm.value);
    ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
  } else {
    ma_li(ScratchRegister, imm);
    ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
  }
}

template void MacroAssemblerMIPS64::ma_addTestOverflow<Label*>(Register rd,
                                                               Register rs,
                                                               Imm32 imm,
                                                               Label* overflow);
template void MacroAssemblerMIPS64::ma_addTestOverflow<wasm::OldTrapDesc>(
    Register rd, Register rs, Imm32 imm, wasm::OldTrapDesc overflow);

// Subtract.
void MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs, Imm32 imm) {
  if (Imm16::IsInSignedRange(-imm.value)) {
    as_daddiu(rd, rs, -imm.value);
  } else {
    ma_li(ScratchRegister, imm);
    as_dsubu(rd, rs, ScratchRegister);
  }
}

void MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs) {
  as_dsubu(rd, rd, rs);
}

void MacroAssemblerMIPS64::ma_dsubu(Register rd, Imm32 imm) {
  ma_dsubu(rd, rd, imm);
}

void MacroAssemblerMIPS64::ma_subTestOverflow(Register rd, Register rs,
                                              Register rt, Label* overflow) {
  as_dsubu(SecondScratchReg, rs, rt);
  as_subu(rd, rs, rt);
  ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
}

void MacroAssemblerMIPS64::ma_dmult(Register rs, Imm32 imm) {
  ma_li(ScratchRegister, imm);
  as_dmult(rs, ScratchRegister);
}

// Memory.

void MacroAssemblerMIPS64::ma_load(Register dest, Address address,
                                   LoadStoreSize size,
                                   LoadStoreExtension extension) {
  int16_t encodedOffset;
  Register base;

  if (isLoongson() && ZeroExtend != extension &&
      !Imm16::IsInSignedRange(address.offset)) {
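    // Loongson's indexed loads (gslbx/gslhx/gslwx/gsldx) take a register
    // index, so an out-of-range offset can be applied from ScratchRegister
    // without a separate daddu. Only sign-extending forms are used here, so
    // zero-extending loads fall through to the generic path below.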
    ma_li(ScratchRegister, Imm32(address.offset));
    base = address.base;

    switch (size) {
      case SizeByte:
        as_gslbx(dest, base, ScratchRegister, 0);
        break;
      case SizeHalfWord:
        as_gslhx(dest, base, ScratchRegister, 0);
        break;
      case SizeWord:
        as_gslwx(dest, base, ScratchRegister, 0);
        break;
      case SizeDouble:
        as_gsldx(dest, base, ScratchRegister, 0);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_load");
    }
    return;
  }

  if (!Imm16::IsInSignedRange(address.offset)) {
    ma_li(ScratchRegister, Imm32(address.offset));
    as_daddu(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = Imm16(0).encode();
  } else {
    encodedOffset = Imm16(address.offset).encode();
    base = address.base;
  }

  switch (size) {
    case SizeByte:
      if (ZeroExtend == extension)
        as_lbu(dest, base, encodedOffset);
      else
        as_lb(dest, base, encodedOffset);
      break;
    case SizeHalfWord:
      if (ZeroExtend == extension)
        as_lhu(dest, base, encodedOffset);
      else
        as_lh(dest, base, encodedOffset);
      break;
    case SizeWord:
      if (ZeroExtend == extension)
        as_lwu(dest, base, encodedOffset);
      else
        as_lw(dest, base, encodedOffset);
      break;
    case SizeDouble:
      as_ld(dest, base, encodedOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_load");
  }
}

void MacroAssemblerMIPS64::ma_store(Register data, Address address,
                                    LoadStoreSize size,
                                    LoadStoreExtension extension) {
  int16_t encodedOffset;
  Register base;

  if (isLoongson() && !Imm16::IsInSignedRange(address.offset)) {
    ma_li(ScratchRegister, Imm32(address.offset));
    base = address.base;

    switch (size) {
      case SizeByte:
        as_gssbx(data, base, ScratchRegister, 0);
        break;
      case SizeHalfWord:
        as_gsshx(data, base, ScratchRegister, 0);
        break;
      case SizeWord:
        as_gsswx(data, base, ScratchRegister, 0);
        break;
      case SizeDouble:
        as_gssdx(data, base, ScratchRegister, 0);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_store");
    }
    return;
  }

  if (!Imm16::IsInSignedRange(address.offset)) {
    ma_li(ScratchRegister, Imm32(address.offset));
    as_daddu(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = Imm16(0).encode();
  } else {
    encodedOffset = Imm16(address.offset).encode();
    base = address.base;
  }

  switch (size) {
    case SizeByte:
      as_sb(data, base, encodedOffset);
      break;
    case SizeHalfWord:
      as_sh(data, base, encodedOffset);
      break;
    case SizeWord:
      as_sw(data, base, encodedOffset);
      break;
    case SizeDouble:
      as_sd(data, base, encodedOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_store");
  }
}

void MacroAssemblerMIPS64Compat::computeScaledAddress(const BaseIndex& address,
                                                      Register dest) {
  int32_t shift = Imm32::ShiftOf(address.scale).value;
  if (shift) {
    ma_dsll(ScratchRegister, address.index, Imm32(shift));
    as_daddu(dest, address.base, ScratchRegister);
  } else {
    as_daddu(dest, address.base, address.index);
  }
}

// Transfer a full 64-bit word between a register and the stack.
void MacroAssemblerMIPS64::ma_pop(Register r) {
  as_ld(r, StackPointer, 0);
  as_daddiu(StackPointer, StackPointer, sizeof(intptr_t));
}

void MacroAssemblerMIPS64::ma_push(Register r) {
  if (r == sp) {
    // Pushing sp requires one more instruction.
    ma_move(ScratchRegister, sp);
    r = ScratchRegister;
  }

  as_daddiu(StackPointer, StackPointer, (int32_t) - sizeof(intptr_t));
  as_sd(r, StackPointer, 0);
}

// Branches when done from within mips-specific code.
void MacroAssemblerMIPS64::ma_b(Register lhs, ImmWord imm, Label* label,
                                Condition c, JumpKind jumpKind) {
  if (imm.value <= INT32_MAX) {
    ma_b(lhs, Imm32(uint32_t(imm.value)), label, c, jumpKind);
  } else {
    MOZ_ASSERT(lhs != ScratchRegister);
    ma_li(ScratchRegister, imm);
    ma_b(lhs, ScratchRegister, label, c, jumpKind);
  }
}

void MacroAssemblerMIPS64::ma_b(Register lhs, Address addr, Label* label,
                                Condition c, JumpKind jumpKind) {
  MOZ_ASSERT(lhs != ScratchRegister);
  ma_load(ScratchRegister, addr, SizeDouble);
  ma_b(lhs, ScratchRegister, label, c, jumpKind);
}

void MacroAssemblerMIPS64::ma_b(Address addr, Imm32 imm, Label* label,
                                Condition c, JumpKind jumpKind) {
  ma_load(SecondScratchReg, addr, SizeDouble);
  ma_b(SecondScratchReg, imm, label, c, jumpKind);
}

void MacroAssemblerMIPS64::ma_b(Address addr, ImmGCPtr imm, Label* label,
                                Condition c, JumpKind jumpKind) {
  ma_load(SecondScratchReg, addr, SizeDouble);
  ma_b(SecondScratchReg, imm, label, c, jumpKind);
}

void MacroAssemblerMIPS64::ma_bal(Label* label, DelaySlotFill delaySlotFill) {
  spew("branch .Llabel %p\n", label);
  if (label->bound()) {
    // Generate the long jump for calls because return address has to be
    // the address after the reserved block.
    addLongJump(nextOffset(), BufferOffset(label->offset()));
    ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
    as_jalr(ScratchRegister);
    if (delaySlotFill == FillDelaySlot) as_nop();
    return;
  }

  // Second word holds a pointer to the next branch in label's chain.
  uint32_t nextInChain =
      label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

  // Make the whole branch continuous in the buffer. The six instructions
  // written below include the delay slot.
  m_buffer.ensureSpace(6 * sizeof(uint32_t));

  spew("bal .Llabel %p\n", label);
  BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
  writeInst(nextInChain);
  if (!oom()) label->use(bo.getOffset());
  // Leave space for long jump.
  as_nop();
  as_nop();
  as_nop();
  if (delaySlotFill == FillDelaySlot) as_nop();
}

void MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label,
                                          JumpKind jumpKind) {
  // Simply output the pointer to the label as its id; note that once a label
  // is destroyed, its pointer may be reused for another label.
  spew("branch .Llabel %p", label);
  MOZ_ASSERT(code.encode() !=
             InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
  InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));

  if (label->bound()) {
    int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();

    if (BOffImm16::IsInRange(offset)) jumpKind = ShortJump;

    if (jumpKind == ShortJump) {
      MOZ_ASSERT(BOffImm16::IsInRange(offset));
      code.setBOffImm16(BOffImm16(offset));
#ifdef JS_JITSPEW
      decodeBranchInstAndSpew(code);
#endif
      writeInst(code.encode());
      as_nop();
      return;
    }

    if (code.encode() == inst_beq.encode()) {
      // Handle long jump
      addLongJump(nextOffset(), BufferOffset(label->offset()));
      ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
      as_jr(ScratchRegister);
      as_nop();
      return;
    }

    // Handle a long conditional branch. The target offset is relative to the
    // branch itself and points at the instruction following the trailing nop
    // emitted below.
    spew("invert branch .Llabel %p", label);
    InstImm code_r = invertBranch(code, BOffImm16(7 * sizeof(uint32_t)));
#ifdef JS_JITSPEW
    decodeBranchInstAndSpew(code_r);
#endif
    writeInst(code_r.encode());
    // No need for a "nop" here because we can clobber scratch.
    addLongJump(nextOffset(), BufferOffset(label->offset()));
    ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
    as_jr(ScratchRegister);
    as_nop();
    return;
  }

  // Generate an open jump and link it to a label.

  // Second word holds a pointer to the next branch in label's chain.
  uint32_t nextInChain =
      label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

  if (jumpKind == ShortJump) {
    // Make the whole branch continuous in the buffer.
    m_buffer.ensureSpace(2 * sizeof(uint32_t));

    // Indicate that this is a short jump with offset 4.
    code.setBOffImm16(BOffImm16(4));
#ifdef JS_JITSPEW
    decodeBranchInstAndSpew(code);
#endif
    BufferOffset bo = writeInst(code.encode());
    writeInst(nextInChain);
    if (!oom()) label->use(bo.getOffset());
    return;
  }

  bool conditional = code.encode() != inst_beq.encode();

  // Make the whole branch continuous in the buffer. The seven instructions
  // written below include the conditional nop.
  m_buffer.ensureSpace(7 * sizeof(uint32_t));

#ifdef JS_JITSPEW
  decodeBranchInstAndSpew(code);
#endif
  BufferOffset bo = writeInst(code.encode());
  writeInst(nextInChain);
  if (!oom()) label->use(bo.getOffset());
  // Leave space for potential long jump.
  as_nop();
  as_nop();
  as_nop();
  as_nop();
  if (conditional) as_nop();
}

void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmWord imm,
                                      Condition c) {
  if (imm.value <= INT32_MAX) {
    ma_cmp_set(rd, rs, Imm32(uint32_t(imm.value)), c);
  } else {
    ma_li(ScratchRegister, imm);
    ma_cmp_set(rd, rs, ScratchRegister, c);
  }
}

void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmPtr imm,
                                      Condition c) {
  ma_cmp_set(rd, rs, ImmWord(uintptr_t(imm.value)), c);
}

// fp instructions
void MacroAssemblerMIPS64::ma_lid(FloatRegister dest, double value) {
  ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));

  if (imm.value != 0) {
    ma_li(ScratchRegister, imm);
    moveToDouble(ScratchRegister, dest);
  } else {
    moveToDouble(zero, dest);
  }
}

void MacroAssemblerMIPS64::ma_mv(FloatRegister src, ValueOperand dest) {
  as_dmfc1(dest.valueReg(), src);
}

void MacroAssemblerMIPS64::ma_mv(ValueOperand src, FloatRegister dest) {
  as_dmtc1(src.valueReg(), dest);
}

void MacroAssemblerMIPS64::ma_ls(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_lwc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gslsx(ft, address.base, ScratchRegister, 0);
    } else {
      as_daddu(ScratchRegister, address.base, ScratchRegister);
      as_lwc1(ft, ScratchRegister, 0);
    }
  }
}

void MacroAssemblerMIPS64::ma_ld(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_ldc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gsldx(ft, address.base, ScratchRegister, 0);
    } else {
      as_daddu(ScratchRegister, address.base, ScratchRegister);
      as_ldc1(ft, ScratchRegister, 0);
    }
  }
}

void MacroAssemblerMIPS64::ma_sd(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_sdc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gssdx(ft, address.base, ScratchRegister, 0);
    } else {
      as_daddu(ScratchRegister, address.base, ScratchRegister);
      as_sdc1(ft, ScratchRegister, 0);
    }
  }
}

void MacroAssemblerMIPS64::ma_ss(FloatRegister ft, Address address) {
  if (Imm16::IsInSignedRange(address.offset)) {
    as_swc1(ft, address.base, address.offset);
  } else {
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, Imm32(address.offset));
    if (isLoongson()) {
      as_gsssx(ft, address.base, ScratchRegister, 0);
    } else {
      as_daddu(ScratchRegister, address.base, ScratchRegister);
      as_swc1(ft, ScratchRegister, 0);
    }
  }
}

void MacroAssemblerMIPS64::ma_pop(FloatRegister f) {
  as_ldc1(f, StackPointer, 0);
  as_daddiu(StackPointer, StackPointer, sizeof(double));
}

void MacroAssemblerMIPS64::ma_push(FloatRegister f) {
  as_daddiu(StackPointer, StackPointer, (int32_t) - sizeof(double));
  as_sdc1(f, StackPointer, 0);
}

bool MacroAssemblerMIPS64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
  uint32_t descriptor = MakeFrameDescriptor(
      asMasm().framePushed(), JitFrame_IonJS, ExitFrameLayout::Size());

  asMasm().Push(Imm32(descriptor));  // descriptor_
  asMasm().Push(ImmPtr(fakeReturnAddr));

  return true;
}

void MacroAssemblerMIPS64Compat::move32(Imm32 imm, Register dest) {
  ma_li(dest, imm);
}

void MacroAssemblerMIPS64Compat::move32(Register src, Register dest) {
  ma_move(dest, src);
}

void MacroAssemblerMIPS64Compat::movePtr(Register src, Register dest) {
  ma_move(dest, src);
}
void MacroAssemblerMIPS64Compat::movePtr(ImmWord imm, Register dest) {
  ma_li(dest, imm);
}

void MacroAssemblerMIPS64Compat::movePtr(ImmGCPtr imm, Register dest) {
  ma_li(dest, imm);
}

void MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest) {
  movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
void MacroAssemblerMIPS64Compat::movePtr(wasm::SymbolicAddress imm,
                                         Register dest) {
  append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
  ma_liPatchable(dest, ImmWord(-1));
}

void MacroAssemblerMIPS64Compat::load8ZeroExtend(const Address& address,
                                                 Register dest) {
  ma_load(dest, address, SizeByte, ZeroExtend);
}

void MacroAssemblerMIPS64Compat::load8ZeroExtend(const BaseIndex& src,
                                                 Register dest) {
  ma_load(dest, src, SizeByte, ZeroExtend);
}

void MacroAssemblerMIPS64Compat::load8SignExtend(const Address& address,
                                                 Register dest) {
  ma_load(dest, address, SizeByte, SignExtend);
}

void MacroAssemblerMIPS64Compat::load8SignExtend(const BaseIndex& src,
                                                 Register dest) {
  ma_load(dest, src, SizeByte, SignExtend);
}

void MacroAssemblerMIPS64Compat::load16ZeroExtend(const Address& address,
                                                  Register dest) {
  ma_load(dest, address, SizeHalfWord, ZeroExtend);
}

void MacroAssemblerMIPS64Compat::load16ZeroExtend(const BaseIndex& src,
                                                  Register dest) {
  ma_load(dest, src, SizeHalfWord, ZeroExtend);
}

void MacroAssemblerMIPS64Compat::load16SignExtend(const Address& address,
                                                  Register dest) {
  ma_load(dest, address, SizeHalfWord, SignExtend);
}

void MacroAssemblerMIPS64Compat::load16SignExtend(const BaseIndex& src,
                                                  Register dest) {
  ma_load(dest, src, SizeHalfWord, SignExtend);
}

void MacroAssemblerMIPS64Compat::load32(const Address& address, Register dest) {
  ma_load(dest, address, SizeWord);
}

void MacroAssemblerMIPS64Compat::load32(const BaseIndex& address,
                                        Register dest) {
  ma_load(dest, address, SizeWord);
}

void MacroAssemblerMIPS64Compat::load32(AbsoluteAddress address,
                                        Register dest) {
  movePtr(ImmPtr(address.addr), ScratchRegister);
  load32(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerMIPS64Compat::load32(wasm::SymbolicAddress address,
                                        Register dest) {
  movePtr(address, ScratchRegister);
  load32(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerMIPS64Compat::loadPtr(const Address& address,
                                         Register dest) {
  ma_load(dest, address, SizeDouble);
}

void MacroAssemblerMIPS64Compat::loadPtr(const BaseIndex& src, Register dest) {
  ma_load(dest, src, SizeDouble);
}

void MacroAssemblerMIPS64Compat::loadPtr(AbsoluteAddress address,
                                         Register dest) {
  movePtr(ImmPtr(address.addr), ScratchRegister);
  loadPtr(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerMIPS64Compat::loadPtr(wasm::SymbolicAddress address,
                                         Register dest) {
  movePtr(address, ScratchRegister);
  loadPtr(Address(ScratchRegister, 0), dest);
}

void MacroAssemblerMIPS64Compat::loadPrivate(const Address& address,
                                             Register dest) {
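  // Private values store the pointer shifted right by one so the boxed word
  // cannot be mistaken for a tagged value; shifting left restores the
  // original pointer.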
  loadPtr(address, dest);
  ma_dsll(dest, dest, Imm32(1));
}

void MacroAssemblerMIPS64Compat::loadUnalignedDouble(
    const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
    FloatRegister dest) {
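  // ldl/ldr each fill in the part of the doubleword their effective address
  // covers; issued as a pair they assemble the full 64-bit value in 'temp'
  // without requiring natural alignment.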
  computeScaledAddress(src, SecondScratchReg);
  BufferOffset load;
  if (Imm16::IsInSignedRange(src.offset) &&
      Imm16::IsInSignedRange(src.offset + 7)) {
    load = as_ldl(temp, SecondScratchReg, src.offset + 7);
    as_ldr(temp, SecondScratchReg, src.offset);
  } else {
    ma_li(ScratchRegister, Imm32(src.offset));
    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
    load = as_ldl(temp, ScratchRegister, 7);
    as_ldr(temp, ScratchRegister, 0);
  }
  append(access, load.getOffset(), asMasm().framePushed());
  moveToDouble(temp, dest);
}

void MacroAssemblerMIPS64Compat::loadUnalignedFloat32(
    const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
    FloatRegister dest) {
  computeScaledAddress(src, SecondScratchReg);
  BufferOffset load;
  if (Imm16::IsInSignedRange(src.offset) &&
      Imm16::IsInSignedRange(src.offset + 3)) {
    load = as_lwl(temp, SecondScratchReg, src.offset + 3);
    as_lwr(temp, SecondScratchReg, src.offset);
  } else {
    ma_li(ScratchRegister, Imm32(src.offset));
    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
    load = as_lwl(temp, ScratchRegister, 3);
    as_lwr(temp, ScratchRegister, 0);
  }
  append(access, load.getOffset(), asMasm().framePushed());
  moveToFloat32(temp, dest);
}

void MacroAssemblerMIPS64Compat::store8(Imm32 imm, const Address& address) {
  ma_li(SecondScratchReg, imm);
  ma_store(SecondScratchReg, address, SizeByte);
}

void MacroAssemblerMIPS64Compat::store8(Register src, const Address& address) {
  ma_store(src, address, SizeByte);
}

void MacroAssemblerMIPS64Compat::store8(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeByte);
}

void MacroAssemblerMIPS64Compat::store8(Register src, const BaseIndex& dest) {
  ma_store(src, dest, SizeByte);
}

void MacroAssemblerMIPS64Compat::store16(Imm32 imm, const Address& address) {
  ma_li(SecondScratchReg, imm);
  ma_store(SecondScratchReg, address, SizeHalfWord);
}

void MacroAssemblerMIPS64Compat::store16(Register src, const Address& address) {
  ma_store(src, address, SizeHalfWord);
}

void MacroAssemblerMIPS64Compat::store16(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeHalfWord);
}

void MacroAssemblerMIPS64Compat::store16(Register src,
                                         const BaseIndex& address) {
  ma_store(src, address, SizeHalfWord);
}

void MacroAssemblerMIPS64Compat::store32(Register src,
                                         AbsoluteAddress address) {
  movePtr(ImmPtr(address.addr), ScratchRegister);
  store32(src, Address(ScratchRegister, 0));
}

void MacroAssemblerMIPS64Compat::store32(Register src, const Address& address) {
  ma_store(src, address, SizeWord);
}

void MacroAssemblerMIPS64Compat::store32(Imm32 src, const Address& address) {
  move32(src, SecondScratchReg);
  ma_store(SecondScratchReg, address, SizeWord);
}

void MacroAssemblerMIPS64Compat::store32(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeWord);
}

void MacroAssemblerMIPS64Compat::store32(Register src, const BaseIndex& dest) {
  ma_store(src, dest, SizeWord);
}

template <typename T>
void MacroAssemblerMIPS64Compat::storePtr(ImmWord imm, T address) {
  ma_li(SecondScratchReg, imm);
  ma_store(SecondScratchReg, address, SizeDouble);
}

template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmWord imm,
                                                            Address address);
template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
    ImmWord imm, BaseIndex address);

template <typename T>
void MacroAssemblerMIPS64Compat::storePtr(ImmPtr imm, T address) {
  storePtr(ImmWord(uintptr_t(imm.value)), address);
}

template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmPtr imm,
                                                            Address address);
template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
    ImmPtr imm, BaseIndex address);

template <typename T>
void MacroAssemblerMIPS64Compat::storePtr(ImmGCPtr imm, T address) {
  movePtr(imm, SecondScratchReg);
  storePtr(SecondScratchReg, address);
}

template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmGCPtr imm,
                                                            Address address);
template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
    ImmGCPtr imm, BaseIndex address);

void MacroAssemblerMIPS64Compat::storePtr(Register src,
                                          const Address& address) {
  ma_store(src, address, SizeDouble);
}

void MacroAssemblerMIPS64Compat::storePtr(Register src,
                                          const BaseIndex& address) {
  ma_store(src, address, SizeDouble);
}

void MacroAssemblerMIPS64Compat::storePtr(Register src, AbsoluteAddress dest) {
  movePtr(ImmPtr(dest.addr), ScratchRegister);
  storePtr(src, Address(ScratchRegister, 0));
}

void MacroAssemblerMIPS64Compat::storeUnalignedFloat32(
    const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
    const BaseIndex& dest) {
  computeScaledAddress(dest, SecondScratchReg);
  moveFromFloat32(src, temp);
  BufferOffset store;
  if (Imm16::IsInSignedRange(dest.offset) &&
      Imm16::IsInSignedRange(dest.offset + 3)) {
    store = as_swl(temp, SecondScratchReg, dest.offset + 3);
    as_swr(temp, SecondScratchReg, dest.offset);
  } else {
    ma_li(ScratchRegister, Imm32(dest.offset));
    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
    store = as_swl(temp, ScratchRegister, 3);
    as_swr(temp, ScratchRegister, 0);
  }
  append(access, store.getOffset(), asMasm().framePushed());
}

void MacroAssemblerMIPS64Compat::storeUnalignedDouble(
    const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
    const BaseIndex& dest) {
  computeScaledAddress(dest, SecondScratchReg);
  moveFromDouble(src, temp);

  BufferOffset store;
  if (Imm16::IsInSignedRange(dest.offset) &&
      Imm16::IsInSignedRange(dest.offset + 7)) {
    store = as_sdl(temp, SecondScratchReg, dest.offset + 7);
    as_sdr(temp, SecondScratchReg, dest.offset);
  } else {
    ma_li(ScratchRegister, Imm32(dest.offset));
    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
    store = as_sdl(temp, ScratchRegister, 7);
    as_sdr(temp, ScratchRegister, 0);
  }
  append(access, store.getOffset(), asMasm().framePushed());
}

void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
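  // Round to the nearest int32, then clamp with conditional moves: results of
  // 255 or more are pinned to 255, and inputs that are not greater than zero
  // (including NaN) produce 0.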
  as_roundwd(ScratchDoubleReg, input);
  ma_li(ScratchRegister, Imm32(255));
  as_mfc1(output, ScratchDoubleReg);
  zeroDouble(ScratchDoubleReg);
  as_sltiu(SecondScratchReg, output, 255);
  as_colt(DoubleFloat, ScratchDoubleReg, input);
  // if res > 255; res = 255;
  as_movz(output, ScratchRegister, SecondScratchReg);
  // if !(input > 0); res = 0;
  as_movf(output, zero);
}

void MacroAssemblerMIPS64Compat::testNullSet(Condition cond,
                                             const ValueOperand& value,
                                             Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  splitTag(value, SecondScratchReg);
  ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond);
}

void MacroAssemblerMIPS64Compat::testObjectSet(Condition cond,
                                               const ValueOperand& value,
                                               Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  splitTag(value, SecondScratchReg);
  ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond);
}

void MacroAssemblerMIPS64Compat::testUndefinedSet(Condition cond,
                                                  const ValueOperand& value,
                                                  Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  splitTag(value, SecondScratchReg);
  ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond);
}

void MacroAssemblerMIPS64Compat::unboxInt32(const ValueOperand& operand,
                                            Register dest) {
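  // On MIPS64 a 32-bit shift (sll) by zero sign-extends the low 32 bits into
  // the full 64-bit register, which is exactly the canonical form an unboxed
  // int32 needs.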
  ma_sll(dest, operand.valueReg(), Imm32(0));
}

void MacroAssemblerMIPS64Compat::unboxInt32(Register src, Register dest) {
  ma_sll(dest, src, Imm32(0));
}

void MacroAssemblerMIPS64Compat::unboxInt32(const Address& src, Register dest) {
  load32(Address(src.base, src.offset), dest);
}

void MacroAssemblerMIPS64Compat::unboxInt32(const BaseIndex& src,
                                            Register dest) {
  computeScaledAddress(src, SecondScratchReg);
  load32(Address(SecondScratchReg, src.offset), dest);
}

void MacroAssemblerMIPS64Compat::unboxBoolean(const ValueOperand& operand,
                                              Register dest) {
  ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(32));
}

void MacroAssemblerMIPS64Compat::unboxBoolean(Register src, Register dest) {
  ma_dext(dest, src, Imm32(0), Imm32(32));
}

void MacroAssemblerMIPS64Compat::unboxBoolean(const Address& src,
                                              Register dest) {
  ma_load(dest, Address(src.base, src.offset), SizeWord, ZeroExtend);
}

void MacroAssemblerMIPS64Compat::unboxBoolean(const BaseIndex& src,
                                              Register dest) {
  computeScaledAddress(src, SecondScratchReg);
  ma_load(dest, Address(SecondScratchReg, src.offset), SizeWord, ZeroExtend);
}

void MacroAssemblerMIPS64Compat::unboxDouble(const ValueOperand& operand,
                                             FloatRegister dest) {
  as_dmtc1(operand.valueReg(), dest);
}

void MacroAssemblerMIPS64Compat::unboxDouble(const Address& src,
                                             FloatRegister dest) {
  ma_ld(dest, Address(src.base, src.offset));
}

void MacroAssemblerMIPS64Compat::unboxString(const ValueOperand& operand,
                                             Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
}

void MacroAssemblerMIPS64Compat::unboxString(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
}

void MacroAssemblerMIPS64Compat::unboxString(const Address& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
}

void MacroAssemblerMIPS64Compat::unboxSymbol(const ValueOperand& operand,
                                             Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
}

void MacroAssemblerMIPS64Compat::unboxSymbol(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
}

void MacroAssemblerMIPS64Compat::unboxSymbol(const Address& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
}

void MacroAssemblerMIPS64Compat::unboxObject(const ValueOperand& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}

void MacroAssemblerMIPS64Compat::unboxObject(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}

void MacroAssemblerMIPS64Compat::unboxObject(const Address& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}

void MacroAssemblerMIPS64Compat::unboxValue(const ValueOperand& src,
                                            AnyRegister dest,
                                            JSValueType type) {
  if (dest.isFloat()) {
    Label notInt32, end;
    asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
    convertInt32ToDouble(src.valueReg(), dest.fpu());
    ma_b(&end, ShortJump);
    bind(&notInt32);
    unboxDouble(src, dest.fpu());
    bind(&end);
  } else {
    unboxNonDouble(src, dest.gpr(), type);
  }
}

void MacroAssemblerMIPS64Compat::unboxPrivate(const ValueOperand& src,
                                              Register dest) {
  ma_dsll(dest, src.valueReg(), Imm32(1));
}

void MacroAssemblerMIPS64Compat::boxDouble(FloatRegister src,
                                           const ValueOperand& dest,
                                           FloatRegister) {
  as_dmfc1(dest.valueReg(), src);
}

void MacroAssemblerMIPS64Compat::boxNonDouble(JSValueType type, Register src,
                                              const ValueOperand& dest) {
  MOZ_ASSERT(src != dest.valueReg());
  boxValue(type, src, dest.valueReg());
}

void MacroAssemblerMIPS64Compat::boolValueToDouble(const ValueOperand& operand,
                                                   FloatRegister dest) {
  convertBoolToInt32(operand.valueReg(), ScratchRegister);
  convertInt32ToDouble(ScratchRegister, dest);
}

void MacroAssemblerMIPS64Compat::int32ValueToDouble(const ValueOperand& operand,
                                                    FloatRegister dest) {
  convertInt32ToDouble(operand.valueReg(), dest);
}

void MacroAssemblerMIPS64Compat::boolValueToFloat32(const ValueOperand& operand,
                                                    FloatRegister dest) {
  convertBoolToInt32(operand.valueReg(), ScratchRegister);
  convertInt32ToFloat32(ScratchRegister, dest);
}

void MacroAssemblerMIPS64Compat::int32ValueToFloat32(
    const ValueOperand& operand, FloatRegister dest) {
  convertInt32ToFloat32(operand.valueReg(), dest);
}

void MacroAssemblerMIPS64Compat::loadConstantFloat32(float f,
                                                     FloatRegister dest) {
  ma_lis(dest, f);
}

void MacroAssemblerMIPS64Compat::loadInt32OrDouble(const Address& src,
                                                   FloatRegister dest) {
  Label notInt32, end;
  // If it's an int, convert it to double.
  loadPtr(Address(src.base, src.offset), ScratchRegister);
  ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
  loadPtr(Address(src.base, src.offset), SecondScratchReg);
  convertInt32ToDouble(SecondScratchReg, dest);
  ma_b(&end, ShortJump);

  // Not an int, just load as double.
  bind(&notInt32);
  ma_ld(dest, src);
  bind(&end);
}

void MacroAssemblerMIPS64Compat::loadInt32OrDouble(const BaseIndex& addr,
                                                   FloatRegister dest) {
  Label notInt32, end;

  // If it's an int, convert it to double.
  computeScaledAddress(addr, SecondScratchReg);
  // Since we only have one scratch, we need to stomp over it with the tag.
  loadPtr(Address(SecondScratchReg, 0), ScratchRegister);
  ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);

  computeScaledAddress(addr, SecondScratchReg);
  loadPtr(Address(SecondScratchReg, 0), SecondScratchReg);
  convertInt32ToDouble(SecondScratchReg, dest);
  ma_b(&end, ShortJump);

  // Not an int, just load as double.
  bind(&notInt32);
  // Recompute the address; SecondScratchReg was clobbered above when the tag
  // was loaded.
  computeScaledAddress(addr, SecondScratchReg);
  loadDouble(Address(SecondScratchReg, 0), dest);
  bind(&end);
}

void MacroAssemblerMIPS64Compat::loadConstantDouble(double dp,
                                                    FloatRegister dest) {
  ma_lid(dest, dp);
}

Register MacroAssemblerMIPS64Compat::extractObject(const Address& address,
                                                   Register scratch) {
  loadPtr(Address(address.base, address.offset), scratch);
  ma_dext(scratch, scratch, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
  return scratch;
}

Register MacroAssemblerMIPS64Compat::extractTag(const Address& address,
                                                Register scratch) {
  loadPtr(Address(address.base, address.offset), scratch);
  ma_dext(scratch, scratch, Imm32(JSVAL_TAG_SHIFT),
          Imm32(64 - JSVAL_TAG_SHIFT));
  return scratch;
}

Register MacroAssemblerMIPS64Compat::extractTag(const BaseIndex& address,
                                                Register scratch) {
  computeScaledAddress(address, scratch);
  return extractTag(Address(scratch, address.offset), scratch);
}

/* There are 3 paths through the backedge jump. They are listed here in the
 * order in which instructions are executed.
 * - The short jump is simple:
 *     b offset            # Jumps directly to target.
 *     lui at, addr1_hl    # In delay slot. Don't care about 'at' here.
 *
 * - The long jump to loop header:
 *     b label1
 *     lui at, addr1_hl    # In delay slot. We use the value in 'at' later.
 *   label1:
 *     ori at, addr1_lh
 *     drotr32 at, at, 48
 *     ori at, addr1_ll
 *     jr at
 *     lui at, addr2_hl    # In delay slot. Don't care about 'at' here.
 *
 * - The long jump to interrupt loop:
 *     b label2
 *     ...
 *     jr at
 *   label2:
 *     lui at, addr2_hl    # In delay slot. Don't care about 'at' here.
 *     ori at, addr2_lh
 *     drotr32 at, at, 48
 *     ori at, addr2_ll
 *     jr at
 *     nop                 # In delay slot.
 *
 * The backedge is done this way to avoid patching the lui+ori pair while it
 * is being executed. Look also at jit::PatchBackedge().
 */
backedgeJump(RepatchLabel * label,Label * documentation)1417 CodeOffsetJump MacroAssemblerMIPS64Compat::backedgeJump(RepatchLabel* label,
1418 Label* documentation) {
1419 // Only one branch per label.
1420 MOZ_ASSERT(!label->used());
1421
1422 BufferOffset bo = nextOffset();
1423 label->use(bo.getOffset());
1424
1425 // Backedges are short jumps when bound, but can become long when patched.
1426 m_buffer.ensureSpace(16 * sizeof(uint32_t));
1427 // Jump to "label1" by default to jump to the loop header.
1428 as_b(BOffImm16(2 * sizeof(uint32_t)));
1429 // No need for nop here. We can safely put next instruction in delay slot.
1430 ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
1431 MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 5 * sizeof(uint32_t));
1432 as_jr(ScratchRegister);
1433 // No need for nop here. We can safely put next instruction in delay slot.
1434 ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
1435 as_jr(ScratchRegister);
1436 as_nop();
1437 MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() ==
1438 12 * sizeof(uint32_t));
1439 return CodeOffsetJump(bo.getOffset());
1440 }
1441
jumpWithPatch(RepatchLabel * label,Label * documentation)1442 CodeOffsetJump MacroAssemblerMIPS64Compat::jumpWithPatch(RepatchLabel* label,
1443 Label* documentation) {
1444 // Only one branch per label.
1445 MOZ_ASSERT(!label->used());
1446
1447 BufferOffset bo = nextOffset();
1448 label->use(bo.getOffset());
1449 ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
1450 as_jr(ScratchRegister);
1451 as_nop();
1452 return CodeOffsetJump(bo.getOffset());
1453 }
1454
1455 /////////////////////////////////////////////////////////////////
1456 // X86/X64-common/ARM/MIPS interface.
1457 /////////////////////////////////////////////////////////////////
storeValue(ValueOperand val,Operand dst)1458 void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, Operand dst) {
1459 storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
1460 }
1461
storeValue(ValueOperand val,const BaseIndex & dest)1462 void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val,
1463 const BaseIndex& dest) {
1464 computeScaledAddress(dest, SecondScratchReg);
1465 storeValue(val, Address(SecondScratchReg, dest.offset));
1466 }
1467
storeValue(JSValueType type,Register reg,BaseIndex dest)1468 void MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg,
1469 BaseIndex dest) {
1470 computeScaledAddress(dest, ScratchRegister);
1471
1472 int32_t offset = dest.offset;
1473 if (!Imm16::IsInSignedRange(offset)) {
1474 ma_li(SecondScratchReg, Imm32(offset));
1475 as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
1476 offset = 0;
1477 }
1478
1479 storeValue(type, reg, Address(ScratchRegister, offset));
1480 }
1481
storeValue(ValueOperand val,const Address & dest)1482 void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val,
1483 const Address& dest) {
1484 storePtr(val.valueReg(), Address(dest.base, dest.offset));
1485 }
1486
storeValue(JSValueType type,Register reg,Address dest)1487 void MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg,
1488 Address dest) {
1489 MOZ_ASSERT(dest.base != SecondScratchReg);
1490
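  // A JS::Value is a single 64-bit word: the payload sits in the low
  // JSVAL_TAG_SHIFT (47) bits and the type tag in the bits above it. For
  // int32/boolean we can store the 32-bit payload and the upper half of the
  // shifted tag as two 32-bit stores (this relies on the little-endian layout
  // this port targets); for everything else we build the boxed word in
  // SecondScratchReg with dsll + dins and store it whole.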
1491 if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
1492 store32(reg, dest);
1493 JSValueShiftedTag tag = (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
1494 store32(((Imm64(tag)).secondHalf()), Address(dest.base, dest.offset + 4));
1495 } else {
1496 ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
1497 ma_dsll(SecondScratchReg, SecondScratchReg, Imm32(JSVAL_TAG_SHIFT));
1498 ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
1499 storePtr(SecondScratchReg, Address(dest.base, dest.offset));
1500 }
1501 }
1502
storeValue(const Value & val,Address dest)1503 void MacroAssemblerMIPS64Compat::storeValue(const Value& val, Address dest) {
1504 if (val.isGCThing()) {
1505 writeDataRelocation(val);
1506 movWithPatch(ImmWord(val.asRawBits()), SecondScratchReg);
1507 } else {
1508 ma_li(SecondScratchReg, ImmWord(val.asRawBits()));
1509 }
1510 storePtr(SecondScratchReg, Address(dest.base, dest.offset));
1511 }
1512
storeValue(const Value & val,BaseIndex dest)1513 void MacroAssemblerMIPS64Compat::storeValue(const Value& val, BaseIndex dest) {
1514 computeScaledAddress(dest, ScratchRegister);
1515
1516 int32_t offset = dest.offset;
1517 if (!Imm16::IsInSignedRange(offset)) {
1518 ma_li(SecondScratchReg, Imm32(offset));
1519 as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
1520 offset = 0;
1521 }
1522 storeValue(val, Address(ScratchRegister, offset));
1523 }
1524
loadValue(const BaseIndex & addr,ValueOperand val)1525 void MacroAssemblerMIPS64Compat::loadValue(const BaseIndex& addr,
1526 ValueOperand val) {
1527 computeScaledAddress(addr, SecondScratchReg);
1528 loadValue(Address(SecondScratchReg, addr.offset), val);
1529 }
1530
loadValue(Address src,ValueOperand val)1531 void MacroAssemblerMIPS64Compat::loadValue(Address src, ValueOperand val) {
1532 loadPtr(Address(src.base, src.offset), val.valueReg());
1533 }
1534
tagValue(JSValueType type,Register payload,ValueOperand dest)1535 void MacroAssemblerMIPS64Compat::tagValue(JSValueType type, Register payload,
1536 ValueOperand dest) {
1537 MOZ_ASSERT(dest.valueReg() != ScratchRegister);
1538 if (payload != dest.valueReg()) ma_move(dest.valueReg(), payload);
1539 ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type)));
1540 ma_dins(dest.valueReg(), ScratchRegister, Imm32(JSVAL_TAG_SHIFT),
1541 Imm32(64 - JSVAL_TAG_SHIFT));
1542 }
1543
pushValue(ValueOperand val)1544 void MacroAssemblerMIPS64Compat::pushValue(ValueOperand val) {
1545 // Allocate a stack slot for the Value.
1546 asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
1547 // Store Value
1548 storeValue(val, Address(StackPointer, 0));
1549 }
1550
pushValue(const Address & addr)1551 void MacroAssemblerMIPS64Compat::pushValue(const Address& addr) {
1552 // Load the value before allocating the stack slot; addr.base may be sp.
1553 loadPtr(Address(addr.base, addr.offset), ScratchRegister);
1554 ma_dsubu(StackPointer, StackPointer, Imm32(sizeof(Value)));
1555 storePtr(ScratchRegister, Address(StackPointer, 0));
1556 }
1557
popValue(ValueOperand val)1558 void MacroAssemblerMIPS64Compat::popValue(ValueOperand val) {
1559 as_ld(val.valueReg(), StackPointer, 0);
1560 as_daddiu(StackPointer, StackPointer, sizeof(Value));
1561 }
1562
breakpoint()1563 void MacroAssemblerMIPS64Compat::breakpoint() { as_break(0); }
1564
ensureDouble(const ValueOperand & source,FloatRegister dest,Label * failure)1565 void MacroAssemblerMIPS64Compat::ensureDouble(const ValueOperand& source,
1566 FloatRegister dest,
1567 Label* failure) {
1568 Label isDouble, done;
1569 {
1570 ScratchTagScope tag(asMasm(), source);
1571 splitTagForTest(source, tag);
1572 asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
1573 asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
1574 }
1575
1576 unboxInt32(source, ScratchRegister);
1577 convertInt32ToDouble(ScratchRegister, dest);
1578 jump(&done);
1579
1580 bind(&isDouble);
1581 unboxDouble(source, dest);
1582
1583 bind(&done);
1584 }
1585
checkStackAlignment()1586 void MacroAssemblerMIPS64Compat::checkStackAlignment() {
1587 #ifdef DEBUG
1588 Label aligned;
1589 as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
1590 ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
1591 as_break(BREAK_STACK_UNALIGNED);
1592 bind(&aligned);
1593 #endif
1594 }
1595
handleFailureWithHandlerTail(void * handler,Label * profilerExitTail)1596 void MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(
1597 void* handler, Label* profilerExitTail) {
1598 // Reserve space for exception information.
1599 int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
1600 ~(ABIStackAlignment - 1);
1601 asMasm().subPtr(Imm32(size), StackPointer);
1602 ma_move(a0, StackPointer); // Use a0 since it is the first function argument
1603
1604 // Call the handler.
1605 asMasm().setupUnalignedABICall(a1);
1606 asMasm().passABIArg(a0);
1607 asMasm().callWithABI(handler, MoveOp::GENERAL,
1608 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
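  // The handler has filled in the ResumeFromException record that a0 pointed
  // at; load its |kind| below and dispatch to the matching resume path.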
1609
1610 Label entryFrame;
1611 Label catch_;
1612 Label finally;
1613 Label return_;
1614 Label bailout;
1615 Label wasm;
1616
1617 // Already clobbered a0, so use it...
1618 load32(Address(StackPointer, offsetof(ResumeFromException, kind)), a0);
1619 asMasm().branch32(Assembler::Equal, a0,
1620 Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
1621 &entryFrame);
1622 asMasm().branch32(Assembler::Equal, a0,
1623 Imm32(ResumeFromException::RESUME_CATCH), &catch_);
1624 asMasm().branch32(Assembler::Equal, a0,
1625 Imm32(ResumeFromException::RESUME_FINALLY), &finally);
1626 asMasm().branch32(Assembler::Equal, a0,
1627 Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
1628 asMasm().branch32(Assembler::Equal, a0,
1629 Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
1630 asMasm().branch32(Assembler::Equal, a0,
1631 Imm32(ResumeFromException::RESUME_WASM), &wasm);
1632
1633 breakpoint(); // Invalid kind.
1634
1635 // No exception handler. Load the error value, load the new stack pointer
1636 // and return from the entry frame.
1637 bind(&entryFrame);
1638 asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
1639 loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
1640 StackPointer);
1641
1642 // We're going to return via the Ion calling convention.
1643 ma_pop(ra);
1644 as_jr(ra);
1645 as_nop();
1646
1647 // If we found a catch handler, this must be a baseline frame. Restore
1648 // state and jump to the catch block.
1649 bind(&catch_);
1650 loadPtr(Address(StackPointer, offsetof(ResumeFromException, target)), a0);
1651 loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
1652 BaselineFrameReg);
1653 loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
1654 StackPointer);
1655 jump(a0);
1656
1657 // If we found a finally block, this must be a baseline frame. Push
1658 // two values expected by JSOP_RETSUB: BooleanValue(true) and the
1659 // exception.
1660 bind(&finally);
1661 ValueOperand exception = ValueOperand(a1);
1662 loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);
1663
1664 loadPtr(Address(sp, offsetof(ResumeFromException, target)), a0);
1665 loadPtr(Address(sp, offsetof(ResumeFromException, framePointer)),
1666 BaselineFrameReg);
1667 loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);
1668
1669 pushValue(BooleanValue(true));
1670 pushValue(exception);
1671 jump(a0);
1672
1673 // Only used in debug mode. Return BaselineFrame->returnValue() to the
1674 // caller.
1675 bind(&return_);
1676 loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
1677 BaselineFrameReg);
1678 loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
1679 StackPointer);
1680 loadValue(
1681 Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
1682 JSReturnOperand);
1683 ma_move(StackPointer, BaselineFrameReg);
1684 pop(BaselineFrameReg);
1685
1686 // If profiling is enabled, update lastProfilingFrame to refer to the
1687 // caller frame before returning.
1688 {
1689 Label skipProfilingInstrumentation;
1690 // Test if profiler enabled.
1691 AbsoluteAddress addressOfEnabled(
1692 GetJitContext()->runtime->geckoProfiler().addressOfEnabled());
1693 asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
1694 &skipProfilingInstrumentation);
1695 jump(profilerExitTail);
1696 bind(&skipProfilingInstrumentation);
1697 }
1698
1699 ret();
1700
1701 // If we are bailing out to baseline to handle an exception, jump to
1702 // the bailout tail stub.
1703 bind(&bailout);
1704 loadPtr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), a2);
1705 ma_li(ReturnReg, Imm32(BAILOUT_RETURN_OK));
1706 loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
1707 jump(a1);
1708
1709 // If we are throwing and the innermost frame was a wasm frame, reset SP and
1710 // FP; SP is pointing to the unwound return address to the wasm entry, so
1711 // we can just ret().
1712 bind(&wasm);
1713 loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
1714 FramePointer);
1715 loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
1716 StackPointer);
1717 ret();
1718 }
1719
toggledJump(Label * label)1720 CodeOffset MacroAssemblerMIPS64Compat::toggledJump(Label* label) {
1721 CodeOffset ret(nextOffset().getOffset());
1722 ma_b(label);
1723 return ret;
1724 }
1725
toggledCall(JitCode * target,bool enabled)1726 CodeOffset MacroAssemblerMIPS64Compat::toggledCall(JitCode* target,
1727 bool enabled) {
1728 BufferOffset bo = nextOffset();
1729 CodeOffset offset(bo.getOffset());
1730 addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
1731 ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
1732 if (enabled) {
1733 as_jalr(ScratchRegister);
1734 as_nop();
1735 } else {
1736 as_nop();
1737 as_nop();
1738 }
1739 MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
1740 ToggledCallSize(nullptr));
1741 return offset;
1742 }
1743
profilerEnterFrame(Register framePtr,Register scratch)1744 void MacroAssemblerMIPS64Compat::profilerEnterFrame(Register framePtr,
1745 Register scratch) {
1746 asMasm().loadJSContext(scratch);
1747 loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
1748 storePtr(framePtr,
1749 Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
1750 storePtr(ImmPtr(nullptr),
1751 Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
1752 }
1753
profilerExitFrame()1754 void MacroAssemblerMIPS64Compat::profilerExitFrame() {
1755 jump(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
1756 }
1757
subFromStackPtr(Imm32 imm32)1758 void MacroAssembler::subFromStackPtr(Imm32 imm32) {
1759 if (imm32.value) asMasm().subPtr(imm32, StackPointer);
1760 }
1761
1762 //{{{ check_macroassembler_style
1763 // ===============================================================
1764 // Stack manipulation functions.
1765
PushRegsInMask(LiveRegisterSet set)1766 void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
1767 int32_t diff =
1768 set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
1769 const int32_t reserved = diff;
1770
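  // The reserved area is laid out with the GPRs at the highest offsets and
  // the FPU registers below them; |diff| walks down from |reserved| to 0 as
  // each register is stored (PopRegsInMaskIgnore below reads it back the
  // same way).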
1771 reserveStack(reserved);
1772 for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
1773 diff -= sizeof(intptr_t);
1774 storePtr(*iter, Address(StackPointer, diff));
1775 }
1776 for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
1777 iter.more(); ++iter) {
1778 diff -= sizeof(double);
1779 storeDouble(*iter, Address(StackPointer, diff));
1780 }
1781 MOZ_ASSERT(diff == 0);
1782 }
1783
PopRegsInMaskIgnore(LiveRegisterSet set,LiveRegisterSet ignore)1784 void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
1785 LiveRegisterSet ignore) {
1786 int32_t diff =
1787 set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
1788 const int32_t reserved = diff;
1789
1790 for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
1791 diff -= sizeof(intptr_t);
1792 if (!ignore.has(*iter)) loadPtr(Address(StackPointer, diff), *iter);
1793 }
1794 for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
1795 iter.more(); ++iter) {
1796 diff -= sizeof(double);
1797 if (!ignore.has(*iter)) loadDouble(Address(StackPointer, diff), *iter);
1798 }
1799 MOZ_ASSERT(diff == 0);
1800 freeStack(reserved);
1801 }
1802
storeRegsInMask(LiveRegisterSet set,Address dest,Register)1803 void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
1804 Register) {
1805 FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
1806 unsigned numFpu = fpuSet.size();
1807 int32_t diffF = fpuSet.getPushSizeInBytes();
1808 int32_t diffG = set.gprs().size() * sizeof(intptr_t);
1809
1810 MOZ_ASSERT(dest.offset >= diffG + diffF);
1811
1812 for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
1813 diffG -= sizeof(intptr_t);
1814 dest.offset -= sizeof(intptr_t);
1815 storePtr(*iter, dest);
1816 }
1817 MOZ_ASSERT(diffG == 0);
1818
1819 for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
1820 FloatRegister reg = *iter;
1821 diffF -= reg.size();
1822 numFpu -= 1;
1823 dest.offset -= reg.size();
1824 if (reg.isDouble())
1825 storeDouble(reg, dest);
1826 else if (reg.isSingle())
1827 storeFloat32(reg, dest);
1828 else
1829 MOZ_CRASH("Unknown register type.");
1830 }
1831 MOZ_ASSERT(numFpu == 0);
1832 diffF -= diffF % sizeof(uintptr_t);
1833 MOZ_ASSERT(diffF == 0);
1834 }
1835 // ===============================================================
1836 // ABI function calls.
1837
setupUnalignedABICall(Register scratch)1838 void MacroAssembler::setupUnalignedABICall(Register scratch) {
1839 MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
1840 setupABICall();
1841 dynamicAlignment_ = true;
1842
1843 ma_move(scratch, StackPointer);
1844
1845 // Force sp to be aligned
1846 asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
1847 ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
1848 storePtr(scratch, Address(StackPointer, 0));
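  // The original (unaligned) sp is now saved at [sp + 0]; after
  // callWithABIPre reserves the outgoing-argument area it will sit at
  // [sp + stackAdjust], which is where callWithABIPost reloads it from.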
1849 }
1850
callWithABIPre(uint32_t * stackAdjust,bool callFromWasm)1851 void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
1852 MOZ_ASSERT(inCall_);
1853 uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
1854
1855 // Reserve space for $ra.
1856 stackForCall += sizeof(intptr_t);
1857
1858 if (dynamicAlignment_) {
1859 stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
1860 } else {
1861 uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
1862 stackForCall += ComputeByteAlignment(
1863 stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
1864 }
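  // For illustration (assuming ABIStackAlignment is 16 on this target): with
  // 16 bytes of outgoing stack arguments, adding 8 bytes for $ra gives 24,
  // and ComputeByteAlignment pads that by a further 8 so the 32-byte
  // reservation keeps sp 16-byte aligned at the call.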
1865
1866 *stackAdjust = stackForCall;
1867 reserveStack(stackForCall);
1868
1869 // Save $ra because the call is going to clobber it. Restore it in
1870 // callWithABIPost. NOTE: this is needed for calls from SharedIC; maybe
1871 // we can do this differently.
1872 storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
1873
1874 // Position all arguments.
1875 {
1876 enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
1877 if (!enoughMemory_) return;
1878
1879 MoveEmitter emitter(*this);
1880 emitter.emit(moveResolver_);
1881 emitter.finish();
1882 }
1883
1884 assertStackAlignment(ABIStackAlignment);
1885 }
1886
callWithABIPost(uint32_t stackAdjust,MoveOp::Type result,bool callFromWasm)1887 void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
1888 bool callFromWasm) {
1889 // Restore ra value (as stored in callWithABIPre()).
1890 loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
1891
1892 if (dynamicAlignment_) {
1893 // Restore sp value from stack (as stored in setupUnalignedABICall()).
1894 loadPtr(Address(StackPointer, stackAdjust), StackPointer);
1895 // Use adjustFrame instead of freeStack because we already restored sp.
1896 adjustFrame(-stackAdjust);
1897 } else {
1898 freeStack(stackAdjust);
1899 }
1900
1901 #ifdef DEBUG
1902 MOZ_ASSERT(inCall_);
1903 inCall_ = false;
1904 #endif
1905 }
1906
callWithABINoProfiler(Register fun,MoveOp::Type result)1907 void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
1908 // Load the callee into t9; no instruction between here and the call may
1909 // clobber it. We can't call through |fun| directly because it may be one
1910 // of the IntArg registers clobbered before the call.
1911 ma_move(t9, fun);
1912 uint32_t stackAdjust;
1913 callWithABIPre(&stackAdjust);
1914 call(t9);
1915 callWithABIPost(stackAdjust, result);
1916 }
1917
callWithABINoProfiler(const Address & fun,MoveOp::Type result)1918 void MacroAssembler::callWithABINoProfiler(const Address& fun,
1919 MoveOp::Type result) {
1920 // Load the callee in t9, as above.
1921 loadPtr(Address(fun.base, fun.offset), t9);
1922 uint32_t stackAdjust;
1923 callWithABIPre(&stackAdjust);
1924 call(t9);
1925 callWithABIPost(stackAdjust, result);
1926 }
1927
1928 // ===============================================================
1929 // Move
1930
moveValue(const TypedOrValueRegister & src,const ValueOperand & dest)1931 void MacroAssembler::moveValue(const TypedOrValueRegister& src,
1932 const ValueOperand& dest) {
1933 if (src.hasValue()) {
1934 moveValue(src.valueReg(), dest);
1935 return;
1936 }
1937
1938 MIRType type = src.type();
1939 AnyRegister reg = src.typedReg();
1940
1941 if (!IsFloatingPointType(type)) {
1942 boxNonDouble(ValueTypeFromMIRType(type), reg.gpr(), dest);
1943 return;
1944 }
1945
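  // Boxed Values only hold doubles, so widen a Float32 into the scratch
  // double register before boxing it.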
1946 FloatRegister scratch = ScratchDoubleReg;
1947 FloatRegister freg = reg.fpu();
1948 if (type == MIRType::Float32) {
1949 convertFloat32ToDouble(freg, scratch);
1950 freg = scratch;
1951 }
1952 boxDouble(freg, dest, scratch);
1953 }
1954
moveValue(const ValueOperand & src,const ValueOperand & dest)1955 void MacroAssembler::moveValue(const ValueOperand& src,
1956 const ValueOperand& dest) {
1957 if (src == dest) return;
1958 movePtr(src.valueReg(), dest.valueReg());
1959 }
1960
moveValue(const Value & src,const ValueOperand & dest)1961 void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
1962 if (!src.isGCThing()) {
1963 ma_li(dest.valueReg(), ImmWord(src.asRawBits()));
1964 return;
1965 }
1966
1967 writeDataRelocation(src);
1968 movWithPatch(ImmWord(src.asRawBits()), dest.valueReg());
1969 }
1970
1971 // ===============================================================
1972 // Branch functions
1973
branchValueIsNurseryObject(Condition cond,ValueOperand value,Register temp,Label * label)1974 void MacroAssembler::branchValueIsNurseryObject(Condition cond,
1975 ValueOperand value,
1976 Register temp, Label* label) {
1977 MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
1978
1979 Label done;
1980 branchTestObject(Assembler::NotEqual, value,
1981 cond == Assembler::Equal ? &done : label);
1982
1983 extractObject(value, SecondScratchReg);
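  // GC chunks are ChunkSize-aligned, so OR'ing the object pointer with
  // gc::ChunkMask yields the address of the last byte of its chunk; the
  // chunk's location word can then be read at the fixed offset
  // ChunkLocationOffsetFromLastByte and compared against Nursery.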
1984 orPtr(Imm32(gc::ChunkMask), SecondScratchReg);
1985 branch32(cond, Address(SecondScratchReg, gc::ChunkLocationOffsetFromLastByte),
1986 Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
1987
1988 bind(&done);
1989 }
1990
branchValueIsNurseryCell(Condition cond,const Address & address,Register temp,Label * label)1991 void MacroAssembler::branchValueIsNurseryCell(Condition cond,
1992 const Address& address,
1993 Register temp, Label* label) {
1994 MOZ_ASSERT(temp != InvalidReg);
1995 loadValue(address, ValueOperand(temp));
1996 branchValueIsNurseryCell(cond, ValueOperand(temp), InvalidReg, label);
1997 }
1998
branchValueIsNurseryCell(Condition cond,ValueOperand value,Register temp,Label * label)1999 void MacroAssembler::branchValueIsNurseryCell(Condition cond,
2000 ValueOperand value, Register temp,
2001 Label* label) {
2002 MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
2003
2004 Label done, checkAddress, checkObjectAddress;
2005 SecondScratchRegisterScope scratch2(*this);
2006
2007 splitTag(value, scratch2);
2008 branchTestObject(Assembler::Equal, scratch2, &checkObjectAddress);
2009 branchTestString(Assembler::NotEqual, scratch2,
2010 cond == Assembler::Equal ? &done : label);
2011
2012 unboxString(value, scratch2);
2013 jump(&checkAddress);
2014
2015 bind(&checkObjectAddress);
2016 unboxObject(value, scratch2);
2017
2018 bind(&checkAddress);
2019 orPtr(Imm32(gc::ChunkMask), scratch2);
2020 load32(Address(scratch2, gc::ChunkLocationOffsetFromLastByte), scratch2);
2021 branch32(cond, scratch2, Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
2022
2023 bind(&done);
2024 }
2025
branchTestValue(Condition cond,const ValueOperand & lhs,const Value & rhs,Label * label)2026 void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
2027 const Value& rhs, Label* label) {
2028 MOZ_ASSERT(cond == Equal || cond == NotEqual);
2029 ScratchRegisterScope scratch(*this);
2030 MOZ_ASSERT(lhs.valueReg() != scratch);
2031 moveValue(rhs, ValueOperand(scratch));
2032 ma_b(lhs.valueReg(), scratch, label, cond);
2033 }
2034
2035 // ========================================================================
2036 // Memory access primitives.
2037 template <typename T>
storeUnboxedValue(const ConstantOrRegister & value,MIRType valueType,const T & dest,MIRType slotType)2038 void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
2039 MIRType valueType, const T& dest,
2040 MIRType slotType) {
2041 if (valueType == MIRType::Double) {
2042 storeDouble(value.reg().typedReg().fpu(), dest);
2043 return;
2044 }
2045
2046 // For known integers and booleans, we can just store the unboxed value if
2047 // the slot has the same type.
2048 if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) &&
2049 slotType == valueType) {
2050 if (value.constant()) {
2051 Value val = value.value();
2052 if (valueType == MIRType::Int32)
2053 store32(Imm32(val.toInt32()), dest);
2054 else
2055 store32(Imm32(val.toBoolean() ? 1 : 0), dest);
2056 } else {
2057 store32(value.reg().typedReg().gpr(), dest);
2058 }
2059 return;
2060 }
2061
2062 if (value.constant())
2063 storeValue(value.value(), dest);
2064 else
2065 storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
2066 dest);
2067 }
2068
2069 template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
2070 MIRType valueType,
2071 const Address& dest,
2072 MIRType slotType);
2073 template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
2074 MIRType valueType,
2075 const BaseIndex& dest,
2076 MIRType slotType);
2077
wasmTruncateDoubleToUInt32(FloatRegister input,Register output,bool isSaturating,Label * oolEntry)2078 void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
2079 Register output,
2080 bool isSaturating,
2081 Label* oolEntry) {
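  // Truncate to a 64-bit integer and check that the upper 32 bits of the
  // result are zero; if they are not (negative input, a value above
  // UINT32_MAX, or the FPU's saturated result for NaN/overflow), branch to
  // the out-of-line entry.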
2082 as_truncld(ScratchDoubleReg, input);
2083 moveFromDouble(ScratchDoubleReg, output);
2084 ma_dsrl(ScratchRegister, output, Imm32(32));
2085 as_sll(output, output, 0);
2086 ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
2087 }
2088
wasmTruncateFloat32ToUInt32(FloatRegister input,Register output,bool isSaturating,Label * oolEntry)2089 void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
2090 Register output,
2091 bool isSaturating,
2092 Label* oolEntry) {
2093 as_truncls(ScratchDoubleReg, input);
2094 moveFromDouble(ScratchDoubleReg, output);
2095 ma_dsrl(ScratchRegister, output, Imm32(32));
2096 as_sll(output, output, 0);
2097 ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
2098 }
2099
wasmLoadI64(const wasm::MemoryAccessDesc & access,Register memoryBase,Register ptr,Register ptrScratch,Register64 output)2100 void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
2101 Register memoryBase, Register ptr,
2102 Register ptrScratch, Register64 output) {
2103 wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
2104 }
2105
wasmUnalignedLoadI64(const wasm::MemoryAccessDesc & access,Register memoryBase,Register ptr,Register ptrScratch,Register64 output,Register tmp)2106 void MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
2107 Register memoryBase, Register ptr,
2108 Register ptrScratch,
2109 Register64 output, Register tmp) {
2110 wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, tmp);
2111 }
2112
wasmStoreI64(const wasm::MemoryAccessDesc & access,Register64 value,Register memoryBase,Register ptr,Register ptrScratch)2113 void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
2114 Register64 value, Register memoryBase,
2115 Register ptr, Register ptrScratch) {
2116 wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
2117 }
2118
wasmUnalignedStoreI64(const wasm::MemoryAccessDesc & access,Register64 value,Register memoryBase,Register ptr,Register ptrScratch,Register tmp)2119 void MacroAssembler::wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
2120 Register64 value,
2121 Register memoryBase, Register ptr,
2122 Register ptrScratch, Register tmp) {
2123 wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, tmp);
2124 }
2125
wasmTruncateDoubleToInt64(FloatRegister input,Register64 output,bool isSaturating,Label * oolEntry,Label * oolRejoin,FloatRegister tempDouble)2126 void MacroAssembler::wasmTruncateDoubleToInt64(
2127 FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
2128 Label* oolRejoin, FloatRegister tempDouble) {
2129 MOZ_ASSERT(tempDouble.isInvalid());
2130
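  // trunc.l.d sets the invalid-operation cause bit in FCSR when the input is
  // NaN or does not fit in an int64; extract that bit (CauseV) and branch to
  // the out-of-line entry if it is set.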
2131 as_truncld(ScratchDoubleReg, input);
2132 as_cfc1(ScratchRegister, Assembler::FCSR);
2133 moveFromDouble(ScratchDoubleReg, output.reg);
2134 ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
2135 ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
2136
2137 if (isSaturating) bind(oolRejoin);
2138 }
2139
wasmTruncateDoubleToUInt64(FloatRegister input,Register64 output_,bool isSaturating,Label * oolEntry,Label * oolRejoin,FloatRegister tempDouble)2140 void MacroAssembler::wasmTruncateDoubleToUInt64(
2141 FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
2142 Label* oolRejoin, FloatRegister tempDouble) {
2143 MOZ_ASSERT(tempDouble.isInvalid());
2144 Register output = output_.reg;
2145
2146 Label done;
2147
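  // Outline: try a signed truncation first; a result below INT64_MAX means
  // the input fit and we are done. Otherwise subtract 2^63 from the input,
  // truncate again, and add 2^63 back to the result. An invalid-operation
  // flag from the second truncation, or a zero final result (which can only
  // come from a negative input here), diverts to the out-of-line entry.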
2148 as_truncld(ScratchDoubleReg, input);
2149 // Materialize INT64_MAX in SecondScratchReg.
2150 ma_li(SecondScratchReg, Imm32(-1));
2151 ma_dext(SecondScratchReg, SecondScratchReg, Imm32(0), Imm32(63));
2152 moveFromDouble(ScratchDoubleReg, output);
2153 // For inputs in the (-1, INT64_MAX] range nothing more needs to be done.
2154 ma_b(output, SecondScratchReg, &done, Assembler::Below, ShortJump);
2155
2156 loadConstantDouble(double(INT64_MAX + 1ULL), ScratchDoubleReg);
2157 // SecondScratchReg := INT64_MAX + 1, i.e. 2^63 (the INT64_MIN bit pattern).
2158 ma_daddu(SecondScratchReg, Imm32(1));
2159 as_subd(ScratchDoubleReg, input, ScratchDoubleReg);
2160 as_truncld(ScratchDoubleReg, ScratchDoubleReg);
2161 as_cfc1(ScratchRegister, Assembler::FCSR);
2162 moveFromDouble(ScratchDoubleReg, output);
2163 ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
2164 ma_daddu(output, SecondScratchReg);
2165
2166 // Guard against negative values that result in 0 due to precision loss.
2167 as_sltiu(SecondScratchReg, output, 1);
2168 ma_or(ScratchRegister, SecondScratchReg);
2169
2170 ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
2171
2172 bind(&done);
2173
2174 if (isSaturating) bind(oolRejoin);
2175 }
2176
wasmTruncateFloat32ToInt64(FloatRegister input,Register64 output,bool isSaturating,Label * oolEntry,Label * oolRejoin,FloatRegister tempFloat)2177 void MacroAssembler::wasmTruncateFloat32ToInt64(
2178 FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
2179 Label* oolRejoin, FloatRegister tempFloat) {
2180 MOZ_ASSERT(tempFloat.isInvalid());
2181
2182 as_truncls(ScratchDoubleReg, input);
2183 as_cfc1(ScratchRegister, Assembler::FCSR);
2184 moveFromDouble(ScratchDoubleReg, output.reg);
2185 ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
2186 ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
2187
2188 if (isSaturating) bind(oolRejoin);
2189 }
2190
wasmTruncateFloat32ToUInt64(FloatRegister input,Register64 output_,bool isSaturating,Label * oolEntry,Label * oolRejoin,FloatRegister tempFloat)2191 void MacroAssembler::wasmTruncateFloat32ToUInt64(
2192 FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
2193 Label* oolRejoin, FloatRegister tempFloat) {
2194 MOZ_ASSERT(tempFloat.isInvalid());
2195 Register output = output_.reg;
2196
2197 Label done;
2198
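  // Same approach as wasmTruncateDoubleToUInt64 above, using the
  // single-precision subtract/truncate instructions.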
2199 as_truncls(ScratchDoubleReg, input);
2200 // Materialize INT64_MAX in SecondScratchReg.
2201 ma_li(SecondScratchReg, Imm32(-1));
2202 ma_dext(SecondScratchReg, SecondScratchReg, Imm32(0), Imm32(63));
2203 moveFromDouble(ScratchDoubleReg, output);
2204 // For inputs in the (-1, INT64_MAX] range nothing more needs to be done.
2205 ma_b(output, SecondScratchReg, &done, Assembler::Below, ShortJump);
2206
2207 loadConstantFloat32(float(INT64_MAX + 1ULL), ScratchFloat32Reg);
2208 // SecondScratchReg := INT64_MAX + 1, i.e. 2^63 (the INT64_MIN bit pattern).
2209 ma_daddu(SecondScratchReg, Imm32(1));
2210 as_subs(ScratchFloat32Reg, input, ScratchFloat32Reg);
2211 as_truncls(ScratchDoubleReg, ScratchFloat32Reg);
2212 as_cfc1(ScratchRegister, Assembler::FCSR);
2213 moveFromDouble(ScratchDoubleReg, output);
2214 ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
2215 ma_daddu(output, SecondScratchReg);
2216
2217 // Guard against negative values that result in 0 due to precision loss.
2218 as_sltiu(SecondScratchReg, output, 1);
2219 ma_or(ScratchRegister, SecondScratchReg);
2220
2221 ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
2222
2223 bind(&done);
2224
2225 if (isSaturating) bind(oolRejoin);
2226 }
2227
wasmLoadI64Impl(const wasm::MemoryAccessDesc & access,Register memoryBase,Register ptr,Register ptrScratch,Register64 output,Register tmp)2228 void MacroAssemblerMIPS64Compat::wasmLoadI64Impl(
2229 const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
2230 Register ptrScratch, Register64 output, Register tmp) {
2231 uint32_t offset = access.offset();
2232 MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
2233 MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
2234
2235 // Maybe add the offset.
2236 if (offset) {
2237 asMasm().addPtr(Imm32(offset), ptrScratch);
2238 ptr = ptrScratch;
2239 }
2240
2241 unsigned byteSize = access.byteSize();
2242 bool isSigned;
2243
2244 switch (access.type()) {
2245 case Scalar::Int8:
2246 isSigned = true;
2247 break;
2248 case Scalar::Uint8:
2249 isSigned = false;
2250 break;
2251 case Scalar::Int16:
2252 isSigned = true;
2253 break;
2254 case Scalar::Uint16:
2255 isSigned = false;
2256 break;
2257 case Scalar::Int32:
2258 isSigned = true;
2259 break;
2260 case Scalar::Uint32:
2261 isSigned = false;
2262 break;
2263 case Scalar::Int64:
2264 isSigned = true;
2265 break;
2266 default:
2267 MOZ_CRASH("unexpected array type");
2268 }
2269
2270 BaseIndex address(memoryBase, ptr, TimesOne);
2271 if (IsUnaligned(access)) {
2272 MOZ_ASSERT(tmp != InvalidReg);
2273 asMasm().ma_load_unaligned(access, output.reg, address, tmp,
2274 static_cast<LoadStoreSize>(8 * byteSize),
2275 isSigned ? SignExtend : ZeroExtend);
2276 return;
2277 }
2278
2279 asMasm().memoryBarrierBefore(access.sync());
2280 asMasm().ma_load(output.reg, address,
2281 static_cast<LoadStoreSize>(8 * byteSize),
2282 isSigned ? SignExtend : ZeroExtend);
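  // The access metadata is attached to the previously emitted instruction
  // (at size() - 4), i.e. the memory instruction itself, so wasm trap
  // handling can identify this access.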
2283 asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
2284 asMasm().memoryBarrierAfter(access.sync());
2285 }
2286
wasmStoreI64Impl(const wasm::MemoryAccessDesc & access,Register64 value,Register memoryBase,Register ptr,Register ptrScratch,Register tmp)2287 void MacroAssemblerMIPS64Compat::wasmStoreI64Impl(
2288 const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
2289 Register ptr, Register ptrScratch, Register tmp) {
2290 uint32_t offset = access.offset();
2291 MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
2292 MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
2293
2294 // Maybe add the offset.
2295 if (offset) {
2296 asMasm().addPtr(Imm32(offset), ptrScratch);
2297 ptr = ptrScratch;
2298 }
2299
2300 unsigned byteSize = access.byteSize();
2301 bool isSigned;
2302 switch (access.type()) {
2303 case Scalar::Int8:
2304 isSigned = true;
2305 break;
2306 case Scalar::Uint8:
2307 isSigned = false;
2308 break;
2309 case Scalar::Int16:
2310 isSigned = true;
2311 break;
2312 case Scalar::Uint16:
2313 isSigned = false;
2314 break;
2315 case Scalar::Int32:
2316 isSigned = true;
2317 break;
2318 case Scalar::Uint32:
2319 isSigned = false;
2320 break;
2321 case Scalar::Int64:
2322 isSigned = true;
2323 break;
2324 default:
2325 MOZ_CRASH("unexpected array type");
2326 }
2327
2328 BaseIndex address(memoryBase, ptr, TimesOne);
2329
2330 if (IsUnaligned(access)) {
2331 MOZ_ASSERT(tmp != InvalidReg);
2332 asMasm().ma_store_unaligned(access, value.reg, address, tmp,
2333 static_cast<LoadStoreSize>(8 * byteSize),
2334 isSigned ? SignExtend : ZeroExtend);
2335 return;
2336 }
2337
2338 asMasm().memoryBarrierBefore(access.sync());
2339 asMasm().ma_store(value.reg, address,
2340 static_cast<LoadStoreSize>(8 * byteSize),
2341 isSigned ? SignExtend : ZeroExtend);
2342 asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
2343 asMasm().memoryBarrierAfter(access.sync());
2344 }
2345
2346 template <typename T>
CompareExchange64(MacroAssembler & masm,const Synchronization & sync,const T & mem,Register64 expect,Register64 replace,Register64 output)2347 static void CompareExchange64(MacroAssembler& masm, const Synchronization& sync,
2348 const T& mem, Register64 expect,
2349 Register64 replace, Register64 output) {
2350 masm.computeEffectiveAddress(mem, SecondScratchReg);
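  // Classic LL/SC loop: lld loads the current value; if it equals |expect| we
  // attempt the conditional store with scd, which clears its register on
  // failure, in which case we retry.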
2351
2352 Label tryAgain;
2353 Label exit;
2354
2355 masm.memoryBarrierBefore(sync);
2356
2357 masm.bind(&tryAgain);
2358
2359 masm.as_lld(output.reg, SecondScratchReg, 0);
2360 masm.ma_b(output.reg, expect.reg, &exit, Assembler::NotEqual, ShortJump);
2361 masm.movePtr(replace.reg, ScratchRegister);
2362 masm.as_scd(ScratchRegister, SecondScratchReg, 0);
2363 masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::Zero,
2364 ShortJump);
2365
2366 masm.memoryBarrierAfter(sync);
2367
2368 masm.bind(&exit);
2369 }
2370
compareExchange64(const Synchronization & sync,const Address & mem,Register64 expect,Register64 replace,Register64 output)2371 void MacroAssembler::compareExchange64(const Synchronization& sync,
2372 const Address& mem, Register64 expect,
2373 Register64 replace, Register64 output) {
2374 CompareExchange64(*this, sync, mem, expect, replace, output);
2375 }
2376
compareExchange64(const Synchronization & sync,const BaseIndex & mem,Register64 expect,Register64 replace,Register64 output)2377 void MacroAssembler::compareExchange64(const Synchronization& sync,
2378 const BaseIndex& mem, Register64 expect,
2379 Register64 replace, Register64 output) {
2380 CompareExchange64(*this, sync, mem, expect, replace, output);
2381 }
2382
2383 template <typename T>
AtomicExchange64(MacroAssembler & masm,const Synchronization & sync,const T & mem,Register64 src,Register64 output)2384 static void AtomicExchange64(MacroAssembler& masm, const Synchronization& sync,
2385 const T& mem, Register64 src, Register64 output) {
2386 masm.computeEffectiveAddress(mem, SecondScratchReg);
2387
2388 Label tryAgain;
2389
2390 masm.memoryBarrierBefore(sync);
2391
2392 masm.bind(&tryAgain);
2393
2394 masm.as_lld(output.reg, SecondScratchReg, 0);
2395 masm.movePtr(src.reg, ScratchRegister);
2396 masm.as_scd(ScratchRegister, SecondScratchReg, 0);
2397 masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::Zero,
2398 ShortJump);
2399
2400 masm.memoryBarrierAfter(sync);
2401 }
2402
atomicExchange64(const Synchronization & sync,const Address & mem,Register64 src,Register64 output)2403 void MacroAssembler::atomicExchange64(const Synchronization& sync,
2404 const Address& mem, Register64 src,
2405 Register64 output) {
2406 AtomicExchange64(*this, sync, mem, src, output);
2407 }
2408
atomicExchange64(const Synchronization & sync,const BaseIndex & mem,Register64 src,Register64 output)2409 void MacroAssembler::atomicExchange64(const Synchronization& sync,
2410 const BaseIndex& mem, Register64 src,
2411 Register64 output) {
2412 AtomicExchange64(*this, sync, mem, src, output);
2413 }
2414
2415 template <typename T>
AtomicFetchOp64(MacroAssembler & masm,const Synchronization & sync,AtomicOp op,Register64 value,const T & mem,Register64 temp,Register64 output)2416 static void AtomicFetchOp64(MacroAssembler& masm, const Synchronization& sync,
2417 AtomicOp op, Register64 value, const T& mem,
2418 Register64 temp, Register64 output) {
2419 masm.computeEffectiveAddress(mem, SecondScratchReg);
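  // LL/SC read-modify-write loop: |output| receives the old value returned to
  // the caller, |temp| holds the newly computed value that scd tries to
  // store, and a failed store-conditional branches back to retry.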
2420
2421 Label tryAgain;
2422
2423 masm.memoryBarrierBefore(sync);
2424
2425 masm.bind(&tryAgain);
2426
2427 masm.as_lld(output.reg, SecondScratchReg, 0);
2428
2429 switch (op) {
2430 case AtomicFetchAddOp:
2431 masm.as_daddu(temp.reg, output.reg, value.reg);
2432 break;
2433 case AtomicFetchSubOp:
2434 masm.as_dsubu(temp.reg, output.reg, value.reg);
2435 break;
2436 case AtomicFetchAndOp:
2437 masm.as_and(temp.reg, output.reg, value.reg);
2438 break;
2439 case AtomicFetchOrOp:
2440 masm.as_or(temp.reg, output.reg, value.reg);
2441 break;
2442 case AtomicFetchXorOp:
2443 masm.as_xor(temp.reg, output.reg, value.reg);
2444 break;
2445 default:
2446 MOZ_CRASH();
2447 }
2448
2449 masm.as_scd(temp.reg, SecondScratchReg, 0);
2450 masm.ma_b(temp.reg, temp.reg, &tryAgain, Assembler::Zero, ShortJump);
2451
2452 masm.memoryBarrierAfter(sync);
2453 }
2454
atomicFetchOp64(const Synchronization & sync,AtomicOp op,Register64 value,const Address & mem,Register64 temp,Register64 output)2455 void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
2456 Register64 value, const Address& mem,
2457 Register64 temp, Register64 output) {
2458 AtomicFetchOp64(*this, sync, op, value, mem, temp, output);
2459 }
2460
atomicFetchOp64(const Synchronization & sync,AtomicOp op,Register64 value,const BaseIndex & mem,Register64 temp,Register64 output)2461 void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
2462 Register64 value, const BaseIndex& mem,
2463 Register64 temp, Register64 output) {
2464 AtomicFetchOp64(*this, sync, op, value, mem, temp, output);
2465 }
2466
2467 // ========================================================================
2468 // Convert floating point.
2469
convertInt64ToDouble(Register64 src,FloatRegister dest)2470 void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
2471 as_dmtc1(src.reg, dest);
2472 as_cvtdl(dest, dest);
2473 }
2474
convertInt64ToFloat32(Register64 src,FloatRegister dest)2475 void MacroAssembler::convertInt64ToFloat32(Register64 src, FloatRegister dest) {
2476 as_dmtc1(src.reg, dest);
2477 as_cvtsl(dest, dest);
2478 }
2479
convertUInt64ToDoubleNeedsTemp()2480 bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
2481
convertUInt64ToDouble(Register64 src,FloatRegister dest,Register temp)2482 void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
2483 Register temp) {
2484 MOZ_ASSERT(temp == Register::Invalid());
2485 MacroAssemblerSpecific::convertUInt64ToDouble(src.reg, dest);
2486 }
2487
convertUInt64ToFloat32(Register64 src_,FloatRegister dest,Register temp)2488 void MacroAssembler::convertUInt64ToFloat32(Register64 src_, FloatRegister dest,
2489 Register temp) {
2490 MOZ_ASSERT(temp == Register::Invalid());
2491
2492 Register src = src_.reg;
2493 Label positive, done;
2494 ma_b(src, src, &positive, NotSigned, ShortJump);
2495
2496 MOZ_ASSERT(src != ScratchRegister);
2497 MOZ_ASSERT(src != SecondScratchReg);
2498
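  // The top bit of |src| is set, so it cannot be converted directly as a
  // signed 64-bit integer. Halve it (OR'ing the low bit back in so the final
  // rounding stays correct), convert, then double the result.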
2499 ma_and(ScratchRegister, src, Imm32(1));
2500 ma_dsrl(SecondScratchReg, src, Imm32(1));
2501 ma_or(ScratchRegister, SecondScratchReg);
2502 as_dmtc1(ScratchRegister, dest);
2503 as_cvtsl(dest, dest);
2504 addFloat32(dest, dest);
2505 ma_b(&done, ShortJump);
2506
2507 bind(&positive);
2508 as_dmtc1(src, dest);
2509 as_cvtsl(dest, dest);
2510
2511 bind(&done);
2512 }
2513
2514 //}}} check_macroassembler_style
2515