1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/mips-shared/MacroAssembler-mips-shared.h"
8
9 #include "mozilla/EndianUtils.h"
10
11 #include "jsmath.h"
12
13 #include "jit/MacroAssembler.h"
14
15 using namespace js;
16 using namespace jit;
17
// Register-to-register move, expressed as OR with $zero.
void MacroAssemblerMIPSShared::ma_move(Register rd, Register rs) {
  as_or(rd, rs, zero);
}
21
// Load a GC-thing pointer, recording a data relocation so the GC can trace
// and update the embedded pointer. Must use the fixed-length patchable
// sequence so the recorded offset stays valid.
void MacroAssemblerMIPSShared::ma_li(Register dest, ImmGCPtr ptr) {
  writeDataRelocation(ptr);
  asMasm().ma_liPatchable(dest, ImmPtr(ptr.value));
}
26
// Load a 32-bit immediate using the shortest sequence: a single
// addiu/ori/lui when the value fits, otherwise a lui+ori pair.
void MacroAssemblerMIPSShared::ma_li(Register dest, Imm32 imm) {
  if (Imm16::IsInSignedRange(imm.value)) {
    // Fits in a signed 16-bit immediate: addiu from $zero.
    as_addiu(dest, zero, imm.value);
  } else if (Imm16::IsInUnsignedRange(imm.value)) {
    // Fits in an unsigned 16-bit immediate: ori from $zero.
    as_ori(dest, zero, Imm16::Lower(imm).encode());
  } else if (Imm16::Lower(imm).encode() == 0) {
    // Low half is zero: a single lui suffices.
    as_lui(dest, Imm16::Upper(imm).encode());
  } else {
    as_lui(dest, Imm16::Upper(imm).encode());
    as_ori(dest, dest, Imm16::Lower(imm).encode());
  }
}
39
// This method generates lui and ori instruction pair that can be modified by
// UpdateLuiOriValue, either during compilation (eg. Assembler::bind), or
// during execution (eg. jit::PatchJump).
void MacroAssemblerMIPSShared::ma_liPatchable(Register dest, Imm32 imm) {
  // Reserve space up front so the two instructions land contiguously and can
  // be patched as a unit.
  m_buffer.ensureSpace(2 * sizeof(uint32_t));
  as_lui(dest, Imm16::Upper(imm).encode());
  as_ori(dest, dest, Imm16::Lower(imm).encode());
}
48
// Shifts
// Shift left logical by a constant; the amount is reduced mod 32 to match
// the 5-bit hardware shift field.
void MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt, Imm32 shift) {
  as_sll(rd, rt, shift.value % 32);
}
// Shift right logical by a constant (amount taken mod 32).
void MacroAssemblerMIPSShared::ma_srl(Register rd, Register rt, Imm32 shift) {
  as_srl(rd, rt, shift.value % 32);
}
56
// Shift right arithmetic by a constant (amount taken mod 32).
void MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt, Imm32 shift) {
  as_sra(rd, rt, shift.value % 32);
}
60
// Rotate right by a constant. Uses the R2 rotr instruction when available,
// otherwise synthesizes the rotate from srl/sll/or.
void MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt, Imm32 shift) {
  if (hasR2()) {
    as_rotr(rd, rt, shift.value % 32);
  } else {
    ScratchRegisterScope scratch(asMasm());
    as_srl(scratch, rt, shift.value % 32);
    // (32 - s) % 32 keeps the complementary shift in [0, 31] when s == 0.
    as_sll(rd, rt, (32 - (shift.value % 32)) % 32);
    as_or(rd, rd, scratch);
  }
}
71
// Rotate left by a constant, implemented as a rotate right by (32 - s).
void MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt, Imm32 shift) {
  if (hasR2()) {
    as_rotr(rd, rt, (32 - (shift.value % 32)) % 32);
  } else {
    ScratchRegisterScope scratch(asMasm());
    as_srl(scratch, rt, (32 - (shift.value % 32)) % 32);
    as_sll(rd, rt, shift.value % 32);
    as_or(rd, rd, scratch);
  }
}
82
// Shift left logical by a register amount (hardware uses the low 5 bits).
void MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt,
                                      Register shift) {
  as_sllv(rd, rt, shift);
}
87
// Shift right logical by a register amount (hardware uses the low 5 bits).
void MacroAssemblerMIPSShared::ma_srl(Register rd, Register rt,
                                      Register shift) {
  as_srlv(rd, rt, shift);
}
92
// Shift right arithmetic by a register amount (hardware uses the low 5 bits).
void MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt,
                                      Register shift) {
  as_srav(rd, rt, shift);
}
97
// Rotate right by a register amount. Without R2, synthesize it: since the
// variable shifts only use the low 5 bits, -shift acts as (32 - shift).
void MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt,
                                      Register shift) {
  if (hasR2()) {
    as_rotrv(rd, rt, shift);
  } else {
    ScratchRegisterScope scratch(asMasm());
    ma_negu(scratch, shift);
    as_sllv(scratch, rt, scratch);
    as_srlv(rd, rt, shift);
    as_or(rd, rd, scratch);
  }
}
110
// Rotate left by a register amount: rotate right by the negated amount
// (variable shifts/rotates only consume the low 5 bits, so -s == 32 - s).
void MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt,
                                      Register shift) {
  ScratchRegisterScope scratch(asMasm());
  ma_negu(scratch, shift);
  if (hasR2()) {
    as_rotrv(rd, rt, scratch);
  } else {
    as_srlv(rd, rt, scratch);
    as_sllv(scratch, rt, shift);
    as_or(rd, rd, scratch);
  }
}
123
// Two's-complement negation: rd = 0 - rs (no overflow trap).
void MacroAssemblerMIPSShared::ma_negu(Register rd, Register rs) {
  as_subu(rd, zero, rs);
}
127
// Bitwise NOT: rd = ~rs, expressed as NOR with $zero.
void MacroAssemblerMIPSShared::ma_not(Register rd, Register rs) {
  as_nor(rd, rs, zero);
}
131
132 // Bit extract/insert
ma_ext(Register rt,Register rs,uint16_t pos,uint16_t size)133 void MacroAssemblerMIPSShared::ma_ext(Register rt, Register rs, uint16_t pos,
134 uint16_t size) {
135 MOZ_ASSERT(pos < 32);
136 MOZ_ASSERT(pos + size < 33);
137
138 if (hasR2()) {
139 as_ext(rt, rs, pos, size);
140 } else {
141 int shift_left = 32 - (pos + size);
142 as_sll(rt, rs, shift_left);
143 int shift_right = 32 - size;
144 if (shift_right > 0) {
145 as_srl(rt, rt, shift_right);
146 }
147 }
148 }
149
// Insert the low |size| bits of rs into rt at bit position |pos|, leaving
// the other bits of rt unchanged. Uses the R2 ins instruction when
// available, otherwise masks and merges by hand.
void MacroAssemblerMIPSShared::ma_ins(Register rt, Register rs, uint16_t pos,
                                      uint16_t size) {
  MOZ_ASSERT(pos < 32);
  MOZ_ASSERT(pos + size <= 32);
  MOZ_ASSERT(size != 0);

  if (hasR2()) {
    as_ins(rt, rs, pos, size);
  } else {
    ScratchRegisterScope scratch(asMasm());
    SecondScratchRegisterScope scratch2(asMasm());
    // scratch = mask of the |size| low bits: all-ones >> (32 - size).
    ma_subu(scratch, zero, Imm32(1));
    as_srl(scratch, scratch, 32 - size);
    // scratch2 = the field from rs, masked and shifted into position.
    as_and(scratch2, rs, scratch);
    as_sll(scratch2, scratch2, pos);
    // scratch = inverse of the positioned mask; clear the field in rt.
    as_sll(scratch, scratch, pos);
    as_nor(scratch, scratch, zero);
    as_and(scratch, rt, scratch);
    // Merge the new field into the cleared slot.
    as_or(rt, scratch2, scratch);
  }
}
171
// Sign extend
// Sign-extend the low byte of rt into rd (seb on R2, sll/sra pair otherwise).
void MacroAssemblerMIPSShared::ma_seb(Register rd, Register rt) {
  if (hasR2()) {
    as_seb(rd, rt);
  } else {
    as_sll(rd, rt, 24);
    as_sra(rd, rd, 24);
  }
}
181
// Sign-extend the low halfword of rt into rd (seh on R2, sll/sra otherwise).
void MacroAssemblerMIPSShared::ma_seh(Register rd, Register rt) {
  if (hasR2()) {
    as_seh(rd, rt);
  } else {
    as_sll(rd, rt, 16);
    as_sra(rd, rd, 16);
  }
}
190
// And.
// In-place bitwise AND: rd &= rs.
void MacroAssemblerMIPSShared::ma_and(Register rd, Register rs) {
  as_and(rd, rd, rs);
}
195
// In-place bitwise AND with an immediate: rd &= imm.
void MacroAssemblerMIPSShared::ma_and(Register rd, Imm32 imm) {
  ma_and(rd, rd, imm);
}
199
// rd = rs & imm. andi zero-extends its 16-bit immediate, so only values in
// unsigned 16-bit range can be encoded directly; otherwise materialize imm.
void MacroAssemblerMIPSShared::ma_and(Register rd, Register rs, Imm32 imm) {
  if (Imm16::IsInUnsignedRange(imm.value)) {
    as_andi(rd, rs, imm.value);
  } else {
    ma_li(ScratchRegister, imm);
    as_and(rd, rs, ScratchRegister);
  }
}
208
// Or.
// In-place bitwise OR: rd |= rs.
void MacroAssemblerMIPSShared::ma_or(Register rd, Register rs) {
  as_or(rd, rd, rs);
}
213
// In-place bitwise OR with an immediate: rd |= imm.
void MacroAssemblerMIPSShared::ma_or(Register rd, Imm32 imm) {
  ma_or(rd, rd, imm);
}
217
// rd = rs | imm. ori zero-extends its 16-bit immediate; fall back to
// materializing imm when it does not fit.
void MacroAssemblerMIPSShared::ma_or(Register rd, Register rs, Imm32 imm) {
  if (Imm16::IsInUnsignedRange(imm.value)) {
    as_ori(rd, rs, imm.value);
  } else {
    ma_li(ScratchRegister, imm);
    as_or(rd, rs, ScratchRegister);
  }
}
226
// xor
// In-place bitwise XOR: rd ^= rs.
void MacroAssemblerMIPSShared::ma_xor(Register rd, Register rs) {
  as_xor(rd, rd, rs);
}
231
// In-place bitwise XOR with an immediate: rd ^= imm.
void MacroAssemblerMIPSShared::ma_xor(Register rd, Imm32 imm) {
  ma_xor(rd, rd, imm);
}
235
// rd = rs ^ imm. xori zero-extends its 16-bit immediate; fall back to
// materializing imm when it does not fit.
void MacroAssemblerMIPSShared::ma_xor(Register rd, Register rs, Imm32 imm) {
  if (Imm16::IsInUnsignedRange(imm.value)) {
    as_xori(rd, rs, imm.value);
  } else {
    ma_li(ScratchRegister, imm);
    as_xor(rd, rs, ScratchRegister);
  }
}
244
// word swap bytes within halfwords
void MacroAssemblerMIPSShared::ma_wsbh(Register rd, Register rt) {
  as_wsbh(rd, rt);
}
249
// Count trailing zeros of rs into rd (yields 32 for rs == 0).
// (rs ^ (rs - 1)) & (rs - 1) produces a mask of exactly the trailing-zero
// bits; its leading-zero count is 32 - ctz, so ctz = 32 - clz(mask).
void MacroAssemblerMIPSShared::ma_ctz(Register rd, Register rs) {
  as_addiu(ScratchRegister, rs, -1);
  as_xor(rd, ScratchRegister, rs);
  as_and(rd, rd, ScratchRegister);
  as_clz(rd, rd);
  ma_li(ScratchRegister, Imm32(0x20));
  as_subu(rd, ScratchRegister, rd);
}
258
// Arithmetic-based ops.

// Add.
// rd = rs + imm (no overflow trap). Single addiu when imm fits in a signed
// 16-bit immediate; otherwise materialize imm into the scratch register.
void MacroAssemblerMIPSShared::ma_addu(Register rd, Register rs, Imm32 imm) {
  if (Imm16::IsInSignedRange(imm.value)) {
    as_addiu(rd, rs, imm.value);
  } else {
    ma_li(ScratchRegister, imm);
    as_addu(rd, rs, ScratchRegister);
  }
}
270
// In-place add: rd += rs (no overflow trap).
void MacroAssemblerMIPSShared::ma_addu(Register rd, Register rs) {
  as_addu(rd, rd, rs);
}
274
// In-place add with an immediate: rd += imm.
void MacroAssemblerMIPSShared::ma_addu(Register rd, Imm32 imm) {
  ma_addu(rd, rd, imm);
}
278
// rd = rs + rt, branching to |overflow| on unsigned carry (CarrySet) or its
// absence (CarryClear). Carry is detected with an unsigned compare: the sum
// wrapped iff it is smaller than one of the unclobbered operands.
void MacroAssemblerMIPSShared::ma_add32TestCarry(Condition cond, Register rd,
                                                 Register rs, Register rt,
                                                 Label* overflow) {
  MOZ_ASSERT(cond == Assembler::CarrySet || cond == Assembler::CarryClear);
  // If rd aliases rs, compare against rt instead, so we compare against an
  // operand the add did not overwrite.
  MOZ_ASSERT_IF(rd == rs, rt != rd);
  as_addu(rd, rs, rt);
  as_sltu(SecondScratchReg, rd, rd == rs ? rt : rs);
  ma_b(SecondScratchReg, SecondScratchReg, overflow,
       cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
}
289
// Immediate form of ma_add32TestCarry: materialize imm and delegate.
void MacroAssemblerMIPSShared::ma_add32TestCarry(Condition cond, Register rd,
                                                 Register rs, Imm32 imm,
                                                 Label* overflow) {
  ma_li(ScratchRegister, imm);
  ma_add32TestCarry(cond, rd, rs, ScratchRegister, overflow);
}
296
297 // Subtract.
ma_subu(Register rd,Register rs,Imm32 imm)298 void MacroAssemblerMIPSShared::ma_subu(Register rd, Register rs, Imm32 imm) {
299 if (Imm16::IsInSignedRange(-imm.value)) {
300 as_addiu(rd, rs, -imm.value);
301 } else {
302 ma_li(ScratchRegister, imm);
303 as_subu(rd, rs, ScratchRegister);
304 }
305 }
306
// In-place subtract with an immediate: rd -= imm.
void MacroAssemblerMIPSShared::ma_subu(Register rd, Imm32 imm) {
  ma_subu(rd, rd, imm);
}
310
// In-place subtract: rd -= rs (no overflow trap).
void MacroAssemblerMIPSShared::ma_subu(Register rd, Register rs) {
  as_subu(rd, rd, rs);
}
314
// rd = rs - imm, branching to |overflow| on signed 32-bit overflow.
// Normally rewritten as an add of -imm; INT32_MIN cannot be negated
// (its negation is not representable), so it takes the register path.
void MacroAssemblerMIPSShared::ma_sub32TestOverflow(Register rd, Register rs,
                                                    Imm32 imm,
                                                    Label* overflow) {
  if (imm.value != INT32_MIN) {
    asMasm().ma_add32TestOverflow(rd, rs, Imm32(-imm.value), overflow);
  } else {
    ma_li(ScratchRegister, Imm32(imm.value));
    asMasm().ma_sub32TestOverflow(rd, rs, ScratchRegister, overflow);
  }
}
325
// rd = low 32 bits of rs * imm.
void MacroAssemblerMIPSShared::ma_mul(Register rd, Register rs, Imm32 imm) {
  ma_li(ScratchRegister, imm);
  as_mul(rd, rs, ScratchRegister);
}
330
// rd = rs * rt (low 32 bits), branching to |overflow| on signed overflow.
// The product fits in 32 bits iff the high word equals the sign-extension
// of the low word; compare muh/mfhi against sra(rd, 31).
void MacroAssemblerMIPSShared::ma_mul32TestOverflow(Register rd, Register rs,
                                                    Register rt,
                                                    Label* overflow) {
#ifdef MIPSR6
  // as_mul writes rd before as_muh reads its sources, so preserve rs if rd
  // aliases it.
  // NOTE(review): if rd aliases rt (but not rs), the R6 path still clobbers
  // rt before as_muh reads it — presumably callers avoid that; confirm.
  if (rd == rs) {
    ma_move(SecondScratchReg, rs);
    rs = SecondScratchReg;
  }
  as_mul(rd, rs, rt);
  as_muh(SecondScratchReg, rs, rt);
#else
  // Pre-R6: full 64-bit product lands in hi/lo.
  as_mult(rs, rt);
  as_mflo(rd);
  as_mfhi(SecondScratchReg);
#endif
  as_sra(ScratchRegister, rd, 31);
  ma_b(ScratchRegister, SecondScratchReg, overflow, Assembler::NotEqual);
}
349
// Immediate form of ma_mul32TestOverflow: materialize imm and delegate.
void MacroAssemblerMIPSShared::ma_mul32TestOverflow(Register rd, Register rs,
                                                    Imm32 imm,
                                                    Label* overflow) {
  ma_li(ScratchRegister, imm);
  ma_mul32TestOverflow(rd, rs, ScratchRegister, overflow);
}
356
// rd = rs / rt, branching to |overflow| if the division is not exact
// (i.e. the remainder is non-zero).
void MacroAssemblerMIPSShared::ma_div_branch_overflow(Register rd, Register rs,
                                                      Register rt,
                                                      Label* overflow) {
#ifdef MIPSR6
  // R6 computes mod and div with separate instructions; preserve rs if rd
  // aliases it so the later as_div still sees the original dividend.
  if (rd == rs) {
    ma_move(SecondScratchReg, rs);
    rs = SecondScratchReg;
  }
  as_mod(ScratchRegister, rs, rt);
#else
  // Pre-R6: one div yields quotient in lo and remainder in hi.
  as_div(rs, rt);
  as_mfhi(ScratchRegister);
#endif
  // Non-zero remainder => inexact division => branch out.
  ma_b(ScratchRegister, ScratchRegister, overflow, Assembler::NonZero);
#ifdef MIPSR6
  as_div(rd, rs, rt);
#else
  as_mflo(rd);
#endif
}
377
// Immediate form of ma_div_branch_overflow: materialize imm and delegate.
void MacroAssemblerMIPSShared::ma_div_branch_overflow(Register rd, Register rs,
                                                      Imm32 imm,
                                                      Label* overflow) {
  ma_li(ScratchRegister, imm);
  ma_div_branch_overflow(rd, rs, ScratchRegister, overflow);
}
384
// Compute dest = src % ((1 << shift) - 1) without a divide, preserving the
// sign of src. If |negZero| is non-null, jump there when the result would be
// negative zero.
void MacroAssemblerMIPSShared::ma_mod_mask(Register src, Register dest,
                                           Register hold, Register remain,
                                           int32_t shift, Label* negZero) {
  // MATH:
  // We wish to compute x % ((1<<y) - 1) for a known constant, y.
  // First, let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit
  // dividend as a number in base b, namely
  // c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
  // now, since both addition and multiplication commute with modulus,
  // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
  // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
  // now, since b == C + 1, b % C == 1, and b^n % C == 1
  // this means that the whole thing simplifies to:
  // (c_0 + c_1 + c_2 ... + c_n) % C
  // each c_n can easily be computed by a shift/bitextract, and the modulus
  // can be maintained by simply subtracting by C whenever the number gets
  // over C.
  int32_t mask = (1 << shift) - 1;
  Label head, negative, sumSigned, done;

  // hold holds -1 if the value was negative, 1 otherwise.
  // remain holds the remaining bits that have not been processed
  // SecondScratchReg serves as a temporary location to store extracted bits
  // into as well as holding the trial subtraction as a temp value dest is
  // the accumulator (and holds the final result)

  // move the whole value into the remain.
  ma_move(remain, src);
  // Zero out the dest.
  ma_li(dest, Imm32(0));
  // Set the hold appropriately.
  ma_b(remain, remain, &negative, Signed, ShortJump);
  ma_li(hold, Imm32(1));
  ma_b(&head, ShortJump);

  bind(&negative);
  ma_li(hold, Imm32(-1));
  // Work on the absolute value so the digit loop only sees non-negatives.
  ma_negu(remain, remain);

  // Begin the main loop.
  bind(&head);

  // Extract the bottom bits into SecondScratchReg.
  ma_and(SecondScratchReg, remain, Imm32(mask));
  // Add those bits to the accumulator.
  as_addu(dest, dest, SecondScratchReg);
  // Do a trial subtraction
  ma_subu(SecondScratchReg, dest, Imm32(mask));
  // If (sum - C) >= 0, store sum - C back into sum, thus performing a
  // modulus.
  ma_b(SecondScratchReg, SecondScratchReg, &sumSigned, Signed, ShortJump);
  ma_move(dest, SecondScratchReg);
  bind(&sumSigned);
  // Get rid of the bits that we extracted before.
  as_srl(remain, remain, shift);
  // If the shift produced zero, finish, otherwise, continue in the loop.
  ma_b(remain, remain, &head, NonZero, ShortJump);
  // Check the hold to see if we need to negate the result.
  ma_b(hold, hold, &done, NotSigned, ShortJump);

  // If the hold was negative, negate the result to be in line with
  // what JS wants
  if (negZero != nullptr) {
    // Jump out in case of negative zero.
    ma_b(hold, hold, negZero, Zero);
    ma_negu(dest, dest);
  } else {
    ma_negu(dest, dest);
  }

  bind(&done);
}
457
// Memory.

// Load from a base+index*scale+offset address. On Loongson a single indexed
// load instruction is used when the offset fits in a signed 8-bit field;
// otherwise fall back to computing the scaled address first.
void MacroAssemblerMIPSShared::ma_load(Register dest, const BaseIndex& src,
                                       LoadStoreSize size,
                                       LoadStoreExtension extension) {
  // Loongson's gsl*x loads are sign-extending, hence the ZeroExtend check.
  if (isLoongson() && ZeroExtend != extension &&
      Imm8::IsInSignedRange(src.offset)) {
    Register index = src.index;

    if (src.scale != TimesOne) {
      int32_t shift = Imm32::ShiftOf(src.scale).value;

      MOZ_ASSERT(SecondScratchReg != src.base);
      index = SecondScratchReg;
#ifdef JS_CODEGEN_MIPS64
      asMasm().ma_dsll(index, src.index, Imm32(shift));
#else
      asMasm().ma_sll(index, src.index, Imm32(shift));
#endif
    }

    switch (size) {
      case SizeByte:
        as_gslbx(dest, src.base, index, src.offset);
        break;
      case SizeHalfWord:
        as_gslhx(dest, src.base, index, src.offset);
        break;
      case SizeWord:
        as_gslwx(dest, src.base, index, src.offset);
        break;
      case SizeDouble:
        as_gsldx(dest, src.base, index, src.offset);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_load");
    }
    return;
  }

  // Generic path: fold base+index*scale into SecondScratchReg, then do a
  // plain offset load.
  asMasm().computeScaledAddress(src, SecondScratchReg);
  asMasm().ma_load(dest, Address(SecondScratchReg, src.offset), size,
                   extension);
}
502
// Load from a possibly-unaligned base+index*scale+offset address, using
// byte loads (halfword) or lwl/lwr / ldl/ldr pairs (word/double).
void MacroAssemblerMIPSShared::ma_load_unaligned(Register dest,
                                                 const BaseIndex& src,
                                                 LoadStoreSize size,
                                                 LoadStoreExtension extension) {
  int16_t lowOffset, hiOffset;
  SecondScratchRegisterScope base(asMasm());
  asMasm().computeScaledAddress(src, base);
  ScratchRegisterScope scratch(asMasm());

  // Both the low and high byte of the access must be reachable from a
  // signed 16-bit offset; otherwise fold the offset into the base.
  if (Imm16::IsInSignedRange(src.offset) &&
      Imm16::IsInSignedRange(src.offset + size / 8 - 1)) {
    lowOffset = Imm16(src.offset).encode();
    hiOffset = Imm16(src.offset + size / 8 - 1).encode();
  } else {
    ma_li(scratch, Imm32(src.offset));
    asMasm().addPtr(scratch, base);
    lowOffset = Imm16(0).encode();
    hiOffset = Imm16(size / 8 - 1).encode();
  }

  switch (size) {
    case SizeHalfWord:
      // High byte carries the extension; low byte is always zero-extended
      // and the high byte is inserted above it.
      MOZ_ASSERT(dest != scratch);
      if (extension == ZeroExtend) {
        as_lbu(scratch, base, hiOffset);
      } else {
        as_lb(scratch, base, hiOffset);
      }
      as_lbu(dest, base, lowOffset);
      ma_ins(dest, scratch, 8, 24);
      break;
    case SizeWord:
      // lwl reads dest as a partial destination, so dest must not alias base.
      MOZ_ASSERT(dest != base);
      as_lwl(dest, base, hiOffset);
      as_lwr(dest, base, lowOffset);
#ifdef JS_CODEGEN_MIPS64
      if (extension == ZeroExtend) {
        as_dext(dest, dest, 0, 32);
      }
#endif
      break;
#ifdef JS_CODEGEN_MIPS64
    case SizeDouble:
      MOZ_ASSERT(dest != base);
      as_ldl(dest, base, hiOffset);
      as_ldr(dest, base, lowOffset);
      break;
#endif
    default:
      MOZ_CRASH("Invalid argument for ma_load_unaligned");
  }
}
555
// Load from a possibly-unaligned base+offset address, using byte loads
// (halfword) or lwl/lwr / ldl/ldr pairs (word/double).
void MacroAssemblerMIPSShared::ma_load_unaligned(Register dest,
                                                 const Address& address,
                                                 LoadStoreSize size,
                                                 LoadStoreExtension extension) {
  int16_t lowOffset, hiOffset;
  ScratchRegisterScope scratch1(asMasm());
  SecondScratchRegisterScope scratch2(asMasm());
  Register base;

  // Both ends of the access must be reachable from a signed 16-bit offset;
  // otherwise fold the offset into scratch1 and use it as the base.
  if (Imm16::IsInSignedRange(address.offset) &&
      Imm16::IsInSignedRange(address.offset + size / 8 - 1)) {
    base = address.base;
    lowOffset = Imm16(address.offset).encode();
    hiOffset = Imm16(address.offset + size / 8 - 1).encode();
  } else {
    ma_li(scratch1, Imm32(address.offset));
    asMasm().addPtr(address.base, scratch1);
    base = scratch1;
    lowOffset = Imm16(0).encode();
    hiOffset = Imm16(size / 8 - 1).encode();
  }

  switch (size) {
    case SizeHalfWord:
      // High byte carries the extension; low byte is zero-extended and the
      // high byte is inserted above it.
      MOZ_ASSERT(base != scratch2 && dest != scratch2);
      if (extension == ZeroExtend) {
        as_lbu(scratch2, base, hiOffset);
      } else {
        as_lb(scratch2, base, hiOffset);
      }
      as_lbu(dest, base, lowOffset);
      ma_ins(dest, scratch2, 8, 24);
      break;
    case SizeWord:
      // lwl treats dest as a partial destination, so dest must not alias
      // the base register.
      MOZ_ASSERT(dest != base);
      as_lwl(dest, base, hiOffset);
      as_lwr(dest, base, lowOffset);
#ifdef JS_CODEGEN_MIPS64
      if (extension == ZeroExtend) {
        as_dext(dest, dest, 0, 32);
      }
#endif
      break;
#ifdef JS_CODEGEN_MIPS64
    case SizeDouble:
      MOZ_ASSERT(dest != base);
      as_ldl(dest, base, hiOffset);
      as_ldr(dest, base, lowOffset);
      break;
#endif
    default:
      MOZ_CRASH("Invalid argument for ma_load_unaligned");
  }
}
610
// Wasm variant of the unaligned load: same code shape as above, but records
// the offset of the first (faulting) load instruction against |access| so
// the wasm signal handler can identify the trapping memory access.
void MacroAssemblerMIPSShared::ma_load_unaligned(
    const wasm::MemoryAccessDesc& access, Register dest, const BaseIndex& src,
    Register temp, LoadStoreSize size, LoadStoreExtension extension) {
  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
  int16_t lowOffset, hiOffset;
  Register base;

  asMasm().computeScaledAddress(src, SecondScratchReg);

  // Both ends of the access must be reachable from a signed 16-bit offset;
  // otherwise fold the offset into ScratchRegister and use it as the base.
  if (Imm16::IsInSignedRange(src.offset) &&
      Imm16::IsInSignedRange(src.offset + size / 8 - 1)) {
    base = SecondScratchReg;
    lowOffset = Imm16(src.offset).encode();
    hiOffset = Imm16(src.offset + size / 8 - 1).encode();
  } else {
    ma_li(ScratchRegister, Imm32(src.offset));
    asMasm().addPtr(SecondScratchReg, ScratchRegister);
    base = ScratchRegister;
    lowOffset = Imm16(0).encode();
    hiOffset = Imm16(size / 8 - 1).encode();
  }

  // |load| remembers the first emitted load so its offset can be appended
  // to the access metadata below.
  BufferOffset load;
  switch (size) {
    case SizeHalfWord:
      if (extension == ZeroExtend) {
        load = as_lbu(temp, base, hiOffset);
      } else {
        load = as_lb(temp, base, hiOffset);
      }
      as_lbu(dest, base, lowOffset);
      ma_ins(dest, temp, 8, 24);
      break;
    case SizeWord:
      load = as_lwl(dest, base, hiOffset);
      as_lwr(dest, base, lowOffset);
#ifdef JS_CODEGEN_MIPS64
      if (extension == ZeroExtend) {
        as_dext(dest, dest, 0, 32);
      }
#endif
      break;
#ifdef JS_CODEGEN_MIPS64
    case SizeDouble:
      load = as_ldl(dest, base, hiOffset);
      as_ldr(dest, base, lowOffset);
      break;
#endif
    default:
      MOZ_CRASH("Invalid argument for ma_load");
  }

  append(access, load.getOffset());
}
665
// Store to a base+index*scale+offset address. On Loongson a single indexed
// store instruction is used when the offset fits in a signed 8-bit field;
// otherwise compute the scaled address first.
void MacroAssemblerMIPSShared::ma_store(Register data, const BaseIndex& dest,
                                        LoadStoreSize size,
                                        LoadStoreExtension extension) {
  if (isLoongson() && Imm8::IsInSignedRange(dest.offset)) {
    Register index = dest.index;

    if (dest.scale != TimesOne) {
      int32_t shift = Imm32::ShiftOf(dest.scale).value;

      MOZ_ASSERT(SecondScratchReg != dest.base);
      index = SecondScratchReg;
#ifdef JS_CODEGEN_MIPS64
      asMasm().ma_dsll(index, dest.index, Imm32(shift));
#else
      asMasm().ma_sll(index, dest.index, Imm32(shift));
#endif
    }

    switch (size) {
      case SizeByte:
        as_gssbx(data, dest.base, index, dest.offset);
        break;
      case SizeHalfWord:
        as_gsshx(data, dest.base, index, dest.offset);
        break;
      case SizeWord:
        as_gsswx(data, dest.base, index, dest.offset);
        break;
      case SizeDouble:
        as_gssdx(data, dest.base, index, dest.offset);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_store");
    }
    return;
  }

  // Generic path: fold base+index*scale into SecondScratchReg, then do a
  // plain offset store.
  asMasm().computeScaledAddress(dest, SecondScratchReg);
  asMasm().ma_store(data, Address(SecondScratchReg, dest.offset), size,
                    extension);
}
707
// Store an immediate value to a base+index*scale+offset address. A zero
// immediate is stored directly from $zero without materialization.
void MacroAssemblerMIPSShared::ma_store(Imm32 imm, const BaseIndex& dest,
                                        LoadStoreSize size,
                                        LoadStoreExtension extension) {
  if (isLoongson() && Imm8::IsInSignedRange(dest.offset)) {
    Register data = zero;
    Register index = dest.index;

    if (imm.value) {
      MOZ_ASSERT(ScratchRegister != dest.base);
      MOZ_ASSERT(ScratchRegister != dest.index);
      data = ScratchRegister;
      ma_li(data, imm);
    }

    if (dest.scale != TimesOne) {
      int32_t shift = Imm32::ShiftOf(dest.scale).value;

      MOZ_ASSERT(SecondScratchReg != dest.base);
      index = SecondScratchReg;
#ifdef JS_CODEGEN_MIPS64
      asMasm().ma_dsll(index, dest.index, Imm32(shift));
#else
      asMasm().ma_sll(index, dest.index, Imm32(shift));
#endif
    }

    switch (size) {
      case SizeByte:
        as_gssbx(data, dest.base, index, dest.offset);
        break;
      case SizeHalfWord:
        as_gsshx(data, dest.base, index, dest.offset);
        break;
      case SizeWord:
        as_gsswx(data, dest.base, index, dest.offset);
        break;
      case SizeDouble:
        as_gssdx(data, dest.base, index, dest.offset);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_store");
    }
    return;
  }

  // Make sure that SecondScratchReg contains absolute address so that
  // offset is 0.
  asMasm().computeEffectiveAddress(dest, SecondScratchReg);

  // Scratch register is free now, use it for loading imm value
  ma_li(ScratchRegister, imm);

  // with offset=0 ScratchRegister will not be used in ma_store()
  // so we can use it as a parameter here
  asMasm().ma_store(ScratchRegister, Address(SecondScratchReg, 0), size,
                    extension);
}
765
// Store to a possibly-unaligned base+offset address, using byte stores
// (halfword) or swl/swr / sdl/sdr pairs (word/double).
void MacroAssemblerMIPSShared::ma_store_unaligned(Register data,
                                                  const Address& address,
                                                  LoadStoreSize size) {
  int16_t lowOffset, hiOffset;
  ScratchRegisterScope scratch(asMasm());
  Register base;

  // Both ends of the access must be reachable from a signed 16-bit offset;
  // otherwise fold the offset into the scratch register and use it as base.
  if (Imm16::IsInSignedRange(address.offset) &&
      Imm16::IsInSignedRange(address.offset + size / 8 - 1)) {
    base = address.base;
    lowOffset = Imm16(address.offset).encode();
    hiOffset = Imm16(address.offset + size / 8 - 1).encode();
  } else {
    ma_li(scratch, Imm32(address.offset));
    asMasm().addPtr(address.base, scratch);
    base = scratch;
    lowOffset = Imm16(0).encode();
    hiOffset = Imm16(size / 8 - 1).encode();
  }

  switch (size) {
    case SizeHalfWord: {
      // Store the low byte, then extract bits 8..15 of data and store them
      // as the high byte.
      SecondScratchRegisterScope scratch2(asMasm());
      MOZ_ASSERT(base != scratch2);
      as_sb(data, base, lowOffset);
      ma_ext(scratch2, data, 8, 8);
      as_sb(scratch2, base, hiOffset);
      break;
    }
    case SizeWord:
      as_swl(data, base, hiOffset);
      as_swr(data, base, lowOffset);
      break;
#ifdef JS_CODEGEN_MIPS64
    case SizeDouble:
      as_sdl(data, base, hiOffset);
      as_sdr(data, base, lowOffset);
      break;
#endif
    default:
      MOZ_CRASH("Invalid argument for ma_store_unaligned");
  }
}
809
// Store to a possibly-unaligned base+index*scale+offset address, using byte
// stores (halfword) or swl/swr / sdl/sdr pairs (word/double).
void MacroAssemblerMIPSShared::ma_store_unaligned(Register data,
                                                  const BaseIndex& dest,
                                                  LoadStoreSize size) {
  int16_t lowOffset, hiOffset;
  SecondScratchRegisterScope base(asMasm());
  asMasm().computeScaledAddress(dest, base);
  ScratchRegisterScope scratch(asMasm());

  // Both ends of the access must be reachable from a signed 16-bit offset;
  // otherwise fold the offset into the computed base.
  if (Imm16::IsInSignedRange(dest.offset) &&
      Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
    lowOffset = Imm16(dest.offset).encode();
    hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
  } else {
    ma_li(scratch, Imm32(dest.offset));
    asMasm().addPtr(scratch, base);
    lowOffset = Imm16(0).encode();
    hiOffset = Imm16(size / 8 - 1).encode();
  }

  switch (size) {
    case SizeHalfWord:
      // Store the low byte, then extract bits 8..15 of data and store them
      // as the high byte.
      MOZ_ASSERT(base != scratch);
      as_sb(data, base, lowOffset);
      ma_ext(scratch, data, 8, 8);
      as_sb(scratch, base, hiOffset);
      break;
    case SizeWord:
      as_swl(data, base, hiOffset);
      as_swr(data, base, lowOffset);
      break;
#ifdef JS_CODEGEN_MIPS64
    case SizeDouble:
      as_sdl(data, base, hiOffset);
      as_sdr(data, base, lowOffset);
      break;
#endif
    default:
      MOZ_CRASH("Invalid argument for ma_store_unaligned");
  }
}
850
// Wasm variant of the unaligned store: records the offset of the first
// (faulting) store instruction against |access| so the wasm signal handler
// can identify the trapping memory access.
void MacroAssemblerMIPSShared::ma_store_unaligned(
    const wasm::MemoryAccessDesc& access, Register data, const BaseIndex& dest,
    Register temp, LoadStoreSize size, LoadStoreExtension extension) {
  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
  int16_t lowOffset, hiOffset;
  Register base;

  asMasm().computeScaledAddress(dest, SecondScratchReg);

  // Both ends of the access must be reachable from a signed 16-bit offset;
  // otherwise fold the offset into ScratchRegister and use it as the base.
  if (Imm16::IsInSignedRange(dest.offset) &&
      Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
    base = SecondScratchReg;
    lowOffset = Imm16(dest.offset).encode();
    hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
  } else {
    ma_li(ScratchRegister, Imm32(dest.offset));
    asMasm().addPtr(SecondScratchReg, ScratchRegister);
    base = ScratchRegister;
    lowOffset = Imm16(0).encode();
    hiOffset = Imm16(size / 8 - 1).encode();
  }

  // |store| remembers the first emitted store so its offset can be appended
  // to the access metadata below.
  BufferOffset store;
  switch (size) {
    case SizeHalfWord:
      ma_ext(temp, data, 8, 8);
      store = as_sb(temp, base, hiOffset);
      as_sb(data, base, lowOffset);
      break;
    case SizeWord:
      store = as_swl(data, base, hiOffset);
      as_swr(data, base, lowOffset);
      break;
#ifdef JS_CODEGEN_MIPS64
    case SizeDouble:
      store = as_sdl(data, base, hiOffset);
      as_sdr(data, base, lowOffset);
      break;
#endif
    default:
      MOZ_CRASH("Invalid argument for ma_store");
  }
  append(access, store.getOffset());
}
895
// Branches when done from within mips-specific code.
// Conditional branch on (lhs ? rhs). Equality and zero-tests map directly
// to branch instructions; other conditions are materialized via ma_cmp and
// branched on as a compare-against-zero.
void MacroAssemblerMIPSShared::ma_b(Register lhs, Register rhs, Label* label,
                                    Condition c, JumpKind jumpKind) {
  switch (c) {
    case Equal:
    case NotEqual:
      asMasm().branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
      break;
    case Always:
      ma_b(label, jumpKind);
      break;
    case Zero:
    case NonZero:
    case Signed:
    case NotSigned:
      // Unary tests: by convention the caller passes the same register twice.
      MOZ_ASSERT(lhs == rhs);
      asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
      break;
    default:
      // Relational condition: compute a 0/1 result into ScratchRegister and
      // branch on the returned equality condition.
      Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
      asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
                              jumpKind);
      break;
  }
}
921
// Conditional branch of lhs against an immediate. Comparisons against zero
// are special-cased: unsigned >= 0 is always true, unsigned < 0 is always
// false, and the remaining conditions map onto the unary branch forms.
void MacroAssemblerMIPSShared::ma_b(Register lhs, Imm32 imm, Label* label,
                                    Condition c, JumpKind jumpKind) {
  MOZ_ASSERT(c != Overflow);
  if (imm.value == 0) {
    if (c == Always || c == AboveOrEqual) {
      ma_b(label, jumpKind);
    } else if (c == Below) {
      ; // This condition is always false. No branch required.
    } else {
      asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
    }
  } else {
    switch (c) {
      case Equal:
      case NotEqual:
        // The immediate is materialized in ScratchRegister, so lhs must not
        // already live there.
        MOZ_ASSERT(lhs != ScratchRegister);
        ma_li(ScratchRegister, imm);
        ma_b(lhs, ScratchRegister, label, c, jumpKind);
        break;
      default:
        // Relational condition: compute a 0/1 result and branch on it.
        Condition cond = ma_cmp(ScratchRegister, lhs, imm, c);
        asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
                                jumpKind);
    }
  }
}
948
// Pointer-immediate branch: forwarded to the word-sized overload.
void MacroAssemblerMIPSShared::ma_b(Register lhs, ImmPtr imm, Label* l,
                                    Condition c, JumpKind jumpKind) {
  asMasm().ma_b(lhs, ImmWord(uintptr_t(imm.value)), l, c, jumpKind);
}
953
// Unconditional branch to label.
void MacroAssemblerMIPSShared::ma_b(Label* label, JumpKind jumpKind) {
  asMasm().branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
}
957
// Materialize the relational condition |c| over (lhs, rhs) as a 0/1 value
// in dest using slt/sltu, and return the condition (Equal or NotEqual,
// i.e. a zero-test on dest) the caller should branch on.
Assembler::Condition MacroAssemblerMIPSShared::ma_cmp(Register dest,
                                                      Register lhs,
                                                      Register rhs,
                                                      Condition c) {
  switch (c) {
    case Above:
      // bgtu s,t,label =>
      //   sltu at,t,s
      //   bne at,$zero,offs
      as_sltu(dest, rhs, lhs);
      return NotEqual;
    case AboveOrEqual:
      // bgeu s,t,label =>
      //   sltu at,s,t
      //   beq at,$zero,offs
      as_sltu(dest, lhs, rhs);
      return Equal;
    case Below:
      // bltu s,t,label =>
      //   sltu at,s,t
      //   bne at,$zero,offs
      as_sltu(dest, lhs, rhs);
      return NotEqual;
    case BelowOrEqual:
      // bleu s,t,label =>
      //   sltu at,t,s
      //   beq at,$zero,offs
      as_sltu(dest, rhs, lhs);
      return Equal;
    case GreaterThan:
      // bgt s,t,label =>
      //   slt at,t,s
      //   bne at,$zero,offs
      as_slt(dest, rhs, lhs);
      return NotEqual;
    case GreaterThanOrEqual:
      // bge s,t,label =>
      //   slt at,s,t
      //   beq at,$zero,offs
      as_slt(dest, lhs, rhs);
      return Equal;
    case LessThan:
      // blt s,t,label =>
      //   slt at,s,t
      //   bne at,$zero,offs
      as_slt(dest, lhs, rhs);
      return NotEqual;
    case LessThanOrEqual:
      // ble s,t,label =>
      //   slt at,t,s
      //   beq at,$zero,offs
      as_slt(dest, rhs, lhs);
      return Equal;
    default:
      MOZ_CRASH("Invalid condition.");
  }
  // Unreachable; keeps non-crashing compilers happy.
  return Always;
}
1016
ma_cmp(Register dest,Register lhs,Imm32 imm,Condition c)1017 Assembler::Condition MacroAssemblerMIPSShared::ma_cmp(Register dest,
1018 Register lhs, Imm32 imm,
1019 Condition c) {
1020 ScratchRegisterScope scratch(asMasm());
1021 MOZ_ASSERT(lhs != scratch);
1022
1023 switch (c) {
1024 case Above:
1025 case BelowOrEqual:
1026 if (Imm16::IsInSignedRange(imm.value + 1) && imm.value != -1) {
1027 // lhs <= rhs via lhs < rhs + 1 if rhs + 1 does not overflow
1028 as_sltiu(dest, lhs, imm.value + 1);
1029
1030 return (c == BelowOrEqual ? NotEqual : Equal);
1031 } else {
1032 ma_li(scratch, imm);
1033 as_sltu(dest, scratch, lhs);
1034 return (c == BelowOrEqual ? Equal : NotEqual);
1035 }
1036 case AboveOrEqual:
1037 case Below:
1038 if (Imm16::IsInSignedRange(imm.value)) {
1039 as_sltiu(dest, lhs, imm.value);
1040 } else {
1041 ma_li(scratch, imm);
1042 as_sltu(dest, lhs, scratch);
1043 }
1044 return (c == AboveOrEqual ? Equal : NotEqual);
1045 case GreaterThan:
1046 case LessThanOrEqual:
1047 if (Imm16::IsInSignedRange(imm.value + 1)) {
1048 // lhs <= rhs via lhs < rhs + 1.
1049 as_slti(dest, lhs, imm.value + 1);
1050 return (c == LessThanOrEqual ? NotEqual : Equal);
1051 } else {
1052 ma_li(scratch, imm);
1053 as_slt(dest, scratch, lhs);
1054 return (c == LessThanOrEqual ? Equal : NotEqual);
1055 }
1056 case GreaterThanOrEqual:
1057 case LessThan:
1058 if (Imm16::IsInSignedRange(imm.value)) {
1059 as_slti(dest, lhs, imm.value);
1060 } else {
1061 ma_li(scratch, imm);
1062 as_slt(dest, lhs, scratch);
1063 }
1064 return (c == GreaterThanOrEqual ? Equal : NotEqual);
1065 default:
1066 MOZ_CRASH("Invalid condition.");
1067 }
1068 return Always;
1069 }
1070
// Materialize the boolean result of |rs| <c> |rt| into |rd| as 0 or 1.
// Unary conditions (Zero, NonZero, Signed, NotSigned) require rs == rt and
// test |rs| against $zero. The inline comments show the pseudo-instruction
// each case expands.
void MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Register rt,
                                          Condition c) {
  switch (c) {
    case Equal:
      // seq d,s,t =>
      //   xor d,s,t
      //   sltiu d,d,1
      as_xor(rd, rs, rt);
      as_sltiu(rd, rd, 1);
      break;
    case NotEqual:
      // sne d,s,t =>
      //   xor d,s,t
      //   sltu d,$zero,d
      as_xor(rd, rs, rt);
      as_sltu(rd, zero, rd);
      break;
    case Above:
      // sgtu d,s,t =>
      //   sltu d,t,s
      as_sltu(rd, rt, rs);
      break;
    case AboveOrEqual:
      // sgeu d,s,t =>
      //   sltu d,s,t
      //   xori d,d,1
      as_sltu(rd, rs, rt);
      as_xori(rd, rd, 1);
      break;
    case Below:
      // sltu d,s,t
      as_sltu(rd, rs, rt);
      break;
    case BelowOrEqual:
      // sleu d,s,t =>
      //   sltu d,t,s
      //   xori d,d,1
      as_sltu(rd, rt, rs);
      as_xori(rd, rd, 1);
      break;
    case GreaterThan:
      // sgt d,s,t =>
      //   slt d,t,s
      as_slt(rd, rt, rs);
      break;
    case GreaterThanOrEqual:
      // sge d,s,t =>
      //   slt d,s,t
      //   xori d,d,1
      as_slt(rd, rs, rt);
      as_xori(rd, rd, 1);
      break;
    case LessThan:
      // slt d,s,t
      as_slt(rd, rs, rt);
      break;
    case LessThanOrEqual:
      // sle d,s,t =>
      //   slt d,t,s
      //   xori d,d,1
      as_slt(rd, rt, rs);
      as_xori(rd, rd, 1);
      break;
    case Zero:
      MOZ_ASSERT(rs == rt);
      // seq d,s,$zero =>
      //   sltiu d,s,1
      as_sltiu(rd, rs, 1);
      break;
    case NonZero:
      MOZ_ASSERT(rs == rt);
      // sne d,s,$zero =>
      //   sltu d,$zero,s
      as_sltu(rd, zero, rs);
      break;
    case Signed:
      MOZ_ASSERT(rs == rt);
      // Sign bit set <=> s < 0.
      as_slt(rd, rs, zero);
      break;
    case NotSigned:
      MOZ_ASSERT(rs == rt);
      // sge d,s,$zero =>
      //   slt d,s,$zero
      //   xori d,d,1
      as_slt(rd, rs, zero);
      as_xori(rd, rd, 1);
      break;
    default:
      MOZ_CRASH("Invalid condition.");
  }
}
1162
// Emit the FPU comparison for DoubleCondition |c| into condition bit |fcc|,
// and report through |*testKind| whether the caller should act when the bit
// is set (TestForTrue) or clear (TestForFalse). NaN semantics are obtained
// by choosing between the ordered (c.eq/c.olt/c.ole) and unordered-inclusive
// (c.un/c.ueq/c.ult/c.ule) compare variants, swapping operands where needed.
void MacroAssemblerMIPSShared::compareFloatingPoint(
    FloatFormat fmt, FloatRegister lhs, FloatRegister rhs, DoubleCondition c,
    FloatTestKind* testKind, FPConditionBit fcc) {
  switch (c) {
    case DoubleOrdered:
      // Ordered == not unordered.
      as_cun(fmt, lhs, rhs, fcc);
      *testKind = TestForFalse;
      break;
    case DoubleEqual:
      as_ceq(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleNotEqual:
      // Not (equal or unordered): NaN compares as "not equal" here is false.
      as_cueq(fmt, lhs, rhs, fcc);
      *testKind = TestForFalse;
      break;
    case DoubleGreaterThan:
      // lhs > rhs <=> rhs < lhs (ordered).
      as_colt(fmt, rhs, lhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleGreaterThanOrEqual:
      // lhs >= rhs <=> rhs <= lhs (ordered).
      as_cole(fmt, rhs, lhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleLessThan:
      as_colt(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleLessThanOrEqual:
      as_cole(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleUnordered:
      as_cun(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleEqualOrUnordered:
      as_cueq(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleNotEqualOrUnordered:
      // Not (strictly equal): true for NaN as well.
      as_ceq(fmt, lhs, rhs, fcc);
      *testKind = TestForFalse;
      break;
    case DoubleGreaterThanOrUnordered:
      as_cult(fmt, rhs, lhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleGreaterThanOrEqualOrUnordered:
      as_cule(fmt, rhs, lhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleLessThanOrUnordered:
      as_cult(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleLessThanOrEqualOrUnordered:
      as_cule(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    default:
      MOZ_CRASH("Invalid DoubleCondition.");
  }
}
1227
// Set |dest| to the 0/1 result of the double comparison |lhs| <c> |rhs|.
void MacroAssemblerMIPSShared::ma_cmp_set_double(Register dest,
                                                 FloatRegister lhs,
                                                 FloatRegister rhs,
                                                 DoubleCondition c) {
  FloatTestKind moveCondition;
  compareFloatingPoint(DoubleFloat, lhs, rhs, c, &moveCondition);

#ifdef MIPSR6
  // On R6 the comparison result is all-ones/all-zeros in an FPR (f24 here —
  // NOTE(review): presumably where the default fcc's result lands; confirm
  // against compareFloatingPoint's R6 encoding). For TestForTrue, masking
  // with 1 yields 0/1; for TestForFalse, adding 1 maps -1 -> 0 and 0 -> 1.
  as_mfc1(dest, FloatRegisters::f24);
  if (moveCondition == TestForTrue) {
    as_andi(dest, dest, 0x1);
  } else {
    as_addiu(dest, dest, 0x1);
  }
#else
  // Pre-R6: start with 1 and conditionally clear it via movf/movt on the FP
  // condition bit.
  ma_li(dest, Imm32(1));

  if (moveCondition == TestForTrue) {
    as_movf(dest, zero);
  } else {
    as_movt(dest, zero);
  }
#endif
}
1252
// Set |dest| to the 0/1 result of the single-precision comparison
// |lhs| <c> |rhs|. Mirrors ma_cmp_set_double.
void MacroAssemblerMIPSShared::ma_cmp_set_float32(Register dest,
                                                  FloatRegister lhs,
                                                  FloatRegister rhs,
                                                  DoubleCondition c) {
  FloatTestKind moveCondition;
  compareFloatingPoint(SingleFloat, lhs, rhs, c, &moveCondition);

#ifdef MIPSR6
  // R6 comparison result is all-ones/all-zeros in f24; mask to 0/1 for
  // TestForTrue, or add 1 (-1 -> 0, 0 -> 1) for TestForFalse.
  as_mfc1(dest, FloatRegisters::f24);
  if (moveCondition == TestForTrue) {
    as_andi(dest, dest, 0x1);
  } else {
    as_addiu(dest, dest, 0x1);
  }
#else
  // Pre-R6: start with 1 and conditionally clear via movf/movt.
  ma_li(dest, Imm32(1));

  if (moveCondition == TestForTrue) {
    as_movf(dest, zero);
  } else {
    as_movt(dest, zero);
  }
#endif
}
1277
// Materialize the boolean result of |rs| <c> |imm| into |rd| as 0 or 1.
// Comparisons against zero get dedicated single/double-instruction forms;
// other immediates go through xor (Equal/NotEqual) or ma_cmp.
void MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Imm32 imm,
                                          Condition c) {
  if (imm.value == 0) {
    switch (c) {
      case Equal:
      case BelowOrEqual:
        // rs == 0, or unsigned rs <= 0, both mean rs < 1.
        as_sltiu(rd, rs, 1);
        break;
      case NotEqual:
      case Above:
        // rs != 0 <=> 0 <u rs.
        as_sltu(rd, zero, rs);
        break;
      case AboveOrEqual:
      case Below:
        // Unsigned rs >= 0 is always true; rs < 0 always false.
        as_ori(rd, zero, c == AboveOrEqual ? 1 : 0);
        break;
      case GreaterThan:
      case LessThanOrEqual:
        // 0 < rs (signed); invert for <=.
        as_slt(rd, zero, rs);
        if (c == LessThanOrEqual) {
          as_xori(rd, rd, 1);
        }
        break;
      case LessThan:
      case GreaterThanOrEqual:
        // rs < 0 (signed); invert for >=.
        as_slt(rd, rs, zero);
        if (c == GreaterThanOrEqual) {
          as_xori(rd, rd, 1);
        }
        break;
      case Zero:
        as_sltiu(rd, rs, 1);
        break;
      case NonZero:
        as_sltu(rd, zero, rs);
        break;
      case Signed:
        as_slt(rd, rs, zero);
        break;
      case NotSigned:
        as_slt(rd, rs, zero);
        as_xori(rd, rd, 1);
        break;
      default:
        MOZ_CRASH("Invalid condition.");
    }
    return;
  }

  switch (c) {
    case Equal:
    case NotEqual:
      // rs == imm <=> (rs ^ imm) == 0.
      MOZ_ASSERT(rs != ScratchRegister);
      ma_xor(rd, rs, imm);
      if (c == Equal) {
        as_sltiu(rd, rd, 1);
      } else {
        as_sltu(rd, zero, rd);
      }
      break;
    case Zero:
    case NonZero:
    case Signed:
    case NotSigned:
      // Unary conditions only make sense against zero, handled above.
      MOZ_CRASH("Invalid condition.");
    default:
      // ma_cmp leaves 0/1 in rd and tells us whether to branch on Equal or
      // NotEqual; for Equal semantics the value must be inverted.
      Condition cond = ma_cmp(rd, rs, imm, c);
      MOZ_ASSERT(cond == Equal || cond == NotEqual);

      if (cond == Equal) as_xori(rd, rd, 1);
  }
}
1350
1351 // fp instructions
ma_lis(FloatRegister dest,float value)1352 void MacroAssemblerMIPSShared::ma_lis(FloatRegister dest, float value) {
1353 Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
1354
1355 if (imm.value != 0) {
1356 ma_li(ScratchRegister, imm);
1357 moveToFloat32(ScratchRegister, dest);
1358 } else {
1359 moveToFloat32(zero, dest);
1360 }
1361 }
1362
// Store the double |ft| to a base+index address. On Loongson with an offset
// that fits in 8 signed bits, use the fused gssdx [base + index (+ offset)]
// store; otherwise compute the scaled address into SecondScratchReg and fall
// back to the plain Address form.
void MacroAssemblerMIPSShared::ma_sd(FloatRegister ft, BaseIndex address) {
  if (isLoongson() && Imm8::IsInSignedRange(address.offset)) {
    Register index = address.index;

    if (address.scale != TimesOne) {
      int32_t shift = Imm32::ShiftOf(address.scale).value;

      // Pre-scale the index into SecondScratchReg so gssdx can add it.
      MOZ_ASSERT(SecondScratchReg != address.base);
      index = SecondScratchReg;
#ifdef JS_CODEGEN_MIPS64
      asMasm().ma_dsll(index, address.index, Imm32(shift));
#else
      asMasm().ma_sll(index, address.index, Imm32(shift));
#endif
    }

    as_gssdx(ft, address.base, index, address.offset);
    return;
  }

  asMasm().computeScaledAddress(address, SecondScratchReg);
  asMasm().ma_sd(ft, Address(SecondScratchReg, address.offset));
}
1386
// Store the float32 |ft| to a base+index address. Mirrors ma_sd: on Loongson
// with a small offset use the fused gsssx store, otherwise compute the scaled
// address into SecondScratchReg first.
void MacroAssemblerMIPSShared::ma_ss(FloatRegister ft, BaseIndex address) {
  if (isLoongson() && Imm8::IsInSignedRange(address.offset)) {
    Register index = address.index;

    if (address.scale != TimesOne) {
      int32_t shift = Imm32::ShiftOf(address.scale).value;

      // Pre-scale the index into SecondScratchReg so gsssx can add it.
      MOZ_ASSERT(SecondScratchReg != address.base);
      index = SecondScratchReg;
#ifdef JS_CODEGEN_MIPS64
      asMasm().ma_dsll(index, address.index, Imm32(shift));
#else
      asMasm().ma_sll(index, address.index, Imm32(shift));
#endif
    }

    as_gsssx(ft, address.base, index, address.offset);
    return;
  }

  asMasm().computeScaledAddress(address, SecondScratchReg);
  asMasm().ma_ss(ft, Address(SecondScratchReg, address.offset));
}
1410
// Load a double from a base+index address via a scaled address in
// SecondScratchReg.
void MacroAssemblerMIPSShared::ma_ld(FloatRegister ft, const BaseIndex& src) {
  asMasm().computeScaledAddress(src, SecondScratchReg);
  asMasm().ma_ld(ft, Address(SecondScratchReg, src.offset));
}
1415
// Load a float32 from a base+index address via a scaled address in
// SecondScratchReg.
void MacroAssemblerMIPSShared::ma_ls(FloatRegister ft, const BaseIndex& src) {
  asMasm().computeScaledAddress(src, SecondScratchReg);
  asMasm().ma_ls(ft, Address(SecondScratchReg, src.offset));
}
1420
// Compare two float32 values with DoubleCondition |c| and branch to |label|
// on the outcome, using FP condition bit |fcc|.
void MacroAssemblerMIPSShared::ma_bc1s(FloatRegister lhs, FloatRegister rhs,
                                       Label* label, DoubleCondition c,
                                       JumpKind jumpKind, FPConditionBit fcc) {
  FloatTestKind testKind;
  compareFloatingPoint(SingleFloat, lhs, rhs, c, &testKind, fcc);
  asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
}
1428
// Compare two double values with DoubleCondition |c| and branch to |label|
// on the outcome, using FP condition bit |fcc|.
void MacroAssemblerMIPSShared::ma_bc1d(FloatRegister lhs, FloatRegister rhs,
                                       Label* label, DoubleCondition c,
                                       JumpKind jumpKind, FPConditionBit fcc) {
  FloatTestKind testKind;
  compareFloatingPoint(DoubleFloat, lhs, rhs, c, &testKind, fcc);
  asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
}
1436
// Compute min/max(srcDest, second) into srcDest with JS semantics: NaN in
// either operand yields NaN, and -0 is treated as smaller than +0.
// NOTE(review): |handleNaN| is unused in this implementation — NaN is always
// handled; confirm no caller relies on a cheaper handleNaN==false path.
void MacroAssemblerMIPSShared::minMaxDouble(FloatRegister srcDest,
                                            FloatRegister second,
                                            bool handleNaN, bool isMax) {
  FloatRegister first = srcDest;

  Assembler::DoubleCondition cond = isMax ? Assembler::DoubleLessThanOrEqual
                                          : Assembler::DoubleGreaterThanOrEqual;
  Label nan, equal, done;
  FloatTestKind moveCondition;

  // First or second is NaN, result is NaN.
  ma_bc1d(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
#ifdef MIPSR6
  // R6 min/max instructions implement the required semantics directly.
  if (isMax) {
    as_max(DoubleFloat, srcDest, first, second);
  } else {
    as_min(DoubleFloat, srcDest, first, second);
  }
#else
  // Make sure we handle -0 and 0 right: equal operands (incl. -0 == 0) take
  // the dedicated path below.
  ma_bc1d(first, second, &equal, Assembler::DoubleEqual, ShortJump);
  compareFloatingPoint(DoubleFloat, first, second, cond, &moveCondition);
  MOZ_ASSERT(TestForTrue == moveCondition);
  // Keep |first| unless |second| wins the comparison.
  as_movt(DoubleFloat, first, second);
  ma_b(&done, ShortJump);

  // Check for zero.
  bind(&equal);
  asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
  compareFloatingPoint(DoubleFloat, first, ScratchDoubleReg,
                       Assembler::DoubleEqual, &moveCondition);

  // So now both operands are either -0 or 0.
  if (isMax) {
    // -0 + -0 = -0 and -0 + 0 = 0.
    as_addd(ScratchDoubleReg, first, second);
  } else {
    // min: -(-first - second) picks -0 if either operand is -0.
    as_negd(ScratchDoubleReg, first);
    as_subd(ScratchDoubleReg, ScratchDoubleReg, second);
    as_negd(ScratchDoubleReg, ScratchDoubleReg);
  }
  MOZ_ASSERT(TestForTrue == moveCondition);
  // First is 0 or -0, move max/min to it, else just return it.
  as_movt(DoubleFloat, first, ScratchDoubleReg);
#endif
  ma_b(&done, ShortJump);

  bind(&nan);
  asMasm().loadConstantDouble(JS::GenericNaN(), srcDest);

  bind(&done);
}
1489
// Compute min/max(srcDest, second) into srcDest for float32 with JS
// semantics; single-precision twin of minMaxDouble. NOTE(review): |handleNaN|
// is unused — NaN is always handled; confirm callers.
void MacroAssemblerMIPSShared::minMaxFloat32(FloatRegister srcDest,
                                             FloatRegister second,
                                             bool handleNaN, bool isMax) {
  FloatRegister first = srcDest;

  Assembler::DoubleCondition cond = isMax ? Assembler::DoubleLessThanOrEqual
                                          : Assembler::DoubleGreaterThanOrEqual;
  Label nan, equal, done;
  FloatTestKind moveCondition;

  // First or second is NaN, result is NaN.
  ma_bc1s(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
#ifdef MIPSR6
  // R6 min/max instructions implement the required semantics directly.
  if (isMax) {
    as_max(SingleFloat, srcDest, first, second);
  } else {
    as_min(SingleFloat, srcDest, first, second);
  }
#else
  // Make sure we handle -0 and 0 right: equal operands (incl. -0 == 0) take
  // the dedicated path below.
  ma_bc1s(first, second, &equal, Assembler::DoubleEqual, ShortJump);
  compareFloatingPoint(SingleFloat, first, second, cond, &moveCondition);
  MOZ_ASSERT(TestForTrue == moveCondition);
  // Keep |first| unless |second| wins the comparison.
  as_movt(SingleFloat, first, second);
  ma_b(&done, ShortJump);

  // Check for zero.
  bind(&equal);
  asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
  compareFloatingPoint(SingleFloat, first, ScratchFloat32Reg,
                       Assembler::DoubleEqual, &moveCondition);

  // So now both operands are either -0 or 0.
  if (isMax) {
    // -0 + -0 = -0 and -0 + 0 = 0.
    as_adds(ScratchFloat32Reg, first, second);
  } else {
    // min: -(-first - second) picks -0 if either operand is -0.
    as_negs(ScratchFloat32Reg, first);
    as_subs(ScratchFloat32Reg, ScratchFloat32Reg, second);
    as_negs(ScratchFloat32Reg, ScratchFloat32Reg);
  }
  MOZ_ASSERT(TestForTrue == moveCondition);
  // First is 0 or -0, move max/min to it, else just return it.
  as_movt(SingleFloat, first, ScratchFloat32Reg);
#endif
  ma_b(&done, ShortJump);

  bind(&nan);
  asMasm().loadConstantFloat32(JS::GenericNaN(), srcDest);

  bind(&done);
}
1542
// Load a double from |address| into |dest|.
void MacroAssemblerMIPSShared::loadDouble(const Address& address,
                                          FloatRegister dest) {
  asMasm().ma_ld(dest, address);
}
1547
// Load a double from a base+index address into |dest|.
void MacroAssemblerMIPSShared::loadDouble(const BaseIndex& src,
                                          FloatRegister dest) {
  asMasm().ma_ld(dest, src);
}
1552
// Load a float32 from |address| and widen it to a double in |dest|.
void MacroAssemblerMIPSShared::loadFloatAsDouble(const Address& address,
                                                 FloatRegister dest) {
  asMasm().ma_ls(dest, address);
  as_cvtds(dest, dest);  // Convert single -> double in place.
}
1558
// Load a float32 from a base+index address and widen it to a double in
// |dest|.
void MacroAssemblerMIPSShared::loadFloatAsDouble(const BaseIndex& src,
                                                 FloatRegister dest) {
  asMasm().loadFloat32(src, dest);
  as_cvtds(dest, dest);  // Convert single -> double in place.
}
1564
// Load a float32 from |address| into |dest|.
void MacroAssemblerMIPSShared::loadFloat32(const Address& address,
                                           FloatRegister dest) {
  asMasm().ma_ls(dest, address);
}
1569
// Load a float32 from a base+index address into |dest|.
void MacroAssemblerMIPSShared::loadFloat32(const BaseIndex& src,
                                           FloatRegister dest) {
  asMasm().ma_ls(dest, src);
}
1574
// Call an absolute address: load it patchably into CallReg, jalr, and fill
// the branch delay slot with a nop.
void MacroAssemblerMIPSShared::ma_call(ImmPtr dest) {
  asMasm().ma_liPatchable(CallReg, dest);
  as_jalr(CallReg);
  as_nop();  // Branch delay slot.
}
1580
// Jump to an absolute address: load it patchably into ScratchRegister, jr,
// and fill the branch delay slot with a nop.
void MacroAssemblerMIPSShared::ma_jump(ImmPtr dest) {
  asMasm().ma_liPatchable(ScratchRegister, dest);
  as_jr(ScratchRegister);
  as_nop();  // Branch delay slot.
}
1586
// Downcast this shared-platform assembler to the concrete MacroAssembler.
MacroAssembler& MacroAssemblerMIPSShared::asMasm() {
  return *static_cast<MacroAssembler*>(this);
}
1590
// Const overload of the downcast above.
const MacroAssembler& MacroAssemblerMIPSShared::asMasm() const {
  return *static_cast<const MacroAssembler*>(this);
}
1594
1595 //{{{ check_macroassembler_style
1596 // ===============================================================
1597 // MacroAssembler high-level usage.
1598
// No-op on this platform.
void MacroAssembler::flush() {}
1600
1601 // ===============================================================
1602 // Stack manipulation functions.
1603
// Push a GPR and record the frame-size change.
void MacroAssembler::Push(Register reg) {
  ma_push(reg);
  adjustFrame(int32_t(sizeof(intptr_t)));
}
1608
// Push a 32-bit immediate (materialized in ScratchRegister) and record the
// frame-size change.
void MacroAssembler::Push(const Imm32 imm) {
  ma_li(ScratchRegister, imm);
  ma_push(ScratchRegister);
  adjustFrame(int32_t(sizeof(intptr_t)));
}
1614
// Push a word-sized immediate (materialized in ScratchRegister) and record
// the frame-size change.
void MacroAssembler::Push(const ImmWord imm) {
  ma_li(ScratchRegister, imm);
  ma_push(ScratchRegister);
  adjustFrame(int32_t(sizeof(intptr_t)));
}
1620
// Push a pointer immediate; forwards to the ImmWord overload.
void MacroAssembler::Push(const ImmPtr imm) {
  Push(ImmWord(uintptr_t(imm.value)));
}
1624
// Push a GC-pointer immediate (ma_li records the data relocation) and record
// the frame-size change.
void MacroAssembler::Push(const ImmGCPtr ptr) {
  ma_li(ScratchRegister, ptr);
  ma_push(ScratchRegister);
  adjustFrame(int32_t(sizeof(intptr_t)));
}
1630
// Push a float register and record its push size.
void MacroAssembler::Push(FloatRegister f) {
  ma_push(f);
  adjustFrame(int32_t(f.pushSize()));
}
1635
// Pop a GPR and record the frame-size change.
void MacroAssembler::Pop(Register reg) {
  ma_pop(reg);
  adjustFrame(-int32_t(sizeof(intptr_t)));
}
1640
// Pop a float register and record its push size.
void MacroAssembler::Pop(FloatRegister f) {
  ma_pop(f);
  adjustFrame(-int32_t(f.pushSize()));
}
1645
// Pop a boxed Value and record the frame-size change.
void MacroAssembler::Pop(const ValueOperand& val) {
  popValue(val);
  adjustFrame(-int32_t(sizeof(Value)));
}
1650
// Replace the stack pointer with the value stored at the top of the stack.
void MacroAssembler::PopStackPtr() {
  loadPtr(Address(StackPointer, 0), StackPointer);
  adjustFrame(-int32_t(sizeof(intptr_t)));
}
1655
1656 // ===============================================================
1657 // Simple call functions.
1658
// Indirect call through |reg|; returns the offset just past the call site.
CodeOffset MacroAssembler::call(Register reg) {
  as_jalr(reg);
  as_nop();  // Branch delay slot.
  return CodeOffset(currentOffset());
}
1664
// Direct call to |label|; returns the offset just past the call site.
CodeOffset MacroAssembler::call(Label* label) {
  ma_bal(label);
  return CodeOffset(currentOffset());
}
1669
// Emit a 7-instruction patchable call: a bal skips over an embedded 32-bit
// offset word, which is later filled in by patchCall(); the tail loads that
// word ra-relative, adds it to ra and jumps there. Returns the offset just
// past the sequence (the value patchCall() receives as callerOffset).
CodeOffset MacroAssembler::callWithPatch() {
  // Branch-and-link over the patch word; ra then points into this sequence.
  as_bal(BOffImm16(3 * sizeof(uint32_t)));
  // (delay slot) Advance ra past the rest of the sequence so it is the
  // correct return address for the callee.
  addPtr(Imm32(5 * sizeof(uint32_t)), ra);
  // Allocate space which will be patched by patchCall().
  spew(".space 32bit initValue 0xffff ffff");
  writeInst(UINT32_MAX);
  // Load the patched displacement (5 words before the return address), add
  // it to ra and jump to the target.
  as_lw(ScratchRegister, ra, -(int32_t)(5 * sizeof(uint32_t)));
  addPtr(ra, ScratchRegister);
  as_jr(ScratchRegister);
  as_nop();  // Branch delay slot.
  return CodeOffset(currentOffset());
}
1682
// Patch a callWithPatch() site so it calls |calleeOffset|. If the callee is
// within bal's 16-bit branch range, retarget the bal directly; otherwise
// write the caller-relative displacement into the embedded patch word.
void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
  // The callWithPatch() sequence is 7 instructions; locate its initial bal.
  BufferOffset call(callerOffset - 7 * sizeof(uint32_t));

  BOffImm16 offset = BufferOffset(calleeOffset).diffB<BOffImm16>(call);
  if (!offset.isInvalid()) {
    // Near call: branch straight to the callee.
    InstImm* bal = (InstImm*)editSrc(call);
    bal->setBOffImm16(offset);
  } else {
    // Far call: fill in the 32-bit patch word (3rd instruction slot).
    uint32_t u32Offset = callerOffset - 5 * sizeof(uint32_t);
    uint32_t* u32 =
        reinterpret_cast<uint32_t*>(editSrc(BufferOffset(u32Offset)));
    *u32 = calleeOffset - callerOffset;
  }
}
1697
// Emit a patchable far jump: bal skips an embedded 32-bit offset word
// (filled in later by patchFarJump()); the word is loaded ra-relative, added
// to ra, and jumped to. ra is preserved across the sequence via
// SecondScratchReg. Returns the offset of the patch word.
CodeOffset MacroAssembler::farJumpWithPatch() {
  // The bal below clobbers ra; save it.
  ma_move(SecondScratchReg, ra);
  as_bal(BOffImm16(3 * sizeof(uint32_t)));
  as_lw(ScratchRegister, ra, 0);  // (delay slot) Load the patch word at ra.
  // Allocate space which will be patched by patchFarJump().
  CodeOffset farJump(currentOffset());
  spew(".space 32bit initValue 0xffff ffff");
  writeInst(UINT32_MAX);
  addPtr(ra, ScratchRegister);
  as_jr(ScratchRegister);
  ma_move(ra, SecondScratchReg);  // (delay slot) Restore ra.
  return farJump;
}
1711
// Fill in the patch word of a farJumpWithPatch() sequence with the
// displacement from the word to |targetOffset|.
void MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
  uint32_t* u32 =
      reinterpret_cast<uint32_t*>(editSrc(BufferOffset(farJump.offset())));
  MOZ_ASSERT(*u32 == UINT32_MAX);  // Must still hold the placeholder.
  *u32 = targetOffset - farJump.offset();
}
1718
// Call a wasm symbolic address indirectly through CallReg.
CodeOffset MacroAssembler::call(wasm::SymbolicAddress target) {
  movePtr(target, CallReg);
  return call(CallReg);
}
1723
// Call the code pointer stored at |addr|, indirectly through CallReg.
void MacroAssembler::call(const Address& addr) {
  loadPtr(addr, CallReg);
  call(CallReg);
}
1728
// Call an absolute address given as a word; forwards to the ImmPtr overload.
void MacroAssembler::call(ImmWord target) { call(ImmPtr((void*)target.value)); }
1730
// Call an absolute address, recording a hardcoded-pointer relocation for the
// pending jump.
void MacroAssembler::call(ImmPtr target) {
  BufferOffset bo = m_buffer.nextOffset();
  addPendingJump(bo, target, RelocationKind::HARDCODED);
  ma_call(target);
}
1736
// Call into JIT code, recording a JITCODE relocation for the pending jump.
void MacroAssembler::call(JitCode* c) {
  BufferOffset bo = m_buffer.nextOffset();
  addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
  ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
  callJitNoProfiler(ScratchRegister);
}
1743
// Reserve a run of nops wide enough to later be rewritten into a call by
// patchNopToCall(): 4 instructions on MIPS32, 6 on MIPS64. Returns the
// offset just past the reserved region.
CodeOffset MacroAssembler::nopPatchableToCall() {
  // MIPS32   //MIPS64
  as_nop();  // lui      // lui
  as_nop();  // ori      // ori
  as_nop();  // jalr     // drotr32
  as_nop();  //          // ori
#ifdef JS_CODEGEN_MIPS64
  as_nop();  //          // jalr
  as_nop();
#endif
  return CodeOffset(currentOffset());
}
1756
// Rewrite a nopPatchableToCall() region (|call| points just past it) into a
// load of |target| into ScratchRegister followed by jalr.
void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
#ifdef JS_CODEGEN_MIPS64
  Instruction* inst = (Instruction*)call - 6 /* six nops */;
  Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)target);
  inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
#else
  Instruction* inst = (Instruction*)call - 4 /* four nops */;
  Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
                                     (uint32_t)target);
  inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
#endif
}
1769
patchCallToNop(uint8_t * call)1770 void MacroAssembler::patchCallToNop(uint8_t* call) {
1771 #ifdef JS_CODEGEN_MIPS64
1772 Instruction* inst = (Instruction*)call - 6 /* six nops */;
1773 #else
1774 Instruction* inst = (Instruction*)call - 4 /* four nops */;
1775 #endif
1776
1777 inst[0].makeNop();
1778 inst[1].makeNop();
1779 inst[2].makeNop();
1780 inst[3].makeNop();
1781 #ifdef JS_CODEGEN_MIPS64
1782 inst[4].makeNop();
1783 inst[5].makeNop();
1784 #endif
1785 }
1786
// Save the return address register on the stack.
void MacroAssembler::pushReturnAddress() { push(ra); }
1788
// Restore the return address register from the stack.
void MacroAssembler::popReturnAddress() { pop(ra); }
1790
1791 // ===============================================================
1792 // Jit Frames.
1793
// Push the address of the instruction following this sequence as a fake
// return address and return its buffer offset.
uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
  CodeLabel cl;

  // Load the (not yet bound) label address and push it.
  ma_li(scratch, &cl);
  Push(scratch);
  bind(&cl);
  uint32_t retAddr = currentOffset();

  addCodeLabel(cl);
  return retAddr;
}
1805
// Load the store buffer pointer of |ptr|'s GC chunk into |buffer|: OR in the
// chunk mask to reach the chunk's last byte, then read the store-buffer field
// relative to it.
void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
  if (ptr != buffer) {
    movePtr(ptr, buffer);
  }
  orPtr(Imm32(gc::ChunkMask), buffer);
  loadPtr(Address(buffer, gc::ChunkStoreBufferOffsetFromLastByte), buffer);
}
1813
// Branch to |label| if |ptr| is (Equal) or is not (NotEqual) in a nursery
// chunk, detected by a non-null store buffer pointer in the chunk trailer.
void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
                                             Register temp, Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
  MOZ_ASSERT(ptr != temp);
  MOZ_ASSERT(ptr != SecondScratchReg);

  // Compute the chunk's last-byte address and test its store-buffer field
  // against null, branching on the inverted condition.
  movePtr(ptr, SecondScratchReg);
  orPtr(Imm32(gc::ChunkMask), SecondScratchReg);
  branchPtr(InvertCondition(cond),
            Address(SecondScratchReg, gc::ChunkStoreBufferOffsetFromLastByte),
            ImmWord(0), label);
}
1826
// Forward spew/comment text to the underlying Assembler.
void MacroAssembler::comment(const char* msg) { Assembler::comment(msg); }
1828
1829 // ===============================================================
1830 // WebAssembly
1831
// Emit an always-trapping instruction for wasm: teq $zero,$zero is
// unconditionally taken and is tagged with the WASM_TRAP code. Returns the
// offset of the trap instruction.
CodeOffset MacroAssembler::wasmTrapInstruction() {
  CodeOffset offset(currentOffset());
  as_teq(zero, zero, WASM_TRAP);
  return offset;
}
1837
// Truncate a double to int32 for wasm. trunc.w.d sets the CauseV (invalid
// operation) bit in FCSR on overflow/NaN; that bit is extracted and, if set,
// control transfers to |oolEntry|. |isSaturating| is unused here — the OOL
// path handles saturation.
void MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input,
                                               Register output,
                                               bool isSaturating,
                                               Label* oolEntry) {
  as_truncwd(ScratchFloat32Reg, input);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, output);
  // Extract the CauseV bit: non-zero means the conversion was invalid.
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
  ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
}
1848
// Truncate a float32 to int32 for wasm; single-precision twin of
// wasmTruncateDoubleToInt32. Invalid conversions (overflow/NaN) set FCSR's
// CauseV bit and branch to |oolEntry|.
void MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input,
                                                Register output,
                                                bool isSaturating,
                                                Label* oolEntry) {
  as_truncws(ScratchFloat32Reg, input);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, output);
  // Extract the CauseV bit: non-zero means the conversion was invalid.
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
  ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
}
1859
// OOL check for float32 -> int32 truncation failures.
void MacroAssembler::oolWasmTruncateCheckF32ToI32(FloatRegister input,
                                                  Register output,
                                                  TruncFlags flags,
                                                  wasm::BytecodeOffset off,
                                                  Label* rejoin) {
  outOfLineWasmTruncateToInt32Check(input, output, MIRType::Float32, flags,
                                    rejoin, off);
}
1868
// OOL check for double -> int32 truncation failures.
void MacroAssembler::oolWasmTruncateCheckF64ToI32(FloatRegister input,
                                                  Register output,
                                                  TruncFlags flags,
                                                  wasm::BytecodeOffset off,
                                                  Label* rejoin) {
  outOfLineWasmTruncateToInt32Check(input, output, MIRType::Double, flags,
                                    rejoin, off);
}
1877
// OOL check for float32 -> int64 truncation failures.
void MacroAssembler::oolWasmTruncateCheckF32ToI64(FloatRegister input,
                                                  Register64 output,
                                                  TruncFlags flags,
                                                  wasm::BytecodeOffset off,
                                                  Label* rejoin) {
  outOfLineWasmTruncateToInt64Check(input, output, MIRType::Float32, flags,
                                    rejoin, off);
}
1886
// OOL check for double -> int64 truncation failures.
void MacroAssembler::oolWasmTruncateCheckF64ToI64(FloatRegister input,
                                                  Register64 output,
                                                  TruncFlags flags,
                                                  wasm::BytecodeOffset off,
                                                  Label* rejoin) {
  outOfLineWasmTruncateToInt64Check(input, output, MIRType::Double, flags,
                                    rejoin, off);
}
1895
// Out-of-line handler for a failed float/double -> int32 wasm truncation.
// Saturating mode fixes up |output| (to 0 for NaN, and to the appropriate
// min/max for negative/unsigned-out-of-range inputs) and jumps back to
// |rejoin|; non-saturating mode raises the appropriate wasm trap.
void MacroAssemblerMIPSShared::outOfLineWasmTruncateToInt32Check(
    FloatRegister input, Register output, MIRType fromType, TruncFlags flags,
    Label* rejoin, wasm::BytecodeOffset trapOffset) {
  bool isUnsigned = flags & TRUNC_UNSIGNED;
  bool isSaturating = flags & TRUNC_SATURATING;

  if (isSaturating) {
    // Zero constant used as the comparison bound below.
    if (fromType == MIRType::Double) {
      asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
    } else {
      asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
    }

    if (isUnsigned) {
      // Default to UINT32_MAX, then clear to 0 when input < 0 or is NaN.
      ma_li(output, Imm32(UINT32_MAX));

      FloatTestKind moveCondition;
      compareFloatingPoint(
          fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
          fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
          Assembler::DoubleLessThanOrUnordered, &moveCondition);
      MOZ_ASSERT(moveCondition == TestForTrue);

      as_movt(output, zero);
    } else {
      // Positive overflow is already saturated to INT32_MAX, so we only have
      // to handle NaN and negative overflow here.

      // NaN saturates to 0.
      FloatTestKind moveCondition;
      compareFloatingPoint(
          fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
          Assembler::DoubleUnordered, &moveCondition);
      MOZ_ASSERT(moveCondition == TestForTrue);

      as_movt(output, zero);

      // Negative overflow saturates to INT32_MIN.
      compareFloatingPoint(
          fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
          fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
          Assembler::DoubleLessThan, &moveCondition);
      MOZ_ASSERT(moveCondition == TestForTrue);

      ma_li(ScratchRegister, Imm32(INT32_MIN));
      as_movt(output, ScratchRegister);
    }

    MOZ_ASSERT(rejoin->bound());
    asMasm().jump(rejoin);
    return;
  }

  // Non-saturating: distinguish NaN (invalid conversion) from overflow and
  // raise the matching wasm trap.
  Label inputIsNaN;

  if (fromType == MIRType::Double) {
    asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
                          &inputIsNaN);
  } else if (fromType == MIRType::Float32) {
    asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
  }

  asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
  asMasm().bind(&inputIsNaN);
  asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
}
1960
outOfLineWasmTruncateToInt64Check(FloatRegister input,Register64 output_,MIRType fromType,TruncFlags flags,Label * rejoin,wasm::BytecodeOffset trapOffset)1961 void MacroAssemblerMIPSShared::outOfLineWasmTruncateToInt64Check(
1962 FloatRegister input, Register64 output_, MIRType fromType, TruncFlags flags,
1963 Label* rejoin, wasm::BytecodeOffset trapOffset) {
1964 bool isUnsigned = flags & TRUNC_UNSIGNED;
1965 bool isSaturating = flags & TRUNC_SATURATING;
1966
1967 if (isSaturating) {
1968 #if defined(JS_CODEGEN_MIPS32)
1969 // Saturating callouts don't use ool path.
1970 return;
1971 #else
1972 Register output = output_.reg;
1973
1974 if (fromType == MIRType::Double) {
1975 asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
1976 } else {
1977 asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
1978 }
1979
1980 if (isUnsigned) {
1981 asMasm().ma_li(output, ImmWord(UINT64_MAX));
1982
1983 FloatTestKind moveCondition;
1984 compareFloatingPoint(
1985 fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
1986 fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
1987 Assembler::DoubleLessThanOrUnordered, &moveCondition);
1988 MOZ_ASSERT(moveCondition == TestForTrue);
1989
1990 as_movt(output, zero);
1991
1992 } else {
1993 // Positive overflow is already saturated to INT64_MAX, so we only have
1994 // to handle NaN and negative overflow here.
1995
1996 FloatTestKind moveCondition;
1997 compareFloatingPoint(
1998 fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
1999 Assembler::DoubleUnordered, &moveCondition);
2000 MOZ_ASSERT(moveCondition == TestForTrue);
2001
2002 as_movt(output, zero);
2003
2004 compareFloatingPoint(
2005 fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
2006 fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
2007 Assembler::DoubleLessThan, &moveCondition);
2008 MOZ_ASSERT(moveCondition == TestForTrue);
2009
2010 asMasm().ma_li(ScratchRegister, ImmWord(INT64_MIN));
2011 as_movt(output, ScratchRegister);
2012 }
2013
2014 MOZ_ASSERT(rejoin->bound());
2015 asMasm().jump(rejoin);
2016 return;
2017 #endif
2018 }
2019
2020 Label inputIsNaN;
2021
2022 if (fromType == MIRType::Double) {
2023 asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
2024 &inputIsNaN);
2025 } else if (fromType == MIRType::Float32) {
2026 asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
2027 }
2028
2029 #if defined(JS_CODEGEN_MIPS32)
2030
2031 // Only possible valid input that produces INT64_MIN result.
2032 double validInput =
2033 isUnsigned ? double(uint64_t(INT64_MIN)) : double(int64_t(INT64_MIN));
2034
2035 if (fromType == MIRType::Double) {
2036 asMasm().loadConstantDouble(validInput, ScratchDoubleReg);
2037 asMasm().branchDouble(Assembler::DoubleEqual, input, ScratchDoubleReg,
2038 rejoin);
2039 } else {
2040 asMasm().loadConstantFloat32(float(validInput), ScratchFloat32Reg);
2041 asMasm().branchFloat(Assembler::DoubleEqual, input, ScratchDoubleReg,
2042 rejoin);
2043 }
2044
2045 #endif
2046
2047 asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
2048 asMasm().bind(&inputIsNaN);
2049 asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
2050 }
2051
// Aligned wasm load: delegate to the shared implementation.  No temporary
// register is needed for aligned accesses, hence InvalidReg.
void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
                              Register memoryBase, Register ptr,
                              Register ptrScratch, AnyRegister output) {
  wasmLoadImpl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
}
2057
wasmUnalignedLoad(const wasm::MemoryAccessDesc & access,Register memoryBase,Register ptr,Register ptrScratch,Register output,Register tmp)2058 void MacroAssembler::wasmUnalignedLoad(const wasm::MemoryAccessDesc& access,
2059 Register memoryBase, Register ptr,
2060 Register ptrScratch, Register output,
2061 Register tmp) {
2062 wasmLoadImpl(access, memoryBase, ptr, ptrScratch, AnyRegister(output), tmp);
2063 }
2064
wasmUnalignedLoadFP(const wasm::MemoryAccessDesc & access,Register memoryBase,Register ptr,Register ptrScratch,FloatRegister output,Register tmp1,Register tmp2,Register tmp3)2065 void MacroAssembler::wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access,
2066 Register memoryBase, Register ptr,
2067 Register ptrScratch,
2068 FloatRegister output, Register tmp1,
2069 Register tmp2, Register tmp3) {
2070 MOZ_ASSERT(tmp2 == InvalidReg);
2071 MOZ_ASSERT(tmp3 == InvalidReg);
2072 wasmLoadImpl(access, memoryBase, ptr, ptrScratch, AnyRegister(output), tmp1);
2073 }
2074
// Aligned wasm store: delegate to the shared implementation.  No temporary
// register is needed for aligned accesses, hence InvalidReg.
void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
                               AnyRegister value, Register memoryBase,
                               Register ptr, Register ptrScratch) {
  wasmStoreImpl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
}
2080
wasmUnalignedStore(const wasm::MemoryAccessDesc & access,Register value,Register memoryBase,Register ptr,Register ptrScratch,Register tmp)2081 void MacroAssembler::wasmUnalignedStore(const wasm::MemoryAccessDesc& access,
2082 Register value, Register memoryBase,
2083 Register ptr, Register ptrScratch,
2084 Register tmp) {
2085 wasmStoreImpl(access, AnyRegister(value), memoryBase, ptr, ptrScratch, tmp);
2086 }
2087
wasmUnalignedStoreFP(const wasm::MemoryAccessDesc & access,FloatRegister floatValue,Register memoryBase,Register ptr,Register ptrScratch,Register tmp)2088 void MacroAssembler::wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access,
2089 FloatRegister floatValue,
2090 Register memoryBase, Register ptr,
2091 Register ptrScratch, Register tmp) {
2092 wasmStoreImpl(access, AnyRegister(floatValue), memoryBase, ptr, ptrScratch,
2093 tmp);
2094 }
2095
// Shared implementation of wasm loads (aligned and unaligned).  A nonzero
// access offset is folded into `ptrScratch`, which then replaces `ptr`.
// `tmp` is only used (and required) for unaligned accesses.  Int64 loads
// do not appear in the type switch here — presumably handled by
// per-architecture code; TODO confirm against the mips32/mips64 callers.
void MacroAssemblerMIPSShared::wasmLoadImpl(
    const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
    Register ptrScratch, AnyRegister output, Register tmp) {
  uint32_t offset = access.offset();
  MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

  // Maybe add the offset.
  if (offset) {
    asMasm().addPtr(Imm32(offset), ptrScratch);
    ptr = ptrScratch;
  }

  unsigned byteSize = access.byteSize();
  bool isSigned;  // Only meaningful for the integer cases below.
  bool isFloat = false;

  // SIMD loads are not supported here.
  MOZ_ASSERT(!access.isZeroExtendSimd128Load());
  MOZ_ASSERT(!access.isSplatSimd128Load());
  MOZ_ASSERT(!access.isWidenSimd128Load());
  switch (access.type()) {
    case Scalar::Int8:
      isSigned = true;
      break;
    case Scalar::Uint8:
      isSigned = false;
      break;
    case Scalar::Int16:
      isSigned = true;
      break;
    case Scalar::Uint16:
      isSigned = false;
      break;
    case Scalar::Int32:
      isSigned = true;
      break;
    case Scalar::Uint32:
      isSigned = false;
      break;
    case Scalar::Float64:
      isFloat = true;
      break;
    case Scalar::Float32:
      isFloat = true;
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  BaseIndex address(memoryBase, ptr, TimesOne);
  if (IsUnaligned(access)) {
    // Unaligned path: the helpers record the access metadata themselves.
    MOZ_ASSERT(tmp != InvalidReg);
    if (isFloat) {
      if (byteSize == 4) {
        asMasm().loadUnalignedFloat32(access, address, tmp, output.fpu());
      } else {
        asMasm().loadUnalignedDouble(access, address, tmp, output.fpu());
      }
    } else {
      asMasm().ma_load_unaligned(access, output.gpr(), address, tmp,
                                 static_cast<LoadStoreSize>(8 * byteSize),
                                 isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  asMasm().memoryBarrierBefore(access.sync());
  if (isFloat) {
    if (byteSize == 4) {
      asMasm().ma_ls(output.fpu(), address);
    } else {
      asMasm().ma_ld(output.fpu(), address);
    }
  } else {
    asMasm().ma_load(output.gpr(), address,
                     static_cast<LoadStoreSize>(8 * byteSize),
                     isSigned ? SignExtend : ZeroExtend);
  }
  // The last emitted instruction (size() - 4) is the actual memory access;
  // record it for trap handling.
  asMasm().append(access, asMasm().size() - 4);
  asMasm().memoryBarrierAfter(access.sync());
}
2177
// Shared implementation of wasm stores (aligned and unaligned).  A nonzero
// access offset is folded into `ptrScratch`, which then replaces `ptr`.
// `tmp` is only used (and required) for unaligned accesses.
void MacroAssemblerMIPSShared::wasmStoreImpl(
    const wasm::MemoryAccessDesc& access, AnyRegister value,
    Register memoryBase, Register ptr, Register ptrScratch, Register tmp) {
  uint32_t offset = access.offset();
  MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

  // Maybe add the offset.
  if (offset) {
    asMasm().addPtr(Imm32(offset), ptrScratch);
    ptr = ptrScratch;
  }

  unsigned byteSize = access.byteSize();
  bool isSigned;  // Passed through to the store helpers; only set for ints.
  bool isFloat = false;

  switch (access.type()) {
    case Scalar::Int8:
      isSigned = true;
      break;
    case Scalar::Uint8:
      isSigned = false;
      break;
    case Scalar::Int16:
      isSigned = true;
      break;
    case Scalar::Uint16:
      isSigned = false;
      break;
    case Scalar::Int32:
      isSigned = true;
      break;
    case Scalar::Uint32:
      isSigned = false;
      break;
    case Scalar::Int64:
      isSigned = true;
      break;
    case Scalar::Float64:
      isFloat = true;
      break;
    case Scalar::Float32:
      isFloat = true;
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  BaseIndex address(memoryBase, ptr, TimesOne);
  if (IsUnaligned(access)) {
    // Unaligned path: the helpers record the access metadata themselves.
    MOZ_ASSERT(tmp != InvalidReg);
    if (isFloat) {
      if (byteSize == 4) {
        asMasm().storeUnalignedFloat32(access, value.fpu(), tmp, address);
      } else {
        asMasm().storeUnalignedDouble(access, value.fpu(), tmp, address);
      }
    } else {
      asMasm().ma_store_unaligned(access, value.gpr(), address, tmp,
                                  static_cast<LoadStoreSize>(8 * byteSize),
                                  isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  asMasm().memoryBarrierBefore(access.sync());
  if (isFloat) {
    if (byteSize == 4) {
      asMasm().ma_ss(value.fpu(), address);
    } else {
      asMasm().ma_sd(value.fpu(), address);
    }
  } else {
    asMasm().ma_store(value.gpr(), address,
                      static_cast<LoadStoreSize>(8 * byteSize),
                      isSigned ? SignExtend : ZeroExtend);
  }
  // Only the last emitted instruction is a memory access.
  asMasm().append(access, asMasm().size() - 4);
  asMasm().memoryBarrierAfter(access.sync());
}
2260
// Wasm needs no special handling on MIPS: reuse the generic fake exit
// frame sequence.
void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
                                               ExitFrameType type) {
  enterFakeExitFrame(cxreg, scratch, type);
}
2265
2266 // ========================================================================
2267 // Primitive atomic operations.
2268
// Emit a byte/halfword/word compare-exchange loop using LL/SC.
//
// Word (4-byte) accesses operate on the cell directly and must pass
// InvalidReg for the three temps.  Sub-word (1/2-byte) accesses load the
// containing word, extract and compare the cell in registers, and splice
// the new value back in, so valueTemp/offsetTemp/maskTemp are required.
// `output` receives the old cell value, sign- or zero-extended per `type`.
// If `access` is non-null, the LL instruction is recorded as the wasm
// memory access for trap handling.
template <typename T>
static void CompareExchange(MacroAssembler& masm,
                            const wasm::MemoryAccessDesc* access,
                            Scalar::Type type, const Synchronization& sync,
                            const T& mem, Register oldval, Register newval,
                            Register valueTemp, Register offsetTemp,
                            Register maskTemp, Register output) {
  bool signExtend = Scalar::isSignedIntType(type);
  unsigned nbytes = Scalar::byteSize(type);

  switch (nbytes) {
    case 1:
    case 2:
      break;
    case 4:
      // Word-sized operations don't need the sub-word temps.
      MOZ_ASSERT(valueTemp == InvalidReg);
      MOZ_ASSERT(offsetTemp == InvalidReg);
      MOZ_ASSERT(maskTemp == InvalidReg);
      break;
    default:
      MOZ_CRASH();
  }

  Label again, end;

  masm.computeEffectiveAddress(mem, SecondScratchReg);

  if (nbytes == 4) {
    masm.memoryBarrierBefore(sync);
    masm.bind(&again);

    if (access) {
      masm.append(*access, masm.size());
    }

    masm.as_ll(output, SecondScratchReg, 0);
    masm.ma_b(output, oldval, &end, Assembler::NotEqual, ShortJump);
    masm.ma_move(ScratchRegister, newval);
    masm.as_sc(ScratchRegister, SecondScratchReg, 0);
    // SC leaves 0 in the register on failure; retry until it succeeds.
    masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
              ShortJump);

    masm.memoryBarrierAfter(sync);
    masm.bind(&end);

    return;
  }

  // Sub-word case: align the address down to the containing word, compute
  // the cell's bit offset within it (flipped on big-endian), and build an
  // inverted mask that clears the cell's bits.
  masm.as_andi(offsetTemp, SecondScratchReg, 3);
  masm.subPtr(offsetTemp, SecondScratchReg);
#if !MOZ_LITTLE_ENDIAN()
  masm.as_xori(offsetTemp, offsetTemp, 3);
#endif
  masm.as_sll(offsetTemp, offsetTemp, 3);
  masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
  masm.as_sllv(maskTemp, maskTemp, offsetTemp);
  masm.as_nor(maskTemp, zero, maskTemp);

  masm.memoryBarrierBefore(sync);

  masm.bind(&again);

  if (access) {
    masm.append(*access, masm.size());
  }

  masm.as_ll(ScratchRegister, SecondScratchReg, 0);

  masm.as_srlv(output, ScratchRegister, offsetTemp);

  // Normalize both the loaded cell and oldval to the same extension so the
  // equality check below compares like with like.
  switch (nbytes) {
    case 1:
      if (signExtend) {
        masm.ma_seb(valueTemp, oldval);
        masm.ma_seb(output, output);
      } else {
        masm.as_andi(valueTemp, oldval, 0xff);
        masm.as_andi(output, output, 0xff);
      }
      break;
    case 2:
      if (signExtend) {
        masm.ma_seh(valueTemp, oldval);
        masm.ma_seh(output, output);
      } else {
        masm.as_andi(valueTemp, oldval, 0xffff);
        masm.as_andi(output, output, 0xffff);
      }
      break;
  }

  masm.ma_b(output, valueTemp, &end, Assembler::NotEqual, ShortJump);

  // Splice the new value into the loaded word and attempt the store.
  masm.as_sllv(valueTemp, newval, offsetTemp);
  masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
  masm.as_or(ScratchRegister, ScratchRegister, valueTemp);

  masm.as_sc(ScratchRegister, SecondScratchReg, 0);

  masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
            ShortJump);

  masm.memoryBarrierAfter(sync);

  masm.bind(&end);
}
2375
// JS atomic compare-exchange on an Address; no wasm access descriptor is
// recorded (nullptr).
void MacroAssembler::compareExchange(Scalar::Type type,
                                     const Synchronization& sync,
                                     const Address& mem, Register oldval,
                                     Register newval, Register valueTemp,
                                     Register offsetTemp, Register maskTemp,
                                     Register output) {
  CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
                  offsetTemp, maskTemp, output);
}
2385
// JS atomic compare-exchange on a BaseIndex; no wasm access descriptor is
// recorded (nullptr).
void MacroAssembler::compareExchange(Scalar::Type type,
                                     const Synchronization& sync,
                                     const BaseIndex& mem, Register oldval,
                                     Register newval, Register valueTemp,
                                     Register offsetTemp, Register maskTemp,
                                     Register output) {
  CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
                  offsetTemp, maskTemp, output);
}
2395
// Wasm atomic compare-exchange on an Address; records `access` so the LL
// can be identified for trap handling.
void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
                                         const Address& mem, Register oldval,
                                         Register newval, Register valueTemp,
                                         Register offsetTemp, Register maskTemp,
                                         Register output) {
  CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
                  newval, valueTemp, offsetTemp, maskTemp, output);
}
2404
// Wasm atomic compare-exchange on a BaseIndex; records `access` so the LL
// can be identified for trap handling.
void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
                                         const BaseIndex& mem, Register oldval,
                                         Register newval, Register valueTemp,
                                         Register offsetTemp, Register maskTemp,
                                         Register output) {
  CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
                  newval, valueTemp, offsetTemp, maskTemp, output);
}
2413
// Emit a byte/halfword/word atomic exchange loop using LL/SC.
//
// Word (4-byte) accesses swap the cell directly and must pass InvalidReg
// for the three temps; sub-word (1/2-byte) accesses operate on the
// containing word and require them.  `output` receives the previous cell
// value, sign- or zero-extended per `type`.  If `access` is non-null, the
// LL instruction is recorded as the wasm memory access.
template <typename T>
static void AtomicExchange(MacroAssembler& masm,
                           const wasm::MemoryAccessDesc* access,
                           Scalar::Type type, const Synchronization& sync,
                           const T& mem, Register value, Register valueTemp,
                           Register offsetTemp, Register maskTemp,
                           Register output) {
  bool signExtend = Scalar::isSignedIntType(type);
  unsigned nbytes = Scalar::byteSize(type);

  switch (nbytes) {
    case 1:
    case 2:
      break;
    case 4:
      // Word-sized operations don't need the sub-word temps.
      MOZ_ASSERT(valueTemp == InvalidReg);
      MOZ_ASSERT(offsetTemp == InvalidReg);
      MOZ_ASSERT(maskTemp == InvalidReg);
      break;
    default:
      MOZ_CRASH();
  }

  Label again;

  masm.computeEffectiveAddress(mem, SecondScratchReg);

  if (nbytes == 4) {
    masm.memoryBarrierBefore(sync);
    masm.bind(&again);

    if (access) {
      masm.append(*access, masm.size());
    }

    masm.as_ll(output, SecondScratchReg, 0);
    masm.ma_move(ScratchRegister, value);
    masm.as_sc(ScratchRegister, SecondScratchReg, 0);
    // SC leaves 0 in the register on failure; retry until it succeeds.
    masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
              ShortJump);

    masm.memoryBarrierAfter(sync);

    return;
  }

  // Sub-word case: align the address down to the containing word, compute
  // the cell's bit offset (flipped on big-endian), an inverted mask for the
  // cell, and the pre-shifted new value.  All of these are loop invariants
  // hoisted out of the LL/SC loop.
  masm.as_andi(offsetTemp, SecondScratchReg, 3);
  masm.subPtr(offsetTemp, SecondScratchReg);
#if !MOZ_LITTLE_ENDIAN()
  masm.as_xori(offsetTemp, offsetTemp, 3);
#endif
  masm.as_sll(offsetTemp, offsetTemp, 3);
  masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
  masm.as_sllv(maskTemp, maskTemp, offsetTemp);
  masm.as_nor(maskTemp, zero, maskTemp);
  switch (nbytes) {
    case 1:
      masm.as_andi(valueTemp, value, 0xff);
      break;
    case 2:
      masm.as_andi(valueTemp, value, 0xffff);
      break;
  }
  masm.as_sllv(valueTemp, valueTemp, offsetTemp);

  masm.memoryBarrierBefore(sync);

  masm.bind(&again);

  if (access) {
    masm.append(*access, masm.size());
  }

  masm.as_ll(output, SecondScratchReg, 0);
  masm.as_and(ScratchRegister, output, maskTemp);
  masm.as_or(ScratchRegister, ScratchRegister, valueTemp);

  masm.as_sc(ScratchRegister, SecondScratchReg, 0);

  masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
            ShortJump);

  // Extract and extend the old cell value from the loaded word.
  masm.as_srlv(output, output, offsetTemp);

  switch (nbytes) {
    case 1:
      if (signExtend) {
        masm.ma_seb(output, output);
      } else {
        masm.as_andi(output, output, 0xff);
      }
      break;
    case 2:
      if (signExtend) {
        masm.ma_seh(output, output);
      } else {
        masm.as_andi(output, output, 0xffff);
      }
      break;
  }

  masm.memoryBarrierAfter(sync);
}
2517
// JS atomic exchange on an Address; no wasm access descriptor is recorded.
void MacroAssembler::atomicExchange(Scalar::Type type,
                                    const Synchronization& sync,
                                    const Address& mem, Register value,
                                    Register valueTemp, Register offsetTemp,
                                    Register maskTemp, Register output) {
  AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
                 maskTemp, output);
}
2526
// JS atomic exchange on a BaseIndex; no wasm access descriptor is recorded.
void MacroAssembler::atomicExchange(Scalar::Type type,
                                    const Synchronization& sync,
                                    const BaseIndex& mem, Register value,
                                    Register valueTemp, Register offsetTemp,
                                    Register maskTemp, Register output) {
  AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
                 maskTemp, output);
}
2535
// Wasm atomic exchange on an Address; records `access` for trap handling.
void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
                                        const Address& mem, Register value,
                                        Register valueTemp, Register offsetTemp,
                                        Register maskTemp, Register output) {
  AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
                 valueTemp, offsetTemp, maskTemp, output);
}
2543
// Wasm atomic exchange on a BaseIndex; records `access` for trap handling.
void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
                                        const BaseIndex& mem, Register value,
                                        Register valueTemp, Register offsetTemp,
                                        Register maskTemp, Register output) {
  AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
                 valueTemp, offsetTemp, maskTemp, output);
}
2551
// Emit a byte/halfword/word atomic read-modify-write loop (add/sub/and/
// or/xor) using LL/SC.  `output` receives the cell's value from BEFORE the
// operation, sign- or zero-extended per `type`.
//
// Word (4-byte) accesses operate directly and must pass InvalidReg for the
// three temps; sub-word (1/2-byte) accesses operate on the containing word
// and require them.  If `access` is non-null, the LL instruction is
// recorded as the wasm memory access.
template <typename T>
static void AtomicFetchOp(MacroAssembler& masm,
                          const wasm::MemoryAccessDesc* access,
                          Scalar::Type type, const Synchronization& sync,
                          AtomicOp op, const T& mem, Register value,
                          Register valueTemp, Register offsetTemp,
                          Register maskTemp, Register output) {
  bool signExtend = Scalar::isSignedIntType(type);
  unsigned nbytes = Scalar::byteSize(type);

  switch (nbytes) {
    case 1:
    case 2:
      break;
    case 4:
      // Word-sized operations don't need the sub-word temps.
      MOZ_ASSERT(valueTemp == InvalidReg);
      MOZ_ASSERT(offsetTemp == InvalidReg);
      MOZ_ASSERT(maskTemp == InvalidReg);
      break;
    default:
      MOZ_CRASH();
  }

  Label again;

  masm.computeEffectiveAddress(mem, SecondScratchReg);

  if (nbytes == 4) {
    masm.memoryBarrierBefore(sync);
    masm.bind(&again);

    if (access) {
      masm.append(*access, masm.size());
    }

    masm.as_ll(output, SecondScratchReg, 0);

    // Compute the new word into ScratchRegister, leaving the old value in
    // output.
    switch (op) {
      case AtomicFetchAddOp:
        masm.as_addu(ScratchRegister, output, value);
        break;
      case AtomicFetchSubOp:
        masm.as_subu(ScratchRegister, output, value);
        break;
      case AtomicFetchAndOp:
        masm.as_and(ScratchRegister, output, value);
        break;
      case AtomicFetchOrOp:
        masm.as_or(ScratchRegister, output, value);
        break;
      case AtomicFetchXorOp:
        masm.as_xor(ScratchRegister, output, value);
        break;
      default:
        MOZ_CRASH();
    }

    masm.as_sc(ScratchRegister, SecondScratchReg, 0);
    // SC leaves 0 in the register on failure; retry until it succeeds.
    masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
              ShortJump);

    masm.memoryBarrierAfter(sync);

    return;
  }

  // Sub-word case: align the address down to the containing word, compute
  // the cell's bit offset (flipped on big-endian) and an inverted mask for
  // the cell's bits.
  masm.as_andi(offsetTemp, SecondScratchReg, 3);
  masm.subPtr(offsetTemp, SecondScratchReg);
#if !MOZ_LITTLE_ENDIAN()
  masm.as_xori(offsetTemp, offsetTemp, 3);
#endif
  masm.as_sll(offsetTemp, offsetTemp, 3);
  masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
  masm.as_sllv(maskTemp, maskTemp, offsetTemp);
  masm.as_nor(maskTemp, zero, maskTemp);

  masm.memoryBarrierBefore(sync);

  masm.bind(&again);

  if (access) {
    masm.append(*access, masm.size());
  }

  masm.as_ll(ScratchRegister, SecondScratchReg, 0);
  masm.as_srlv(output, ScratchRegister, offsetTemp);

  // Apply the operation to the extracted (unmasked) cell value.
  switch (op) {
    case AtomicFetchAddOp:
      masm.as_addu(valueTemp, output, value);
      break;
    case AtomicFetchSubOp:
      masm.as_subu(valueTemp, output, value);
      break;
    case AtomicFetchAndOp:
      masm.as_and(valueTemp, output, value);
      break;
    case AtomicFetchOrOp:
      masm.as_or(valueTemp, output, value);
      break;
    case AtomicFetchXorOp:
      masm.as_xor(valueTemp, output, value);
      break;
    default:
      MOZ_CRASH();
  }

  // Truncate the result to the cell's width before splicing it back.
  switch (nbytes) {
    case 1:
      masm.as_andi(valueTemp, valueTemp, 0xff);
      break;
    case 2:
      masm.as_andi(valueTemp, valueTemp, 0xffff);
      break;
  }

  masm.as_sllv(valueTemp, valueTemp, offsetTemp);

  masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
  masm.as_or(ScratchRegister, ScratchRegister, valueTemp);

  masm.as_sc(ScratchRegister, SecondScratchReg, 0);

  masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
            ShortJump);

  // Extend the old cell value for the caller.
  switch (nbytes) {
    case 1:
      if (signExtend) {
        masm.ma_seb(output, output);
      } else {
        masm.as_andi(output, output, 0xff);
      }
      break;
    case 2:
      if (signExtend) {
        masm.ma_seh(output, output);
      } else {
        masm.as_andi(output, output, 0xffff);
      }
      break;
  }

  masm.memoryBarrierAfter(sync);
}
2697
// JS atomic fetch-op on an Address; no wasm access descriptor is recorded.
void MacroAssembler::atomicFetchOp(Scalar::Type type,
                                   const Synchronization& sync, AtomicOp op,
                                   Register value, const Address& mem,
                                   Register valueTemp, Register offsetTemp,
                                   Register maskTemp, Register output) {
  AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
                offsetTemp, maskTemp, output);
}
2706
// JS atomic fetch-op on a BaseIndex; no wasm access descriptor is recorded.
void MacroAssembler::atomicFetchOp(Scalar::Type type,
                                   const Synchronization& sync, AtomicOp op,
                                   Register value, const BaseIndex& mem,
                                   Register valueTemp, Register offsetTemp,
                                   Register maskTemp, Register output) {
  AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
                offsetTemp, maskTemp, output);
}
2715
// Wasm atomic fetch-op on an Address; records `access` for trap handling.
void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
                                       AtomicOp op, Register value,
                                       const Address& mem, Register valueTemp,
                                       Register offsetTemp, Register maskTemp,
                                       Register output) {
  AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
                valueTemp, offsetTemp, maskTemp, output);
}
2724
// Wasm atomic fetch-op on a BaseIndex; records `access` for trap handling.
void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
                                       AtomicOp op, Register value,
                                       const BaseIndex& mem, Register valueTemp,
                                       Register offsetTemp, Register maskTemp,
                                       Register output) {
  AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
                valueTemp, offsetTemp, maskTemp, output);
}
2733
// Emit a byte/halfword/word atomic read-modify-write loop (add/sub/and/
// or/xor) using LL/SC, discarding the old value (effect-only variant of
// AtomicFetchOp above).
//
// Word (4-byte) accesses operate directly and must pass InvalidReg for the
// three temps; sub-word (1/2-byte) accesses operate on the containing word
// and require them.  If `access` is non-null, the LL instruction is
// recorded as the wasm memory access.
template <typename T>
static void AtomicEffectOp(MacroAssembler& masm,
                           const wasm::MemoryAccessDesc* access,
                           Scalar::Type type, const Synchronization& sync,
                           AtomicOp op, const T& mem, Register value,
                           Register valueTemp, Register offsetTemp,
                           Register maskTemp) {
  unsigned nbytes = Scalar::byteSize(type);

  switch (nbytes) {
    case 1:
    case 2:
      break;
    case 4:
      // Word-sized operations don't need the sub-word temps.
      MOZ_ASSERT(valueTemp == InvalidReg);
      MOZ_ASSERT(offsetTemp == InvalidReg);
      MOZ_ASSERT(maskTemp == InvalidReg);
      break;
    default:
      MOZ_CRASH();
  }

  Label again;

  masm.computeEffectiveAddress(mem, SecondScratchReg);

  if (nbytes == 4) {
    masm.memoryBarrierBefore(sync);
    masm.bind(&again);

    if (access) {
      masm.append(*access, masm.size());
    }

    masm.as_ll(ScratchRegister, SecondScratchReg, 0);

    switch (op) {
      case AtomicFetchAddOp:
        masm.as_addu(ScratchRegister, ScratchRegister, value);
        break;
      case AtomicFetchSubOp:
        masm.as_subu(ScratchRegister, ScratchRegister, value);
        break;
      case AtomicFetchAndOp:
        masm.as_and(ScratchRegister, ScratchRegister, value);
        break;
      case AtomicFetchOrOp:
        masm.as_or(ScratchRegister, ScratchRegister, value);
        break;
      case AtomicFetchXorOp:
        masm.as_xor(ScratchRegister, ScratchRegister, value);
        break;
      default:
        MOZ_CRASH();
    }

    masm.as_sc(ScratchRegister, SecondScratchReg, 0);
    // SC leaves 0 in the register on failure; retry until it succeeds.
    masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
              ShortJump);

    masm.memoryBarrierAfter(sync);

    return;
  }

  // Sub-word case: align the address down to the containing word, compute
  // the cell's bit offset (flipped on big-endian) and an inverted mask for
  // the cell's bits.
  masm.as_andi(offsetTemp, SecondScratchReg, 3);
  masm.subPtr(offsetTemp, SecondScratchReg);
#if !MOZ_LITTLE_ENDIAN()
  masm.as_xori(offsetTemp, offsetTemp, 3);
#endif
  masm.as_sll(offsetTemp, offsetTemp, 3);
  masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
  masm.as_sllv(maskTemp, maskTemp, offsetTemp);
  masm.as_nor(maskTemp, zero, maskTemp);

  masm.memoryBarrierBefore(sync);

  masm.bind(&again);

  if (access) {
    masm.append(*access, masm.size());
  }

  masm.as_ll(ScratchRegister, SecondScratchReg, 0);
  masm.as_srlv(valueTemp, ScratchRegister, offsetTemp);

  // Apply the operation to the extracted cell value.
  switch (op) {
    case AtomicFetchAddOp:
      masm.as_addu(valueTemp, valueTemp, value);
      break;
    case AtomicFetchSubOp:
      masm.as_subu(valueTemp, valueTemp, value);
      break;
    case AtomicFetchAndOp:
      masm.as_and(valueTemp, valueTemp, value);
      break;
    case AtomicFetchOrOp:
      masm.as_or(valueTemp, valueTemp, value);
      break;
    case AtomicFetchXorOp:
      masm.as_xor(valueTemp, valueTemp, value);
      break;
    default:
      MOZ_CRASH();
  }

  // Truncate the result to the cell's width before splicing it back.
  switch (nbytes) {
    case 1:
      masm.as_andi(valueTemp, valueTemp, 0xff);
      break;
    case 2:
      masm.as_andi(valueTemp, valueTemp, 0xffff);
      break;
  }

  masm.as_sllv(valueTemp, valueTemp, offsetTemp);

  masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
  masm.as_or(ScratchRegister, ScratchRegister, valueTemp);

  masm.as_sc(ScratchRegister, SecondScratchReg, 0);

  masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
            ShortJump);

  masm.memoryBarrierAfter(sync);
}
2861
// Wasm atomic effect-op (result discarded) on an Address; records `access`
// for trap handling.
void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
                                        AtomicOp op, Register value,
                                        const Address& mem, Register valueTemp,
                                        Register offsetTemp,
                                        Register maskTemp) {
  AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
                 valueTemp, offsetTemp, maskTemp);
}
2870
// Wasm atomic read-modify-write (BaseIndex form) whose old value is not
// needed. Same as the Address overload above except for the addressing mode.
void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
                                        AtomicOp op, Register value,
                                        const BaseIndex& mem,
                                        Register valueTemp, Register offsetTemp,
                                        Register maskTemp) {
  AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
                 valueTemp, offsetTemp, maskTemp);
}
2879
2880 // ========================================================================
2881 // JS atomic operations.
2882
2883 template <typename T>
CompareExchangeJS(MacroAssembler & masm,Scalar::Type arrayType,const Synchronization & sync,const T & mem,Register oldval,Register newval,Register valueTemp,Register offsetTemp,Register maskTemp,Register temp,AnyRegister output)2884 static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
2885 const Synchronization& sync, const T& mem,
2886 Register oldval, Register newval,
2887 Register valueTemp, Register offsetTemp,
2888 Register maskTemp, Register temp,
2889 AnyRegister output) {
2890 if (arrayType == Scalar::Uint32) {
2891 masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
2892 offsetTemp, maskTemp, temp);
2893 masm.convertUInt32ToDouble(temp, output.fpu());
2894 } else {
2895 masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
2896 offsetTemp, maskTemp, output.gpr());
2897 }
2898 }
2899
// JS typed-array compareExchange, Address form; see CompareExchangeJS for
// the Uint32 double-conversion special case.
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
                                       const Synchronization& sync,
                                       const Address& mem, Register oldval,
                                       Register newval, Register valueTemp,
                                       Register offsetTemp, Register maskTemp,
                                       Register temp, AnyRegister output) {
  CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
                    offsetTemp, maskTemp, temp, output);
}
2909
// JS typed-array compareExchange, BaseIndex form; see CompareExchangeJS for
// the Uint32 double-conversion special case.
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
                                       const Synchronization& sync,
                                       const BaseIndex& mem, Register oldval,
                                       Register newval, Register valueTemp,
                                       Register offsetTemp, Register maskTemp,
                                       Register temp, AnyRegister output) {
  CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
                    offsetTemp, maskTemp, temp, output);
}
2919
2920 template <typename T>
AtomicExchangeJS(MacroAssembler & masm,Scalar::Type arrayType,const Synchronization & sync,const T & mem,Register value,Register valueTemp,Register offsetTemp,Register maskTemp,Register temp,AnyRegister output)2921 static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
2922 const Synchronization& sync, const T& mem,
2923 Register value, Register valueTemp,
2924 Register offsetTemp, Register maskTemp,
2925 Register temp, AnyRegister output) {
2926 if (arrayType == Scalar::Uint32) {
2927 masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
2928 maskTemp, temp);
2929 masm.convertUInt32ToDouble(temp, output.fpu());
2930 } else {
2931 masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
2932 maskTemp, output.gpr());
2933 }
2934 }
2935
// JS typed-array exchange, Address form; see AtomicExchangeJS for the Uint32
// double-conversion special case.
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
                                      const Synchronization& sync,
                                      const Address& mem, Register value,
                                      Register valueTemp, Register offsetTemp,
                                      Register maskTemp, Register temp,
                                      AnyRegister output) {
  AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
                   maskTemp, temp, output);
}
2945
// JS typed-array exchange, BaseIndex form; see AtomicExchangeJS for the
// Uint32 double-conversion special case.
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
                                      const Synchronization& sync,
                                      const BaseIndex& mem, Register value,
                                      Register valueTemp, Register offsetTemp,
                                      Register maskTemp, Register temp,
                                      AnyRegister output) {
  AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
                   maskTemp, temp, output);
}
2955
2956 template <typename T>
AtomicFetchOpJS(MacroAssembler & masm,Scalar::Type arrayType,const Synchronization & sync,AtomicOp op,Register value,const T & mem,Register valueTemp,Register offsetTemp,Register maskTemp,Register temp,AnyRegister output)2957 static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
2958 const Synchronization& sync, AtomicOp op,
2959 Register value, const T& mem, Register valueTemp,
2960 Register offsetTemp, Register maskTemp,
2961 Register temp, AnyRegister output) {
2962 if (arrayType == Scalar::Uint32) {
2963 masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
2964 maskTemp, temp);
2965 masm.convertUInt32ToDouble(temp, output.fpu());
2966 } else {
2967 masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
2968 maskTemp, output.gpr());
2969 }
2970 }
2971
// JS typed-array fetch-and-op, Address form; see AtomicFetchOpJS for the
// Uint32 double-conversion special case.
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
                                     const Synchronization& sync, AtomicOp op,
                                     Register value, const Address& mem,
                                     Register valueTemp, Register offsetTemp,
                                     Register maskTemp, Register temp,
                                     AnyRegister output) {
  AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
                  maskTemp, temp, output);
}
2981
// JS typed-array fetch-and-op, BaseIndex form; see AtomicFetchOpJS for the
// Uint32 double-conversion special case.
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
                                     const Synchronization& sync, AtomicOp op,
                                     Register value, const BaseIndex& mem,
                                     Register valueTemp, Register offsetTemp,
                                     Register maskTemp, Register temp,
                                     AnyRegister output) {
  AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
                  maskTemp, temp, output);
}
2991
// JS typed-array atomic op whose result is discarded, BaseIndex form.
// Passes nullptr for the access descriptor: unlike the wasm entry points,
// no trap site is appended for JS accesses.
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
                                      const Synchronization& sync, AtomicOp op,
                                      Register value, const BaseIndex& mem,
                                      Register valueTemp, Register offsetTemp,
                                      Register maskTemp) {
  AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
                 offsetTemp, maskTemp);
}
3000
// JS typed-array atomic op whose result is discarded, Address form.
// Passes nullptr for the access descriptor: no trap site is appended.
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
                                      const Synchronization& sync, AtomicOp op,
                                      Register value, const Address& mem,
                                      Register valueTemp, Register offsetTemp,
                                      Register maskTemp) {
  AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
                 offsetTemp, maskTemp);
}
3009
// 32-bit quotient of srcDest / rhs, result in srcDest. The volatile
// register set is ignored: the plain quotient32 emitter suffices here.
void MacroAssembler::flexibleQuotient32(Register rhs, Register srcDest,
                                        bool isUnsigned,
                                        const LiveRegisterSet&) {
  quotient32(rhs, srcDest, isUnsigned);
}
3015
// 32-bit remainder of srcDest % rhs, result in srcDest. The volatile
// register set is ignored: the plain remainder32 emitter suffices here.
void MacroAssembler::flexibleRemainder32(Register rhs, Register srcDest,
                                         bool isUnsigned,
                                         const LiveRegisterSet&) {
  remainder32(rhs, srcDest, isUnsigned);
}
3021
// Compute both the quotient (into srcDest) and the remainder (into
// remOutput) of srcDest / rhs in one go. On MIPSR6, div/mod are separate
// three-operand instructions; pre-R6, a single divide fills the HI/LO pair,
// read back afterwards with mfhi/mflo. The volatile register set is unused.
void MacroAssembler::flexibleDivMod32(Register rhs, Register srcDest,
                                      Register remOutput, bool isUnsigned,
                                      const LiveRegisterSet&) {
  if (isUnsigned) {
#ifdef MIPSR6
    // Quotient goes to the scratch first so srcDest still holds the dividend
    // for the modu that follows; then move the quotient into place.
    as_divu(ScratchRegister, srcDest, rhs);
    as_modu(remOutput, srcDest, rhs);
    ma_move(srcDest, ScratchRegister);
#else
    as_divu(srcDest, rhs);
#endif
  } else {
#ifdef MIPSR6
    as_div(ScratchRegister, srcDest, rhs);
    as_mod(remOutput, srcDest, rhs);
    ma_move(srcDest, ScratchRegister);
#else
    as_div(srcDest, rhs);
#endif
  }
#ifndef MIPSR6
  // Pre-R6: remainder lives in HI, quotient in LO.
  as_mfhi(remOutput);
  as_mflo(srcDest);
#endif
}
3047
// Emit a patchable move of a (yet unknown) address into dest and return the
// code offset to patch later via patchNearAddressMove.
CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
  return movWithPatch(ImmPtr(nullptr), dest);
}
3051
// Back-patch a move emitted by moveNearAddressWithPatch with the real
// target address; the expected old value is the nullptr placeholder.
void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
                                          CodeLocationLabel target) {
  PatchDataWithValueCheck(loc, ImmPtr(target.raw()), ImmPtr(nullptr));
}
3056
3057 // ========================================================================
3058 // Spectre Mitigations.
3059
// No speculation barrier is implemented for this platform; crash if one is
// ever requested.
void MacroAssembler::speculationBarrier() { MOZ_CRASH(); }
3061
// floor(float32) -> int32, bailing to |fail| on NaN, -0, or any input whose
// floor is out of int32 range (detected via the INT_MIN/INT_MAX sentinel
// results of floor.w.s).
void MacroAssembler::floorFloat32ToInt32(FloatRegister src, Register dest,
                                         Label* fail) {
  ScratchFloat32Scope scratch(*this);

  Label skipCheck, done;

  // If Nan, 0 or -0 check for bailout
  loadConstantFloat32(0.0f, scratch);
  ma_bc1s(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);

  // If binary value is not zero, it is NaN or -0, so we bail.
  // NOTE(review): uses moveFromDoubleLo on a float32 source, unlike
  // ceilFloat32ToInt32 which uses moveFromFloat32 — presumably both emit the
  // same mfc1; confirm they are equivalent on this target.
  moveFromDoubleLo(src, SecondScratchReg);
  branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);

  // Input was zero, so return zero.
  move32(Imm32(0), dest);
  ma_b(&done, ShortJump);

  bind(&skipCheck);
  as_floorws(scratch, src);
  moveFromDoubleLo(scratch, dest);

  // Sentinel results indicate NaN/overflow in the conversion: bail.
  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
  branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);

  bind(&done);
}
3089
// floor(double) -> int32, bailing to |fail| on NaN, -0, or any input whose
// floor is out of int32 range (detected via the INT_MIN/INT_MAX sentinel
// results of floor.w.d).
void MacroAssembler::floorDoubleToInt32(FloatRegister src, Register dest,
                                        Label* fail) {
  ScratchDoubleScope scratch(*this);

  Label skipCheck, done;

  // If Nan, 0 or -0 check for bailout
  loadConstantDouble(0.0, scratch);
  ma_bc1d(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);

  // If high part is not zero, it is NaN or -0, so we bail.
  // (The high word holds the sign and exponent bits of the double.)
  moveFromDoubleHi(src, SecondScratchReg);
  branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);

  // Input was zero, so return zero.
  move32(Imm32(0), dest);
  ma_b(&done, ShortJump);

  bind(&skipCheck);
  as_floorwd(scratch, src);
  moveFromDoubleLo(scratch, dest);

  // Sentinel results indicate NaN/overflow in the conversion: bail.
  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
  branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);

  bind(&done);
}
3117
// ceil(float32) -> int32, bailing to |fail| on NaN, -0, or any input whose
// ceiling is out of int32 range. Inputs in ]-1, 0] would ceil to 0 but must
// distinguish -0 (bail) from +0 (return 0), hence the slow zero check.
void MacroAssembler::ceilFloat32ToInt32(FloatRegister src, Register dest,
                                        Label* fail) {
  ScratchFloat32Scope scratch(*this);

  Label performCeil, done;

  // If x < -1 or x > 0 then perform ceil.
  loadConstantFloat32(0.0f, scratch);
  branchFloat(Assembler::DoubleGreaterThan, src, scratch, &performCeil);
  loadConstantFloat32(-1.0f, scratch);
  branchFloat(Assembler::DoubleLessThanOrEqual, src, scratch, &performCeil);

  // If binary value is not zero, the input was not 0, so we bail.
  moveFromFloat32(src, SecondScratchReg);
  branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);

  // Input was zero, so return zero.
  move32(Imm32(0), dest);
  ma_b(&done, ShortJump);

  bind(&performCeil);
  as_ceilws(scratch, src);
  moveFromFloat32(scratch, dest);

  // Sentinel results indicate NaN/overflow in the conversion: bail.
  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
  branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);

  bind(&done);
}
3147
// ceil(double) -> int32, bailing to |fail| on NaN, -0, or any input whose
// ceiling is out of int32 range. Mirrors ceilFloat32ToInt32, using the high
// word of the double to distinguish -0 from +0.
void MacroAssembler::ceilDoubleToInt32(FloatRegister src, Register dest,
                                       Label* fail) {
  ScratchDoubleScope scratch(*this);

  Label performCeil, done;

  // If x < -1 or x > 0 then perform ceil.
  loadConstantDouble(0, scratch);
  branchDouble(Assembler::DoubleGreaterThan, src, scratch, &performCeil);
  loadConstantDouble(-1, scratch);
  branchDouble(Assembler::DoubleLessThanOrEqual, src, scratch, &performCeil);

  // If high part is not zero, the input was not 0, so we bail.
  moveFromDoubleHi(src, SecondScratchReg);
  branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);

  // Input was zero, so return zero.
  move32(Imm32(0), dest);
  ma_b(&done, ShortJump);

  bind(&performCeil);
  as_ceilwd(scratch, src);
  moveFromDoubleLo(scratch, dest);

  // Sentinel results indicate NaN/overflow in the conversion: bail.
  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
  branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);

  bind(&done);
}
3177
roundFloat32ToInt32(FloatRegister src,Register dest,FloatRegister temp,Label * fail)3178 void MacroAssembler::roundFloat32ToInt32(FloatRegister src, Register dest,
3179 FloatRegister temp, Label* fail) {
3180 ScratchFloat32Scope scratch(*this);
3181
3182 Label negative, end, skipCheck;
3183
3184 // Load biggest number less than 0.5 in the temp register.
3185 loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
3186
3187 // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
3188 loadConstantFloat32(0.0f, scratch);
3189 ma_bc1s(src, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
3190
3191 // If Nan, 0 or -0 check for bailout
3192 ma_bc1s(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
3193
3194 // If binary value is not zero, it is NaN or -0, so we bail.
3195 moveFromFloat32(src, SecondScratchReg);
3196 branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);
3197
3198 // Input was zero, so return zero.
3199 move32(Imm32(0), dest);
3200 ma_b(&end, ShortJump);
3201
3202 bind(&skipCheck);
3203 as_adds(scratch, src, temp);
3204 as_floorws(scratch, scratch);
3205
3206 moveFromFloat32(scratch, dest);
3207
3208 branchTest32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
3209 branchTest32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
3210
3211 jump(&end);
3212
3213 // Input is negative, but isn't -0.
3214 bind(&negative);
3215
3216 // Inputs in ]-0.5; 0] need to be added 0.5, other negative inputs need to
3217 // be added the biggest double less than 0.5.
3218 Label loadJoin;
3219 loadConstantFloat32(-0.5f, scratch);
3220 branchFloat(Assembler::DoubleLessThan, src, scratch, &loadJoin);
3221 loadConstantFloat32(0.5f, temp);
3222 bind(&loadJoin);
3223
3224 as_adds(temp, src, temp);
3225
3226 // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
3227 // result is -0.
3228 branchFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch, fail);
3229
3230 // Truncate and round toward zero.
3231 // This is off-by-one for everything but integer-valued inputs.
3232 as_floorws(scratch, temp);
3233 moveFromFloat32(scratch, dest);
3234
3235 branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
3236
3237 bind(&end);
3238 }
3239
// JS-style round(double) -> int32 (round half towards +Infinity), bailing
// to |fail| on NaN, -0, out-of-int32-range inputs, and negative inputs that
// would round to -0. Positive inputs add the largest double below 0.5 and
// floor; negative inputs take a slow path so that values in ]-0.5, 0]
// (which round to -0) can be rejected.
void MacroAssembler::roundDoubleToInt32(FloatRegister src, Register dest,
                                        FloatRegister temp, Label* fail) {
  ScratchDoubleScope scratch(*this);

  Label negative, end, skipCheck;

  // Load biggest number less than 0.5 in the temp register.
  loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);

  // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
  loadConstantDouble(0.0, scratch);
  ma_bc1d(src, scratch, &negative, Assembler::DoubleLessThan, ShortJump);

  // If Nan, 0 or -0 check for bailout
  ma_bc1d(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);

  // If high part is not zero, it is NaN or -0, so we bail.
  moveFromDoubleHi(src, SecondScratchReg);
  branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);

  // Input was zero, so return zero.
  move32(Imm32(0), dest);
  ma_b(&end, ShortJump);

  bind(&skipCheck);
  as_addd(scratch, src, temp);
  as_floorwd(scratch, scratch);

  moveFromDoubleLo(scratch, dest);

  // Sentinel results indicate NaN/overflow in the conversion: bail.
  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
  branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);

  jump(&end);

  // Input is negative, but isn't -0.
  bind(&negative);

  // Inputs in ]-0.5; 0] need to be added 0.5, other negative inputs need to
  // be added the biggest double less than 0.5.
  Label loadJoin;
  loadConstantDouble(-0.5, scratch);
  branchDouble(Assembler::DoubleLessThan, src, scratch, &loadJoin);
  loadConstantDouble(0.5, temp);
  bind(&loadJoin);

  addDouble(src, temp);

  // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
  // result is -0.
  branchDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch, fail);

  // Truncate and round toward zero.
  // This is off-by-one for everything but integer-valued inputs.
  as_floorwd(scratch, temp);
  moveFromDoubleLo(scratch, dest);

  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);

  bind(&end);
}
3301
// trunc(float32) -> int32, bailing to |fail| on NaN, overflow (both reported
// via the FCSR invalid-operation cause bit, CauseV), or -0 (detected by the
// sign bit of a zero result).
void MacroAssembler::truncFloat32ToInt32(FloatRegister src, Register dest,
                                         Label* fail) {
  Label notZero;
  as_truncws(ScratchFloat32Reg, src);
  // Read FCSR and extract the CauseV bit into ScratchRegister.
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, dest);
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);

  ma_b(dest, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
  // Result is zero: also bail for negative-signed inputs (i.e. -0 and
  // values in ]-1; -0]).
  moveFromFloat32(src, ScratchRegister);
  // Check if src is in ]-1; -0] range by checking the sign bit.
  as_slt(ScratchRegister, ScratchRegister, zero);
  bind(&notZero);

  // Bail if CauseV fired or the sign check above flagged the input.
  branch32(Assembler::NotEqual, ScratchRegister, Imm32(0), fail);
}
3318
// trunc(double) -> int32, bailing to |fail| on NaN, overflow (both reported
// via the FCSR invalid-operation cause bit, CauseV), or -0 (detected by the
// sign bit in the high word of a zero result).
void MacroAssembler::truncDoubleToInt32(FloatRegister src, Register dest,
                                        Label* fail) {
  Label notZero;
  as_truncwd(ScratchFloat32Reg, src);
  // Read FCSR and extract the CauseV bit into ScratchRegister.
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, dest);
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);

  ma_b(dest, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
  // Result is zero: also bail for negative-signed inputs (i.e. -0 and
  // values in ]-1; -0]). The high word of the double carries the sign bit.
  moveFromDoubleHi(src, ScratchRegister);
  // Check if src is in ]-1; -0] range by checking the sign bit.
  as_slt(ScratchRegister, ScratchRegister, zero);
  bind(&notZero);

  // Bail if CauseV fired or the sign check above flagged the input.
  branch32(Assembler::NotEqual, ScratchRegister, Imm32(0), fail);
}
3335
// Rounding with a dynamic RoundingMode is not implemented on MIPS; crash if
// reached.
void MacroAssembler::nearbyIntDouble(RoundingMode mode, FloatRegister src,
                                     FloatRegister dest) {
  MOZ_CRASH("not supported on this platform");
}
3340
// Rounding with a dynamic RoundingMode is not implemented on MIPS; crash if
// reached.
void MacroAssembler::nearbyIntFloat32(RoundingMode mode, FloatRegister src,
                                      FloatRegister dest) {
  MOZ_CRASH("not supported on this platform");
}
3345
// copysign for doubles is not implemented on MIPS; crash if reached.
void MacroAssembler::copySignDouble(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister output) {
  MOZ_CRASH("not supported on this platform");
}
3350
3351 //}}} check_macroassembler_style
3352