1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/loong64/MacroAssembler-loong64.h"
8
9 #include "jsmath.h"
10
11 #include "jit/Bailouts.h"
12 #include "jit/BaselineFrame.h"
13 #include "jit/JitFrames.h"
14 #include "jit/JitRuntime.h"
15 #include "jit/loong64/SharedICRegisters-loong64.h"
16 #include "jit/MacroAssembler.h"
17 #include "jit/MoveEmitter.h"
18 #include "util/Memory.h"
19 #include "vm/JitActivation.h" // js::jit::JitActivation
20 #include "vm/JSContext.h"
21
22 #include "jit/MacroAssembler-inl.h"
23
24 namespace js {
25 namespace jit {
26
27 void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
28 ScratchRegisterScope scratch(asMasm());
29 ScratchDoubleScope fpscratch(asMasm());
30 as_ftintrne_l_d(fpscratch, input);
31 as_movfr2gr_d(output, fpscratch);
32 // if (res < 0) res = 0;
33 as_slt(scratch, output, zero);
34 as_masknez(output, output, scratch);
35 // if (res > 255) res = 255;
36 as_sltui(scratch, output, 255);
37 as_addi_d(output, output, -255);
38 as_maskeqz(output, output, scratch);
39 as_addi_d(output, output, 255);
40 }
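// Illustrative walk-through of the branchless clamp above (not emitted code):
// with res = 300, slt yields 0 so masknez keeps 300; sltui (300 < 255) yields
// 0, so maskeqz zeroes the intermediate 300 - 255 = 45 and the final addi
// restores 255. With res = -7, slt yields 1 and masknez zeroes the value;
// sltui (0 < 255) yields 1, so maskeqz keeps 0 - 255 = -255 and the final
// addi brings it back to 0.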
41
42 bool MacroAssemblerLOONG64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
43 uint32_t descriptor = MakeFrameDescriptor(
44 asMasm().framePushed(), FrameType::IonJS, ExitFrameLayout::Size());
45
46 asMasm().Push(Imm32(descriptor)); // descriptor_
47 asMasm().Push(ImmPtr(fakeReturnAddr));
48
49 return true;
50 }
51
52 void MacroAssemblerLOONG64Compat::convertUInt32ToDouble(Register src,
53 FloatRegister dest) {
54 ScratchRegisterScope scratch(asMasm());
55 as_bstrpick_d(scratch, src, 31, 0);
56 asMasm().convertInt64ToDouble(Register64(scratch), dest);
57 }
58
59 void MacroAssemblerLOONG64Compat::convertUInt64ToDouble(Register src,
60 FloatRegister dest) {
61 Label positive, done;
62 ma_b(src, src, &positive, NotSigned, ShortJump);
63 ScratchRegisterScope scratch(asMasm());
64 SecondScratchRegisterScope scratch2(asMasm());
65
66 MOZ_ASSERT(src != scratch);
67 MOZ_ASSERT(src != scratch2);
68
69 ma_and(scratch, src, Imm32(1));
70 as_srli_d(scratch2, src, 1);
71 as_or(scratch, scratch, scratch2);
72 as_movgr2fr_d(dest, scratch);
73 as_ffint_d_l(dest, dest);
74 asMasm().addDouble(dest, dest);
75 ma_b(&done, ShortJump);
76
77 bind(&positive);
78 as_movgr2fr_d(dest, src);
79 as_ffint_d_l(dest, dest);
80
81 bind(&done);
82 }
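// Note on the unsigned path above: when the top bit of src is set the value
// cannot be converted as a signed 64-bit integer, so the code converts
// (src >> 1) | (src & 1) instead (keeping the low bit so rounding is not
// biased) and then doubles the result with addDouble.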
83
84 void MacroAssemblerLOONG64Compat::convertUInt32ToFloat32(Register src,
85 FloatRegister dest) {
86 ScratchRegisterScope scratch(asMasm());
87 as_bstrpick_d(scratch, src, 31, 0);
88 asMasm().convertInt64ToFloat32(Register64(scratch), dest);
89 }
90
91 void MacroAssemblerLOONG64Compat::convertDoubleToFloat32(FloatRegister src,
92 FloatRegister dest) {
93 as_fcvt_s_d(dest, src);
94 }
95
96 const int CauseBitPos = int(Assembler::CauseI);
97 const int CauseBitCount = 1 + int(Assembler::CauseV) - int(Assembler::CauseI);
98 const int CauseIOrVMask = ((1 << int(Assembler::CauseI)) |
99 (1 << int(Assembler::CauseV))) >>
100 int(Assembler::CauseI);
101
102 // Checks whether a double is representable as a 32-bit integer. If so, the
103 // integer is written to the output register. Otherwise, a bailout is taken to
104 // the given snapshot. This function overwrites the scratch float register.
105 void MacroAssemblerLOONG64Compat::convertDoubleToInt32(FloatRegister src,
106 Register dest,
107 Label* fail,
108 bool negativeZeroCheck) {
109 if (negativeZeroCheck) {
110 moveFromDouble(src, dest);
111 as_rotri_d(dest, dest, 63);
112 ma_b(dest, Imm32(1), fail, Assembler::Equal);
113 }
114
115 ScratchRegisterScope scratch(asMasm());
116 ScratchFloat32Scope fpscratch(asMasm());
117 // Truncate the double to int32; if the result is inexact or invalid, fail.
118 as_ftintrz_w_d(fpscratch, src);
119 as_movfcsr2gr(scratch);
120 moveFromFloat32(fpscratch, dest);
121 as_bstrpick_d(scratch, scratch, CauseBitPos + CauseBitCount - 1, CauseBitPos);
122 as_andi(scratch, scratch,
123 CauseIOrVMask); // mask for the Inexact and Invalid flags.
124 ma_b(scratch, zero, fail, Assembler::NotEqual);
125 }
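// For instance (illustrative values): converting 3.0 sets neither cause bit
// and succeeds, while converting 3.5 raises Inexact and converting NaN or a
// value outside the int32 range raises Invalid, so those cases take the fail
// branch.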
126
127 void MacroAssemblerLOONG64Compat::convertDoubleToPtr(FloatRegister src,
128 Register dest, Label* fail,
129 bool negativeZeroCheck) {
130 if (negativeZeroCheck) {
131 moveFromDouble(src, dest);
132 as_rotri_d(dest, dest, 63);
133 ma_b(dest, Imm32(1), fail, Assembler::Equal);
134 }
135
136 ScratchRegisterScope scratch(asMasm());
137 ScratchDoubleScope fpscratch(asMasm());
138 // Truncate the double to int64; if the result is inexact or invalid, fail.
139 as_ftintrz_l_d(fpscratch, src);
140 as_movfcsr2gr(scratch);
141 moveFromDouble(fpscratch, dest);
142 as_bstrpick_d(scratch, scratch, CauseBitPos + CauseBitCount - 1, CauseBitPos);
143 as_andi(scratch, scratch,
144 CauseIOrVMask); // mask for the Inexact and Invalid flags.
145 ma_b(scratch, zero, fail, Assembler::NotEqual);
146 }
147
148 // Checks whether a float32 is representable as a 32-bit integer. If so, the
149 // integer is written to the output register. Otherwise, a bailout is taken to
150 // the given snapshot. This function overwrites the scratch float register.
151 void MacroAssemblerLOONG64Compat::convertFloat32ToInt32(
152 FloatRegister src, Register dest, Label* fail, bool negativeZeroCheck) {
153 if (negativeZeroCheck) {
154 moveFromFloat32(src, dest);
155 ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
156 }
157
158 ScratchRegisterScope scratch(asMasm());
159 ScratchFloat32Scope fpscratch(asMasm());
160 as_ftintrz_w_s(fpscratch, src);
161 as_movfcsr2gr(scratch);
162 moveFromFloat32(fpscratch, dest);
163 MOZ_ASSERT(CauseBitPos + CauseBitCount < 33);
164 MOZ_ASSERT(CauseBitPos < 32);
165 as_bstrpick_w(scratch, scratch, CauseBitPos + CauseBitCount - 1, CauseBitPos);
166 as_andi(scratch, scratch, CauseIOrVMask);
167 ma_b(scratch, zero, fail, Assembler::NotEqual);
168 }
169
170 void MacroAssemblerLOONG64Compat::convertFloat32ToDouble(FloatRegister src,
171 FloatRegister dest) {
172 as_fcvt_d_s(dest, src);
173 }
174
175 void MacroAssemblerLOONG64Compat::convertInt32ToFloat32(Register src,
176 FloatRegister dest) {
177 as_movgr2fr_w(dest, src);
178 as_ffint_s_w(dest, dest);
179 }
180
181 void MacroAssemblerLOONG64Compat::convertInt32ToFloat32(const Address& src,
182 FloatRegister dest) {
183 ma_fld_s(dest, src);
184 as_ffint_s_w(dest, dest);
185 }
186
187 void MacroAssemblerLOONG64Compat::movq(Register rj, Register rd) {
188 as_or(rd, rj, zero);
189 }
190
191 void MacroAssemblerLOONG64::ma_li(Register dest, CodeLabel* label) {
192 BufferOffset bo = m_buffer.nextOffset();
193 ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
194 label->patchAt()->bind(bo.getOffset());
195 label->setLinkMode(CodeLabel::MoveImmediate);
196 }
197
198 void MacroAssemblerLOONG64::ma_li(Register dest, ImmWord imm) {
199 int64_t value = imm.value;
200
201 if (-1 == (value >> 11) || 0 == (value >> 11)) {
202 as_addi_w(dest, zero, value);
203 return;
204 }
205
206 if (0 == (value >> 12)) {
207 as_ori(dest, zero, value);
208 return;
209 }
210
211 if (-1 == (value >> 31) || 0 == (value >> 31)) {
212 as_lu12i_w(dest, (value >> 12) & 0xfffff);
213 } else if (0 == (value >> 32)) {
214 as_lu12i_w(dest, (value >> 12) & 0xfffff);
215 as_bstrins_d(dest, zero, 63, 32);
216 } else if (-1 == (value >> 51) || 0 == (value >> 51)) {
217 if (is_uintN((value >> 12) & 0xfffff, 20)) {
218 as_lu12i_w(dest, (value >> 12) & 0xfffff);
219 }
220 as_lu32i_d(dest, (value >> 32) & 0xfffff);
221 } else if (0 == (value >> 52)) {
222 if (is_uintN((value >> 12) & 0xfffff, 20)) {
223 as_lu12i_w(dest, (value >> 12) & 0xfffff);
224 }
225 as_lu32i_d(dest, (value >> 32) & 0xfffff);
226 as_bstrins_d(dest, zero, 63, 52);
227 } else {
228 if (is_uintN((value >> 12) & 0xfffff, 20)) {
229 as_lu12i_w(dest, (value >> 12) & 0xfffff);
230 }
231 if (is_uintN((value >> 32) & 0xfffff, 20)) {
232 as_lu32i_d(dest, (value >> 32) & 0xfffff);
233 }
234 as_lu52i_d(dest, dest, (value >> 52) & 0xfff);
235 }
236
237 if (is_uintN(value & 0xfff, 12)) {
238 as_ori(dest, dest, value & 0xfff);
239 }
240 }
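// Rough worked example of the selection above (illustrative values): 0x12345
// has (value >> 11) != 0/-1 and (value >> 12) != 0 but (value >> 31) == 0, so
// it is materialized as lu12i_w(dest, 0x12) followed by ori(dest, dest,
// 0x345); a value that fits in 12 signed bits takes the single as_addi_w path
// instead.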
241
242 // This method generates lu32i_d, lu12i_w and ori instruction block that can be
243 // modified by UpdateLoad64Value, either during compilation (eg.
244 // Assembler::bind), or during execution (eg. jit::PatchJump).
245 void MacroAssemblerLOONG64::ma_liPatchable(Register dest, ImmPtr imm) {
246 return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
247 }
248
249 void MacroAssemblerLOONG64::ma_liPatchable(Register dest, ImmWord imm,
250 LiFlags flags) {
251 // hi12, hi20, low20, low12
252 if (Li64 == flags) { // Li64: Imm data
253 m_buffer.ensureSpace(4 * sizeof(uint32_t));
254 as_lu12i_w(dest, imm.value >> 12 & 0xfffff); // low20
255 as_ori(dest, dest, imm.value & 0xfff); // low12
256 as_lu32i_d(dest, imm.value >> 32 & 0xfffff); // hi20
257 as_lu52i_d(dest, dest, imm.value >> 52 & 0xfff); // hi12
258 } else { // Li48 address
259 m_buffer.ensureSpace(3 * sizeof(uint32_t));
260 as_lu12i_w(dest, imm.value >> 12 & 0xfffff); // low20
261 as_ori(dest, dest, imm.value & 0xfff); // low12
262 as_lu32i_d(dest, imm.value >> 32 & 0xfffff); // hi20
263 }
264 }
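// For illustration (arbitrary address): with Li48 flags the value
// 0x00007f123456789a splits into low12 = 0x89a, low20 = 0x34567 and
// hi20 = 0x07f12, i.e. lu12i_w(dest, 0x34567); ori(dest, dest, 0x89a);
// lu32i_d(dest, 0x07f12). The Li64 form additionally emits
// lu52i_d(dest, dest, hi12) for the top twelve bits.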
265
266 // Memory access ops.
267
268 void MacroAssemblerLOONG64::ma_ld_b(Register dest, Address address) {
269 int32_t offset = address.offset;
270 Register base = address.base;
271
272 if (is_intN(offset, 12)) {
273 as_ld_b(dest, base, offset);
274 } else if (base != dest) {
275 ma_li(dest, Imm32(offset));
276 as_ldx_b(dest, base, dest);
277 } else {
278 ScratchRegisterScope scratch(asMasm());
279 MOZ_ASSERT(base != scratch);
280 ma_li(scratch, Imm32(offset));
281 as_ldx_b(dest, base, scratch);
282 }
283 }
284
285 void MacroAssemblerLOONG64::ma_ld_bu(Register dest, Address address) {
286 int32_t offset = address.offset;
287 Register base = address.base;
288
289 if (is_intN(offset, 12)) {
290 as_ld_bu(dest, base, offset);
291 } else if (base != dest) {
292 ma_li(dest, Imm32(offset));
293 as_ldx_bu(dest, base, dest);
294 } else {
295 ScratchRegisterScope scratch(asMasm());
296 MOZ_ASSERT(base != scratch);
297 ma_li(scratch, Imm32(offset));
298 as_ldx_bu(dest, base, scratch);
299 }
300 }
301
302 void MacroAssemblerLOONG64::ma_ld_h(Register dest, Address address) {
303 int32_t offset = address.offset;
304 Register base = address.base;
305
306 if (is_intN(offset, 12)) {
307 as_ld_h(dest, base, offset);
308 } else if (base != dest) {
309 ma_li(dest, Imm32(offset));
310 as_ldx_h(dest, base, dest);
311 } else {
312 ScratchRegisterScope scratch(asMasm());
313 MOZ_ASSERT(base != scratch);
314 ma_li(scratch, Imm32(offset));
315 as_ldx_h(dest, base, scratch);
316 }
317 }
318
319 void MacroAssemblerLOONG64::ma_ld_hu(Register dest, Address address) {
320 int32_t offset = address.offset;
321 Register base = address.base;
322
323 if (is_intN(offset, 12)) {
324 as_ld_hu(dest, base, offset);
325 } else if (base != dest) {
326 ma_li(dest, Imm32(offset));
327 as_ldx_hu(dest, base, dest);
328 } else {
329 ScratchRegisterScope scratch(asMasm());
330 MOZ_ASSERT(base != scratch);
331 ma_li(scratch, Imm32(offset));
332 as_ldx_hu(dest, base, scratch);
333 }
334 }
335
336 void MacroAssemblerLOONG64::ma_ld_w(Register dest, Address address) {
337 int32_t offset = address.offset;
338 Register base = address.base;
339
340 if (is_intN(offset, 12)) {
341 as_ld_w(dest, base, offset);
342 } else if (base != dest) {
343 ma_li(dest, Imm32(offset));
344 as_ldx_w(dest, base, dest);
345 } else {
346 ScratchRegisterScope scratch(asMasm());
347 MOZ_ASSERT(base != scratch);
348 ma_li(scratch, Imm32(offset));
349 as_ldx_w(dest, base, scratch);
350 }
351 }
352
353 void MacroAssemblerLOONG64::ma_ld_wu(Register dest, Address address) {
354 int32_t offset = address.offset;
355 Register base = address.base;
356
357 if (is_intN(offset, 12)) {
358 as_ld_wu(dest, base, offset);
359 } else if (base != dest) {
360 ma_li(dest, Imm32(offset));
361 as_ldx_wu(dest, base, dest);
362 } else {
363 ScratchRegisterScope scratch(asMasm());
364 MOZ_ASSERT(base != scratch);
365 ma_li(scratch, Imm32(offset));
366 as_ldx_wu(dest, base, scratch);
367 }
368 }
369
370 void MacroAssemblerLOONG64::ma_ld_d(Register dest, Address address) {
371 int32_t offset = address.offset;
372 Register base = address.base;
373
374 if (is_intN(offset, 12)) {
375 as_ld_d(dest, base, offset);
376 } else if (base != dest) {
377 ma_li(dest, Imm32(offset));
378 as_ldx_d(dest, base, dest);
379 } else {
380 ScratchRegisterScope scratch(asMasm());
381 MOZ_ASSERT(base != scratch);
382 ma_li(scratch, Imm32(offset));
383 as_ldx_d(dest, base, scratch);
384 }
385 }
386
387 void MacroAssemblerLOONG64::ma_st_b(Register src, Address address) {
388 int32_t offset = address.offset;
389 Register base = address.base;
390
391 if (is_intN(offset, 12)) {
392 as_st_b(src, base, offset);
393 } else {
394 ScratchRegisterScope scratch(asMasm());
395 MOZ_ASSERT(src != scratch);
396 MOZ_ASSERT(base != scratch);
397 ma_li(scratch, Imm32(offset));
398 as_stx_b(src, base, scratch);
399 }
400 }
401
402 void MacroAssemblerLOONG64::ma_st_h(Register src, Address address) {
403 int32_t offset = address.offset;
404 Register base = address.base;
405
406 if (is_intN(offset, 12)) {
407 as_st_h(src, base, offset);
408 } else {
409 ScratchRegisterScope scratch(asMasm());
410 MOZ_ASSERT(src != scratch);
411 MOZ_ASSERT(base != scratch);
412 ma_li(scratch, Imm32(offset));
413 as_stx_h(src, base, scratch);
414 }
415 }
416
417 void MacroAssemblerLOONG64::ma_st_w(Register src, Address address) {
418 int32_t offset = address.offset;
419 Register base = address.base;
420
421 if (is_intN(offset, 12)) {
422 as_st_w(src, base, offset);
423 } else {
424 ScratchRegisterScope scratch(asMasm());
425 MOZ_ASSERT(src != scratch);
426 MOZ_ASSERT(base != scratch);
427 ma_li(scratch, Imm32(offset));
428 as_stx_w(src, base, scratch);
429 }
430 }
431
432 void MacroAssemblerLOONG64::ma_st_d(Register src, Address address) {
433 int32_t offset = address.offset;
434 Register base = address.base;
435
436 if (is_intN(offset, 12)) {
437 as_st_d(src, base, offset);
438 } else {
439 ScratchRegisterScope scratch(asMasm());
440 MOZ_ASSERT(src != scratch);
441 MOZ_ASSERT(base != scratch);
442 ma_li(scratch, Imm32(offset));
443 as_stx_d(src, base, scratch);
444 }
445 }
446
447 // Arithmetic-based ops.
448
449 // Add.
450 void MacroAssemblerLOONG64::ma_add_d(Register rd, Register rj, Imm32 imm) {
451 if (is_intN(imm.value, 12)) {
452 as_addi_d(rd, rj, imm.value);
453 } else if (rd != rj) {
454 ma_li(rd, imm);
455 as_add_d(rd, rj, rd);
456 } else {
457 ScratchRegisterScope scratch(asMasm());
458 MOZ_ASSERT(rj != scratch);
459 ma_li(scratch, imm);
460 as_add_d(rd, rj, scratch);
461 }
462 }
463
464 void MacroAssemblerLOONG64::ma_add32TestOverflow(Register rd, Register rj,
465 Register rk, Label* overflow) {
466 ScratchRegisterScope scratch(asMasm());
467 as_add_d(scratch, rj, rk);
468 as_add_w(rd, rj, rk);
469 ma_b(rd, Register(scratch), overflow, Assembler::NotEqual);
470 }
471
472 void MacroAssemblerLOONG64::ma_add32TestOverflow(Register rd, Register rj,
473 Imm32 imm, Label* overflow) {
474 // Check for signed range because of as_addi_d
475 if (is_intN(imm.value, 12)) {
476 ScratchRegisterScope scratch(asMasm());
477 as_addi_d(scratch, rj, imm.value);
478 as_addi_w(rd, rj, imm.value);
479 ma_b(rd, scratch, overflow, Assembler::NotEqual);
480 } else {
481 SecondScratchRegisterScope scratch2(asMasm());
482 ma_li(scratch2, imm);
483 ma_add32TestOverflow(rd, rj, scratch2, overflow);
484 }
485 }
486
487 void MacroAssemblerLOONG64::ma_addPtrTestOverflow(Register rd, Register rj,
488 Register rk,
489 Label* overflow) {
490 ScratchRegisterScope scratch(asMasm());
491 MOZ_ASSERT(rd != scratch);
492
493 if (rj == rk) {
494 if (rj == rd) {
495 as_or(scratch, rj, zero);
496 rj = scratch;
497 }
498
499 as_add_d(rd, rj, rj);
500 as_xor(scratch, rj, rd);
501 ma_b(scratch, zero, overflow, Assembler::LessThan);
502 } else {
503 SecondScratchRegisterScope scratch2(asMasm());
504 MOZ_ASSERT(rj != scratch);
505 MOZ_ASSERT(rd != scratch2);
506
507 if (rj == rd) {
508 as_or(scratch2, rj, zero);
509 rj = scratch2;
510 }
511
512 as_add_d(rd, rj, rk);
513 as_slti(scratch, rj, 0);
514 as_slt(scratch2, rd, rj);
515 ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
516 }
517 }
518
519 void MacroAssemblerLOONG64::ma_addPtrTestOverflow(Register rd, Register rj,
520 Imm32 imm, Label* overflow) {
521 SecondScratchRegisterScope scratch2(asMasm());
522
523 if (imm.value == 0) {
524 as_ori(rd, rj, 0);
525 return;
526 }
527
528 if (rj == rd) {
529 as_ori(scratch2, rj, 0);
530 rj = scratch2;
531 }
532
533 ma_add_d(rd, rj, imm);
534
535 if (imm.value > 0) {
536 ma_b(rd, rj, overflow, Assembler::LessThan);
537 } else {
538 MOZ_ASSERT(imm.value < 0);
539 ma_b(rd, rj, overflow, Assembler::GreaterThan);
540 }
541 }
542
543 void MacroAssemblerLOONG64::ma_addPtrTestOverflow(Register rd, Register rj,
544 ImmWord imm,
545 Label* overflow) {
546 SecondScratchRegisterScope scratch2(asMasm());
547
548 if (imm.value == 0) {
549 as_ori(rd, rj, 0);
550 return;
551 }
552
553 if (rj == rd) {
554 MOZ_ASSERT(rj != scratch2);
555 as_ori(scratch2, rj, 0);
556 rj = scratch2;
557 }
558
559 ma_li(rd, imm);
560 as_add_d(rd, rj, rd);
561
562 if (imm.value > 0) {
563 ma_b(rd, rj, overflow, Assembler::LessThan);
564 } else {
565 MOZ_ASSERT(imm.value < 0);
566 ma_b(rd, rj, overflow, Assembler::GreaterThan);
567 }
568 }
569
570 void MacroAssemblerLOONG64::ma_addPtrTestCarry(Condition cond, Register rd,
571 Register rj, Register rk,
572 Label* label) {
573 ScratchRegisterScope scratch(asMasm());
574 MOZ_ASSERT(rd != rk);
575 MOZ_ASSERT(rd != scratch);
576 as_add_d(rd, rj, rk);
577 as_sltu(scratch, rd, rk);
578 ma_b(scratch, Register(scratch), label,
579 cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
580 }
581
582 void MacroAssemblerLOONG64::ma_addPtrTestCarry(Condition cond, Register rd,
583 Register rj, Imm32 imm,
584 Label* label) {
585 SecondScratchRegisterScope scratch2(asMasm());
586
587 // Check for signed range because of as_addi_d
588 if (is_intN(imm.value, 12)) {
589 as_addi_d(rd, rj, imm.value);
590 as_sltui(scratch2, rd, imm.value);
591 ma_b(scratch2, scratch2, label,
592 cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
593 } else {
594 ma_li(scratch2, imm);
595 ma_addPtrTestCarry(cond, rd, rj, scratch2, label);
596 }
597 }
598
599 void MacroAssemblerLOONG64::ma_addPtrTestCarry(Condition cond, Register rd,
600 Register rj, ImmWord imm,
601 Label* label) {
602 SecondScratchRegisterScope scratch2(asMasm());
603
604 // Check for signed range because of as_addi_d
605 if (is_intN(imm.value, 12)) {
606 as_addi_d(rd, rj, imm.value);
607 as_sltui(scratch2, rd, imm.value);
608 ma_b(scratch2, scratch2, label,
609 cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
610 } else {
611 ma_li(scratch2, imm);
612 ma_addPtrTestCarry(cond, rd, rj, scratch2, label);
613 }
614 }
615
616 // Subtract.
617 void MacroAssemblerLOONG64::ma_sub_d(Register rd, Register rj, Imm32 imm) {
618 if (is_intN(-imm.value, 12)) {
619 as_addi_d(rd, rj, -imm.value);
620 } else {
621 ScratchRegisterScope scratch(asMasm());
622 ma_li(scratch, imm);
623 as_sub_d(rd, rj, scratch);
624 }
625 }
626
627 void MacroAssemblerLOONG64::ma_sub32TestOverflow(Register rd, Register rj,
628 Register rk, Label* overflow) {
629 ScratchRegisterScope scratch(asMasm());
630 as_sub_d(scratch, rj, rk);
631 as_sub_w(rd, rj, rk);
632 ma_b(rd, Register(scratch), overflow, Assembler::NotEqual);
633 }
634
635 void MacroAssemblerLOONG64::ma_subPtrTestOverflow(Register rd, Register rj,
636 Register rk,
637 Label* overflow) {
638 SecondScratchRegisterScope scratch2(asMasm());
639 MOZ_ASSERT_IF(rj == rd, rj != rk);
640 MOZ_ASSERT(rj != scratch2);
641 MOZ_ASSERT(rk != scratch2);
642 MOZ_ASSERT(rd != scratch2);
643
644 Register rj_copy = rj;
645
646 if (rj == rd) {
647 as_or(scratch2, rj, zero);
648 rj_copy = scratch2;
649 }
650
651 {
652 ScratchRegisterScope scratch(asMasm());
653 MOZ_ASSERT(rd != scratch);
654
655 as_sub_d(rd, rj, rk);
656 // If the signs of rj and rk are the same, there is no overflow.
657 as_xor(scratch, rj_copy, rk);
658 // Check whether the signs of rd and rj are the same.
659 as_xor(scratch2, rd, rj_copy);
660 as_and(scratch2, scratch2, scratch);
661 }
662
663 ma_b(scratch2, zero, overflow, Assembler::LessThan);
664 }
665
666 void MacroAssemblerLOONG64::ma_subPtrTestOverflow(Register rd, Register rj,
667 Imm32 imm, Label* overflow) {
668 // TODO(loong64): Check subPtrTestOverflow
669 MOZ_ASSERT(imm.value != INT32_MIN);
670 ma_addPtrTestOverflow(rd, rj, Imm32(-imm.value), overflow);
671 }
672
673 void MacroAssemblerLOONG64::ma_mul_d(Register rd, Register rj, Imm32 imm) {
674 // li handles the relocation.
675 ScratchRegisterScope scratch(asMasm());
676 MOZ_ASSERT(rj != scratch);
677 ma_li(scratch, imm);
678 as_mul_d(rd, rj, scratch);
679 }
680
681 void MacroAssemblerLOONG64::ma_mulh_d(Register rd, Register rj, Imm32 imm) {
682 // li handles the relocation.
683 ScratchRegisterScope scratch(asMasm());
684 MOZ_ASSERT(rj != scratch);
685 ma_li(scratch, imm);
686 as_mulh_d(rd, rj, scratch);
687 }
688
689 void MacroAssemblerLOONG64::ma_mulPtrTestOverflow(Register rd, Register rj,
690 Register rk,
691 Label* overflow) {
692 ScratchRegisterScope scratch(asMasm());
693 SecondScratchRegisterScope scratch2(asMasm());
694 MOZ_ASSERT(rd != scratch);
695
696 if (rd == rj) {
697 as_or(scratch, rj, zero);
698 rj = scratch;
699 rk = (rd == rk) ? rj : rk;
700 } else if (rd == rk) {
701 as_or(scratch, rk, zero);
702 rk = scratch;
703 }
704
705 as_mul_d(rd, rj, rk);
706 as_mulh_d(scratch, rj, rk);
707 as_srai_d(scratch2, rd, 63);
708 ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
709 }
710
711 // Memory.
712
713 void MacroAssemblerLOONG64::ma_load(Register dest, Address address,
714 LoadStoreSize size,
715 LoadStoreExtension extension) {
716 int32_t encodedOffset;
717 Register base;
718
719 // TODO: use as_ldx_b/h/w/d, could decrease as_add_d instr.
720 switch (size) {
721 case SizeByte:
722 case SizeHalfWord:
723 if (!is_intN(address.offset, 12)) {
724 ma_li(ScratchRegister, Imm32(address.offset));
725 as_add_d(ScratchRegister, address.base, ScratchRegister);
726 base = ScratchRegister;
727 encodedOffset = 0;
728 } else {
729 encodedOffset = address.offset;
730 base = address.base;
731 }
732
733 if (size == SizeByte) {
734 if (ZeroExtend == extension) {
735 as_ld_bu(dest, base, encodedOffset);
736 } else {
737 as_ld_b(dest, base, encodedOffset);
738 }
739 } else {
740 if (ZeroExtend == extension) {
741 as_ld_hu(dest, base, encodedOffset);
742 } else {
743 as_ld_h(dest, base, encodedOffset);
744 }
745 }
746 break;
747 case SizeWord:
748 case SizeDouble:
749 if ((address.offset & 0x3) == 0 &&
750 (size == SizeDouble ||
751 (size == SizeWord && SignExtend == extension))) {
752 if (!Imm16::IsInSignedRange(address.offset)) {
753 ma_li(ScratchRegister, Imm32(address.offset));
754 as_add_d(ScratchRegister, address.base, ScratchRegister);
755 base = ScratchRegister;
756 encodedOffset = 0;
757 } else {
758 encodedOffset = address.offset;
759 base = address.base;
760 }
761
762 if (size == SizeWord) {
763 as_ldptr_w(dest, base, encodedOffset);
764 } else {
765 as_ldptr_d(dest, base, encodedOffset);
766 }
767 } else {
768 if (!is_intN(address.offset, 12)) {
769 ma_li(ScratchRegister, Imm32(address.offset));
770 as_add_d(ScratchRegister, address.base, ScratchRegister);
771 base = ScratchRegister;
772 encodedOffset = 0;
773 } else {
774 encodedOffset = address.offset;
775 base = address.base;
776 }
777
778 if (size == SizeWord) {
779 if (ZeroExtend == extension) {
780 as_ld_wu(dest, base, encodedOffset);
781 } else {
782 as_ld_w(dest, base, encodedOffset);
783 }
784 } else {
785 as_ld_d(dest, base, encodedOffset);
786 }
787 }
788 break;
789 default:
790 MOZ_CRASH("Invalid argument for ma_load");
791 }
792 }
793
794 void MacroAssemblerLOONG64::ma_store(Register data, Address address,
795 LoadStoreSize size,
796 LoadStoreExtension extension) {
797 int32_t encodedOffset;
798 Register base;
799
800 // TODO: use as_stx_b/h/w/d, could decrease as_add_d instr.
801 switch (size) {
802 case SizeByte:
803 case SizeHalfWord:
804 if (!is_intN(address.offset, 12)) {
805 ma_li(ScratchRegister, Imm32(address.offset));
806 as_add_d(ScratchRegister, address.base, ScratchRegister);
807 base = ScratchRegister;
808 encodedOffset = 0;
809 } else {
810 encodedOffset = address.offset;
811 base = address.base;
812 }
813
814 if (size == SizeByte) {
815 as_st_b(data, base, encodedOffset);
816 } else {
817 as_st_h(data, base, encodedOffset);
818 }
819 break;
820 case SizeWord:
821 case SizeDouble:
822 if ((address.offset & 0x3) == 0) {
823 if (!Imm16::IsInSignedRange(address.offset)) {
824 ma_li(ScratchRegister, Imm32(address.offset));
825 as_add_d(ScratchRegister, address.base, ScratchRegister);
826 base = ScratchRegister;
827 encodedOffset = 0;
828 } else {
829 encodedOffset = address.offset;
830 base = address.base;
831 }
832
833 if (size == SizeWord) {
834 as_stptr_w(data, base, encodedOffset);
835 } else {
836 as_stptr_d(data, base, encodedOffset);
837 }
838 } else {
839 if (!is_intN(address.offset, 12)) {
840 ma_li(ScratchRegister, Imm32(address.offset));
841 as_add_d(ScratchRegister, address.base, ScratchRegister);
842 base = ScratchRegister;
843 encodedOffset = 0;
844 } else {
845 encodedOffset = address.offset;
846 base = address.base;
847 }
848
849 if (size == SizeWord) {
850 as_st_w(data, base, encodedOffset);
851 } else {
852 as_st_d(data, base, encodedOffset);
853 }
854 }
855 break;
856 default:
857 MOZ_CRASH("Invalid argument for ma_store");
858 }
859 }
860
861 void MacroAssemblerLOONG64Compat::computeScaledAddress(const BaseIndex& address,
862 Register dest) {
863 Register base = address.base;
864 Register index = address.index;
865 int32_t shift = Imm32::ShiftOf(address.scale).value;
866
867 if (shift) {
868 MOZ_ASSERT(shift <= 4);
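// alsl_d computes (index << (sa + 1)) + base with sa in [0, 3], so passing
// shift - 1 below covers scale factors of 2, 4, 8 and 16.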
869 as_alsl_d(dest, index, base, shift - 1);
870 } else {
871 as_add_d(dest, base, index);
872 }
873 }
874
875 void MacroAssemblerLOONG64::ma_pop(Register r) {
876 MOZ_ASSERT(r != StackPointer);
877 as_ld_d(r, StackPointer, 0);
878 as_addi_d(StackPointer, StackPointer, sizeof(intptr_t));
879 }
880
881 void MacroAssemblerLOONG64::ma_push(Register r) {
882 if (r == StackPointer) {
883 ScratchRegisterScope scratch(asMasm());
884 as_or(scratch, r, zero);
885 as_addi_d(StackPointer, StackPointer, (int32_t) - sizeof(intptr_t));
886 as_st_d(scratch, StackPointer, 0);
887 } else {
888 as_addi_d(StackPointer, StackPointer, (int32_t) - sizeof(intptr_t));
889 as_st_d(r, StackPointer, 0);
890 }
891 }
892
893 // Branches when done from within loongarch-specific code.
894 void MacroAssemblerLOONG64::ma_b(Register lhs, ImmWord imm, Label* label,
895 Condition c, JumpKind jumpKind) {
896 if (imm.value <= INT32_MAX) {
897 ma_b(lhs, Imm32(uint32_t(imm.value)), label, c, jumpKind);
898 } else {
899 ScratchRegisterScope scratch(asMasm());
900 MOZ_ASSERT(lhs != scratch);
901 ma_li(scratch, imm);
902 ma_b(lhs, Register(scratch), label, c, jumpKind);
903 }
904 }
905
906 void MacroAssemblerLOONG64::ma_b(Register lhs, Address addr, Label* label,
907 Condition c, JumpKind jumpKind) {
908 ScratchRegisterScope scratch(asMasm());
909 MOZ_ASSERT(lhs != scratch);
910 ma_ld_d(scratch, addr);
911 ma_b(lhs, Register(scratch), label, c, jumpKind);
912 }
913
914 void MacroAssemblerLOONG64::ma_b(Address addr, Imm32 imm, Label* label,
915 Condition c, JumpKind jumpKind) {
916 SecondScratchRegisterScope scratch2(asMasm());
917 ma_ld_d(scratch2, addr);
918 ma_b(Register(scratch2), imm, label, c, jumpKind);
919 }
920
921 void MacroAssemblerLOONG64::ma_b(Address addr, ImmGCPtr imm, Label* label,
922 Condition c, JumpKind jumpKind) {
923 SecondScratchRegisterScope scratch2(asMasm());
924 ma_ld_d(scratch2, addr);
925 ma_b(Register(scratch2), imm, label, c, jumpKind);
926 }
927
928 void MacroAssemblerLOONG64::ma_bl(Label* label) {
929 spew("branch .Llabel %p\n", label);
930 if (label->bound()) {
931 // Generate the long jump for calls because return address has to be
932 // the address after the reserved block.
933 addLongJump(nextOffset(), BufferOffset(label->offset()));
934 ScratchRegisterScope scratch(asMasm());
935 ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
936 as_jirl(ra, scratch, BOffImm16(0));
937 return;
938 }
939
940 // Second word holds a pointer to the next branch in label's chain.
941 uint32_t nextInChain =
942 label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
943
944 // Make the whole branch continuous in the buffer; the five
945 // instructions are written below.
946 m_buffer.ensureSpace(5 * sizeof(uint32_t));
947
948 spew("bal .Llabel %p\n", label);
949 BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
950 writeInst(nextInChain);
951 if (!oom()) {
952 label->use(bo.getOffset());
953 }
954 // Leave space for long jump.
955 as_nop();
956 as_nop();
957 as_nop();
958 }
959
960 void MacroAssemblerLOONG64::branchWithCode(InstImm code, Label* label,
961 JumpKind jumpKind) {
962 // Simply output the pointer of the label as its id; note that once the
963 // label's destructor has run, the pointer may be reused.
964 spew("branch .Llabel %p", label);
965 MOZ_ASSERT(code.encode() !=
966 InstImm(op_jirl, BOffImm16(0), zero, ra).encode());
967 InstImm inst_beq = InstImm(op_beq, BOffImm16(0), zero, zero);
968
969 if (label->bound()) {
970 int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
971
972 if (BOffImm16::IsInRange(offset)) {
973 jumpKind = ShortJump;
974 }
975
976 // ShortJump
977 if (jumpKind == ShortJump) {
978 MOZ_ASSERT(BOffImm16::IsInRange(offset));
979
980 if (code.extractBitField(31, 26) == ((uint32_t)op_bcz >> 26)) {
981 code.setImm21(offset);
982 } else {
983 code.setBOffImm16(BOffImm16(offset));
984 }
985 #ifdef JS_JITSPEW
986 decodeBranchInstAndSpew(code);
987 #endif
988 writeInst(code.encode());
989 return;
990 }
991
992 // LongJump
993 if (code.encode() == inst_beq.encode()) {
994 // Handle long jump
995 addLongJump(nextOffset(), BufferOffset(label->offset()));
996 ScratchRegisterScope scratch(asMasm());
997 ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
998 as_jirl(zero, scratch, BOffImm16(0)); // jr scratch
999 as_nop();
1000 return;
1001 }
1002
1003 // OpenLongJump
1004 // Handle a long conditional branch: the target offset is relative to this
1005 // instruction and points just past the nop emitted below.
1006 spew("invert branch .Llabel %p", label);
1007 InstImm code_r = invertBranch(code, BOffImm16(5 * sizeof(uint32_t)));
1008 #ifdef JS_JITSPEW
1009 decodeBranchInstAndSpew(code_r);
1010 #endif
1011 writeInst(code_r.encode());
1012 addLongJump(nextOffset(), BufferOffset(label->offset()));
1013 ScratchRegisterScope scratch(asMasm());
1014 ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
1015 as_jirl(zero, scratch, BOffImm16(0));
1016 as_nop();
1017 return;
1018 }
1019
1020 // Generate open jump and link it to a label.
1021
1022 // Second word holds a pointer to the next branch in label's chain.
1023 uint32_t nextInChain =
1024 label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
1025
1026 if (jumpKind == ShortJump) {
1027 // Make the whole branch continuous in the buffer.
1028 m_buffer.ensureSpace(2 * sizeof(uint32_t));
1029
1030 // Indicate that this is short jump with offset 4.
1031 code.setBOffImm16(BOffImm16(4));
1032 #ifdef JS_JITSPEW
1033 decodeBranchInstAndSpew(code);
1034 #endif
1035 BufferOffset bo = writeInst(code.encode());
1036 writeInst(nextInChain);
1037 if (!oom()) {
1038 label->use(bo.getOffset());
1039 }
1040 return;
1041 }
1042
1043 bool conditional = code.encode() != inst_beq.encode();
1044
1045 // Make the whole branch continuous in the buffer; the five
1046 // instructions are written below (including a conditional nop).
1047 m_buffer.ensureSpace(5 * sizeof(uint32_t));
1048
1049 #ifdef JS_JITSPEW
1050 decodeBranchInstAndSpew(code);
1051 #endif
1052 BufferOffset bo = writeInst(code.encode()); // invert
1053 writeInst(nextInChain);
1054 if (!oom()) {
1055 label->use(bo.getOffset());
1056 }
1057 // Leave space for potential long jump.
1058 as_nop();
1059 as_nop();
1060 if (conditional) {
1061 as_nop();
1062 }
1063 }
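// Summary of the layouts emitted above (descriptive comment only): a bound
// label beyond short-branch range becomes either a plain long jump
// (ma_liPatchable + jirl + nop) or, for a conditional branch, an inverted
// short branch that skips that 5-word block; an unbound label records the
// branch word, a word holding the next offset in the label's chain, and two
// or three reserved nops that bind() can later patch into a long jump.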
1064
1065 void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, ImmWord imm,
1066 Condition c) {
1067 if (imm.value <= INT32_MAX) {
1068 ma_cmp_set(rd, rj, Imm32(uint32_t(imm.value)), c);
1069 } else {
1070 ScratchRegisterScope scratch(asMasm());
1071 ma_li(scratch, imm);
1072 ma_cmp_set(rd, rj, scratch, c);
1073 }
1074 }
1075
1076 void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, ImmPtr imm,
1077 Condition c) {
1078 ma_cmp_set(rd, rj, ImmWord(uintptr_t(imm.value)), c);
1079 }
1080
1081 void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Address address, Imm32 imm,
1082 Condition c) {
1083 // TODO(loong64): 32-bit ma_cmp_set?
1084 SecondScratchRegisterScope scratch2(asMasm());
1085 ma_ld_w(scratch2, address);
1086 ma_cmp_set(rd, Register(scratch2), imm, c);
1087 }
1088
1089 void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Address address,
1090 ImmWord imm, Condition c) {
1091 SecondScratchRegisterScope scratch2(asMasm());
1092 ma_ld_d(scratch2, address);
1093 ma_cmp_set(rd, Register(scratch2), imm, c);
1094 }
1095
1096 // fp instructions
1097 void MacroAssemblerLOONG64::ma_lid(FloatRegister dest, double value) {
1098 ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));
1099
1100 if (imm.value != 0) {
1101 ScratchRegisterScope scratch(asMasm());
1102 ma_li(scratch, imm);
1103 moveToDouble(scratch, dest);
1104 } else {
1105 moveToDouble(zero, dest);
1106 }
1107 }
1108
1109 void MacroAssemblerLOONG64::ma_mv(FloatRegister src, ValueOperand dest) {
1110 as_movfr2gr_d(dest.valueReg(), src);
1111 }
1112
1113 void MacroAssemblerLOONG64::ma_mv(ValueOperand src, FloatRegister dest) {
1114 as_movgr2fr_d(dest, src.valueReg());
1115 }
1116
1117 void MacroAssemblerLOONG64::ma_fld_s(FloatRegister dest, Address address) {
1118 int32_t offset = address.offset;
1119 Register base = address.base;
1120
1121 if (is_intN(offset, 12)) {
1122 as_fld_s(dest, base, offset);
1123 } else {
1124 ScratchRegisterScope scratch(asMasm());
1125 MOZ_ASSERT(base != scratch);
1126 ma_li(scratch, Imm32(offset));
1127 as_fldx_s(dest, base, scratch);
1128 }
1129 }
1130
1131 void MacroAssemblerLOONG64::ma_fld_d(FloatRegister dest, Address address) {
1132 int32_t offset = address.offset;
1133 Register base = address.base;
1134
1135 if (is_intN(offset, 12)) {
1136 as_fld_d(dest, base, offset);
1137 } else {
1138 ScratchRegisterScope scratch(asMasm());
1139 MOZ_ASSERT(base != scratch);
1140 ma_li(scratch, Imm32(offset));
1141 as_fldx_d(dest, base, scratch);
1142 }
1143 }
1144
1145 void MacroAssemblerLOONG64::ma_fst_s(FloatRegister src, Address address) {
1146 int32_t offset = address.offset;
1147 Register base = address.base;
1148
1149 if (is_intN(offset, 12)) {
1150 as_fst_s(src, base, offset);
1151 } else {
1152 ScratchRegisterScope scratch(asMasm());
1153 MOZ_ASSERT(base != scratch);
1154 ma_li(scratch, Imm32(offset));
1155 as_fstx_s(src, base, scratch);
1156 }
1157 }
1158
1159 void MacroAssemblerLOONG64::ma_fst_d(FloatRegister src, Address address) {
1160 int32_t offset = address.offset;
1161 Register base = address.base;
1162
1163 if (is_intN(offset, 12)) {
1164 as_fst_d(src, base, offset);
1165 } else {
1166 ScratchRegisterScope scratch(asMasm());
1167 MOZ_ASSERT(base != scratch);
1168 ma_li(scratch, Imm32(offset));
1169 as_fstx_d(src, base, scratch);
1170 }
1171 }
1172
1173 void MacroAssemblerLOONG64::ma_pop(FloatRegister f) {
1174 as_fld_d(f, StackPointer, 0);
1175 as_addi_d(StackPointer, StackPointer, sizeof(double));
1176 }
1177
1178 void MacroAssemblerLOONG64::ma_push(FloatRegister f) {
1179 as_addi_d(StackPointer, StackPointer, (int32_t) - sizeof(double));
1180 as_fst_d(f, StackPointer, 0);
1181 }
1182
1183 void MacroAssemblerLOONG64::ma_li(Register dest, ImmGCPtr ptr) {
1184 writeDataRelocation(ptr);
1185 asMasm().ma_liPatchable(dest, ImmPtr(ptr.value));
1186 }
1187
1188 void MacroAssemblerLOONG64::ma_li(Register dest, Imm32 imm) {
1189 if (is_intN(imm.value, 12)) {
1190 as_addi_w(dest, zero, imm.value);
1191 } else if (is_uintN(imm.value, 12)) {
1192 as_ori(dest, zero, imm.value & 0xfff);
1193 } else {
1194 as_lu12i_w(dest, imm.value >> 12 & 0xfffff);
1195 if (imm.value & 0xfff) {
1196 as_ori(dest, dest, imm.value & 0xfff);
1197 }
1198 }
1199 }
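// For example (illustrative values): -5 fits in 12 signed bits and becomes a
// single addi_w; 0x800 only fits the unsigned 12-bit form and becomes ori;
// a larger constant such as 0xabcd000 needs lu12i_w (here with 0xabcd) and
// skips the ori because its low 12 bits are zero.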
1200
1201 // This method generates lu12i_w and ori instruction pair that can be modified
1202 // by UpdateLuiOriValue, either during compilation (eg. Assembler::bind), or
1203 // during execution (eg. jit::PatchJump).
1204 void MacroAssemblerLOONG64::ma_liPatchable(Register dest, Imm32 imm) {
1205 m_buffer.ensureSpace(2 * sizeof(uint32_t));
1206 as_lu12i_w(dest, imm.value >> 12 & 0xfffff);
1207 as_ori(dest, dest, imm.value & 0xfff);
1208 }
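// For example (illustrative value): imm = 0x89abcdef is emitted as
// lu12i_w(dest, 0x89abc) followed by ori(dest, dest, 0xdef); bind() or a jump
// patcher can later rewrite both fields in place via UpdateLuiOriValue.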
1209
1210 void MacroAssemblerLOONG64::ma_fmovz(FloatFormat fmt, FloatRegister fd,
1211 FloatRegister fj, Register rk) {
1212 Label done;
1213 ma_b(rk, zero, &done, Assembler::NotEqual);
1214 if (fmt == SingleFloat) {
1215 as_fmov_s(fd, fj);
1216 } else {
1217 as_fmov_d(fd, fj);
1218 }
1219 bind(&done);
1220 }
1221
1222 void MacroAssemblerLOONG64::ma_fmovn(FloatFormat fmt, FloatRegister fd,
1223 FloatRegister fj, Register rk) {
1224 Label done;
1225 ma_b(rk, zero, &done, Assembler::Equal);
1226 if (fmt == SingleFloat) {
1227 as_fmov_s(fd, fj);
1228 } else {
1229 as_fmov_d(fd, fj);
1230 }
1231 bind(&done);
1232 }
1233
1234 void MacroAssemblerLOONG64::ma_and(Register rd, Register rj, Imm32 imm,
1235 bool bit32) {
1236 if (is_uintN(imm.value, 12)) {
1237 as_andi(rd, rj, imm.value);
1238 } else if (rd != rj) {
1239 ma_li(rd, imm);
1240 as_and(rd, rj, rd);
1241 } else {
1242 ScratchRegisterScope scratch(asMasm());
1243 MOZ_ASSERT(rj != scratch);
1244 ma_li(scratch, imm);
1245 as_and(rd, rj, scratch);
1246 }
1247 }
1248
1249 void MacroAssemblerLOONG64::ma_or(Register rd, Register rj, Imm32 imm,
1250 bool bit32) {
1251 if (is_uintN(imm.value, 12)) {
1252 as_ori(rd, rj, imm.value);
1253 } else {
1254 ScratchRegisterScope scratch(asMasm());
1255 MOZ_ASSERT(rj != scratch);
1256 ma_li(scratch, imm);
1257 as_or(rd, rj, scratch);
1258 }
1259 }
1260
1261 void MacroAssemblerLOONG64::ma_xor(Register rd, Register rj, Imm32 imm,
1262 bool bit32) {
1263 if (is_uintN(imm.value, 12)) {
1264 as_xori(rd, rj, imm.value);
1265 } else {
1266 ScratchRegisterScope scratch(asMasm());
1267 MOZ_ASSERT(rj != scratch);
1268 ma_li(scratch, imm);
1269 as_xor(rd, rj, scratch);
1270 }
1271 }
1272
1273 // Arithmetic-based ops.
1274
1275 // Add.
1276 void MacroAssemblerLOONG64::ma_add_w(Register rd, Register rj, Imm32 imm) {
1277 if (is_intN(imm.value, 12)) {
1278 as_addi_w(rd, rj, imm.value);
1279 } else {
1280 ScratchRegisterScope scratch(asMasm());
1281 MOZ_ASSERT(rj != scratch);
1282 ma_li(scratch, imm);
1283 as_add_w(rd, rj, scratch);
1284 }
1285 }
1286
1287 void MacroAssemblerLOONG64::ma_add32TestCarry(Condition cond, Register rd,
1288 Register rj, Register rk,
1289 Label* overflow) {
1290 MOZ_ASSERT(cond == Assembler::CarrySet || cond == Assembler::CarryClear);
1291 MOZ_ASSERT_IF(rd == rj, rk != rd);
1292 ScratchRegisterScope scratch(asMasm());
1293 as_add_w(rd, rj, rk);
1294 as_sltu(scratch, rd, rd == rj ? rk : rj);
1295 ma_b(Register(scratch), Register(scratch), overflow,
1296 cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
1297 }
1298
1299 void MacroAssemblerLOONG64::ma_add32TestCarry(Condition cond, Register rd,
1300 Register rj, Imm32 imm,
1301 Label* overflow) {
1302 SecondScratchRegisterScope scratch2(asMasm());
1303 MOZ_ASSERT(rj != scratch2);
1304 ma_li(scratch2, imm);
1305 ma_add32TestCarry(cond, rd, rj, scratch2, overflow);
1306 }
1307
1308 // Subtract.
1309 void MacroAssemblerLOONG64::ma_sub_w(Register rd, Register rj, Imm32 imm) {
1310 if (is_intN(-imm.value, 12)) {
1311 as_addi_w(rd, rj, -imm.value);
1312 } else {
1313 ScratchRegisterScope scratch(asMasm());
1314 MOZ_ASSERT(rj != scratch);
1315 ma_li(scratch, imm);
1316 as_sub_w(rd, rj, scratch);
1317 }
1318 }
1319
1320 void MacroAssemblerLOONG64::ma_sub_w(Register rd, Register rj, Register rk) {
1321 as_sub_w(rd, rj, rk);
1322 }
1323
1324 void MacroAssemblerLOONG64::ma_sub32TestOverflow(Register rd, Register rj,
1325 Imm32 imm, Label* overflow) {
1326 if (imm.value != INT32_MIN) {
1327 asMasm().ma_add32TestOverflow(rd, rj, Imm32(-imm.value), overflow);
1328 } else {
1329 ScratchRegisterScope scratch(asMasm());
1330 MOZ_ASSERT(rj != scratch);
1331 ma_li(scratch, Imm32(imm.value));
1332 asMasm().ma_sub32TestOverflow(rd, rj, scratch, overflow);
1333 }
1334 }
1335
1336 void MacroAssemblerLOONG64::ma_mul(Register rd, Register rj, Imm32 imm) {
1337 ScratchRegisterScope scratch(asMasm());
1338 MOZ_ASSERT(rj != scratch);
1339 ma_li(scratch, imm);
1340 as_mul_w(rd, rj, scratch);
1341 }
1342
1343 void MacroAssemblerLOONG64::ma_mul32TestOverflow(Register rd, Register rj,
1344 Register rk, Label* overflow) {
1345 ScratchRegisterScope scratch(asMasm());
1346 SecondScratchRegisterScope scratch2(asMasm());
1347 as_mulh_w(scratch, rj, rk);
1348 as_mul_w(rd, rj, rk);
1349 as_srai_w(scratch2, rd, 31);
1350 ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
1351 }
1352
1353 void MacroAssemblerLOONG64::ma_mul32TestOverflow(Register rd, Register rj,
1354 Imm32 imm, Label* overflow) {
1355 ScratchRegisterScope scratch(asMasm());
1356 SecondScratchRegisterScope scratch2(asMasm());
1357 ma_li(scratch, imm);
1358 as_mulh_w(scratch2, rj, scratch);
1359 as_mul_w(rd, rj, scratch);
1360 as_srai_w(scratch, rd, 31);
1361 ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
1362 }
1363
1364 void MacroAssemblerLOONG64::ma_div_branch_overflow(Register rd, Register rj,
1365 Register rk,
1366 Label* overflow) {
1367 ScratchRegisterScope scratch(asMasm());
1368 as_mod_w(scratch, rj, rk);
1369 ma_b(scratch, scratch, overflow, Assembler::NonZero);
1370 as_div_w(rd, rj, rk);
1371 }
1372
1373 void MacroAssemblerLOONG64::ma_div_branch_overflow(Register rd, Register rj,
1374 Imm32 imm, Label* overflow) {
1375 SecondScratchRegisterScope scratch2(asMasm());
1376 ma_li(scratch2, imm);
1377 ma_div_branch_overflow(rd, rj, scratch2, overflow);
1378 }
1379
1380 void MacroAssemblerLOONG64::ma_mod_mask(Register src, Register dest,
1381 Register hold, Register remain,
1382 int32_t shift, Label* negZero) {
1383 // MATH:
1384 // We wish to compute x % ((1 << y) - 1) for a known constant, y.
1385 // First, let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit
1386 // dividend as a number in base b, namely
1387 // c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
1388 // now, since both addition and multiplication commute with modulus,
1389 // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
1390 // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
1391 // now, since b == C + 1, b % C == 1, and b^n % C == 1
1392 // this means that the whole thing simplifies to:
1393 // c_0 + c_1 + c_2 ... c_n % C
1394 // each c_n can easily be computed by a shift/bitextract, and the modulus
1395 // can be maintained by simply subtracting C whenever the number reaches
1396 // C or more.
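// Concrete example (arbitrary values): with shift = 4, C = 15 and
// x = 0x1234 = 4660, the base-16 digits 1 + 2 + 3 + 4 sum to 10 and indeed
// 4660 % 15 == 10; the loop below accumulates exactly those digit sums,
// folding back by C whenever the partial sum reaches C or more.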
1397 int32_t mask = (1 << shift) - 1;
1398 Label head, negative, sumSigned, done;
1399
1400 // hold holds -1 if the value was negative, 1 otherwise.
1401 // remain holds the remaining bits that have not been processed
1402 // SecondScratchReg serves as a temporary location to store extracted bits
1403 // into, as well as holding the trial subtraction as a temp value; dest is
1404 // the accumulator (and holds the final result).
1405
1406 // move the whole value into the remain.
1407 as_or(remain, src, zero);
1408 // Zero out the dest.
1409 ma_li(dest, Imm32(0));
1410 // Set the hold appropriately.
1411 ma_b(remain, remain, &negative, Signed, ShortJump);
1412 ma_li(hold, Imm32(1));
1413 ma_b(&head, ShortJump);
1414
1415 bind(&negative);
1416 ma_li(hold, Imm32(-1));
1417 as_sub_w(remain, zero, remain);
1418
1419 // Begin the main loop.
1420 bind(&head);
1421
1422 SecondScratchRegisterScope scratch2(asMasm());
1423 // Extract the bottom bits into SecondScratchReg.
1424 ma_and(scratch2, remain, Imm32(mask));
1425 // Add those bits to the accumulator.
1426 as_add_w(dest, dest, scratch2);
1427 // Do a trial subtraction
1428 ma_sub_w(scratch2, dest, Imm32(mask));
1429 // If (sum - C) >= 0, store sum - C back into sum, thus performing a
1430 // modulus.
1431 ma_b(scratch2, Register(scratch2), &sumSigned, Signed, ShortJump);
1432 as_or(dest, scratch2, zero);
1433 bind(&sumSigned);
1434 // Get rid of the bits that we extracted before.
1435 as_srli_w(remain, remain, shift);
1436 // If the shift produced zero, finish, otherwise, continue in the loop.
1437 ma_b(remain, remain, &head, NonZero, ShortJump);
1438 // Check the hold to see if we need to negate the result.
1439 ma_b(hold, hold, &done, NotSigned, ShortJump);
1440
1441 // hold is negative here (the input was negative), so negate the result
1442 // to match what JS expects.
1443 if (negZero != nullptr) {
1444 // Jump out in case of negative zero.
1445 ma_b(hold, hold, negZero, Zero);
1446 as_sub_w(dest, zero, dest);
1447 } else {
1448 as_sub_w(dest, zero, dest);
1449 }
1450
1451 bind(&done);
1452 }
1453
1454 // Memory.
1455
1456 void MacroAssemblerLOONG64::ma_load(Register dest, const BaseIndex& src,
1457 LoadStoreSize size,
1458 LoadStoreExtension extension) {
1459 SecondScratchRegisterScope scratch2(asMasm());
1460 asMasm().computeScaledAddress(src, scratch2);
1461 asMasm().ma_load(dest, Address(scratch2, src.offset), size, extension);
1462 }
1463
1464 void MacroAssemblerLOONG64::ma_store(Register data, const BaseIndex& dest,
1465 LoadStoreSize size,
1466 LoadStoreExtension extension) {
1467 SecondScratchRegisterScope scratch2(asMasm());
1468 asMasm().computeScaledAddress(dest, scratch2);
1469 asMasm().ma_store(data, Address(scratch2, dest.offset), size, extension);
1470 }
1471
1472 void MacroAssemblerLOONG64::ma_store(Imm32 imm, const BaseIndex& dest,
1473 LoadStoreSize size,
1474 LoadStoreExtension extension) {
1475 SecondScratchRegisterScope scratch2(asMasm());
1476 // Make sure that scratch2 contains absolute address so that offset is 0.
1477 asMasm().computeEffectiveAddress(dest, scratch2);
1478
1479 ScratchRegisterScope scratch(asMasm());
1480 // The scratch register is free now; use it to load the immediate value.
1481 ma_li(scratch, imm);
1482
1483 // With offset = 0, ScratchRegister will not be used inside ma_store(),
1484 // so we can use it as a parameter here.
1485 asMasm().ma_store(scratch, Address(scratch2, 0), size, extension);
1486 }
1487
1488 // Branches when done from within loongarch-specific code.
1489 // TODO(loong64) Optimize ma_b
1490 void MacroAssemblerLOONG64::ma_b(Register lhs, Register rhs, Label* label,
1491 Condition c, JumpKind jumpKind) {
1492 switch (c) {
1493 case Equal:
1494 case NotEqual:
1495 asMasm().branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
1496 break;
1497 case Always:
1498 ma_b(label, jumpKind);
1499 break;
1500 case Zero:
1501 case NonZero:
1502 case Signed:
1503 case NotSigned:
1504 MOZ_ASSERT(lhs == rhs);
1505 asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
1506 break;
1507 default: {
1508 Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
1509 asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
1510 jumpKind);
1511 break;
1512 }
1513 }
1514 }
1515
1516 void MacroAssemblerLOONG64::ma_b(Register lhs, Imm32 imm, Label* label,
1517 Condition c, JumpKind jumpKind) {
1518 MOZ_ASSERT(c != Overflow);
1519 if (imm.value == 0) {
1520 if (c == Always || c == AboveOrEqual) {
1521 ma_b(label, jumpKind);
1522 } else if (c == Below) {
1523 ; // This condition is always false. No branch required.
1524 } else {
1525 asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
1526 }
1527 } else {
1528 switch (c) {
1529 case Equal:
1530 case NotEqual:
1531 MOZ_ASSERT(lhs != ScratchRegister);
1532 ma_li(ScratchRegister, imm);
1533 ma_b(lhs, ScratchRegister, label, c, jumpKind);
1534 break;
1535 default:
1536 Condition cond = ma_cmp(ScratchRegister, lhs, imm, c);
1537 asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
1538 jumpKind);
1539 }
1540 }
1541 }
1542
1543 void MacroAssemblerLOONG64::ma_b(Register lhs, ImmPtr imm, Label* l,
1544 Condition c, JumpKind jumpKind) {
1545 asMasm().ma_b(lhs, ImmWord(uintptr_t(imm.value)), l, c, jumpKind);
1546 }
1547
1548 void MacroAssemblerLOONG64::ma_b(Label* label, JumpKind jumpKind) {
1549 asMasm().branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
1550 }
1551
1552 Assembler::Condition MacroAssemblerLOONG64::ma_cmp(Register dest, Register lhs,
1553 Register rhs, Condition c) {
1554 switch (c) {
1555 case Above:
1556 // bgtu s,t,label =>
1557 // sltu at,t,s
1558 // bne at,$zero,offs
1559 as_sltu(dest, rhs, lhs);
1560 return NotEqual;
1561 case AboveOrEqual:
1562 // bgeu s,t,label =>
1563 // sltu at,s,t
1564 // beq at,$zero,offs
1565 as_sltu(dest, lhs, rhs);
1566 return Equal;
1567 case Below:
1568 // bltu s,t,label =>
1569 // sltu at,s,t
1570 // bne at,$zero,offs
1571 as_sltu(dest, lhs, rhs);
1572 return NotEqual;
1573 case BelowOrEqual:
1574 // bleu s,t,label =>
1575 // sltu at,t,s
1576 // beq at,$zero,offs
1577 as_sltu(dest, rhs, lhs);
1578 return Equal;
1579 case GreaterThan:
1580 // bgt s,t,label =>
1581 // slt at,t,s
1582 // bne at,$zero,offs
1583 as_slt(dest, rhs, lhs);
1584 return NotEqual;
1585 case GreaterThanOrEqual:
1586 // bge s,t,label =>
1587 // slt at,s,t
1588 // beq at,$zero,offs
1589 as_slt(dest, lhs, rhs);
1590 return Equal;
1591 case LessThan:
1592 // blt s,t,label =>
1593 // slt at,s,t
1594 // bne at,$zero,offs
1595 as_slt(dest, lhs, rhs);
1596 return NotEqual;
1597 case LessThanOrEqual:
1598 // ble s,t,label =>
1599 // slt at,t,s
1600 // beq at,$zero,offs
1601 as_slt(dest, rhs, lhs);
1602 return Equal;
1603 default:
1604 MOZ_CRASH("Invalid condition.");
1605 }
1606 return Always;
1607 }
1608
1609 Assembler::Condition MacroAssemblerLOONG64::ma_cmp(Register dest, Register lhs,
1610 Imm32 imm, Condition c) {
1611 ScratchRegisterScope scratch(asMasm());
1612 MOZ_RELEASE_ASSERT(lhs != scratch);
1613
1614 switch (c) {
1615 case Above:
1616 case BelowOrEqual:
1617 if (imm.value != 0x7fffffff && is_intN(imm.value + 1, 12) &&
1618 imm.value != -1) {
1619 // lhs <= rhs via lhs < rhs + 1 if rhs + 1 does not overflow
1620 as_sltui(dest, lhs, imm.value + 1);
1621
1622 return (c == BelowOrEqual ? NotEqual : Equal);
1623 } else {
1624 ma_li(scratch, imm);
1625 as_sltu(dest, scratch, lhs);
1626 return (c == BelowOrEqual ? Equal : NotEqual);
1627 }
1628 case AboveOrEqual:
1629 case Below:
1630 if (is_intN(imm.value, 12)) {
1631 as_sltui(dest, lhs, imm.value);
1632 } else {
1633 ma_li(scratch, imm);
1634 as_sltu(dest, lhs, scratch);
1635 }
1636 return (c == AboveOrEqual ? Equal : NotEqual);
1637 case GreaterThan:
1638 case LessThanOrEqual:
1639 if (imm.value != 0x7fffffff && is_intN(imm.value + 1, 12)) {
1640 // lhs <= rhs via lhs < rhs + 1.
1641 as_slti(dest, lhs, imm.value + 1);
1642 return (c == LessThanOrEqual ? NotEqual : Equal);
1643 } else {
1644 ma_li(scratch, imm);
1645 as_slt(dest, scratch, lhs);
1646 return (c == LessThanOrEqual ? Equal : NotEqual);
1647 }
1648 case GreaterThanOrEqual:
1649 case LessThan:
1650 if (is_intN(imm.value, 12)) {
1651 as_slti(dest, lhs, imm.value);
1652 } else {
1653 ma_li(scratch, imm);
1654 as_slt(dest, lhs, scratch);
1655 }
1656 return (c == GreaterThanOrEqual ? Equal : NotEqual);
1657 default:
1658 MOZ_CRASH("Invalid condition.");
1659 }
1660 return Always;
1661 }
1662
1663 // fp instructions
1664 void MacroAssemblerLOONG64::ma_lis(FloatRegister dest, float value) {
1665 Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
1666
1667 if (imm.value != 0) {
1668 ScratchRegisterScope scratch(asMasm());
1669 ma_li(scratch, imm);
1670 moveToFloat32(scratch, dest);
1671 } else {
1672 moveToFloat32(zero, dest);
1673 }
1674 }
1675
1676 void MacroAssemblerLOONG64::ma_fst_d(FloatRegister ft, BaseIndex address) {
1677 SecondScratchRegisterScope scratch2(asMasm());
1678 asMasm().computeScaledAddress(address, scratch2);
1679 asMasm().ma_fst_d(ft, Address(scratch2, address.offset));
1680 }
1681
1682 void MacroAssemblerLOONG64::ma_fst_s(FloatRegister ft, BaseIndex address) {
1683 SecondScratchRegisterScope scratch2(asMasm());
1684 asMasm().computeScaledAddress(address, scratch2);
1685 asMasm().ma_fst_s(ft, Address(scratch2, address.offset));
1686 }
1687
1688 void MacroAssemblerLOONG64::ma_fld_d(FloatRegister ft, const BaseIndex& src) {
1689 SecondScratchRegisterScope scratch2(asMasm());
1690 asMasm().computeScaledAddress(src, scratch2);
1691 asMasm().ma_fld_d(ft, Address(scratch2, src.offset));
1692 }
1693
1694 void MacroAssemblerLOONG64::ma_fld_s(FloatRegister ft, const BaseIndex& src) {
1695 SecondScratchRegisterScope scratch2(asMasm());
1696 asMasm().computeScaledAddress(src, scratch2);
1697 asMasm().ma_fld_s(ft, Address(scratch2, src.offset));
1698 }
1699
1700 void MacroAssemblerLOONG64::ma_bc_s(FloatRegister lhs, FloatRegister rhs,
1701 Label* label, DoubleCondition c,
1702 JumpKind jumpKind, FPConditionBit fcc) {
1703 compareFloatingPoint(SingleFloat, lhs, rhs, c, fcc);
1704 asMasm().branchWithCode(getBranchCode(fcc), label, jumpKind);
1705 }
1706
1707 void MacroAssemblerLOONG64::ma_bc_d(FloatRegister lhs, FloatRegister rhs,
1708 Label* label, DoubleCondition c,
1709 JumpKind jumpKind, FPConditionBit fcc) {
1710 compareFloatingPoint(DoubleFloat, lhs, rhs, c, fcc);
1711 asMasm().branchWithCode(getBranchCode(fcc), label, jumpKind);
1712 }
1713
1714 void MacroAssemblerLOONG64::ma_call(ImmPtr dest) {
1715 asMasm().ma_liPatchable(CallReg, dest);
1716 as_jirl(ra, CallReg, BOffImm16(0));
1717 }
1718
1719 void MacroAssemblerLOONG64::ma_jump(ImmPtr dest) {
1720 ScratchRegisterScope scratch(asMasm());
1721 asMasm().ma_liPatchable(scratch, dest);
1722 as_jirl(zero, scratch, BOffImm16(0));
1723 }
1724
1725 void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, Register rk,
1726 Condition c) {
1727 switch (c) {
1728 case Equal:
1729 // seq d,s,t =>
1730 // xor d,s,t
1731 // sltiu d,d,1
1732 as_xor(rd, rj, rk);
1733 as_sltui(rd, rd, 1);
1734 break;
1735 case NotEqual:
1736 // sne d,s,t =>
1737 // xor d,s,t
1738 // sltu d,$zero,d
1739 as_xor(rd, rj, rk);
1740 as_sltu(rd, zero, rd);
1741 break;
1742 case Above:
1743 // sgtu d,s,t =>
1744 // sltu d,t,s
1745 as_sltu(rd, rk, rj);
1746 break;
1747 case AboveOrEqual:
1748 // sgeu d,s,t =>
1749 // sltu d,s,t
1750 // xori d,d,1
1751 as_sltu(rd, rj, rk);
1752 as_xori(rd, rd, 1);
1753 break;
1754 case Below:
1755 // sltu d,s,t
1756 as_sltu(rd, rj, rk);
1757 break;
1758 case BelowOrEqual:
1759 // sleu d,s,t =>
1760 // sltu d,t,s
1761 // xori d,d,1
1762 as_sltu(rd, rk, rj);
1763 as_xori(rd, rd, 1);
1764 break;
1765 case GreaterThan:
1766 // sgt d,s,t =>
1767 // slt d,t,s
1768 as_slt(rd, rk, rj);
1769 break;
1770 case GreaterThanOrEqual:
1771 // sge d,s,t =>
1772 // slt d,s,t
1773 // xori d,d,1
1774 as_slt(rd, rj, rk);
1775 as_xori(rd, rd, 1);
1776 break;
1777 case LessThan:
1778 // slt d,s,t
1779 as_slt(rd, rj, rk);
1780 break;
1781 case LessThanOrEqual:
1782 // sle d,s,t =>
1783 // slt d,t,s
1784 // xori d,d,1
1785 as_slt(rd, rk, rj);
1786 as_xori(rd, rd, 1);
1787 break;
1788 case Zero:
1789 MOZ_ASSERT(rj == rk);
1790 // seq d,s,$zero =>
1791 // sltiu d,s,1
1792 as_sltui(rd, rj, 1);
1793 break;
1794 case NonZero:
1795 MOZ_ASSERT(rj == rk);
1796 // sne d,s,$zero =>
1797 // sltu d,$zero,s
1798 as_sltu(rd, zero, rj);
1799 break;
1800 case Signed:
1801 MOZ_ASSERT(rj == rk);
1802 as_slt(rd, rj, zero);
1803 break;
1804 case NotSigned:
1805 MOZ_ASSERT(rj == rk);
1806 // sge d,s,$zero =>
1807 // slt d,s,$zero
1808 // xori d,d,1
1809 as_slt(rd, rj, zero);
1810 as_xori(rd, rd, 1);
1811 break;
1812 default:
1813 MOZ_CRASH("Invalid condition.");
1814 }
1815 }
1816
1817 void MacroAssemblerLOONG64::ma_cmp_set_double(Register dest, FloatRegister lhs,
1818 FloatRegister rhs,
1819 DoubleCondition c) {
1820 compareFloatingPoint(DoubleFloat, lhs, rhs, c);
1821 as_movcf2gr(dest, FCC0);
1822 }
1823
1824 void MacroAssemblerLOONG64::ma_cmp_set_float32(Register dest, FloatRegister lhs,
1825 FloatRegister rhs,
1826 DoubleCondition c) {
1827 compareFloatingPoint(SingleFloat, lhs, rhs, c);
1828 as_movcf2gr(dest, FCC0);
1829 }
1830
1831 void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, Imm32 imm,
1832 Condition c) {
1833 if (imm.value == 0) {
1834 switch (c) {
1835 case Equal:
1836 case BelowOrEqual:
1837 as_sltui(rd, rj, 1);
1838 break;
1839 case NotEqual:
1840 case Above:
1841 as_sltu(rd, zero, rj);
1842 break;
1843 case AboveOrEqual:
1844 case Below:
1845 as_ori(rd, zero, c == AboveOrEqual ? 1 : 0);
1846 break;
1847 case GreaterThan:
1848 case LessThanOrEqual:
1849 as_slt(rd, zero, rj);
1850 if (c == LessThanOrEqual) {
1851 as_xori(rd, rd, 1);
1852 }
1853 break;
1854 case LessThan:
1855 case GreaterThanOrEqual:
1856 as_slt(rd, rj, zero);
1857 if (c == GreaterThanOrEqual) {
1858 as_xori(rd, rd, 1);
1859 }
1860 break;
1861 case Zero:
1862 as_sltui(rd, rj, 1);
1863 break;
1864 case NonZero:
1865 as_sltu(rd, zero, rj);
1866 break;
1867 case Signed:
1868 as_slt(rd, rj, zero);
1869 break;
1870 case NotSigned:
1871 as_slt(rd, rj, zero);
1872 as_xori(rd, rd, 1);
1873 break;
1874 default:
1875 MOZ_CRASH("Invalid condition.");
1876 }
1877 return;
1878 }
1879
1880 switch (c) {
1881 case Equal:
1882 case NotEqual:
1883 ma_xor(rd, rj, imm);
1884 if (c == Equal) {
1885 as_sltui(rd, rd, 1);
1886 } else {
1887 as_sltu(rd, zero, rd);
1888 }
1889 break;
1890 case Zero:
1891 case NonZero:
1892 case Signed:
1893 case NotSigned:
1894 MOZ_CRASH("Invalid condition.");
1895 default:
1896 Condition cond = ma_cmp(rd, rj, imm, c);
1897 MOZ_ASSERT(cond == Equal || cond == NotEqual);
1898
1899 if (cond == Equal) as_xori(rd, rd, 1);
1900 }
1901 }
1902
1903 void MacroAssemblerLOONG64::compareFloatingPoint(FloatFormat fmt,
1904 FloatRegister lhs,
1905 FloatRegister rhs,
1906 DoubleCondition c,
1907 FPConditionBit fcc) {
1908 switch (c) {
1909 case DoubleOrdered:
1910 as_fcmp_cor(fmt, lhs, rhs, fcc);
1911 break;
1912 case DoubleEqual:
1913 as_fcmp_ceq(fmt, lhs, rhs, fcc);
1914 break;
1915 case DoubleNotEqual:
1916 as_fcmp_cne(fmt, lhs, rhs, fcc);
1917 break;
1918 case DoubleGreaterThan:
1919 as_fcmp_clt(fmt, rhs, lhs, fcc);
1920 break;
1921 case DoubleGreaterThanOrEqual:
1922 as_fcmp_cle(fmt, rhs, lhs, fcc);
1923 break;
1924 case DoubleLessThan:
1925 as_fcmp_clt(fmt, lhs, rhs, fcc);
1926 break;
1927 case DoubleLessThanOrEqual:
1928 as_fcmp_cle(fmt, lhs, rhs, fcc);
1929 break;
1930 case DoubleUnordered:
1931 as_fcmp_cun(fmt, lhs, rhs, fcc);
1932 break;
1933 case DoubleEqualOrUnordered:
1934 as_fcmp_cueq(fmt, lhs, rhs, fcc);
1935 break;
1936 case DoubleNotEqualOrUnordered:
1937 as_fcmp_cune(fmt, lhs, rhs, fcc);
1938 break;
1939 case DoubleGreaterThanOrUnordered:
1940 as_fcmp_cult(fmt, rhs, lhs, fcc);
1941 break;
1942 case DoubleGreaterThanOrEqualOrUnordered:
1943 as_fcmp_cule(fmt, rhs, lhs, fcc);
1944 break;
1945 case DoubleLessThanOrUnordered:
1946 as_fcmp_cult(fmt, lhs, rhs, fcc);
1947 break;
1948 case DoubleLessThanOrEqualOrUnordered:
1949 as_fcmp_cule(fmt, lhs, rhs, fcc);
1950 break;
1951 default:
1952 MOZ_CRASH("Invalid DoubleCondition.");
1953 }
1954 }
1955
1956 void MacroAssemblerLOONG64::minMaxDouble(FloatRegister srcDest,
1957 FloatRegister second, bool handleNaN,
1958 bool isMax) {
1959 if (srcDest == second) return;
1960
1961 Label nan, done;
1962
1963 // First or second is NaN, result is NaN.
1964 ma_bc_d(srcDest, second, &nan, Assembler::DoubleUnordered, ShortJump);
1965 if (isMax) {
1966 as_fmax_d(srcDest, srcDest, second);
1967 } else {
1968 as_fmin_d(srcDest, srcDest, second);
1969 }
1970 ma_b(&done, ShortJump);
1971
1972 bind(&nan);
1973 as_fadd_d(srcDest, srcDest, second);
1974
1975 bind(&done);
1976 }
1977
1978 void MacroAssemblerLOONG64::minMaxFloat32(FloatRegister srcDest,
1979 FloatRegister second, bool handleNaN,
1980 bool isMax) {
1981 if (srcDest == second) return;
1982
1983 Label nan, done;
1984
1985 // First or second is NaN, result is NaN.
1986 ma_bc_s(srcDest, second, &nan, Assembler::DoubleUnordered, ShortJump);
1987 if (isMax) {
1988 as_fmax_s(srcDest, srcDest, second);
1989 } else {
1990 as_fmin_s(srcDest, srcDest, second);
1991 }
1992 ma_b(&done, ShortJump);
1993
1994 bind(&nan);
1995 as_fadd_s(srcDest, srcDest, second);
1996
1997 bind(&done);
1998 }
1999
2000 void MacroAssemblerLOONG64::loadDouble(const Address& address,
2001 FloatRegister dest) {
2002 asMasm().ma_fld_d(dest, address);
2003 }
2004
2005 void MacroAssemblerLOONG64::loadDouble(const BaseIndex& src,
2006 FloatRegister dest) {
2007 asMasm().ma_fld_d(dest, src);
2008 }
2009
2010 void MacroAssemblerLOONG64::loadFloatAsDouble(const Address& address,
2011 FloatRegister dest) {
2012 asMasm().ma_fld_s(dest, address);
2013 as_fcvt_d_s(dest, dest);
2014 }
2015
2016 void MacroAssemblerLOONG64::loadFloatAsDouble(const BaseIndex& src,
2017 FloatRegister dest) {
2018 asMasm().loadFloat32(src, dest);
2019 as_fcvt_d_s(dest, dest);
2020 }
2021
2022 void MacroAssemblerLOONG64::loadFloat32(const Address& address,
2023 FloatRegister dest) {
2024 asMasm().ma_fld_s(dest, address);
2025 }
2026
2027 void MacroAssemblerLOONG64::loadFloat32(const BaseIndex& src,
2028 FloatRegister dest) {
2029 asMasm().ma_fld_s(dest, src);
2030 }
2031
2032 void MacroAssemblerLOONG64::wasmLoadImpl(const wasm::MemoryAccessDesc& access,
2033 Register memoryBase, Register ptr,
2034 Register ptrScratch,
2035 AnyRegister output, Register tmp) {
2036 uint32_t offset = access.offset();
2037 MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
2038 MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
2039
2040 // Maybe add the offset.
2041 if (offset) {
2042 asMasm().addPtr(ImmWord(offset), ptrScratch);
2043 ptr = ptrScratch;
2044 }
2045
2046 asMasm().memoryBarrierBefore(access.sync());
2047
2048 switch (access.type()) {
2049 case Scalar::Int8:
2050 as_ldx_b(output.gpr(), memoryBase, ptr);
2051 break;
2052 case Scalar::Uint8:
2053 as_ldx_bu(output.gpr(), memoryBase, ptr);
2054 break;
2055 case Scalar::Int16:
2056 as_ldx_h(output.gpr(), memoryBase, ptr);
2057 break;
2058 case Scalar::Uint16:
2059 as_ldx_hu(output.gpr(), memoryBase, ptr);
2060 break;
2061 case Scalar::Int32:
2062 case Scalar::Uint32:
2063 as_ldx_w(output.gpr(), memoryBase, ptr);
2064 break;
2065 case Scalar::Float64:
2066 as_fldx_d(output.fpu(), memoryBase, ptr);
2067 break;
2068 case Scalar::Float32:
2069 as_fldx_s(output.fpu(), memoryBase, ptr);
2070 break;
2071 default:
2072 MOZ_CRASH("unexpected array type");
2073 }
2074
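// Only the last emitted instruction is a memory access.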
2075 asMasm().append(access, asMasm().size() - 4);
2076 asMasm().memoryBarrierAfter(access.sync());
2077 }
2078
2079 void MacroAssemblerLOONG64::wasmStoreImpl(const wasm::MemoryAccessDesc& access,
2080 AnyRegister value,
2081 Register memoryBase, Register ptr,
2082 Register ptrScratch, Register tmp) {
2083 uint32_t offset = access.offset();
2084 MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
2085 MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
2086
2087 // Maybe add the offset.
2088 if (offset) {
2089 asMasm().addPtr(ImmWord(offset), ptrScratch);
2090 ptr = ptrScratch;
2091 }
2092
2093 asMasm().memoryBarrierBefore(access.sync());
2094
2095 switch (access.type()) {
2096 case Scalar::Int8:
2097 case Scalar::Uint8:
2098 as_stx_b(value.gpr(), memoryBase, ptr);
2099 break;
2100 case Scalar::Int16:
2101 case Scalar::Uint16:
2102 as_stx_h(value.gpr(), memoryBase, ptr);
2103 break;
2104 case Scalar::Int32:
2105 case Scalar::Uint32:
2106 as_stx_w(value.gpr(), memoryBase, ptr);
2107 break;
2108 case Scalar::Int64:
2109 as_stx_d(value.gpr(), memoryBase, ptr);
2110 break;
2111 case Scalar::Float64:
2112 as_fstx_d(value.fpu(), memoryBase, ptr);
2113 break;
2114 case Scalar::Float32:
2115 as_fstx_s(value.fpu(), memoryBase, ptr);
2116 break;
2117 default:
2118 MOZ_CRASH("unexpected array type");
2119 }
2120
2121 // Only the last emitted instruction is a memory access.
2122 asMasm().append(access, asMasm().size() - 4);
2123 asMasm().memoryBarrierAfter(access.sync());
2124 }
2125
2126 void MacroAssemblerLOONG64Compat::wasmLoadI64Impl(
2127 const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
2128 Register ptrScratch, Register64 output, Register tmp) {
2129 uint32_t offset = access.offset();
2130 MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
2131 MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
2132
2133 // Maybe add the offset.
2134 if (offset) {
2135 asMasm().addPtr(ImmWord(offset), ptrScratch);
2136 ptr = ptrScratch;
2137 }
2138
2139 asMasm().memoryBarrierBefore(access.sync());
2140
2141 switch (access.type()) {
2142 case Scalar::Int8:
2143 as_ldx_b(output.reg, memoryBase, ptr);
2144 break;
2145 case Scalar::Uint8:
2146 as_ldx_bu(output.reg, memoryBase, ptr);
2147 break;
2148 case Scalar::Int16:
2149 as_ldx_h(output.reg, memoryBase, ptr);
2150 break;
2151 case Scalar::Uint16:
2152 as_ldx_hu(output.reg, memoryBase, ptr);
2153 break;
2154 case Scalar::Int32:
2155 as_ldx_w(output.reg, memoryBase, ptr);
2156 break;
2157 case Scalar::Uint32:
2158 // TODO(loong64): Why is zero-extension needed here?
2159 as_ldx_wu(output.reg, memoryBase, ptr);
2160 break;
2161 case Scalar::Int64:
2162 as_ldx_d(output.reg, memoryBase, ptr);
2163 break;
2164 default:
2165 MOZ_CRASH("unexpected array type");
2166 }
2167
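// Only the last emitted instruction is a memory access.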
2168 asMasm().append(access, asMasm().size() - 4);
2169 asMasm().memoryBarrierAfter(access.sync());
2170 }
2171
2172 void MacroAssemblerLOONG64Compat::wasmStoreI64Impl(
2173 const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
2174 Register ptr, Register ptrScratch, Register tmp) {
2175 uint32_t offset = access.offset();
2176 MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
2177 MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
2178
2179 // Maybe add the offset.
2180 if (offset) {
2181 asMasm().addPtr(ImmWord(offset), ptrScratch);
2182 ptr = ptrScratch;
2183 }
2184
2185 asMasm().memoryBarrierBefore(access.sync());
2186
2187 switch (access.type()) {
2188 case Scalar::Int8:
2189 case Scalar::Uint8:
2190 as_stx_b(value.reg, memoryBase, ptr);
2191 break;
2192 case Scalar::Int16:
2193 case Scalar::Uint16:
2194 as_stx_h(value.reg, memoryBase, ptr);
2195 break;
2196 case Scalar::Int32:
2197 case Scalar::Uint32:
2198 as_stx_w(value.reg, memoryBase, ptr);
2199 break;
2200 case Scalar::Int64:
2201 as_stx_d(value.reg, memoryBase, ptr);
2202 break;
2203 default:
2204 MOZ_CRASH("unexpected array type");
2205 }
2206
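// Only the last emitted instruction is a memory access.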
2207 asMasm().append(access, asMasm().size() - 4);
2208 asMasm().memoryBarrierAfter(access.sync());
2209 }
2210
2211 void MacroAssemblerLOONG64::outOfLineWasmTruncateToInt32Check(
2212 FloatRegister input, Register output, MIRType fromType, TruncFlags flags,
2213 Label* rejoin, wasm::BytecodeOffset trapOffset) {
2214 bool isUnsigned = flags & TRUNC_UNSIGNED;
2215 bool isSaturating = flags & TRUNC_SATURATING;
2216
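// Saturating truncations materialize the saturated result here and jump back
// to |rejoin|; non-saturating ones fall through below and trap on overflow or
// on NaN.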
2217 if (isSaturating) {
2218 ScratchDoubleScope fpscratch(asMasm());
2219 if (fromType == MIRType::Double) {
2220 asMasm().loadConstantDouble(0.0, fpscratch);
2221 } else {
2222 asMasm().loadConstantFloat32(0.0f, fpscratch);
2223 }
2224
2225 if (isUnsigned) {
2226 ma_li(output, Imm32(UINT32_MAX));
2227
2228 compareFloatingPoint(
2229 fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
2230 fpscratch, Assembler::DoubleLessThanOrUnordered);
2231
2232 ScratchRegisterScope scratch(asMasm());
2233 as_movcf2gr(scratch, FCC0);
2234 // If FCC0 == 1, set output to zero; otherwise leave it unchanged.
2235 as_masknez(output, output, scratch);
2236 } else {
2237 // Positive overflow is already saturated to INT32_MAX, so we only have
2238 // to handle NaN and negative overflow here.
2239
2240 compareFloatingPoint(
2241 fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
2242 Assembler::DoubleLessThanOrUnordered);
2243
2244 ScratchRegisterScope scratch(asMasm());
2245 as_movcf2gr(scratch, FCC0);
2246 // If FCC0 == 1, set output to zero; otherwise leave it unchanged.
2247 as_masknez(output, output, scratch);
2248
2249 compareFloatingPoint(
2250 fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
2251 fpscratch, Assembler::DoubleLessThan);
2252
2253 as_movcf2gr(scratch, FCC0);
2254 // If FCC0 == 1, set output to INT32_MIN; otherwise leave it unchanged.
2255 as_slli_w(scratch, scratch, 31);
2256 as_or(output, output, scratch);
2257 }
2258
2259 MOZ_ASSERT(rejoin->bound());
2260 asMasm().jump(rejoin);
2261 return;
2262 }
2263
2264 Label inputIsNaN;
2265
2266 if (fromType == MIRType::Double) {
2267 asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
2268 &inputIsNaN);
2269 } else if (fromType == MIRType::Float32) {
2270 asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
2271 }
2272
2273 asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
2274 asMasm().bind(&inputIsNaN);
2275 asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
2276 }
2277
2278 void MacroAssemblerLOONG64::outOfLineWasmTruncateToInt64Check(
2279 FloatRegister input, Register64 output_, MIRType fromType, TruncFlags flags,
2280 Label* rejoin, wasm::BytecodeOffset trapOffset) {
2281 bool isUnsigned = flags & TRUNC_UNSIGNED;
2282 bool isSaturating = flags & TRUNC_SATURATING;
2283
2284 if (isSaturating) {
2285 ScratchDoubleScope fpscratch(asMasm());
2286 Register output = output_.reg;
2287
2288 if (fromType == MIRType::Double) {
2289 asMasm().loadConstantDouble(0.0, fpscratch);
2290 } else {
2291 asMasm().loadConstantFloat32(0.0f, fpscratch);
2292 }
2293
2294 if (isUnsigned) {
2295 asMasm().ma_li(output, ImmWord(UINT64_MAX));
2296
2297 compareFloatingPoint(
2298 fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
2299 fpscratch, Assembler::DoubleLessThanOrUnordered);
2300
2301 ScratchRegisterScope scratch(asMasm());
2302 as_movcf2gr(scratch, FCC0);
2303 // If FCC0 == 1, set output to zero; otherwise leave it unchanged.
2304 as_masknez(output, output, scratch);
2305 } else {
2306 // Positive overflow is already saturated to INT64_MAX, so we only have
2307 // to handle NaN and negative overflow here.
2308
2309 compareFloatingPoint(
2310 fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
2311 Assembler::DoubleLessThanOrUnordered);
2312
2313 ScratchRegisterScope scratch(asMasm());
2314 as_movcf2gr(scratch, FCC0);
2315 // If FCC0 == 1, set output to zero; otherwise leave it unchanged.
2316 as_masknez(output, output, scratch);
2317
2318 compareFloatingPoint(
2319 fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
2320 fpscratch, Assembler::DoubleLessThan);
2321
2322 as_movcf2gr(scratch, FCC0);
2323 // If FCC0 == 1, set output to INT64_MIN; otherwise leave it unchanged.
2324 as_slli_d(scratch, scratch, 63);
2325 as_or(output, output, scratch);
2326 }
2327
2328 MOZ_ASSERT(rejoin->bound());
2329 asMasm().jump(rejoin);
2330 return;
2331 }
2332
2333 Label inputIsNaN;
2334
2335 if (fromType == MIRType::Double) {
2336 asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
2337 &inputIsNaN);
2338 } else if (fromType == MIRType::Float32) {
2339 asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
2340 }
2341
2342 asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
2343 asMasm().bind(&inputIsNaN);
2344 asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
2345 }
2346
2347 void MacroAssemblerLOONG64Compat::profilerEnterFrame(Register framePtr,
2348 Register scratch) {
2349 asMasm().loadJSContext(scratch);
2350 loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
2351 storePtr(framePtr,
2352 Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
2353 storePtr(ImmPtr(nullptr),
2354 Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
2355 }
2356
2357 void MacroAssemblerLOONG64Compat::profilerExitFrame() {
2358 jump(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
2359 }
2360
2361 MacroAssembler& MacroAssemblerLOONG64::asMasm() {
2362 return *static_cast<MacroAssembler*>(this);
2363 }
2364
2365 const MacroAssembler& MacroAssemblerLOONG64::asMasm() const {
2366 return *static_cast<const MacroAssembler*>(this);
2367 }
2368
2369 void MacroAssembler::subFromStackPtr(Imm32 imm32) {
2370 if (imm32.value) {
2371 asMasm().subPtr(imm32, StackPointer);
2372 }
2373 }
2374
2375 //{{{ check_macroassembler_style
2376 // ===============================================================
2377 // MacroAssembler high-level usage.
2378
2379 void MacroAssembler::flush() {}
2380
2381 // ===============================================================
2382 // Stack manipulation functions.
2383
2384 size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
2385 return set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
2386 }
2387
2388 void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
2389 int32_t diff =
2390 set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
2391 const int32_t reserved = diff;
2392
2393 reserveStack(reserved);
2394 for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
2395 diff -= sizeof(intptr_t);
2396 storePtr(*iter, Address(StackPointer, diff));
2397 }
2398
2399 #ifdef ENABLE_WASM_SIMD
2400 # error "Needs more careful logic if SIMD is enabled"
2401 #endif
2402
2403 for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
2404 iter.more(); ++iter) {
2405 diff -= sizeof(double);
2406 storeDouble(*iter, Address(StackPointer, diff));
2407 }
2408 MOZ_ASSERT(diff == 0);
2409 }
2410
2411 void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
2412 LiveRegisterSet ignore) {
2413 int32_t diff =
2414 set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
2415 const int32_t reserved = diff;
2416
2417 for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
2418 diff -= sizeof(intptr_t);
2419 if (!ignore.has(*iter)) {
2420 loadPtr(Address(StackPointer, diff), *iter);
2421 }
2422 }
2423
2424 #ifdef ENABLE_WASM_SIMD
2425 # error "Needs more careful logic if SIMD is enabled"
2426 #endif
2427
2428 for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
2429 iter.more(); ++iter) {
2430 diff -= sizeof(double);
2431 if (!ignore.has(*iter)) {
2432 loadDouble(Address(StackPointer, diff), *iter);
2433 }
2434 }
2435 MOZ_ASSERT(diff == 0);
2436 freeStack(reserved);
2437 }
2438
2439 void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
2440 Register) {
2441 FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
2442 mozilla::DebugOnly<unsigned> numFpu = fpuSet.size();
2443 int32_t diffF = fpuSet.getPushSizeInBytes();
2444 mozilla::DebugOnly<int32_t> diffG = set.gprs().size() * sizeof(intptr_t);
2445
2446 MOZ_ASSERT(dest.offset >= diffG + diffF);
2447
2448 for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
2449 diffG -= sizeof(intptr_t);
2450 dest.offset -= sizeof(intptr_t);
2451 storePtr(*iter, dest);
2452 }
2453 MOZ_ASSERT(diffG == 0);
2454
2455 #ifdef ENABLE_WASM_SIMD
2456 # error "Needs more careful logic if SIMD is enabled"
2457 #endif
2458
2459 for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
2460 FloatRegister reg = *iter;
2461 diffF -= reg.size();
2462 numFpu -= 1;
2463 dest.offset -= reg.size();
2464 if (reg.isDouble()) {
2465 storeDouble(reg, dest);
2466 } else if (reg.isSingle()) {
2467 storeFloat32(reg, dest);
2468 } else {
2469 MOZ_CRASH("Unknown register type.");
2470 }
2471 }
2472 MOZ_ASSERT(numFpu == 0);
2473 diffF -= diffF % sizeof(uintptr_t);
2474 MOZ_ASSERT(diffF == 0);
2475 }
2476
2477 void MacroAssembler::Push(Register reg) {
2478 ma_push(reg);
2479 adjustFrame(int32_t(sizeof(intptr_t)));
2480 }
2481
2482 void MacroAssembler::Push(const Imm32 imm) {
2483 ScratchRegisterScope scratch(asMasm());
2484 ma_li(scratch, imm);
2485 ma_push(scratch);
2486 adjustFrame(int32_t(sizeof(intptr_t)));
2487 }
2488
2489 void MacroAssembler::Push(const ImmWord imm) {
2490 ScratchRegisterScope scratch(asMasm());
2491 ma_li(scratch, imm);
2492 ma_push(scratch);
2493 adjustFrame(int32_t(sizeof(intptr_t)));
2494 }
2495
2496 void MacroAssembler::Push(const ImmPtr imm) {
2497 Push(ImmWord(uintptr_t(imm.value)));
2498 }
2499
2500 void MacroAssembler::Push(const ImmGCPtr ptr) {
2501 ScratchRegisterScope scratch(asMasm());
2502 ma_li(scratch, ptr);
2503 ma_push(scratch);
2504 adjustFrame(int32_t(sizeof(intptr_t)));
2505 }
2506
2507 void MacroAssembler::Push(FloatRegister f) {
2508 ma_push(f);
2509 adjustFrame(int32_t(sizeof(double)));
2510 }
2511
2512 void MacroAssembler::PushBoxed(FloatRegister reg) {
2513 subFromStackPtr(Imm32(sizeof(double)));
2514 boxDouble(reg, Address(getStackPointer(), 0));
2515 adjustFrame(sizeof(double));
2516 }
2517
2518 void MacroAssembler::Pop(Register reg) {
2519 ma_pop(reg);
2520 adjustFrame(-int32_t(sizeof(intptr_t)));
2521 }
2522
2523 void MacroAssembler::Pop(FloatRegister f) {
2524 ma_pop(f);
2525 adjustFrame(-int32_t(sizeof(double)));
2526 }
2527
2528 void MacroAssembler::Pop(const ValueOperand& val) {
2529 popValue(val);
2530 adjustFrame(-int32_t(sizeof(Value)));
2531 }
2532
2533 void MacroAssembler::PopStackPtr() {
2534 loadPtr(Address(StackPointer, 0), StackPointer);
2535 adjustFrame(-int32_t(sizeof(intptr_t)));
2536 }
2537
2538 // ===============================================================
2539 // Simple call functions.
2540
2541 CodeOffset MacroAssembler::call(Register reg) {
2542 as_jirl(ra, reg, BOffImm16(0));
2543 return CodeOffset(currentOffset());
2544 }
2545
2546 CodeOffset MacroAssembler::call(Label* label) {
2547 ma_bl(label);
2548 return CodeOffset(currentOffset());
2549 }
2550
2551 CodeOffset MacroAssembler::callWithPatch() {
2552 as_bl(JOffImm26(1 * sizeof(uint32_t)));
2553 return CodeOffset(currentOffset());
2554 }
2555
2556 void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
2557 BufferOffset call(callerOffset - 1 * sizeof(uint32_t));
2558
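// If the callee is within JOffImm26 range, patch the bl instruction directly;
// otherwise patch the 32-bit offset word located 4 * sizeof(uint32_t) bytes
// before |callerOffset|.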
2559 JOffImm26 offset = BufferOffset(calleeOffset).diffB<JOffImm26>(call);
2560 if (!offset.isInvalid()) {
2561 InstJump* bal = (InstJump*)editSrc(call);
2562 bal->setJOffImm26(offset);
2563 } else {
2564 uint32_t u32Offset = callerOffset - 4 * sizeof(uint32_t);
2565 uint32_t* u32 =
2566 reinterpret_cast<uint32_t*>(editSrc(BufferOffset(u32Offset)));
2567 *u32 = calleeOffset - callerOffset;
2568 }
2569 }
2570
2571 CodeOffset MacroAssembler::farJumpWithPatch() {
2572 ScratchRegisterScope scratch(asMasm());
2573 SecondScratchRegisterScope scratch2(asMasm());
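// pcaddi computes the address of the 32-bit offset word emitted below (four
// instructions ahead); the offset loaded from it is added to that address and
// jumped to, so patchFarJump() only has to rewrite the offset word.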
2574 as_pcaddi(scratch, 4);
2575 as_ld_w(scratch2, scratch, 0);
2576 as_add_d(scratch, scratch, scratch2);
2577 as_jirl(zero, scratch, BOffImm16(0));
2578 // Allocate space which will be patched by patchFarJump().
2579 CodeOffset farJump(currentOffset());
2580 spew(".space 32bit initValue 0xffff ffff");
2581 writeInst(UINT32_MAX);
2582 return farJump;
2583 }
2584
2585 void MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
2586 uint32_t* u32 =
2587 reinterpret_cast<uint32_t*>(editSrc(BufferOffset(farJump.offset())));
2588 MOZ_ASSERT(*u32 == UINT32_MAX);
2589 *u32 = targetOffset - farJump.offset();
2590 }
2591
2592 CodeOffset MacroAssembler::call(wasm::SymbolicAddress target) {
2593 movePtr(target, CallReg);
2594 return call(CallReg);
2595 }
2596
2597 void MacroAssembler::call(const Address& addr) {
2598 loadPtr(addr, CallReg);
2599 call(CallReg);
2600 }
2601
2602 void MacroAssembler::call(ImmWord target) { call(ImmPtr((void*)target.value)); }
2603
2604 void MacroAssembler::call(ImmPtr target) {
2605 BufferOffset bo = m_buffer.nextOffset();
2606 addPendingJump(bo, target, RelocationKind::HARDCODED);
2607 ma_call(target);
2608 }
2609
2610 void MacroAssembler::call(JitCode* c) {
2611 ScratchRegisterScope scratch(asMasm());
2612 BufferOffset bo = m_buffer.nextOffset();
2613 addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
2614 ma_liPatchable(scratch, ImmPtr(c->raw()));
2615 callJitNoProfiler(scratch);
2616 }
2617
2618 CodeOffset MacroAssembler::nopPatchableToCall() {
2619 // LOONG64
2620 as_nop(); // lu12i_w
2621 as_nop(); // ori
2622 as_nop(); // lu32i_d
2623 as_nop(); // jirl
2624 return CodeOffset(currentOffset());
2625 }
2626
2627 void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
2628 Instruction* inst = (Instruction*)call - 4 /* four nops */;
2629 Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)target);
2630 inst[3] = InstImm(op_jirl, BOffImm16(0), ScratchRegister, ra);
2631 }
2632
2633 void MacroAssembler::patchCallToNop(uint8_t* call) {
2634 Instruction* inst = (Instruction*)call - 4 /* four nops */;
2635 inst[0].makeNop(); // lu12i_w
2636 inst[1].makeNop(); // ori
2637 inst[2].makeNop(); // lu32i_d
2638 inst[3].makeNop(); // jirl
2639 }
2640
2641 void MacroAssembler::pushReturnAddress() { push(ra); }
2642
2643 void MacroAssembler::popReturnAddress() { pop(ra); }
2644
2645 // ===============================================================
2646 // ABI function calls.
2647
2648 void MacroAssembler::setupUnalignedABICall(Register scratch) {
2649 MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
2650 setupNativeABICall();
2651 dynamicAlignment_ = true;
2652
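// Preserve the original StackPointer in |scratch|; it is spilled to the
// aligned stack below and restored again in callWithABIPost().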
2653 as_or(scratch, StackPointer, zero);
2654
2655 // Force sp to be aligned
2656 asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
2657 ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
2658 storePtr(scratch, Address(StackPointer, 0));
2659 }
2660
2661 void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
2662 MOZ_ASSERT(inCall_);
2663 uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
2664
2665 // Reserve space for $ra.
2666 stackForCall += sizeof(intptr_t);
2667
2668 if (dynamicAlignment_) {
2669 stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
2670 } else {
2671 uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
2672 stackForCall += ComputeByteAlignment(
2673 stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
2674 }
2675
2676 *stackAdjust = stackForCall;
2677 reserveStack(stackForCall);
2678
2679 // Save $ra because call is going to clobber it. Restore it in
2680 // callWithABIPost. NOTE: This is needed for calls from SharedIC.
2681 // Maybe we can do this differently.
2682 storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
2683
2684 // Position all arguments.
2685 {
2686 enoughMemory_ &= moveResolver_.resolve();
2687 if (!enoughMemory_) {
2688 return;
2689 }
2690
2691 MoveEmitter emitter(*this);
2692 emitter.emit(moveResolver_);
2693 emitter.finish();
2694 }
2695
2696 assertStackAlignment(ABIStackAlignment);
2697 }
2698
2699 void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
2700 bool callFromWasm) {
2701 // Restore ra value (as stored in callWithABIPre()).
2702 loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
2703
2704 if (dynamicAlignment_) {
2705 // Restore sp value from stack (as stored in setupUnalignedABICall()).
2706 loadPtr(Address(StackPointer, stackAdjust), StackPointer);
2707 // Use adjustFrame instead of freeStack because we already restored sp.
2708 adjustFrame(-stackAdjust);
2709 } else {
2710 freeStack(stackAdjust);
2711 }
2712
2713 #ifdef DEBUG
2714 MOZ_ASSERT(inCall_);
2715 inCall_ = false;
2716 #endif
2717 }
2718
2719 void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
2720 SecondScratchRegisterScope scratch2(asMasm());
2721 // Load the callee in scratch2; no instruction between the movePtr and
2722 // call should clobber it. Note that we can't use fun because it may be
2723 // one of the IntArg registers clobbered before the call.
2724 movePtr(fun, scratch2);
2725
2726 uint32_t stackAdjust;
2727 callWithABIPre(&stackAdjust);
2728 call(scratch2);
2729 callWithABIPost(stackAdjust, result);
2730 }
2731
2732 void MacroAssembler::callWithABINoProfiler(const Address& fun,
2733 MoveOp::Type result) {
2734 SecondScratchRegisterScope scratch2(asMasm());
2735 // Load the callee in scratch2, as above.
2736 loadPtr(fun, scratch2);
2737
2738 uint32_t stackAdjust;
2739 callWithABIPre(&stackAdjust);
2740 call(scratch2);
2741 callWithABIPost(stackAdjust, result);
2742 }
2743
2744 // ===============================================================
2745 // Jit Frames.
2746
2747 uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
2748 CodeLabel cl;
2749
2750 ma_li(scratch, &cl);
2751 Push(scratch);
2752 bind(&cl);
2753 uint32_t retAddr = currentOffset();
2754
2755 addCodeLabel(cl);
2756 return retAddr;
2757 }
2758
2759 // ===============================================================
2760 // Move instructions
2761
2762 void MacroAssembler::moveValue(const TypedOrValueRegister& src,
2763 const ValueOperand& dest) {
2764 if (src.hasValue()) {
2765 moveValue(src.valueReg(), dest);
2766 return;
2767 }
2768
2769 MIRType type = src.type();
2770 AnyRegister reg = src.typedReg();
2771
2772 if (!IsFloatingPointType(type)) {
2773 boxNonDouble(ValueTypeFromMIRType(type), reg.gpr(), dest);
2774 return;
2775 }
2776
2777 ScratchDoubleScope fpscratch(asMasm());
2778 FloatRegister scratch = fpscratch;
2779 FloatRegister freg = reg.fpu();
2780 if (type == MIRType::Float32) {
2781 convertFloat32ToDouble(freg, scratch);
2782 freg = scratch;
2783 }
2784 boxDouble(freg, dest, scratch);
2785 }
2786
2787 void MacroAssembler::moveValue(const ValueOperand& src,
2788 const ValueOperand& dest) {
2789 if (src == dest) {
2790 return;
2791 }
2792 movePtr(src.valueReg(), dest.valueReg());
2793 }
2794
2795 void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
2796 if (!src.isGCThing()) {
2797 ma_li(dest.valueReg(), ImmWord(src.asRawBits()));
2798 return;
2799 }
2800
2801 writeDataRelocation(src);
2802 movWithPatch(ImmWord(src.asRawBits()), dest.valueReg());
2803 }
2804
2805 // ===============================================================
2806 // Branch functions
2807
2808 void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
2809 if (ptr != buffer) {
2810 movePtr(ptr, buffer);
2811 }
2812 orPtr(Imm32(gc::ChunkMask), buffer);
2813 loadPtr(Address(buffer, gc::ChunkStoreBufferOffsetFromLastByte), buffer);
2814 }
2815
2816 void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
2817 Register temp, Label* label) {
2818 MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
2819 MOZ_ASSERT(ptr != temp);
2820 MOZ_ASSERT(ptr != ScratchRegister &&
2821 ptr != SecondScratchReg); // Both may be used internally.
2822 MOZ_ASSERT(temp != ScratchRegister && temp != SecondScratchReg);
2823 MOZ_ASSERT(temp != InvalidReg);
2824
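// Round the pointer up to the last byte of its chunk and test the chunk's
// store buffer pointer, which is non-null only for chunks in the nursery.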
2825 movePtr(ptr, temp);
2826 orPtr(Imm32(gc::ChunkMask), temp);
2827 branchPtr(InvertCondition(cond),
2828 Address(temp, gc::ChunkStoreBufferOffsetFromLastByte), zero, label);
2829 }
2830
2831 void MacroAssembler::branchValueIsNurseryCell(Condition cond,
2832 const Address& address,
2833 Register temp, Label* label) {
2834 branchValueIsNurseryCellImpl(cond, address, temp, label);
2835 }
2836
2837 void MacroAssembler::branchValueIsNurseryCell(Condition cond,
2838 ValueOperand value, Register temp,
2839 Label* label) {
2840 branchValueIsNurseryCellImpl(cond, value, temp, label);
2841 }
2842
2843 template <typename T>
2844 void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
2845 const T& value, Register temp,
2846 Label* label) {
2847 MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
2848 MOZ_ASSERT(temp != InvalidReg);
2849 Label done;
2850 branchTestGCThing(Assembler::NotEqual, value,
2851 cond == Assembler::Equal ? &done : label);
2852
2853 unboxGCThingForGCBarrier(value, temp);
2854 orPtr(Imm32(gc::ChunkMask), temp);
2855 loadPtr(Address(temp, gc::ChunkStoreBufferOffsetFromLastByte), temp);
2856 branchPtr(InvertCondition(cond), temp, zero, label);
2857
2858 bind(&done);
2859 }
2860
2861 void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
2862 const Value& rhs, Label* label) {
2863 MOZ_ASSERT(cond == Equal || cond == NotEqual);
2864 ScratchRegisterScope scratch(asMasm());
2865 MOZ_ASSERT(lhs.valueReg() != scratch);
2866 moveValue(rhs, ValueOperand(scratch));
2867 ma_b(lhs.valueReg(), scratch, label, cond);
2868 }
2869
2870 // ========================================================================
2871 // Memory access primitives.
2872
2873 template <typename T>
2874 void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
2875 MIRType valueType, const T& dest,
2876 MIRType slotType) {
2877 if (valueType == MIRType::Double) {
2878 boxDouble(value.reg().typedReg().fpu(), dest);
2879 return;
2880 }
2881
2882 // For known integers and booleans, we can just store the unboxed value if
2883 // the slot has the same type.
2884 if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) &&
2885 slotType == valueType) {
2886 if (value.constant()) {
2887 Value val = value.value();
2888 if (valueType == MIRType::Int32) {
2889 store32(Imm32(val.toInt32()), dest);
2890 } else {
2891 store32(Imm32(val.toBoolean() ? 1 : 0), dest);
2892 }
2893 } else {
2894 store32(value.reg().typedReg().gpr(), dest);
2895 }
2896 return;
2897 }
2898
2899 if (value.constant()) {
2900 storeValue(value.value(), dest);
2901 } else {
2902 storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
2903 dest);
2904 }
2905 }
2906
2907 template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
2908 MIRType valueType,
2909 const Address& dest,
2910 MIRType slotType);
2911 template void MacroAssembler::storeUnboxedValue(
2912 const ConstantOrRegister& value, MIRType valueType,
2913 const BaseObjectElementIndex& dest, MIRType slotType);
2914
2915 void MacroAssembler::comment(const char* msg) { Assembler::comment(msg); }
2916
2917 // ===============================================================
2918 // WebAssembly
2919
2920 CodeOffset MacroAssembler::wasmTrapInstruction() {
2921 CodeOffset offset(currentOffset());
2922 as_break(WASM_TRAP); // TODO: as_teq(zero, zero, WASM_TRAP)
2923 return offset;
2924 }
2925
2926 void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
2927 Register boundsCheckLimit, Label* ok) {
2928 ma_b(index, boundsCheckLimit, ok, cond);
2929 }
2930
2931 void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
2932 Address boundsCheckLimit, Label* ok) {
2933 SecondScratchRegisterScope scratch2(asMasm());
2934 load32(boundsCheckLimit, scratch2);
2935 ma_b(index, Register(scratch2), ok, cond);
2936 }
2937
2938 void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
2939 Register64 boundsCheckLimit, Label* ok) {
2940 ma_b(index.reg, boundsCheckLimit.reg, ok, cond);
2941 }
2942
2943 void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
2944 Address boundsCheckLimit, Label* ok) {
2945 SecondScratchRegisterScope scratch2(asMasm());
2946 loadPtr(boundsCheckLimit, scratch2);
2947 ma_b(index.reg, scratch2, ok, cond);
2948 }
2949
2950 // FTINTRZ behaves as follows:
2951 //
2952 //  - on a NaN input it produces zero
2953 //  - on an input that is too large it produces INT_MAX (for the target type)
2954 //  - on an input that is too small it produces INT_MIN (ditto)
2955
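// The UInt32 truncations below convert to a 64-bit integer and then require
// the upper 32 bits of the result to be zero; otherwise they branch to the
// out-of-line entry.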
2956 void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
2957 Register output,
2958 bool isSaturating,
2959 Label* oolEntry) {
2960 ScratchRegisterScope scratch(asMasm());
2961 ScratchDoubleScope fpscratch(asMasm());
2962 if (!isSaturating) {
2963 ma_bc_d(input, input, oolEntry, Assembler::DoubleUnordered);
2964 }
2965 as_ftintrz_l_d(fpscratch, input);
2966 moveFromDouble(fpscratch, output);
2967 as_srli_d(scratch, output, 32);
2968 as_slli_w(output, output, 0);
2969 ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
2970 }
2971
2972 void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
2973 Register output,
2974 bool isSaturating,
2975 Label* oolEntry) {
2976 ScratchRegisterScope scratch(asMasm());
2977 ScratchDoubleScope fpscratch(asMasm());
2978 if (!isSaturating) {
2979 ma_bc_s(input, input, oolEntry, Assembler::DoubleUnordered);
2980 }
2981 as_ftintrz_l_s(fpscratch, input);
2982 moveFromDouble(fpscratch, output);
2983 as_srli_d(scratch, output, 32);
2984 as_slli_w(output, output, 0);
2985 ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
2986 }
2987
2988 // Assembler::CauseV is an enumerator of FCSRBit; Assembler::CauseV == 16.
2989 void MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input,
2990 Register output,
2991 bool isSaturating,
2992 Label* oolEntry) {
2993 ScratchRegisterScope scratch(asMasm());
2994 ScratchFloat32Scope fpscratch(asMasm());
2995 as_ftintrz_w_d(fpscratch, input);
2996 as_movfcsr2gr(scratch);
2997 moveFromFloat32(fpscratch, output);
2998 MOZ_ASSERT(Assembler::CauseV < 32);
2999 as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
3000 ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
3001 }
3002
3003 void MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input,
3004 Register output,
3005 bool isSaturating,
3006 Label* oolEntry) {
3007 ScratchRegisterScope scratch(asMasm());
3008 ScratchFloat32Scope fpscratch(asMasm());
3009 as_ftintrz_w_s(fpscratch, input);
3010 as_movfcsr2gr(scratch);
3011 moveFromFloat32(fpscratch, output);
3012 MOZ_ASSERT(Assembler::CauseV < 32);
3013 as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
3014 ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
3015 }
3016
3017 void MacroAssembler::wasmTruncateDoubleToUInt64(
3018 FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
3019 Label* oolRejoin, FloatRegister tempDouble) {
3020 MOZ_ASSERT(tempDouble.isInvalid());
3021 ScratchDoubleScope fpscratch(asMasm());
3022 Register output = output_.reg;
3023
3024 Label done;
3025
3026 if (!isSaturating) {
3027 ma_bc_d(input, input, oolEntry, Assembler::DoubleUnordered);
3028 }
3029 as_ftintrz_l_d(fpscratch, input);
3030 moveFromDouble(fpscratch, output);
3031 loadConstantDouble(double(INT64_MAX + 1ULL), fpscratch);
3032
3033 ScratchRegisterScope scratch(asMasm());
3034 SecondScratchRegisterScope scratch2(asMasm());
3035 ma_li(scratch2, ImmWord(INT64_MAX));
3036 // For values in the (-1 : INT64_MAX] range there is nothing more to do.
3037 ma_b(output, Register(scratch2), &done, Assembler::Below, ShortJump);
3038
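// Otherwise the signed truncation overflowed: truncate input - 2^63 and add
// the 2^63 bias back (via INT64_MIN); the FCSR invalid (V) flag from this
// second conversion catches inputs that are still out of range.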
3039 ma_li(scratch2, ImmWord(INT64_MIN));
3040 as_fsub_d(fpscratch, input, fpscratch);
3041 as_ftintrz_l_d(fpscratch, fpscratch);
3042 as_movfcsr2gr(scratch);
3043 moveFromDouble(fpscratch, output);
3044 as_bstrpick_d(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
3045 as_add_d(output, output, scratch2);
3046
3047 // Guard against negative values that result in 0 due to precision loss.
3048 as_sltui(scratch2, output, 1);
3049 as_or(scratch, scratch, scratch2);
3050
3051 ma_b(scratch, zero, oolEntry, Assembler::NotEqual);
3052
3053 bind(&done);
3054
3055 if (isSaturating) {
3056 bind(oolRejoin);
3057 }
3058 }
3059
3060 void MacroAssembler::wasmTruncateFloat32ToUInt64(
3061 FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
3062 Label* oolRejoin, FloatRegister tempFloat) {
3063 MOZ_ASSERT(tempFloat.isInvalid());
3064 ScratchDoubleScope fpscratch(asMasm());
3065 Register output = output_.reg;
3066
3067 Label done;
3068
3069 if (!isSaturating) {
3070 ma_bc_s(input, input, oolEntry, Assembler::DoubleUnordered);
3071 }
3072 as_ftintrz_l_s(fpscratch, input);
3073 moveFromDouble(fpscratch, output);
3074 loadConstantFloat32(float(INT64_MAX + 1ULL), fpscratch);
3075
3076 ScratchRegisterScope scratch(asMasm());
3077 SecondScratchRegisterScope scratch2(asMasm());
3078 ma_li(scratch2, ImmWord(INT64_MAX));
3079 // For values in the (-1 : INT64_MAX] range there is nothing more to do.
3080 ma_b(output, Register(scratch2), &done, Assembler::Below, ShortJump);
3081
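// Same bias trick as the double variant: truncate input - 2^63, add the bias
// back, and use the FCSR invalid (V) flag to detect remaining overflow.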
3082 ma_li(scratch2, ImmWord(INT64_MIN));
3083 as_fsub_s(fpscratch, input, fpscratch);
3084 as_ftintrz_l_s(fpscratch, fpscratch);
3085 as_movfcsr2gr(scratch);
3086 moveFromDouble(fpscratch, output);
3087 as_bstrpick_d(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
3088 as_add_d(output, output, scratch2);
3089
3090 // Guard against negative values that result in 0 due to precision loss.
3091 as_sltui(scratch2, output, 1);
3092 as_or(scratch, scratch, scratch2);
3093
3094 ma_b(scratch, zero, oolEntry, Assembler::NotEqual);
3095
3096 bind(&done);
3097
3098 if (isSaturating) {
3099 bind(oolRejoin);
3100 }
3101 }
3102
3103 void MacroAssembler::wasmTruncateDoubleToInt64(
3104 FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
3105 Label* oolRejoin, FloatRegister tempDouble) {
3106 MOZ_ASSERT(tempDouble.isInvalid());
3107 ScratchRegisterScope scratch(asMasm());
3108 ScratchDoubleScope fpscratch(asMasm());
3109
3110 as_ftintrz_l_d(fpscratch, input);
3111 as_movfcsr2gr(scratch);
3112 moveFromDouble(fpscratch, output.reg);
3113 as_bstrpick_d(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
3114 ma_b(scratch, zero, oolEntry, Assembler::NotEqual);
3115
3116 if (isSaturating) {
3117 bind(oolRejoin);
3118 }
3119 }
3120
3121 void MacroAssembler::wasmTruncateFloat32ToInt64(
3122 FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
3123 Label* oolRejoin, FloatRegister tempFloat) {
3124 MOZ_ASSERT(tempFloat.isInvalid());
3125 ScratchRegisterScope scratch(asMasm());
3126 ScratchDoubleScope fpscratch(asMasm());
3127
3128 as_ftintrz_l_s(fpscratch, input);
3129 as_movfcsr2gr(scratch);
3130 moveFromDouble(fpscratch, output.reg);
3131 as_bstrpick_d(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
3132 ma_b(scratch, zero, oolEntry, Assembler::NotEqual);
3133
3134 if (isSaturating) {
3135 bind(oolRejoin);
3136 }
3137 }
3138
3139 void MacroAssembler::oolWasmTruncateCheckF32ToI32(FloatRegister input,
3140 Register output,
3141 TruncFlags flags,
3142 wasm::BytecodeOffset off,
3143 Label* rejoin) {
3144 outOfLineWasmTruncateToInt32Check(input, output, MIRType::Float32, flags,
3145 rejoin, off);
3146 }
3147
3148 void MacroAssembler::oolWasmTruncateCheckF64ToI32(FloatRegister input,
3149 Register output,
3150 TruncFlags flags,
3151 wasm::BytecodeOffset off,
3152 Label* rejoin) {
3153 outOfLineWasmTruncateToInt32Check(input, output, MIRType::Double, flags,
3154 rejoin, off);
3155 }
3156
3157 void MacroAssembler::oolWasmTruncateCheckF32ToI64(FloatRegister input,
3158 Register64 output,
3159 TruncFlags flags,
3160 wasm::BytecodeOffset off,
3161 Label* rejoin) {
3162 outOfLineWasmTruncateToInt64Check(input, output, MIRType::Float32, flags,
3163 rejoin, off);
3164 }
3165
3166 void MacroAssembler::oolWasmTruncateCheckF64ToI64(FloatRegister input,
3167 Register64 output,
3168 TruncFlags flags,
3169 wasm::BytecodeOffset off,
3170 Label* rejoin) {
3171 outOfLineWasmTruncateToInt64Check(input, output, MIRType::Double, flags,
3172 rejoin, off);
3173 }
3174
3175 void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
3176 Register memoryBase, Register ptr,
3177 Register ptrScratch, AnyRegister output) {
3178 wasmLoadImpl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
3179 }
3180
3181 void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
3182 Register memoryBase, Register ptr,
3183 Register ptrScratch, Register64 output) {
3184 wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
3185 }
3186
3187 void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
3188 AnyRegister value, Register memoryBase,
3189 Register ptr, Register ptrScratch) {
3190 wasmStoreImpl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
3191 }
3192
3193 void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
3194 Register64 value, Register memoryBase,
3195 Register ptr, Register ptrScratch) {
3196 wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
3197 }
3198
3199 void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
3200 ExitFrameType type) {
3201 enterFakeExitFrame(cxreg, scratch, type);
3202 }
3203
3204 // TODO(loong64): Should widenInt32 be a no-op?
3205 void MacroAssembler::widenInt32(Register r) {
3206 move32To64SignExtend(r, Register64(r));
3207 }
3208
3209 // ========================================================================
3210 // Convert floating point.
3211
3212 void MacroAssembler::convertUInt64ToFloat32(Register64 src_, FloatRegister dest,
3213 Register temp) {
3214 MOZ_ASSERT(temp == Register::Invalid());
3215 ScratchRegisterScope scratch(asMasm());
3216 SecondScratchRegisterScope scratch2(asMasm());
3217
3218 Register src = src_.reg;
3219 Label positive, done;
3220 ma_b(src, src, &positive, NotSigned, ShortJump);
3221
3222 MOZ_ASSERT(src != scratch);
3223 MOZ_ASSERT(src != scratch2);
3224
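// The top bit of |src| is set, so a direct signed conversion would be wrong:
// halve the value (folding the low bit back in so rounding is unaffected),
// convert, then double the result.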
3225 ma_and(scratch, src, Imm32(1));
3226 as_srli_d(scratch2, src, 1);
3227 as_or(scratch, scratch, scratch2);
3228 as_movgr2fr_d(dest, scratch);
3229 as_ffint_s_l(dest, dest);
3230 addFloat32(dest, dest);
3231 ma_b(&done, ShortJump);
3232
3233 bind(&positive);
3234 as_movgr2fr_d(dest, src);
3235 as_ffint_s_l(dest, dest);
3236
3237 bind(&done);
3238 }
3239
3240 void MacroAssembler::convertInt64ToFloat32(Register64 src, FloatRegister dest) {
3241 as_movgr2fr_d(dest, src.reg);
3242 as_ffint_s_l(dest, dest);
3243 }
3244
3245 bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
3246
3247 void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
3248 Register temp) {
3249 MOZ_ASSERT(temp == Register::Invalid());
3250 MacroAssemblerSpecific::convertUInt64ToDouble(src.reg, dest);
3251 }
3252
3253 void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
3254 as_movgr2fr_d(dest, src.reg);
3255 as_ffint_d_l(dest, dest);
3256 }
3257
3258 void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
3259 convertInt64ToDouble(Register64(src), dest);
3260 }
3261
3262 // ========================================================================
3263 // Primitive atomic operations.
3264
3265 template <typename T>
3266 static void CompareExchange(MacroAssembler& masm,
3267 const wasm::MemoryAccessDesc* access,
3268 Scalar::Type type, const Synchronization& sync,
3269 const T& mem, Register oldval, Register newval,
3270 Register valueTemp, Register offsetTemp,
3271 Register maskTemp, Register output) {
3272 ScratchRegisterScope scratch(masm);
3273 SecondScratchRegisterScope scratch2(masm);
3274 bool signExtend = Scalar::isSignedIntType(type);
3275 unsigned nbytes = Scalar::byteSize(type);
3276
3277 switch (nbytes) {
3278 case 1:
3279 case 2:
3280 break;
3281 case 4:
3282 MOZ_ASSERT(valueTemp == InvalidReg);
3283 MOZ_ASSERT(offsetTemp == InvalidReg);
3284 MOZ_ASSERT(maskTemp == InvalidReg);
3285 break;
3286 default:
3287 MOZ_CRASH();
3288 }
3289
3290 Label again, end;
3291
3292 masm.computeEffectiveAddress(mem, scratch);
3293
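  // Word-sized case: a plain ll.w/sc.w loop. sc.w leaves 0 in scratch2 when
  // the store-conditional fails, so we retry until it succeeds.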
3294 if (nbytes == 4) {
3295 masm.memoryBarrierBefore(sync);
3296 masm.bind(&again);
3297
3298 if (access) {
3299 masm.append(*access, masm.size());
3300 }
3301
3302 masm.as_ll_w(output, scratch, 0);
3303 masm.ma_b(output, oldval, &end, Assembler::NotEqual, ShortJump);
3304 masm.as_or(scratch2, newval, zero);
3305 masm.as_sc_w(scratch2, scratch, 0);
3306 masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
3307
3308 masm.memoryBarrierAfter(sync);
3309 masm.bind(&end);
3310
3311 return;
3312 }
3313
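  // Sub-word case: align the address down to a 4-byte boundary, compute the
  // bit offset of the field inside that word, and build a mask that clears
  // the field. The ll.w/sc.w loop then compares and swaps only those bits.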
3314 masm.as_andi(offsetTemp, scratch, 3);
3315 masm.subPtr(offsetTemp, scratch);
3316 masm.as_slli_w(offsetTemp, offsetTemp, 3);
3317 masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
3318 masm.as_sll_w(maskTemp, maskTemp, offsetTemp);
3319 masm.as_nor(maskTemp, zero, maskTemp);
3320
3321 masm.memoryBarrierBefore(sync);
3322
3323 masm.bind(&again);
3324
3325 if (access) {
3326 masm.append(*access, masm.size());
3327 }
3328
3329 masm.as_ll_w(scratch2, scratch, 0);
3330
3331 masm.as_srl_w(output, scratch2, offsetTemp);
3332
3333 switch (nbytes) {
3334 case 1:
3335 if (signExtend) {
3336 masm.as_ext_w_b(valueTemp, oldval);
3337 masm.as_ext_w_b(output, output);
3338 } else {
3339 masm.as_andi(valueTemp, oldval, 0xff);
3340 masm.as_andi(output, output, 0xff);
3341 }
3342 break;
3343 case 2:
3344 if (signExtend) {
3345 masm.as_ext_w_h(valueTemp, oldval);
3346 masm.as_ext_w_h(output, output);
3347 } else {
3348 masm.as_bstrpick_d(valueTemp, oldval, 15, 0);
3349 masm.as_bstrpick_d(output, output, 15, 0);
3350 }
3351 break;
3352 }
3353
3354 masm.ma_b(output, valueTemp, &end, Assembler::NotEqual, ShortJump);
3355
3356 masm.as_sll_w(valueTemp, newval, offsetTemp);
3357 masm.as_and(scratch2, scratch2, maskTemp);
3358 masm.as_or(scratch2, scratch2, valueTemp);
3359
3360 masm.as_sc_w(scratch2, scratch, 0);
3361
3362 masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
3363
3364 masm.memoryBarrierAfter(sync);
3365
3366 masm.bind(&end);
3367 }
3368
3369 template <typename T>
3370 static void CompareExchange64(MacroAssembler& masm,
3371 const wasm::MemoryAccessDesc* access,
3372 const Synchronization& sync, const T& mem,
3373 Register64 expect, Register64 replace,
3374 Register64 output) {
3375 MOZ_ASSERT(expect != output && replace != output);
3376 ScratchRegisterScope scratch(masm);
3377 SecondScratchRegisterScope scratch2(masm);
3378 masm.computeEffectiveAddress(mem, scratch);
3379
3380 Label tryAgain;
3381 Label exit;
3382
3383 masm.memoryBarrierBefore(sync);
3384
3385 masm.bind(&tryAgain);
3386
3387 if (access) {
3388 masm.append(*access, masm.size());
3389 }
3390
3391 masm.as_ll_d(output.reg, scratch, 0);
3392
3393 masm.ma_b(output.reg, expect.reg, &exit, Assembler::NotEqual, ShortJump);
3394 masm.movePtr(replace.reg, scratch2);
3395 masm.as_sc_d(scratch2, scratch, 0);
3396 masm.ma_b(scratch2, Register(scratch2), &tryAgain, Assembler::Zero,
3397 ShortJump);
3398
3399 masm.memoryBarrierAfter(sync);
3400
3401 masm.bind(&exit);
3402 }
3403
3404 template <typename T>
3405 static void AtomicExchange(MacroAssembler& masm,
3406 const wasm::MemoryAccessDesc* access,
3407 Scalar::Type type, const Synchronization& sync,
3408 const T& mem, Register value, Register valueTemp,
3409 Register offsetTemp, Register maskTemp,
3410 Register output) {
3411 ScratchRegisterScope scratch(masm);
3412 SecondScratchRegisterScope scratch2(masm);
3413 bool signExtend = Scalar::isSignedIntType(type);
3414 unsigned nbytes = Scalar::byteSize(type);
3415
3416 switch (nbytes) {
3417 case 1:
3418 case 2:
3419 break;
3420 case 4:
3421 MOZ_ASSERT(valueTemp == InvalidReg);
3422 MOZ_ASSERT(offsetTemp == InvalidReg);
3423 MOZ_ASSERT(maskTemp == InvalidReg);
3424 break;
3425 default:
3426 MOZ_CRASH();
3427 }
3428
3429 Label again;
3430
3431 masm.computeEffectiveAddress(mem, scratch);
3432
3433 if (nbytes == 4) {
3434 masm.memoryBarrierBefore(sync);
3435 masm.bind(&again);
3436
3437 if (access) {
3438 masm.append(*access, masm.size());
3439 }
3440
3441 masm.as_ll_w(output, scratch, 0);
3442 masm.as_or(scratch2, value, zero);
3443 masm.as_sc_w(scratch2, scratch, 0);
3444 masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
3445
3446 masm.memoryBarrierAfter(sync);
3447
3448 return;
3449 }
3450
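  // Sub-word case: same alignment and masking scheme as CompareExchange above;
  // the new value is masked and shifted into position once, before the loop.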
3451 masm.as_andi(offsetTemp, scratch, 3);
3452 masm.subPtr(offsetTemp, scratch);
3453 masm.as_slli_w(offsetTemp, offsetTemp, 3);
3454 masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
3455 masm.as_sll_w(maskTemp, maskTemp, offsetTemp);
3456 masm.as_nor(maskTemp, zero, maskTemp);
3457 switch (nbytes) {
3458 case 1:
3459 masm.as_andi(valueTemp, value, 0xff);
3460 break;
3461 case 2:
3462 masm.as_bstrpick_d(valueTemp, value, 15, 0);
3463 break;
3464 }
3465 masm.as_sll_w(valueTemp, valueTemp, offsetTemp);
3466
3467 masm.memoryBarrierBefore(sync);
3468
3469 masm.bind(&again);
3470
3471 if (access) {
3472 masm.append(*access, masm.size());
3473 }
3474
3475 masm.as_ll_w(output, scratch, 0);
3476 masm.as_and(scratch2, output, maskTemp);
3477 masm.as_or(scratch2, scratch2, valueTemp);
3478
3479 masm.as_sc_w(scratch2, scratch, 0);
3480
3481 masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
3482
3483 masm.as_srl_w(output, output, offsetTemp);
3484
3485 switch (nbytes) {
3486 case 1:
3487 if (signExtend) {
3488 masm.as_ext_w_b(output, output);
3489 } else {
3490 masm.as_andi(output, output, 0xff);
3491 }
3492 break;
3493 case 2:
3494 if (signExtend) {
3495 masm.as_ext_w_h(output, output);
3496 } else {
3497 masm.as_bstrpick_d(output, output, 15, 0);
3498 }
3499 break;
3500 }
3501
3502 masm.memoryBarrierAfter(sync);
3503 }
3504
3505 template <typename T>
3506 static void AtomicExchange64(MacroAssembler& masm,
3507 const wasm::MemoryAccessDesc* access,
3508 const Synchronization& sync, const T& mem,
3509 Register64 value, Register64 output) {
3510 MOZ_ASSERT(value != output);
3511 ScratchRegisterScope scratch(masm);
3512 SecondScratchRegisterScope scratch2(masm);
3513 masm.computeEffectiveAddress(mem, scratch);
3514
3515 Label tryAgain;
3516
3517 masm.memoryBarrierBefore(sync);
3518
3519 masm.bind(&tryAgain);
3520
3521 if (access) {
3522 masm.append(*access, masm.size());
3523 }
3524
3525 masm.as_ll_d(output.reg, scratch, 0);
3526
3527 masm.movePtr(value.reg, scratch2);
3528 masm.as_sc_d(scratch2, scratch, 0);
3529 masm.ma_b(scratch2, Register(scratch2), &tryAgain, Assembler::Zero,
3530 ShortJump);
3531
3532 masm.memoryBarrierAfter(sync);
3533 }
3534
3535 template <typename T>
3536 static void AtomicFetchOp(MacroAssembler& masm,
3537 const wasm::MemoryAccessDesc* access,
3538 Scalar::Type type, const Synchronization& sync,
3539 AtomicOp op, const T& mem, Register value,
3540 Register valueTemp, Register offsetTemp,
3541 Register maskTemp, Register output) {
3542 ScratchRegisterScope scratch(masm);
3543 SecondScratchRegisterScope scratch2(masm);
3544 bool signExtend = Scalar::isSignedIntType(type);
3545 unsigned nbytes = Scalar::byteSize(type);
3546
3547 switch (nbytes) {
3548 case 1:
3549 case 2:
3550 break;
3551 case 4:
3552 MOZ_ASSERT(valueTemp == InvalidReg);
3553 MOZ_ASSERT(offsetTemp == InvalidReg);
3554 MOZ_ASSERT(maskTemp == InvalidReg);
3555 break;
3556 default:
3557 MOZ_CRASH();
3558 }
3559
3560 Label again;
3561
3562 masm.computeEffectiveAddress(mem, scratch);
3563
3564 if (nbytes == 4) {
3565 masm.memoryBarrierBefore(sync);
3566 masm.bind(&again);
3567
3568 if (access) {
3569 masm.append(*access, masm.size());
3570 }
3571
3572 masm.as_ll_w(output, scratch, 0);
3573
3574 switch (op) {
3575 case AtomicFetchAddOp:
3576 masm.as_add_w(scratch2, output, value);
3577 break;
3578 case AtomicFetchSubOp:
3579 masm.as_sub_w(scratch2, output, value);
3580 break;
3581 case AtomicFetchAndOp:
3582 masm.as_and(scratch2, output, value);
3583 break;
3584 case AtomicFetchOrOp:
3585 masm.as_or(scratch2, output, value);
3586 break;
3587 case AtomicFetchXorOp:
3588 masm.as_xor(scratch2, output, value);
3589 break;
3590 default:
3591 MOZ_CRASH();
3592 }
3593
3594 masm.as_sc_w(scratch2, scratch, 0);
3595 masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
3596
3597 masm.memoryBarrierAfter(sync);
3598
3599 return;
3600 }
3601
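  // Sub-word case: as above, but the fetched field is extracted, the operation
  // applied, and the result merged back on every iteration of the ll.w/sc.w
  // loop.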
3602 masm.as_andi(offsetTemp, scratch, 3);
3603 masm.subPtr(offsetTemp, scratch);
3604 masm.as_slli_w(offsetTemp, offsetTemp, 3);
3605 masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
3606 masm.as_sll_w(maskTemp, maskTemp, offsetTemp);
3607 masm.as_nor(maskTemp, zero, maskTemp);
3608
3609 masm.memoryBarrierBefore(sync);
3610
3611 masm.bind(&again);
3612
3613 if (access) {
3614 masm.append(*access, masm.size());
3615 }
3616
3617 masm.as_ll_w(scratch2, scratch, 0);
3618 masm.as_srl_w(output, scratch2, offsetTemp);
3619
3620 switch (op) {
3621 case AtomicFetchAddOp:
3622 masm.as_add_w(valueTemp, output, value);
3623 break;
3624 case AtomicFetchSubOp:
3625 masm.as_sub_w(valueTemp, output, value);
3626 break;
3627 case AtomicFetchAndOp:
3628 masm.as_and(valueTemp, output, value);
3629 break;
3630 case AtomicFetchOrOp:
3631 masm.as_or(valueTemp, output, value);
3632 break;
3633 case AtomicFetchXorOp:
3634 masm.as_xor(valueTemp, output, value);
3635 break;
3636 default:
3637 MOZ_CRASH();
3638 }
3639
3640 switch (nbytes) {
3641 case 1:
3642 masm.as_andi(valueTemp, valueTemp, 0xff);
3643 break;
3644 case 2:
3645 masm.as_bstrpick_d(valueTemp, valueTemp, 15, 0);
3646 break;
3647 }
3648
3649 masm.as_sll_w(valueTemp, valueTemp, offsetTemp);
3650
3651 masm.as_and(scratch2, scratch2, maskTemp);
3652 masm.as_or(scratch2, scratch2, valueTemp);
3653
3654 masm.as_sc_w(scratch2, scratch, 0);
3655
3656 masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
3657
3658 switch (nbytes) {
3659 case 1:
3660 if (signExtend) {
3661 masm.as_ext_w_b(output, output);
3662 } else {
3663 masm.as_andi(output, output, 0xff);
3664 }
3665 break;
3666 case 2:
3667 if (signExtend) {
3668 masm.as_ext_w_h(output, output);
3669 } else {
3670 masm.as_bstrpick_d(output, output, 15, 0);
3671 }
3672 break;
3673 }
3674
3675 masm.memoryBarrierAfter(sync);
3676 }
3677
3678 template <typename T>
3679 static void AtomicFetchOp64(MacroAssembler& masm,
3680 const wasm::MemoryAccessDesc* access,
3681 const Synchronization& sync, AtomicOp op,
3682 Register64 value, const T& mem, Register64 temp,
3683 Register64 output) {
3684 MOZ_ASSERT(value != output);
3685 MOZ_ASSERT(value != temp);
3686 ScratchRegisterScope scratch(masm);
3687 SecondScratchRegisterScope scratch2(masm);
3688 masm.computeEffectiveAddress(mem, scratch);
3689
3690 Label tryAgain;
3691
3692 masm.memoryBarrierBefore(sync);
3693
3694 masm.bind(&tryAgain);
3695
3696 if (access) {
3697 masm.append(*access, masm.size());
3698 }
3699
3700 masm.as_ll_d(output.reg, scratch, 0);
3701
3702 switch (op) {
3703 case AtomicFetchAddOp:
3704 masm.as_add_d(temp.reg, output.reg, value.reg);
3705 break;
3706 case AtomicFetchSubOp:
3707 masm.as_sub_d(temp.reg, output.reg, value.reg);
3708 break;
3709 case AtomicFetchAndOp:
3710 masm.as_and(temp.reg, output.reg, value.reg);
3711 break;
3712 case AtomicFetchOrOp:
3713 masm.as_or(temp.reg, output.reg, value.reg);
3714 break;
3715 case AtomicFetchXorOp:
3716 masm.as_xor(temp.reg, output.reg, value.reg);
3717 break;
3718 default:
3719 MOZ_CRASH();
3720 }
3721
3722 masm.as_sc_d(temp.reg, scratch, 0);
3723 masm.ma_b(temp.reg, temp.reg, &tryAgain, Assembler::Zero, ShortJump);
3724
3725 masm.memoryBarrierAfter(sync);
3726 }
3727
3728 void MacroAssembler::compareExchange(Scalar::Type type,
3729 const Synchronization& sync,
3730 const Address& mem, Register oldval,
3731 Register newval, Register valueTemp,
3732 Register offsetTemp, Register maskTemp,
3733 Register output) {
3734 CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
3735 offsetTemp, maskTemp, output);
3736 }
3737
3738 void MacroAssembler::compareExchange(Scalar::Type type,
3739 const Synchronization& sync,
3740 const BaseIndex& mem, Register oldval,
3741 Register newval, Register valueTemp,
3742 Register offsetTemp, Register maskTemp,
3743 Register output) {
3744 CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
3745 offsetTemp, maskTemp, output);
3746 }
3747
3748 void MacroAssembler::compareExchange64(const Synchronization& sync,
3749 const Address& mem, Register64 expect,
3750 Register64 replace, Register64 output) {
3751 CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
3752 }
3753
3754 void MacroAssembler::compareExchange64(const Synchronization& sync,
3755 const BaseIndex& mem, Register64 expect,
3756 Register64 replace, Register64 output) {
3757 CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
3758 }
3759
3760 void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
3761 const Address& mem, Register oldval,
3762 Register newval, Register valueTemp,
3763 Register offsetTemp, Register maskTemp,
3764 Register output) {
3765 CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
3766 newval, valueTemp, offsetTemp, maskTemp, output);
3767 }
3768
3769 void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
3770 const BaseIndex& mem, Register oldval,
3771 Register newval, Register valueTemp,
3772 Register offsetTemp, Register maskTemp,
3773 Register output) {
3774 CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
3775 newval, valueTemp, offsetTemp, maskTemp, output);
3776 }
3777
3778 void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
3779 const Address& mem,
3780 Register64 expect,
3781 Register64 replace,
3782 Register64 output) {
3783 CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
3784 output);
3785 }
3786
3787 void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
3788 const BaseIndex& mem,
3789 Register64 expect,
3790 Register64 replace,
3791 Register64 output) {
3792 CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
3793 output);
3794 }
3795
3796 void MacroAssembler::atomicExchange(Scalar::Type type,
3797 const Synchronization& sync,
3798 const Address& mem, Register value,
3799 Register valueTemp, Register offsetTemp,
3800 Register maskTemp, Register output) {
3801 AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
3802 maskTemp, output);
3803 }
3804
3805 void MacroAssembler::atomicExchange(Scalar::Type type,
3806 const Synchronization& sync,
3807 const BaseIndex& mem, Register value,
3808 Register valueTemp, Register offsetTemp,
3809 Register maskTemp, Register output) {
3810 AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
3811 maskTemp, output);
3812 }
3813
3814 void MacroAssembler::atomicExchange64(const Synchronization& sync,
3815 const Address& mem, Register64 value,
3816 Register64 output) {
3817 AtomicExchange64(*this, nullptr, sync, mem, value, output);
3818 }
3819
3820 void MacroAssembler::atomicExchange64(const Synchronization& sync,
3821 const BaseIndex& mem, Register64 value,
3822 Register64 output) {
3823 AtomicExchange64(*this, nullptr, sync, mem, value, output);
3824 }
3825
3826 void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
3827 const Address& mem, Register value,
3828 Register valueTemp, Register offsetTemp,
3829 Register maskTemp, Register output) {
3830 AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
3831 valueTemp, offsetTemp, maskTemp, output);
3832 }
3833
3834 void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
3835 const BaseIndex& mem, Register value,
3836 Register valueTemp, Register offsetTemp,
3837 Register maskTemp, Register output) {
3838 AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
3839 valueTemp, offsetTemp, maskTemp, output);
3840 }
3841
3842 void MacroAssembler::atomicFetchOp(Scalar::Type type,
3843 const Synchronization& sync, AtomicOp op,
3844 Register value, const Address& mem,
3845 Register valueTemp, Register offsetTemp,
3846 Register maskTemp, Register output) {
3847 AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
3848 offsetTemp, maskTemp, output);
3849 }
3850
3851 void MacroAssembler::atomicFetchOp(Scalar::Type type,
3852 const Synchronization& sync, AtomicOp op,
3853 Register value, const BaseIndex& mem,
3854 Register valueTemp, Register offsetTemp,
3855 Register maskTemp, Register output) {
3856 AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
3857 offsetTemp, maskTemp, output);
3858 }
3859
3860 void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
3861 Register64 value, const Address& mem,
3862 Register64 temp, Register64 output) {
3863 AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
3864 }
3865
3866 void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
3867 Register64 value, const BaseIndex& mem,
3868 Register64 temp, Register64 output) {
3869 AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
3870 }
3871
3872 void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
3873 Register64 value, const Address& mem,
3874 Register64 temp) {
3875 AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
3876 }
3877
3878 void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
3879 Register64 value, const BaseIndex& mem,
3880 Register64 temp) {
3881 AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
3882 }
3883
3884 void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
3885 AtomicOp op, Register value,
3886 const Address& mem, Register valueTemp,
3887 Register offsetTemp, Register maskTemp,
3888 Register output) {
3889 AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
3890 valueTemp, offsetTemp, maskTemp, output);
3891 }
3892
3893 void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
3894 AtomicOp op, Register value,
3895 const BaseIndex& mem, Register valueTemp,
3896 Register offsetTemp, Register maskTemp,
3897 Register output) {
3898 AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
3899 valueTemp, offsetTemp, maskTemp, output);
3900 }
3901
3902 template <typename T>
3903 static void AtomicEffectOp(MacroAssembler& masm,
3904 const wasm::MemoryAccessDesc* access,
3905 Scalar::Type type, const Synchronization& sync,
3906 AtomicOp op, const T& mem, Register value,
3907 Register valueTemp, Register offsetTemp,
3908 Register maskTemp) {
3909 ScratchRegisterScope scratch(masm);
3910 SecondScratchRegisterScope scratch2(masm);
3911 unsigned nbytes = Scalar::byteSize(type);
3912
3913 switch (nbytes) {
3914 case 1:
3915 case 2:
3916 break;
3917 case 4:
3918 MOZ_ASSERT(valueTemp == InvalidReg);
3919 MOZ_ASSERT(offsetTemp == InvalidReg);
3920 MOZ_ASSERT(maskTemp == InvalidReg);
3921 break;
3922 default:
3923 MOZ_CRASH();
3924 }
3925
3926 Label again;
3927
3928 masm.computeEffectiveAddress(mem, scratch);
3929
3930 if (nbytes == 4) {
3931 masm.memoryBarrierBefore(sync);
3932 masm.bind(&again);
3933
3934 if (access) {
3935 masm.append(*access, masm.size());
3936 }
3937
3938 masm.as_ll_w(scratch2, scratch, 0);
3939
3940 switch (op) {
3941 case AtomicFetchAddOp:
3942 masm.as_add_w(scratch2, scratch2, value);
3943 break;
3944 case AtomicFetchSubOp:
3945 masm.as_sub_w(scratch2, scratch2, value);
3946 break;
3947 case AtomicFetchAndOp:
3948 masm.as_and(scratch2, scratch2, value);
3949 break;
3950 case AtomicFetchOrOp:
3951 masm.as_or(scratch2, scratch2, value);
3952 break;
3953 case AtomicFetchXorOp:
3954 masm.as_xor(scratch2, scratch2, value);
3955 break;
3956 default:
3957 MOZ_CRASH();
3958 }
3959
3960 masm.as_sc_w(scratch2, scratch, 0);
3961 masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
3962
3963 masm.memoryBarrierAfter(sync);
3964
3965 return;
3966 }
3967
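  // Sub-word case: same as AtomicFetchOp above, except no old value is
  // returned.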
3968 masm.as_andi(offsetTemp, scratch, 3);
3969 masm.subPtr(offsetTemp, scratch);
3970 masm.as_slli_w(offsetTemp, offsetTemp, 3);
3971 masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
3972 masm.as_sll_w(maskTemp, maskTemp, offsetTemp);
3973 masm.as_nor(maskTemp, zero, maskTemp);
3974
3975 masm.memoryBarrierBefore(sync);
3976
3977 masm.bind(&again);
3978
3979 if (access) {
3980 masm.append(*access, masm.size());
3981 }
3982
3983 masm.as_ll_w(scratch2, scratch, 0);
3984 masm.as_srl_w(valueTemp, scratch2, offsetTemp);
3985
3986 switch (op) {
3987 case AtomicFetchAddOp:
3988 masm.as_add_w(valueTemp, valueTemp, value);
3989 break;
3990 case AtomicFetchSubOp:
3991 masm.as_sub_w(valueTemp, valueTemp, value);
3992 break;
3993 case AtomicFetchAndOp:
3994 masm.as_and(valueTemp, valueTemp, value);
3995 break;
3996 case AtomicFetchOrOp:
3997 masm.as_or(valueTemp, valueTemp, value);
3998 break;
3999 case AtomicFetchXorOp:
4000 masm.as_xor(valueTemp, valueTemp, value);
4001 break;
4002 default:
4003 MOZ_CRASH();
4004 }
4005
4006 switch (nbytes) {
4007 case 1:
4008 masm.as_andi(valueTemp, valueTemp, 0xff);
4009 break;
4010 case 2:
4011 masm.as_bstrpick_d(valueTemp, valueTemp, 15, 0);
4012 break;
4013 }
4014
4015 masm.as_sll_w(valueTemp, valueTemp, offsetTemp);
4016
4017 masm.as_and(scratch2, scratch2, maskTemp);
4018 masm.as_or(scratch2, scratch2, valueTemp);
4019
4020 masm.as_sc_w(scratch2, scratch, 0);
4021
4022 masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
4023
4024 masm.memoryBarrierAfter(sync);
4025 }
4026
4027 void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
4028 AtomicOp op, Register value,
4029 const Address& mem, Register valueTemp,
4030 Register offsetTemp,
4031 Register maskTemp) {
4032 AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
4033 valueTemp, offsetTemp, maskTemp);
4034 }
4035
4036 void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
4037 AtomicOp op, Register value,
4038 const BaseIndex& mem,
4039 Register valueTemp, Register offsetTemp,
4040 Register maskTemp) {
4041 AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
4042 valueTemp, offsetTemp, maskTemp);
4043 }
4044
4045 template <typename T>
4046 static void WasmAtomicExchange64(MacroAssembler& masm,
4047 const wasm::MemoryAccessDesc& access,
4048 const T& mem, Register64 value,
4049 Register64 output) {
4050 AtomicExchange64(masm, &access, access.sync(), mem, value, output);
4051 }
4052
4053 void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
4054 const Address& mem, Register64 src,
4055 Register64 output) {
4056 WasmAtomicExchange64(*this, access, mem, src, output);
4057 }
4058
4059 void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
4060 const BaseIndex& mem, Register64 src,
4061 Register64 output) {
4062 WasmAtomicExchange64(*this, access, mem, src, output);
4063 }
4064
4065 void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
4066 AtomicOp op, Register64 value,
4067 const Address& mem, Register64 temp,
4068 Register64 output) {
4069 AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
4070 }
4071
4072 void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
4073 AtomicOp op, Register64 value,
4074 const BaseIndex& mem, Register64 temp,
4075 Register64 output) {
4076 AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
4077 }
4078
4079 // ========================================================================
4080 // JS atomic operations.
4081
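// For Scalar::Uint32 the element value may not fit in an int32, so the helpers
// below run the operation into an integer temp and convert that to a double
// output; every other element type writes its result directly to a GPR.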
4082 template <typename T>
4083 static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
4084 const Synchronization& sync, const T& mem,
4085 Register oldval, Register newval,
4086 Register valueTemp, Register offsetTemp,
4087 Register maskTemp, Register temp,
4088 AnyRegister output) {
4089 if (arrayType == Scalar::Uint32) {
4090 masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
4091 offsetTemp, maskTemp, temp);
4092 masm.convertUInt32ToDouble(temp, output.fpu());
4093 } else {
4094 masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
4095 offsetTemp, maskTemp, output.gpr());
4096 }
4097 }
4098
4099 template <typename T>
4100 static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
4101 const Synchronization& sync, const T& mem,
4102 Register value, Register valueTemp,
4103 Register offsetTemp, Register maskTemp,
4104 Register temp, AnyRegister output) {
4105 if (arrayType == Scalar::Uint32) {
4106 masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
4107 maskTemp, temp);
4108 masm.convertUInt32ToDouble(temp, output.fpu());
4109 } else {
4110 masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
4111 maskTemp, output.gpr());
4112 }
4113 }
4114
4115 template <typename T>
4116 static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
4117 const Synchronization& sync, AtomicOp op,
4118 Register value, const T& mem, Register valueTemp,
4119 Register offsetTemp, Register maskTemp,
4120 Register temp, AnyRegister output) {
4121 if (arrayType == Scalar::Uint32) {
4122 masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
4123 maskTemp, temp);
4124 masm.convertUInt32ToDouble(temp, output.fpu());
4125 } else {
4126 masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
4127 maskTemp, output.gpr());
4128 }
4129 }
4130
4131 void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
4132 const Synchronization& sync,
4133 const Address& mem, Register oldval,
4134 Register newval, Register valueTemp,
4135 Register offsetTemp, Register maskTemp,
4136 Register temp, AnyRegister output) {
4137 CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
4138 offsetTemp, maskTemp, temp, output);
4139 }
4140
4141 void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
4142 const Synchronization& sync,
4143 const BaseIndex& mem, Register oldval,
4144 Register newval, Register valueTemp,
4145 Register offsetTemp, Register maskTemp,
4146 Register temp, AnyRegister output) {
4147 CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
4148 offsetTemp, maskTemp, temp, output);
4149 }
4150
4151 void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
4152 const Synchronization& sync,
4153 const Address& mem, Register value,
4154 Register valueTemp, Register offsetTemp,
4155 Register maskTemp, Register temp,
4156 AnyRegister output) {
4157 AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
4158 maskTemp, temp, output);
4159 }
4160
4161 void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
4162 const Synchronization& sync,
4163 const BaseIndex& mem, Register value,
4164 Register valueTemp, Register offsetTemp,
4165 Register maskTemp, Register temp,
4166 AnyRegister output) {
4167 AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
4168 maskTemp, temp, output);
4169 }
4170
4171 void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
4172 const Synchronization& sync, AtomicOp op,
4173 Register value, const Address& mem,
4174 Register valueTemp, Register offsetTemp,
4175 Register maskTemp, Register temp,
4176 AnyRegister output) {
4177 AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
4178 maskTemp, temp, output);
4179 }
4180
4181 void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
4182 const Synchronization& sync, AtomicOp op,
4183 Register value, const BaseIndex& mem,
4184 Register valueTemp, Register offsetTemp,
4185 Register maskTemp, Register temp,
4186 AnyRegister output) {
4187 AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
4188 maskTemp, temp, output);
4189 }
4190
4191 void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
4192 const Synchronization& sync, AtomicOp op,
4193 Register value, const BaseIndex& mem,
4194 Register valueTemp, Register offsetTemp,
4195 Register maskTemp) {
4196 AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
4197 offsetTemp, maskTemp);
4198 }
4199
4200 void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
4201 const Synchronization& sync, AtomicOp op,
4202 Register value, const Address& mem,
4203 Register valueTemp, Register offsetTemp,
4204 Register maskTemp) {
4205 AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
4206 offsetTemp, maskTemp);
4207 }
4208
4209 void MacroAssembler::flexibleQuotient32(Register rhs, Register srcDest,
4210 bool isUnsigned,
4211 const LiveRegisterSet&) {
4212 quotient32(rhs, srcDest, isUnsigned);
4213 }
4214
4215 void MacroAssembler::flexibleRemainder32(Register rhs, Register srcDest,
4216 bool isUnsigned,
4217 const LiveRegisterSet&) {
4218 remainder32(rhs, srcDest, isUnsigned);
4219 }
4220
4221 void MacroAssembler::flexibleDivMod32(Register rhs, Register srcDest,
4222 Register remOutput, bool isUnsigned,
4223 const LiveRegisterSet&) {
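  // Compute the remainder first: both instructions read srcDest, and the
  // division overwrites it with the quotient.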
4224 if (isUnsigned) {
4225 as_mod_wu(remOutput, srcDest, rhs);
4226 as_div_wu(srcDest, srcDest, rhs);
4227 } else {
4228 as_mod_w(remOutput, srcDest, rhs);
4229 as_div_w(srcDest, srcDest, rhs);
4230 }
4231 }
4232
4233 CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
4234 return movWithPatch(ImmPtr(nullptr), dest);
4235 }
4236
4237 void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
4238 CodeLocationLabel target) {
4239 PatchDataWithValueCheck(loc, ImmPtr(target.raw()), ImmPtr(nullptr));
4240 }
4241
4242 // ========================================================================
4243 // Spectre Mitigations.
4244
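// No speculation barrier is implemented for LoongArch64 yet; reaching this
// crashes rather than silently skipping the mitigation.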
4245 void MacroAssembler::speculationBarrier() { MOZ_CRASH(); }
4246
4247 void MacroAssembler::floorFloat32ToInt32(FloatRegister src, Register dest,
4248 Label* fail) {
4249 ScratchFloat32Scope fpscratch(asMasm());
4250 FloatRegister scratch = fpscratch;
4251 Label skipCheck, done;
4252
4253 // If NaN, 0 or -0, check for bailout.
4254 loadConstantFloat32(0.0f, scratch);
4255 ma_bc_s(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
4256
4257 // If the bit pattern is not zero, it is NaN or -0, so we bail.
4258 {
4259 ScratchRegisterScope scratch(asMasm());
4260 moveFromDoubleLo(src, scratch);
4261 branch32(Assembler::NotEqual, scratch, zero, fail);
4262 }
4263
4264 // Input was zero, so return zero.
4265 move32(Imm32(0), dest);
4266 ma_b(&done, ShortJump);
4267
4268 bind(&skipCheck);
4269 as_ftintrm_w_s(scratch, src);
4270 moveFromDoubleLo(scratch, dest);
4271
4272 branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
4273 branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
4274
4275 bind(&done);
4276 }
4277
4278 void MacroAssembler::floorDoubleToInt32(FloatRegister src, Register dest,
4279 Label* fail) {
4280 ScratchDoubleScope fpscratch(asMasm());
4281 FloatRegister scratch = fpscratch;
4282 Label skipCheck, done;
4283
4284 // If NaN, 0 or -0, check for bailout.
4285 loadConstantDouble(0.0, scratch);
4286 ma_bc_d(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
4287
4288 // If high part is not zero, it is NaN or -0, so we bail.
4289 {
4290 ScratchRegisterScope scratch(asMasm());
4291 moveFromDoubleHi(src, scratch);
4292 branch32(Assembler::NotEqual, scratch, zero, fail);
4293 }
4294
4295 // Input was zero, so return zero.
4296 move32(Imm32(0), dest);
4297 ma_b(&done, ShortJump);
4298
4299 bind(&skipCheck);
4300 as_ftintrm_w_d(scratch, src);
4301 moveFromDoubleLo(scratch, dest);
4302
4303 branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
4304 branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
4305
4306 bind(&done);
4307 }
4308
4309 void MacroAssembler::ceilFloat32ToInt32(FloatRegister src, Register dest,
4310 Label* fail) {
4311 ScratchFloat32Scope fpscratch(asMasm());
4312 FloatRegister scratch = fpscratch;
4313 Label performCeil, done;
4314
4315 // If x <= -1 or x > 0, perform ceil.
4316 loadConstantFloat32(0.0f, scratch);
4317 branchFloat(Assembler::DoubleGreaterThan, src, scratch, &performCeil);
4318 loadConstantFloat32(-1.0f, scratch);
4319 branchFloat(Assembler::DoubleLessThanOrEqual, src, scratch, &performCeil);
4320
4321 // If binary value is not zero, the input was not 0, so we bail.
4322 {
4323 ScratchRegisterScope scratch(asMasm());
4324 moveFromFloat32(src, scratch);
4325 branch32(Assembler::NotEqual, scratch, zero, fail);
4326 }
4327
4328 // Input was zero, so return zero.
4329 move32(Imm32(0), dest);
4330 ma_b(&done, ShortJump);
4331
4332 bind(&performCeil);
4333 as_ftintrp_w_s(scratch, src);
4334 moveFromFloat32(scratch, dest);
4335
4336 branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
4337 branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
4338
4339 bind(&done);
4340 }
4341
4342 void MacroAssembler::ceilDoubleToInt32(FloatRegister src, Register dest,
4343 Label* fail) {
4344 ScratchDoubleScope fpscratch(asMasm());
4345 FloatRegister scratch = fpscratch;
4346 Label performCeil, done;
4347
4348 // If x <= -1 or x > 0, perform ceil.
4349 loadConstantDouble(0, scratch);
4350 branchDouble(Assembler::DoubleGreaterThan, src, scratch, &performCeil);
4351 loadConstantDouble(-1.0, scratch);
4352 branchDouble(Assembler::DoubleLessThanOrEqual, src, scratch, &performCeil);
4353
4354 // If binary value is not zero, the input was not 0, so we bail.
4355 {
4356 ScratchRegisterScope scratch(asMasm());
4357 moveFromDoubleHi(src, scratch);
4358 branch32(Assembler::NotEqual, scratch, zero, fail);
4359 }
4360
4361 // Input was zero, so return zero.
4362 move32(Imm32(0), dest);
4363 ma_b(&done, ShortJump);
4364
4365 bind(&performCeil);
4366 as_ftintrp_w_d(scratch, src);
4367 moveFromDoubleLo(scratch, dest);
4368
4369 branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
4370 branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
4371
4372 bind(&done);
4373 }
4374
4375 void MacroAssembler::roundFloat32ToInt32(FloatRegister src, Register dest,
4376 FloatRegister temp, Label* fail) {
4377 ScratchFloat32Scope scratch(*this);
4378
4379 Label negative, end, skipCheck;
4380
4381 // Load biggest number less than 0.5 in the temp register.
4382 loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
4383
4384 // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
4385 loadConstantFloat32(0.0f, scratch);
4386 ma_bc_s(src, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
4387
4388 // If NaN, 0 or -0, check for bailout.
4389 ma_bc_s(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
4390
4391 // If binary value is not zero, it is NaN or -0, so we bail.
4392 {
4393 ScratchRegisterScope scratch(asMasm());
4394 moveFromFloat32(src, scratch);
4395 branch32(Assembler::NotEqual, scratch, zero, fail);
4396 }
4397
4398 // Input was zero, so return zero.
4399 move32(Imm32(0), dest);
4400 ma_b(&end, ShortJump);
4401
4402 bind(&skipCheck);
4403 as_fadd_s(scratch, src, temp);
4404 as_ftintrm_w_s(scratch, scratch);
4405
4406 moveFromFloat32(scratch, dest);
4407
4408 branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
4409 branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
4410
4411 jump(&end);
4412
4413 // Input is negative, but isn't -0.
4414 bind(&negative);
4415
4416 // Inputs in ]-0.5; 0] need 0.5 added to them; other negative inputs need
4417 // the biggest float less than 0.5 added to them.
4418 Label loadJoin;
4419 loadConstantFloat32(-0.5f, scratch);
4420 branchFloat(Assembler::DoubleLessThan, src, scratch, &loadJoin);
4421 loadConstantFloat32(0.5f, temp);
4422 bind(&loadJoin);
4423
4424 as_fadd_s(temp, src, temp);
4425
4426 // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
4427 // result is -0.
4428 branchFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch, fail);
4429
4430 // Truncate and round toward zero.
4431 // This is off-by-one for everything but integer-valued inputs.
4432 as_ftintrm_w_s(scratch, temp);
4433 moveFromFloat32(scratch, dest);
4434
4435 branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
4436
4437 bind(&end);
4438 }
4439
4440 void MacroAssembler::roundDoubleToInt32(FloatRegister src, Register dest,
4441 FloatRegister temp, Label* fail) {
4442 ScratchDoubleScope scratch(*this);
4443
4444 Label negative, end, skipCheck;
4445
4446 // Load biggest number less than 0.5 in the temp register.
4447 loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
4448
4449 // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
4450 loadConstantDouble(0.0, scratch);
4451 ma_bc_d(src, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
4452
4453 // If NaN, 0 or -0, check for bailout.
4454 ma_bc_d(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
4455
4456 // If high part is not zero, it is NaN or -0, so we bail.
4457 {
4458 ScratchRegisterScope scratch(asMasm());
4459 moveFromDoubleHi(src, scratch);
4460 branch32(Assembler::NotEqual, scratch, zero, fail);
4461 }
4462
4463 // Input was zero, so return zero.
4464 move32(Imm32(0), dest);
4465 ma_b(&end, ShortJump);
4466
4467 bind(&skipCheck);
4468 as_fadd_d(scratch, src, temp);
4469 as_ftintrm_w_d(scratch, scratch);
4470
4471 moveFromDoubleLo(scratch, dest);
4472
4473 branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
4474 branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
4475
4476 jump(&end);
4477
4478 // Input is negative, but isn't -0.
4479 bind(&negative);
4480
4481 // Inputs in ]-0.5; 0] need 0.5 added to them; other negative inputs need
4482 // the biggest double less than 0.5 added to them.
4483 Label loadJoin;
4484 loadConstantDouble(-0.5, scratch);
4485 branchDouble(Assembler::DoubleLessThan, src, scratch, &loadJoin);
4486 loadConstantDouble(0.5, temp);
4487 bind(&loadJoin);
4488
4489 addDouble(src, temp);
4490
4491 // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
4492 // result is -0.
4493 branchDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch, fail);
4494
4495 // Truncate and round toward zero.
4496 // This is off-by-one for everything but integer-valued inputs.
4497 as_ftintrm_w_d(scratch, temp);
4498 moveFromDoubleLo(scratch, dest);
4499
4500 branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
4501
4502 bind(&end);
4503 }
4504
4505 void MacroAssembler::truncFloat32ToInt32(FloatRegister src, Register dest,
4506 Label* fail) {
4507 ScratchRegisterScope scratch(asMasm());
4508 ScratchFloat32Scope fpscratch(asMasm());
4509
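  // Convert with truncation, then inspect FCSR's invalid-operation cause bit
  // (CauseV) to catch NaN and out-of-range inputs. A zero result additionally
  // checks the input's sign bit: values in ]-1; -0] truncate to 0 but must
  // fail, since the JS result would be -0.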
4510 Label notZero;
4511 as_ftintrz_w_s(fpscratch, src);
4512 as_movfcsr2gr(scratch);
4513 moveFromFloat32(fpscratch, dest);
4514 as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
4515 ma_b(dest, zero, &notZero, Assembler::NotEqual, ShortJump);
4516
4517 {
4518 // dest == zero
4519 SecondScratchRegisterScope scratch2(asMasm());
4520 moveFromFloat32(src, scratch2);
4521 // Check if input is in ]-1; -0] range by checking the sign bit.
4522 as_slt(scratch2, scratch2, zero);
4523 as_add_d(scratch, scratch, scratch2);
4524 }
4525
4526 bind(&notZero);
4527 branch32(Assembler::NotEqual, Register(scratch), zero, fail);
4528 }
4529
4530 void MacroAssembler::truncDoubleToInt32(FloatRegister src, Register dest,
4531 Label* fail) {
4532 ScratchRegisterScope scratch(asMasm());
4533 ScratchFloat32Scope fpscratch(asMasm());
4534
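  // Same scheme as truncFloat32ToInt32 above; the sign check reads the
  // double's high word.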
4535 Label notZero;
4536 as_ftintrz_w_d(fpscratch, src);
4537 as_movfcsr2gr(scratch);
4538 moveFromFloat32(fpscratch, dest);
4539 as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
4540 ma_b(dest, zero, &notZero, Assembler::NotEqual, ShortJump);
4541
4542 {
4543 // dest == zero
4544 SecondScratchRegisterScope scratch2(asMasm());
4545 moveFromDoubleHi(src, scratch2);
4546 // Check if input is in ]-1; -0] range by checking the sign bit.
4547 as_slt(scratch2, scratch2, zero);
4548 as_add_d(scratch, scratch, scratch2);
4549 }
4550
4551 bind(&notZero);
4552 branch32(Assembler::NotEqual, Register(scratch), zero, fail);
4553 }
4554
4555 void MacroAssembler::nearbyIntDouble(RoundingMode mode, FloatRegister src,
4556 FloatRegister dest) {
4557 MOZ_CRASH("not supported on this platform");
4558 }
4559
4560 void MacroAssembler::nearbyIntFloat32(RoundingMode mode, FloatRegister src,
4561 FloatRegister dest) {
4562 MOZ_CRASH("not supported on this platform");
4563 }
4564
4565 void MacroAssembler::copySignDouble(FloatRegister lhs, FloatRegister rhs,
4566 FloatRegister output) {
4567 MOZ_CRASH("not supported on this platform");
4568 }
4569
4570 void MacroAssemblerLOONG64Compat::move32(Imm32 imm, Register dest) {
4571 ma_li(dest, imm);
4572 }
4573
4574 void MacroAssemblerLOONG64Compat::move32(Register src, Register dest) {
4575 as_slli_w(dest, src, 0);
4576 }
4577
4578 void MacroAssemblerLOONG64Compat::movePtr(Register src, Register dest) {
4579 as_or(dest, src, zero);
4580 }
4581 void MacroAssemblerLOONG64Compat::movePtr(ImmWord imm, Register dest) {
4582 ma_li(dest, imm);
4583 }
4584
4585 void MacroAssemblerLOONG64Compat::movePtr(ImmGCPtr imm, Register dest) {
4586 ma_li(dest, imm);
4587 }
4588
4589 void MacroAssemblerLOONG64Compat::movePtr(ImmPtr imm, Register dest) {
4590 movePtr(ImmWord(uintptr_t(imm.value)), dest);
4591 }
4592
4593 void MacroAssemblerLOONG64Compat::movePtr(wasm::SymbolicAddress imm,
4594 Register dest) {
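  // Record a patchable symbolic access; the -1 placeholder is replaced with
  // the real address when the module is linked.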
4595 append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
4596 ma_liPatchable(dest, ImmWord(-1));
4597 }
4598
4599 void MacroAssemblerLOONG64Compat::load8ZeroExtend(const Address& address,
4600 Register dest) {
4601 ma_load(dest, address, SizeByte, ZeroExtend);
4602 }
4603
4604 void MacroAssemblerLOONG64Compat::load8ZeroExtend(const BaseIndex& src,
4605 Register dest) {
4606 ma_load(dest, src, SizeByte, ZeroExtend);
4607 }
4608
4609 void MacroAssemblerLOONG64Compat::load8SignExtend(const Address& address,
4610 Register dest) {
4611 ma_load(dest, address, SizeByte, SignExtend);
4612 }
4613
4614 void MacroAssemblerLOONG64Compat::load8SignExtend(const BaseIndex& src,
4615 Register dest) {
4616 ma_load(dest, src, SizeByte, SignExtend);
4617 }
4618
4619 void MacroAssemblerLOONG64Compat::load16ZeroExtend(const Address& address,
4620 Register dest) {
4621 ma_load(dest, address, SizeHalfWord, ZeroExtend);
4622 }
4623
4624 void MacroAssemblerLOONG64Compat::load16ZeroExtend(const BaseIndex& src,
4625 Register dest) {
4626 ma_load(dest, src, SizeHalfWord, ZeroExtend);
4627 }
4628
4629 void MacroAssemblerLOONG64Compat::load16SignExtend(const Address& address,
4630 Register dest) {
4631 ma_load(dest, address, SizeHalfWord, SignExtend);
4632 }
4633
4634 void MacroAssemblerLOONG64Compat::load16SignExtend(const BaseIndex& src,
4635 Register dest) {
4636 ma_load(dest, src, SizeHalfWord, SignExtend);
4637 }
4638
4639 void MacroAssemblerLOONG64Compat::load32(const Address& address,
4640 Register dest) {
4641 ma_ld_w(dest, address);
4642 }
4643
4644 void MacroAssemblerLOONG64Compat::load32(const BaseIndex& address,
4645 Register dest) {
4646 Register base = address.base;
4647 Register index = address.index;
4648 int32_t offset = address.offset;
4649 uint32_t shift = Imm32::ShiftOf(address.scale).value;
4650
4651 if (offset != 0) {
4652 ScratchRegisterScope scratch(asMasm());
4653 ma_li(scratch, Imm32(offset));
4654 if (shift != 0) {
4655 MOZ_ASSERT(shift <= 4);
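      // alsl.d computes (index << (sa + 1)) + scratch, so passing shift - 1
      // scales the index by 1 << shift.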
4656 as_alsl_d(scratch, index, scratch, shift - 1);
4657 } else {
4658 as_add_d(scratch, index, scratch);
4659 }
4660 as_ldx_w(dest, base, scratch);
4661 } else if (shift != 0) {
4662 ScratchRegisterScope scratch(asMasm());
4663 as_slli_d(scratch, index, shift);
4664 as_ldx_w(dest, base, scratch);
4665 } else {
4666 as_ldx_w(dest, base, index);
4667 }
4668 }
4669
4670 void MacroAssemblerLOONG64Compat::load32(AbsoluteAddress address,
4671 Register dest) {
4672 ScratchRegisterScope scratch(asMasm());
4673 movePtr(ImmPtr(address.addr), scratch);
4674 load32(Address(scratch, 0), dest);
4675 }
4676
4677 void MacroAssemblerLOONG64Compat::load32(wasm::SymbolicAddress address,
4678 Register dest) {
4679 ScratchRegisterScope scratch(asMasm());
4680 movePtr(address, scratch);
4681 load32(Address(scratch, 0), dest);
4682 }
4683
4684 void MacroAssemblerLOONG64Compat::loadPtr(const Address& address,
4685 Register dest) {
4686 ma_ld_d(dest, address);
4687 }
4688
4689 void MacroAssemblerLOONG64Compat::loadPtr(const BaseIndex& src, Register dest) {
4690 Register base = src.base;
4691 Register index = src.index;
4692 int32_t offset = src.offset;
4693 uint32_t shift = Imm32::ShiftOf(src.scale).value;
4694
4695 if (offset != 0) {
4696 ScratchRegisterScope scratch(asMasm());
4697 ma_li(scratch, Imm32(offset));
4698 if (shift != 0) {
4699 MOZ_ASSERT(shift <= 4);
4700 as_alsl_d(scratch, index, scratch, shift - 1);
4701 } else {
4702 as_add_d(scratch, index, scratch);
4703 }
4704 as_ldx_d(dest, base, scratch);
4705 } else if (shift != 0) {
4706 ScratchRegisterScope scratch(asMasm());
4707 as_slli_d(scratch, index, shift);
4708 as_ldx_d(dest, base, scratch);
4709 } else {
4710 as_ldx_d(dest, base, index);
4711 }
4712 }
4713
loadPtr(AbsoluteAddress address,Register dest)4714 void MacroAssemblerLOONG64Compat::loadPtr(AbsoluteAddress address,
4715 Register dest) {
4716 ScratchRegisterScope scratch(asMasm());
4717 movePtr(ImmPtr(address.addr), scratch);
4718 loadPtr(Address(scratch, 0), dest);
4719 }
4720
loadPtr(wasm::SymbolicAddress address,Register dest)4721 void MacroAssemblerLOONG64Compat::loadPtr(wasm::SymbolicAddress address,
4722 Register dest) {
4723 ScratchRegisterScope scratch(asMasm());
4724 movePtr(address, scratch);
4725 loadPtr(Address(scratch, 0), dest);
4726 }
4727
loadPrivate(const Address & address,Register dest)4728 void MacroAssemblerLOONG64Compat::loadPrivate(const Address& address,
4729 Register dest) {
4730 loadPtr(address, dest);
4731 }
4732
store8(Imm32 imm,const Address & address)4733 void MacroAssemblerLOONG64Compat::store8(Imm32 imm, const Address& address) {
4734 SecondScratchRegisterScope scratch2(asMasm());
4735 ma_li(scratch2, imm);
4736 ma_store(scratch2, address, SizeByte);
4737 }
4738
store8(Register src,const Address & address)4739 void MacroAssemblerLOONG64Compat::store8(Register src, const Address& address) {
4740 ma_store(src, address, SizeByte);
4741 }
4742
store8(Imm32 imm,const BaseIndex & dest)4743 void MacroAssemblerLOONG64Compat::store8(Imm32 imm, const BaseIndex& dest) {
4744 ma_store(imm, dest, SizeByte);
4745 }
4746
store8(Register src,const BaseIndex & dest)4747 void MacroAssemblerLOONG64Compat::store8(Register src, const BaseIndex& dest) {
4748 ma_store(src, dest, SizeByte);
4749 }
4750
store16(Imm32 imm,const Address & address)4751 void MacroAssemblerLOONG64Compat::store16(Imm32 imm, const Address& address) {
4752 SecondScratchRegisterScope scratch2(asMasm());
4753 ma_li(scratch2, imm);
4754 ma_store(scratch2, address, SizeHalfWord);
4755 }
4756
store16(Register src,const Address & address)4757 void MacroAssemblerLOONG64Compat::store16(Register src,
4758 const Address& address) {
4759 ma_store(src, address, SizeHalfWord);
4760 }
4761
store16(Imm32 imm,const BaseIndex & dest)4762 void MacroAssemblerLOONG64Compat::store16(Imm32 imm, const BaseIndex& dest) {
4763 ma_store(imm, dest, SizeHalfWord);
4764 }
4765
store16(Register src,const BaseIndex & address)4766 void MacroAssemblerLOONG64Compat::store16(Register src,
4767 const BaseIndex& address) {
4768 ma_store(src, address, SizeHalfWord);
4769 }
4770
store32(Register src,AbsoluteAddress address)4771 void MacroAssemblerLOONG64Compat::store32(Register src,
4772 AbsoluteAddress address) {
4773 ScratchRegisterScope scratch(asMasm());
4774 movePtr(ImmPtr(address.addr), scratch);
4775 store32(src, Address(scratch, 0));
4776 }
4777
store32(Register src,const Address & address)4778 void MacroAssemblerLOONG64Compat::store32(Register src,
4779 const Address& address) {
4780 ma_store(src, address, SizeWord);
4781 }
4782
store32(Imm32 src,const Address & address)4783 void MacroAssemblerLOONG64Compat::store32(Imm32 src, const Address& address) {
4784 SecondScratchRegisterScope scratch2(asMasm());
4785 move32(src, scratch2);
4786 ma_store(scratch2, address, SizeWord);
4787 }
4788
store32(Imm32 imm,const BaseIndex & dest)4789 void MacroAssemblerLOONG64Compat::store32(Imm32 imm, const BaseIndex& dest) {
4790 ma_store(imm, dest, SizeWord);
4791 }
4792
store32(Register src,const BaseIndex & dest)4793 void MacroAssemblerLOONG64Compat::store32(Register src, const BaseIndex& dest) {
4794 ma_store(src, dest, SizeWord);
4795 }
4796
4797 template <typename T>
storePtr(ImmWord imm,T address)4798 void MacroAssemblerLOONG64Compat::storePtr(ImmWord imm, T address) {
4799 SecondScratchRegisterScope scratch2(asMasm());
4800 ma_li(scratch2, imm);
4801 ma_store(scratch2, address, SizeDouble);
4802 }
4803
4804 template void MacroAssemblerLOONG64Compat::storePtr<Address>(ImmWord imm,
4805 Address address);
4806 template void MacroAssemblerLOONG64Compat::storePtr<BaseIndex>(
4807 ImmWord imm, BaseIndex address);
4808
4809 template <typename T>
storePtr(ImmPtr imm,T address)4810 void MacroAssemblerLOONG64Compat::storePtr(ImmPtr imm, T address) {
4811 storePtr(ImmWord(uintptr_t(imm.value)), address);
4812 }
4813
4814 template void MacroAssemblerLOONG64Compat::storePtr<Address>(ImmPtr imm,
4815 Address address);
4816 template void MacroAssemblerLOONG64Compat::storePtr<BaseIndex>(
4817 ImmPtr imm, BaseIndex address);
4818
4819 template <typename T>
storePtr(ImmGCPtr imm,T address)4820 void MacroAssemblerLOONG64Compat::storePtr(ImmGCPtr imm, T address) {
4821 SecondScratchRegisterScope scratch2(asMasm());
4822 movePtr(imm, scratch2);
4823 storePtr(scratch2, address);
4824 }
4825
4826 template void MacroAssemblerLOONG64Compat::storePtr<Address>(ImmGCPtr imm,
4827 Address address);
4828 template void MacroAssemblerLOONG64Compat::storePtr<BaseIndex>(
4829 ImmGCPtr imm, BaseIndex address);
4830
storePtr(Register src,const Address & address)4831 void MacroAssemblerLOONG64Compat::storePtr(Register src,
4832 const Address& address) {
4833 ma_st_d(src, address);
4834 }
4835
storePtr(Register src,const BaseIndex & address)4836 void MacroAssemblerLOONG64Compat::storePtr(Register src,
4837 const BaseIndex& address) {
4838 Register base = address.base;
4839 Register index = address.index;
4840 int32_t offset = address.offset;
4841 int32_t shift = Imm32::ShiftOf(address.scale).value;
4842
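  // Three cases: with no offset and no scale, use stx.d directly; an offset
  // that fits a signed 12-bit immediate is folded into st.d after combining
  // the base and the scaled index; otherwise materialize the offset in the
  // scratch register first and fall back to an indexed store.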
  if ((offset == 0) && (shift == 0)) {
    as_stx_d(src, base, index);
  } else if (is_intN(offset, 12)) {
    ScratchRegisterScope scratch(asMasm());
    if (shift == 0) {
      as_add_d(scratch, base, index);
    } else {
      as_alsl_d(scratch, index, base, shift - 1);
    }
    as_st_d(src, scratch, offset);
  } else {
    ScratchRegisterScope scratch(asMasm());
    ma_li(scratch, Imm32(offset));
    if (shift == 0) {
      as_add_d(scratch, scratch, index);
    } else {
      as_alsl_d(scratch, index, scratch, shift - 1);
    }
    as_stx_d(src, base, scratch);
  }
}

void MacroAssemblerLOONG64Compat::storePtr(Register src, AbsoluteAddress dest) {
  ScratchRegisterScope scratch(asMasm());
  movePtr(ImmPtr(dest.addr), scratch);
  storePtr(src, Address(scratch, 0));
}

void MacroAssemblerLOONG64Compat::testNullSet(Condition cond,
                                              const ValueOperand& value,
                                              Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  SecondScratchRegisterScope scratch2(asMasm());
  splitTag(value, scratch2);
  ma_cmp_set(dest, scratch2, ImmTag(JSVAL_TAG_NULL), cond);
}

void MacroAssemblerLOONG64Compat::testObjectSet(Condition cond,
                                                const ValueOperand& value,
                                                Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  SecondScratchRegisterScope scratch2(asMasm());
  splitTag(value, scratch2);
  ma_cmp_set(dest, scratch2, ImmTag(JSVAL_TAG_OBJECT), cond);
}

void MacroAssemblerLOONG64Compat::testUndefinedSet(Condition cond,
                                                   const ValueOperand& value,
                                                   Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  SecondScratchRegisterScope scratch2(asMasm());
  splitTag(value, scratch2);
  ma_cmp_set(dest, scratch2, ImmTag(JSVAL_TAG_UNDEFINED), cond);
}

void MacroAssemblerLOONG64Compat::unboxInt32(const ValueOperand& operand,
                                             Register dest) {
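  // slli.w with a zero shift amount writes the low 32 bits of the source,
  // sign-extended to 64 bits, which both strips the tag and canonicalizes
  // the int32 payload.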
  as_slli_w(dest, operand.valueReg(), 0);
}

void MacroAssemblerLOONG64Compat::unboxInt32(Register src, Register dest) {
  as_slli_w(dest, src, 0);
}

void MacroAssemblerLOONG64Compat::unboxInt32(const Address& src,
                                             Register dest) {
  load32(Address(src.base, src.offset), dest);
}

void MacroAssemblerLOONG64Compat::unboxInt32(const BaseIndex& src,
                                             Register dest) {
  SecondScratchRegisterScope scratch2(asMasm());
  computeScaledAddress(src, scratch2);
  load32(Address(scratch2, src.offset), dest);
}

void MacroAssemblerLOONG64Compat::unboxBoolean(const ValueOperand& operand,
                                               Register dest) {
  as_slli_w(dest, operand.valueReg(), 0);
}

void MacroAssemblerLOONG64Compat::unboxBoolean(Register src, Register dest) {
  as_slli_w(dest, src, 0);
}

void MacroAssemblerLOONG64Compat::unboxBoolean(const Address& src,
                                               Register dest) {
  ma_ld_w(dest, src);
}

void MacroAssemblerLOONG64Compat::unboxBoolean(const BaseIndex& src,
                                               Register dest) {
  SecondScratchRegisterScope scratch2(asMasm());
  computeScaledAddress(src, scratch2);
  ma_ld_w(dest, Address(scratch2, src.offset));
}

void MacroAssemblerLOONG64Compat::unboxDouble(const ValueOperand& operand,
                                              FloatRegister dest) {
  as_movgr2fr_d(dest, operand.valueReg());
}

void MacroAssemblerLOONG64Compat::unboxDouble(const Address& src,
                                              FloatRegister dest) {
  ma_fld_d(dest, Address(src.base, src.offset));
}

void MacroAssemblerLOONG64Compat::unboxDouble(const BaseIndex& src,
                                              FloatRegister dest) {
  SecondScratchRegisterScope scratch2(asMasm());
  loadPtr(src, scratch2);
  unboxDouble(ValueOperand(scratch2), dest);
}

void MacroAssemblerLOONG64Compat::unboxString(const ValueOperand& operand,
                                              Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
}

void MacroAssemblerLOONG64Compat::unboxString(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
}

void MacroAssemblerLOONG64Compat::unboxString(const Address& src,
                                              Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
}

void MacroAssemblerLOONG64Compat::unboxSymbol(const ValueOperand& operand,
                                              Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
}

void MacroAssemblerLOONG64Compat::unboxSymbol(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
}

void MacroAssemblerLOONG64Compat::unboxSymbol(const Address& src,
                                              Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
}

void MacroAssemblerLOONG64Compat::unboxBigInt(const ValueOperand& operand,
                                              Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
}

void MacroAssemblerLOONG64Compat::unboxBigInt(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
}

void MacroAssemblerLOONG64Compat::unboxBigInt(const Address& src,
                                              Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
}

void MacroAssemblerLOONG64Compat::unboxObject(const ValueOperand& src,
                                              Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}

void MacroAssemblerLOONG64Compat::unboxObject(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}

void MacroAssemblerLOONG64Compat::unboxObject(const Address& src,
                                              Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}

void MacroAssemblerLOONG64Compat::unboxValue(const ValueOperand& src,
                                             AnyRegister dest,
                                             JSValueType type) {
  if (dest.isFloat()) {
    Label notInt32, end;
    asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
    convertInt32ToDouble(src.valueReg(), dest.fpu());
    ma_b(&end, ShortJump);
    bind(&notInt32);
    unboxDouble(src, dest.fpu());
    bind(&end);
  } else {
    unboxNonDouble(src, dest.gpr(), type);
  }
}

void MacroAssemblerLOONG64Compat::boxDouble(FloatRegister src,
                                            const ValueOperand& dest,
                                            FloatRegister) {
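  // In this encoding (mirroring unboxDouble above) a double Value is the raw
  // IEEE-754 bit pattern, so boxing is a plain move from the FPR to the value
  // register.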
  as_movfr2gr_d(dest.valueReg(), src);
}

void MacroAssemblerLOONG64Compat::boxNonDouble(JSValueType type, Register src,
                                               const ValueOperand& dest) {
  boxValue(type, src, dest.valueReg());
}

void MacroAssemblerLOONG64Compat::boolValueToDouble(const ValueOperand& operand,
                                                    FloatRegister dest) {
  ScratchRegisterScope scratch(asMasm());
  convertBoolToInt32(operand.valueReg(), scratch);
  convertInt32ToDouble(scratch, dest);
}

void MacroAssemblerLOONG64Compat::int32ValueToDouble(
    const ValueOperand& operand, FloatRegister dest) {
  convertInt32ToDouble(operand.valueReg(), dest);
}

void MacroAssemblerLOONG64Compat::boolValueToFloat32(
    const ValueOperand& operand, FloatRegister dest) {
  ScratchRegisterScope scratch(asMasm());
  convertBoolToInt32(operand.valueReg(), scratch);
  convertInt32ToFloat32(scratch, dest);
}

void MacroAssemblerLOONG64Compat::int32ValueToFloat32(
    const ValueOperand& operand, FloatRegister dest) {
  convertInt32ToFloat32(operand.valueReg(), dest);
}

void MacroAssemblerLOONG64Compat::loadConstantFloat32(float f,
                                                      FloatRegister dest) {
  ma_lis(dest, f);
}

void MacroAssemblerLOONG64Compat::loadInt32OrDouble(const Address& src,
                                                    FloatRegister dest) {
  SecondScratchRegisterScope scratch2(asMasm());
  Label end;

  // If it's an int, convert it to double.
  loadPtr(Address(src.base, src.offset), scratch2);
  as_movgr2fr_d(dest, scratch2);
  as_srli_d(scratch2, scratch2, JSVAL_TAG_SHIFT);
  asMasm().branchTestInt32(Assembler::NotEqual, scratch2, &end);
  as_ffint_d_w(dest, dest);

  bind(&end);
}

void MacroAssemblerLOONG64Compat::loadInt32OrDouble(const BaseIndex& addr,
                                                    FloatRegister dest) {
  SecondScratchRegisterScope scratch2(asMasm());
  Label end;

  // If it's an int, convert it to double.
  computeScaledAddress(addr, scratch2);
  // Since we only have one scratch, we need to stomp over it with the tag.
  loadPtr(Address(scratch2, 0), scratch2);
  as_movgr2fr_d(dest, scratch2);
  as_srli_d(scratch2, scratch2, JSVAL_TAG_SHIFT);
  asMasm().branchTestInt32(Assembler::NotEqual, scratch2, &end);
  as_ffint_d_w(dest, dest);

  bind(&end);
}

void MacroAssemblerLOONG64Compat::loadConstantDouble(double dp,
                                                     FloatRegister dest) {
  ma_lid(dest, dp);
}

Register MacroAssemblerLOONG64Compat::extractObject(const Address& address,
                                                    Register scratch) {
  loadPtr(Address(address.base, address.offset), scratch);
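  // bstrpick.d zero-extends bits [JSVAL_TAG_SHIFT-1:0], dropping the tag and
  // leaving only the payload pointer.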
  as_bstrpick_d(scratch, scratch, JSVAL_TAG_SHIFT - 1, 0);
  return scratch;
}

Register MacroAssemblerLOONG64Compat::extractTag(const Address& address,
                                                 Register scratch) {
  loadPtr(Address(address.base, address.offset), scratch);
  as_bstrpick_d(scratch, scratch, 63, JSVAL_TAG_SHIFT);
  return scratch;
}

Register MacroAssemblerLOONG64Compat::extractTag(const BaseIndex& address,
                                                 Register scratch) {
  computeScaledAddress(address, scratch);
  return extractTag(Address(scratch, address.offset), scratch);
}

/////////////////////////////////////////////////////////////////
// X86/X64-common/ARM/LoongArch interface.
/////////////////////////////////////////////////////////////////
void MacroAssemblerLOONG64Compat::storeValue(ValueOperand val,
                                             const Address& dest) {
  storePtr(val.valueReg(), Address(dest.base, dest.offset));
}

void MacroAssemblerLOONG64Compat::storeValue(ValueOperand val,
                                             const BaseIndex& dest) {
  storePtr(val.valueReg(), dest);
}

void MacroAssemblerLOONG64Compat::storeValue(JSValueType type, Register reg,
                                             Address dest) {
  SecondScratchRegisterScope scratch2(asMasm());
  MOZ_ASSERT(dest.base != scratch2);

  tagValue(type, reg, ValueOperand(scratch2));
  storePtr(scratch2, dest);
}

void MacroAssemblerLOONG64Compat::storeValue(JSValueType type, Register reg,
                                             BaseIndex dest) {
  SecondScratchRegisterScope scratch2(asMasm());
  MOZ_ASSERT(dest.base != scratch2);

  tagValue(type, reg, ValueOperand(scratch2));
  storePtr(scratch2, dest);
}

void MacroAssemblerLOONG64Compat::storeValue(const Value& val, Address dest) {
  SecondScratchRegisterScope scratch2(asMasm());
  MOZ_ASSERT(dest.base != scratch2);

  if (val.isGCThing()) {
    writeDataRelocation(val);
    movWithPatch(ImmWord(val.asRawBits()), scratch2);
  } else {
    ma_li(scratch2, ImmWord(val.asRawBits()));
  }
  storePtr(scratch2, dest);
}

void MacroAssemblerLOONG64Compat::storeValue(const Value& val, BaseIndex dest) {
  SecondScratchRegisterScope scratch2(asMasm());
  MOZ_ASSERT(dest.base != scratch2);

  if (val.isGCThing()) {
    writeDataRelocation(val);
    movWithPatch(ImmWord(val.asRawBits()), scratch2);
  } else {
    ma_li(scratch2, ImmWord(val.asRawBits()));
  }
  storePtr(scratch2, dest);
}

void MacroAssemblerLOONG64Compat::loadValue(Address src, ValueOperand val) {
  loadPtr(src, val.valueReg());
}

void MacroAssemblerLOONG64Compat::loadValue(const BaseIndex& src,
                                            ValueOperand val) {
  loadPtr(src, val.valueReg());
}

void MacroAssemblerLOONG64Compat::tagValue(JSValueType type, Register payload,
                                           ValueOperand dest) {
  ScratchRegisterScope scratch(asMasm());
  MOZ_ASSERT(dest.valueReg() != scratch);

  if (payload == dest.valueReg()) {
    as_or(scratch, payload, zero);
    payload = scratch;
  }
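  // Write the shifted type tag, then insert the payload below it: int32 and
  // boolean payloads occupy the low 32 bits, other payloads occupy bits
  // [JSVAL_TAG_SHIFT-1:0].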
  ma_li(dest.valueReg(), ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)));
  if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
    as_bstrins_d(dest.valueReg(), payload, 31, 0);
  } else {
    as_bstrins_d(dest.valueReg(), payload, JSVAL_TAG_SHIFT - 1, 0);
  }
}

void MacroAssemblerLOONG64Compat::pushValue(ValueOperand val) {
  push(val.valueReg());
}

void MacroAssemblerLOONG64Compat::pushValue(const Address& addr) { push(addr); }

void MacroAssemblerLOONG64Compat::popValue(ValueOperand val) {
  pop(val.valueReg());
}

void MacroAssemblerLOONG64Compat::breakpoint(uint32_t value) {
  as_break(value);
}

void MacroAssemblerLOONG64Compat::handleFailureWithHandlerTail(
    Label* profilerExitTail) {
  // Reserve space for exception information.
  int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
             ~(ABIStackAlignment - 1);
  asMasm().subPtr(Imm32(size), StackPointer);
  mov(StackPointer, a0);  // Use a0 since it is the first function argument.

  // Call the handler.
  using Fn = void (*)(ResumeFromException* rfe);
  asMasm().setupUnalignedABICall(a1);
  asMasm().passABIArg(a0);
  asMasm().callWithABI<Fn, HandleException>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  Label entryFrame;
  Label catch_;
  Label finally;
  Label return_;
  Label bailout;
  Label wasm;
  Label wasmCatch;

  // a0 is already clobbered, so use it to dispatch on the resume kind.
  load32(Address(StackPointer, offsetof(ResumeFromException, kind)), a0);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
                    &entryFrame);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_CATCH), &catch_);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_FINALLY), &finally);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_WASM), &wasm);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_WASM_CATCH), &wasmCatch);

  breakpoint();  // Invalid kind.

  // No exception handler. Load the error value, load the new stack pointer
  // and return from the entry frame.
  bind(&entryFrame);
  asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);

  // We're going to be returning by the Ion calling convention.
  ma_pop(ra);
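  // jirl with rd = $zero is an indirect jump to ra without linking, i.e. a
  // plain return.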
  as_jirl(zero, ra, BOffImm16(0));

  // If we found a catch handler, this must be a baseline frame. Restore
  // state and jump to the catch block.
  bind(&catch_);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, target)), a0);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  jump(a0);

  // If we found a finally block, this must be a baseline frame. Push
  // two values expected by JSOp::Retsub: BooleanValue(true) and the
  // exception.
  bind(&finally);
  ValueOperand exception = ValueOperand(a1);
  loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);

  loadPtr(Address(sp, offsetof(ResumeFromException, target)), a0);
  loadPtr(Address(sp, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);

  pushValue(BooleanValue(true));
  pushValue(exception);
  jump(a0);

  // Only used in debug mode. Return BaselineFrame->returnValue() to the
  // caller.
  bind(&return_);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  loadValue(
      Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
      JSReturnOperand);
  as_or(StackPointer, BaselineFrameReg, zero);
  pop(BaselineFrameReg);

  // If profiling is enabled, then update the lastProfilingFrame to refer to
  // the caller frame before returning.
  {
    Label skipProfilingInstrumentation;
    // Test if the profiler is enabled.
    AbsoluteAddress addressOfEnabled(
        GetJitContext()->runtime->geckoProfiler().addressOfEnabled());
    asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
                      &skipProfilingInstrumentation);
    jump(profilerExitTail);
    bind(&skipProfilingInstrumentation);
  }

  ret();

  // If we are bailing out to baseline to handle an exception, jump to
  // the bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
  bind(&bailout);
  loadPtr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), a2);
  ma_li(ReturnReg, Imm32(1));
  loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
  jump(a1);

  // If we are throwing and the innermost frame was a wasm frame, reset SP and
  // FP; SP is pointing to the unwound return address to the wasm entry, so
  // we can just ret().
  bind(&wasm);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          FramePointer);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  ret();

  // Found a wasm catch handler, restore state and jump to it.
  bind(&wasmCatch);
  loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          FramePointer);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  jump(a1);
}

CodeOffset MacroAssemblerLOONG64Compat::toggledJump(Label* label) {
  CodeOffset ret(nextOffset().getOffset());
  ma_b(label);
  return ret;
}

CodeOffset MacroAssemblerLOONG64Compat::toggledCall(JitCode* target,
                                                    bool enabled) {
  ScratchRegisterScope scratch(asMasm());
  BufferOffset bo = nextOffset();
  CodeOffset offset(bo.getOffset());  // First instruction location, not changed.
  addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
  ma_liPatchable(scratch, ImmPtr(target->raw()));
  if (enabled) {
    as_jirl(ra, scratch, BOffImm16(0));
  } else {
    as_nop();
  }
  MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
                            ToggledCallSize(nullptr));
  return offset;  // Location of the first instruction of the call sequence.
}

void MacroAssembler::shiftIndex32AndAdd(Register indexTemp32, int shift,
                                        Register pointer) {
  if (IsShiftInScaleRange(shift)) {
    computeEffectiveAddress(
        BaseIndex(pointer, indexTemp32, ShiftToScale(shift)), pointer);
    return;
  }
  lshift32(Imm32(shift), indexTemp32);
  addPtr(indexTemp32, pointer);
}

//}}} check_macroassembler_style

}  // namespace jit
}  // namespace js