/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips-shared/CodeGenerator-mips-shared.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"

#include "jsnum.h"

#include "jit/CodeGenerator.h"
#include "jit/InlineScriptTree.h"
#include "jit/JitRuntime.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "vm/JSContext.h"
#include "vm/Realm.h"
#include "vm/Shape.h"
#include "vm/TraceLogging.h"

#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
#include "vm/JSScript-inl.h"

using namespace js;
using namespace js::jit;

using JS::GenericNaN;
using JS::ToInt32;
using mozilla::DebugOnly;
using mozilla::FloorLog2;
using mozilla::NegativeInfinity;

// shared
CodeGeneratorMIPSShared::CodeGeneratorMIPSShared(MIRGenerator* gen,
                                                 LIRGraph* graph,
                                                 MacroAssembler* masm)
    : CodeGeneratorShared(gen, graph, masm) {}

Operand CodeGeneratorMIPSShared::ToOperand(const LAllocation& a) {
  if (a.isGeneralReg()) {
    return Operand(a.toGeneralReg()->reg());
  }
  if (a.isFloatReg()) {
    return Operand(a.toFloatReg()->reg());
  }
  return Operand(ToAddress(a));
}

Operand CodeGeneratorMIPSShared::ToOperand(const LAllocation* a) {
  return ToOperand(*a);
}

Operand CodeGeneratorMIPSShared::ToOperand(const LDefinition* def) {
  return ToOperand(def->output());
}

#ifdef JS_PUNBOX64
Operand CodeGeneratorMIPSShared::ToOperandOrRegister64(
    const LInt64Allocation input) {
  return ToOperand(input.value());
}
#else
Register64 CodeGeneratorMIPSShared::ToOperandOrRegister64(
    const LInt64Allocation input) {
  return ToRegister64(input);
}
#endif

void CodeGeneratorMIPSShared::branchToBlock(Assembler::FloatFormat fmt,
                                            FloatRegister lhs,
                                            FloatRegister rhs, MBasicBlock* mir,
                                            Assembler::DoubleCondition cond) {
  // Skip past trivial blocks.
  Label* label = skipTrivialBlocks(mir)->lir()->label();
  if (fmt == Assembler::DoubleFloat) {
    masm.branchDouble(cond, lhs, rhs, label);
  } else {
    masm.branchFloat(cond, lhs, rhs, label);
  }
}

FrameSizeClass FrameSizeClass::FromDepth(uint32_t frameDepth) {
  return FrameSizeClass::None();
}

FrameSizeClass FrameSizeClass::ClassLimit() { return FrameSizeClass(0); }

uint32_t FrameSizeClass::frameSize() const {
  MOZ_CRASH("MIPS does not use frame size classes");
}

void OutOfLineBailout::accept(CodeGeneratorMIPSShared* codegen) {
  codegen->visitOutOfLineBailout(this);
}

void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
  const LAllocation* opd = test->getOperand(0);
  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  emitBranch(ToRegister(opd), Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
}

void CodeGenerator::visitCompare(LCompare* comp) {
  MCompare* mir = comp->mir();
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
  const LAllocation* left = comp->getOperand(0);
  const LAllocation* right = comp->getOperand(1);
  const LDefinition* def = comp->getDef(0);

#ifdef JS_CODEGEN_MIPS64
  if (mir->compareType() == MCompare::Compare_Object ||
      mir->compareType() == MCompare::Compare_Symbol ||
      mir->compareType() == MCompare::Compare_UIntPtr) {
    if (right->isConstant()) {
      MOZ_ASSERT(mir->compareType() == MCompare::Compare_UIntPtr);
      masm.cmpPtrSet(cond, ToRegister(left), Imm32(ToInt32(right)),
                     ToRegister(def));
    } else if (right->isGeneralReg()) {
      masm.cmpPtrSet(cond, ToRegister(left), ToRegister(right),
                     ToRegister(def));
    } else {
      masm.cmpPtrSet(cond, ToRegister(left), ToAddress(right), ToRegister(def));
    }
    return;
  }
#endif

  if (right->isConstant()) {
    masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)),
                  ToRegister(def));
  } else if (right->isGeneralReg()) {
    masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def));
  } else {
    masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def));
  }
}

void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
  MCompare* mir = comp->cmpMir();
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());

#ifdef JS_CODEGEN_MIPS64
  if (mir->compareType() == MCompare::Compare_Object ||
      mir->compareType() == MCompare::Compare_Symbol ||
      mir->compareType() == MCompare::Compare_UIntPtr) {
    if (comp->right()->isConstant()) {
      MOZ_ASSERT(mir->compareType() == MCompare::Compare_UIntPtr);
      emitBranch(ToRegister(comp->left()), Imm32(ToInt32(comp->right())), cond,
                 comp->ifTrue(), comp->ifFalse());
    } else if (comp->right()->isGeneralReg()) {
      emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
                 comp->ifTrue(), comp->ifFalse());
    } else {
      masm.loadPtr(ToAddress(comp->right()), ScratchRegister);
      emitBranch(ToRegister(comp->left()), ScratchRegister, cond,
                 comp->ifTrue(), comp->ifFalse());
    }
    return;
  }
#endif

  if (comp->right()->isConstant()) {
    emitBranch(ToRegister(comp->left()), Imm32(ToInt32(comp->right())), cond,
               comp->ifTrue(), comp->ifFalse());
  } else if (comp->right()->isGeneralReg()) {
    emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
               comp->ifTrue(), comp->ifFalse());
  } else {
    masm.load32(ToAddress(comp->right()), ScratchRegister);
    emitBranch(ToRegister(comp->left()), ScratchRegister, cond, comp->ifTrue(),
               comp->ifFalse());
  }
}

bool CodeGeneratorMIPSShared::generateOutOfLineCode() {
  if (!CodeGeneratorShared::generateOutOfLineCode()) {
    return false;
  }

  if (deoptLabel_.used()) {
    // All non-table-based bailouts will go here.
    masm.bind(&deoptLabel_);

    // Push the frame size, so the handler can recover the IonScript.
    // Frame size is stored in 'ra' and pushed by GenerateBailoutThunk.
    // We have to use 'ra' because generateBailoutTable will implicitly do
    // the same.
    masm.move32(Imm32(frameSize()), ra);

    TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
    masm.jump(handler);
  }

  return !masm.oom();
}

void CodeGeneratorMIPSShared::bailoutFrom(Label* label, LSnapshot* snapshot) {
  MOZ_ASSERT_IF(!masm.oom(), label->used());
  MOZ_ASSERT_IF(!masm.oom(), !label->bound());

  encode(snapshot);

  // Though the assembler doesn't track all frame pushes, at least make sure
  // the known value makes sense. We can't use bailout tables if the stack
  // isn't properly aligned to the static frame size.
  MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                frameClass_.frameSize() == masm.framePushed());

  // We don't use table bailouts because retargeting is easier this way.
  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  OutOfLineBailout* ool =
      new (alloc()) OutOfLineBailout(snapshot, masm.framePushed());
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  masm.retarget(label, ool->entry());
}

void CodeGeneratorMIPSShared::bailout(LSnapshot* snapshot) {
  Label label;
  masm.jump(&label);
  bailoutFrom(&label, snapshot);
}

void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
  FloatRegister first = ToFloatRegister(ins->first());
  FloatRegister second = ToFloatRegister(ins->second());

  MOZ_ASSERT(first == ToFloatRegister(ins->output()));

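  // The trailing 'true' presumably selects the macro-assembler's ES-style
  // NaN handling: Math.min/max return NaN if either operand is NaN.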
  if (ins->mir()->isMax()) {
    masm.maxDouble(second, first, true);
  } else {
    masm.minDouble(second, first, true);
  }
}

void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
  FloatRegister first = ToFloatRegister(ins->first());
  FloatRegister second = ToFloatRegister(ins->second());

  MOZ_ASSERT(first == ToFloatRegister(ins->output()));

  if (ins->mir()->isMax()) {
    masm.maxFloat32(second, first, true);
  } else {
    masm.minFloat32(second, first, true);
  }
}

void CodeGenerator::visitAddI(LAddI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

  // If there is no snapshot, we don't need to check for overflow
  if (!ins->snapshot()) {
    if (rhs->isConstant()) {
      masm.ma_addu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    } else {
      masm.as_addu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    }
    return;
  }

  Label overflow;
  if (rhs->isConstant()) {
    masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              Imm32(ToInt32(rhs)), &overflow);
  } else {
    masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              ToRegister(rhs), &overflow);
  }

  bailoutFrom(&overflow, ins->snapshot());
}

void CodeGenerator::visitAddI64(LAddI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  if (IsConstant(rhs)) {
    masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
    return;
  }

  masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
}

void CodeGenerator::visitSubI(LSubI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

  // If there is no snapshot, we don't need to check for overflow
  if (!ins->snapshot()) {
    if (rhs->isConstant()) {
      masm.ma_subu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    } else {
      masm.as_subu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    }
    return;
  }

  Label overflow;
  if (rhs->isConstant()) {
    masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              Imm32(ToInt32(rhs)), &overflow);
  } else {
    masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              ToRegister(rhs), &overflow);
  }

  bailoutFrom(&overflow, ins->snapshot());
}

void CodeGenerator::visitSubI64(LSubI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  if (IsConstant(rhs)) {
    masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
    return;
  }

  masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
}

void CodeGenerator::visitMulI(LMulI* ins) {
  const LAllocation* lhs = ins->lhs();
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());
  MMul* mul = ins->mir();

  MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
                !mul->canBeNegativeZero() && !mul->canOverflow());

  if (rhs->isConstant()) {
    int32_t constant = ToInt32(rhs);
    Register src = ToRegister(lhs);

    // Bailout on -0.0
    if (mul->canBeNegativeZero() && constant <= 0) {
      Assembler::Condition cond =
          (constant == 0) ? Assembler::LessThan : Assembler::Equal;
      bailoutCmp32(cond, src, Imm32(0), ins->snapshot());
    }

    switch (constant) {
      case -1:
        if (mul->canOverflow()) {
          bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN),
                       ins->snapshot());
        }

        masm.ma_negu(dest, src);
        break;
      case 0:
        masm.move32(Imm32(0), dest);
        break;
      case 1:
        masm.move32(src, dest);
        break;
      case 2:
        if (mul->canOverflow()) {
          Label mulTwoOverflow;
          masm.ma_add32TestOverflow(dest, src, src, &mulTwoOverflow);

          bailoutFrom(&mulTwoOverflow, ins->snapshot());
        } else {
          masm.as_addu(dest, src, src);
        }
        break;
      default:
        uint32_t shift = FloorLog2(constant);

        if (!mul->canOverflow() && (constant > 0)) {
          // If it cannot overflow, we can do lots of optimizations.
          uint32_t rest = constant - (1 << shift);

          // See if the constant has one bit set, meaning it can be
          // encoded as a bitshift.
          if ((1 << shift) == constant) {
            masm.ma_sll(dest, src, Imm32(shift));
            return;
          }

          // If the constant cannot be encoded as (1<<C1), see if it can
          // be encoded as (1<<C1) | (1<<C2), which can be computed
          // using an add and a shift.
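          // For example, constant == 10 == 0b1010: shift == 3, rest == 2,
          // shift_rest == 1, so dest = ((src << 2) + src) << 1 == src * 10.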
          uint32_t shift_rest = FloorLog2(rest);
          if (src != dest && (1u << shift_rest) == rest) {
            masm.ma_sll(dest, src, Imm32(shift - shift_rest));
            masm.add32(src, dest);
            if (shift_rest != 0) {
              masm.ma_sll(dest, dest, Imm32(shift_rest));
            }
            return;
          }
        }

        if (mul->canOverflow() && (constant > 0) && (src != dest)) {
          // To stay on the safe side, only optimize things that are a
          // power of 2.

          if ((1 << shift) == constant) {
            // dest = lhs * pow(2, shift)
            masm.ma_sll(dest, src, Imm32(shift));
            // At runtime, check (lhs == dest >> shift); if this does
            // not hold, some bits were lost due to overflow, and the
            // computation should be resumed as a double.
            masm.ma_sra(ScratchRegister, dest, Imm32(shift));
            bailoutCmp32(Assembler::NotEqual, src, ScratchRegister,
                         ins->snapshot());
            return;
          }
        }

        if (mul->canOverflow()) {
          Label mulConstOverflow;
          masm.ma_mul32TestOverflow(dest, ToRegister(lhs), Imm32(ToInt32(rhs)),
                                    &mulConstOverflow);

          bailoutFrom(&mulConstOverflow, ins->snapshot());
        } else {
          masm.ma_mul(dest, src, Imm32(ToInt32(rhs)));
        }
        break;
    }
  } else {
    Label multRegOverflow;

    if (mul->canOverflow()) {
      masm.ma_mul32TestOverflow(dest, ToRegister(lhs), ToRegister(rhs),
                                &multRegOverflow);
      bailoutFrom(&multRegOverflow, ins->snapshot());
    } else {
      masm.as_mul(dest, ToRegister(lhs), ToRegister(rhs));
    }

    if (mul->canBeNegativeZero()) {
      Label done;
      masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);

      // Result is -0 if lhs or rhs is negative.
      // In that case the result must be a double value, so bailout.
      Register scratch = SecondScratchReg;
      masm.as_or(scratch, ToRegister(lhs), ToRegister(rhs));
      bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());

      masm.bind(&done);
    }
  }
}

void CodeGenerator::visitMulI64(LMulI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
  const Register64 output = ToOutRegister64(lir);

  if (IsConstant(rhs)) {
    int64_t constant = ToInt64(rhs);
    switch (constant) {
      case -1:
        masm.neg64(ToRegister64(lhs));
        return;
      case 0:
        masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
        return;
      case 1:
        // nop
        return;
      default:
        if (constant > 0) {
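          // Constants adjacent to a power of two only need a shift and one
          // add/sub: e.g. lhs * 7 == (lhs << 3) - lhs and
          // lhs * 5 == (lhs << 2) + lhs.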
          if (mozilla::IsPowerOfTwo(static_cast<uint32_t>(constant + 1))) {
            masm.move64(ToRegister64(lhs), output);
            masm.lshift64(Imm32(FloorLog2(constant + 1)), output);
            masm.sub64(ToRegister64(lhs), output);
            return;
          } else if (mozilla::IsPowerOfTwo(
                         static_cast<uint32_t>(constant - 1))) {
            masm.move64(ToRegister64(lhs), output);
            masm.lshift64(Imm32(FloorLog2(constant - 1u)), output);
            masm.add64(ToRegister64(lhs), output);
            return;
          }
          // Use shift if constant is power of 2.
          int32_t shift = mozilla::FloorLog2(constant);
          if (int64_t(1) << shift == constant) {
            masm.lshift64(Imm32(shift), ToRegister64(lhs));
            return;
          }
        }
        Register temp = ToTempRegisterOrInvalid(lir->temp());
        masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
    }
  } else {
    Register temp = ToTempRegisterOrInvalid(lir->temp());
    masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
  }
}

void CodeGenerator::visitDivI(LDivI* ins) {
  // Extract the registers from this instruction
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register dest = ToRegister(ins->output());
  Register temp = ToRegister(ins->getTemp(0));
  MDiv* mir = ins->mir();

  Label done;

  // Handle divide by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->trapOnError()) {
      Label nonZero;
      masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
      masm.bind(&nonZero);
    } else if (mir->canTruncateInfinities()) {
      // Truncated division by zero is zero (Infinity|0 == 0)
      Label notzero;
      masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&notzero);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
    }
  }

  // Handle an integer overflow exception from -2147483648 / -1.
  if (mir->canBeNegativeOverflow()) {
    Label notMinInt;
    masm.move32(Imm32(INT32_MIN), temp);
    masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);

    masm.move32(Imm32(-1), temp);
    if (mir->trapOnError()) {
      Label ok;
      masm.ma_b(rhs, temp, &ok, Assembler::NotEqual);
      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
      masm.bind(&ok);
    } else if (mir->canTruncateOverflow()) {
      // (-INT32_MIN)|0 == INT32_MIN
      Label skip;
      masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(INT32_MIN), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot());
    }
    masm.bind(&notMinInt);
  }

  // Handle negative 0. (0/-Y)
  if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
    Label nonzero;
    masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
    bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
    masm.bind(&nonzero);
  }
  // Note: above safety checks could not be verified as Ion seems to be
  // smarter and requires double arithmetic in such cases.

  // All regular. Let's call div.
  if (mir->canTruncateRemainder()) {
#ifdef MIPSR6
    masm.as_div(dest, lhs, rhs);
#else
    masm.as_div(lhs, rhs);
    masm.as_mflo(dest);
#endif
  } else {
    MOZ_ASSERT(mir->fallible());

    Label remainderNonZero;
    masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero);
    bailoutFrom(&remainderNonZero, ins->snapshot());
  }

  masm.bind(&done);
}

void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
  Register lhs = ToRegister(ins->numerator());
  Register dest = ToRegister(ins->output());
  Register tmp = ToRegister(ins->getTemp(0));
  int32_t shift = ins->shift();

  if (shift != 0) {
    MDiv* mir = ins->mir();
    if (!mir->isTruncated()) {
      // If the remainder is going to be != 0, bailout since this must
      // be a double.
      masm.ma_sll(tmp, lhs, Imm32(32 - shift));
      bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
    }

    if (!mir->canBeNegativeDividend()) {
      // Numerator is unsigned, so needs no adjusting. Do the shift.
      masm.ma_sra(dest, lhs, Imm32(shift));
      return;
    }

    // Adjust the value so that shifting produces a correctly rounded result
    // when the numerator is negative. See 10-1 "Signed Division by a Known
    // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
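    // The adjustment adds 2^shift - 1 to negative numerators only, so the
    // arithmetic shift then truncates toward zero: e.g. with shift == 2 and
    // lhs == -5, tmp = -5 + 3 = -2 and -2 >> 2 == -1, matching -5 / 4 == -1.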
    if (shift > 1) {
      masm.ma_sra(tmp, lhs, Imm32(31));
      masm.ma_srl(tmp, tmp, Imm32(32 - shift));
      masm.add32(lhs, tmp);
    } else {
      masm.ma_srl(tmp, lhs, Imm32(32 - shift));
      masm.add32(lhs, tmp);
    }

    // Do the shift.
    masm.ma_sra(dest, tmp, Imm32(shift));
  } else {
    masm.move32(lhs, dest);
  }
}

void CodeGenerator::visitModI(LModI* ins) {
  // Extract the registers from this instruction
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register dest = ToRegister(ins->output());
  Register callTemp = ToRegister(ins->callTemp());
  MMod* mir = ins->mir();
  Label done, prevent;

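  // Save the original dividend: the negative-zero check at the bottom needs
  // its sign, and lhs may no longer hold it by then if it aliases dest.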
  masm.move32(lhs, callTemp);

  // Prevent INT_MIN % -1;
  // The integer division will give INT_MIN, but we want -(double)INT_MIN.
  if (mir->canBeNegativeDividend()) {
    masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump);
    if (mir->isTruncated()) {
      // (INT_MIN % -1)|0 == 0
      Label skip;
      masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot());
    }
    masm.bind(&prevent);
  }

  // 0/X (with X < 0) is bad because both of these values *should* be
  // doubles, and the result should be -0.0, which cannot be represented in
  // integers. X/0 is bad because it will give garbage (or abort), when it
  // should give either \infty, -\infty or NaN.

  // Prevent 0 / X (with X < 0) and X / 0
  // testing X / Y. Compare Y with 0.
  // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
  // If (Y < 0), then we compare X with 0, and bail if X == 0
  // If (Y == 0), then we simply want to bail.
  // if (Y > 0), we don't bail.

  if (mir->canBeDivideByZero()) {
    if (mir->isTruncated()) {
      if (mir->trapOnError()) {
        Label nonZero;
        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
        masm.bind(&nonZero);
      } else {
        Label skip;
        masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
        masm.move32(Imm32(0), dest);
        masm.ma_b(&done, ShortJump);
        masm.bind(&skip);
      }
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
    }
  }

  if (mir->canBeNegativeDividend()) {
    Label notNegative;
    masm.ma_b(rhs, Imm32(0), &notNegative, Assembler::GreaterThan, ShortJump);
    if (mir->isTruncated()) {
      // NaN|0 == 0 and (0 % -X)|0 == 0
      Label skip;
      masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot());
    }
    masm.bind(&notNegative);
  }
#ifdef MIPSR6
  masm.as_mod(dest, lhs, rhs);
#else
  masm.as_div(lhs, rhs);
  masm.as_mfhi(dest);
#endif

  // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
  if (mir->canBeNegativeDividend()) {
    if (mir->isTruncated()) {
      // -0.0|0 == 0
    } else {
      MOZ_ASSERT(mir->fallible());
      // See if X < 0
      masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
      bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot());
    }
  }
  masm.bind(&done);
}

void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
  Register in = ToRegister(ins->getOperand(0));
  Register out = ToRegister(ins->getDef(0));
  MMod* mir = ins->mir();
  Label negative, done;

  masm.move32(in, out);
  masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
  // Switch based on sign of the lhs.
  // Positive numbers are just a bitmask
  masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
  {
    masm.and32(Imm32((1 << ins->shift()) - 1), out);
    masm.ma_b(&done, ShortJump);
  }

  // Negative numbers need a negate, bitmask, negate
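  // e.g. in == -5 with shift == 2: -((-(-5)) & 3) == -(5 & 3) == -1, which
  // matches the truncating semantics of -5 % 4.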
  {
    masm.bind(&negative);
    masm.neg32(out);
    masm.and32(Imm32((1 << ins->shift()) - 1), out);
    masm.neg32(out);
  }
  if (mir->canBeNegativeDividend()) {
    if (!mir->isTruncated()) {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
    } else {
      // -0|0 == 0
    }
  }
  masm.bind(&done);
}

void CodeGenerator::visitModMaskI(LModMaskI* ins) {
  Register src = ToRegister(ins->getOperand(0));
  Register dest = ToRegister(ins->getDef(0));
  Register tmp0 = ToRegister(ins->getTemp(0));
  Register tmp1 = ToRegister(ins->getTemp(1));
  MMod* mir = ins->mir();

  if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
    MOZ_ASSERT(mir->fallible());

    Label bail;
    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
    bailoutFrom(&bail, ins->snapshot());
  } else {
    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
  }
}

void CodeGenerator::visitBitNotI(LBitNotI* ins) {
  const LAllocation* input = ins->getOperand(0);
  const LDefinition* dest = ins->getDef(0);
  MOZ_ASSERT(!input->isConstant());

  masm.ma_not(ToRegister(dest), ToRegister(input));
}

void CodeGenerator::visitBitOpI(LBitOpI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);
  // all of these bitops should be either imm32's, or integer registers.
  switch (ins->bitop()) {
    case JSOp::BitOr:
      if (rhs->isConstant()) {
        masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
      } else {
        masm.as_or(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
      }
      break;
    case JSOp::BitXor:
      if (rhs->isConstant()) {
        masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
      } else {
        masm.as_xor(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
      }
      break;
    case JSOp::BitAnd:
      if (rhs->isConstant()) {
        masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
      } else {
        masm.as_and(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
      }
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}

void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  switch (lir->bitop()) {
    case JSOp::BitOr:
      if (IsConstant(rhs)) {
        masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
      } else {
        masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
      }
      break;
    case JSOp::BitXor:
      if (IsConstant(rhs)) {
        masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
      } else {
        masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
      }
      break;
    case JSOp::BitAnd:
      if (IsConstant(rhs)) {
        masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
      } else {
        masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
      }
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}

void CodeGenerator::visitShiftI(LShiftI* ins) {
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());

  if (rhs->isConstant()) {
    int32_t shift = ToInt32(rhs) & 0x1F;
    switch (ins->bitop()) {
      case JSOp::Lsh:
        if (shift) {
          masm.ma_sll(dest, lhs, Imm32(shift));
        } else {
          masm.move32(lhs, dest);
        }
        break;
      case JSOp::Rsh:
        if (shift) {
          masm.ma_sra(dest, lhs, Imm32(shift));
        } else {
          masm.move32(lhs, dest);
        }
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.ma_srl(dest, lhs, Imm32(shift));
        } else {
          // x >>> 0 can overflow.
          if (ins->mir()->toUrsh()->fallible()) {
            bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
          }
          masm.move32(lhs, dest);
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  } else {
    // The shift amounts should be AND'ed into the 0-31 range
    masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F));

    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.ma_sll(dest, lhs, dest);
        break;
      case JSOp::Rsh:
        masm.ma_sra(dest, lhs, dest);
        break;
      case JSOp::Ursh:
        masm.ma_srl(dest, lhs, dest);
        if (ins->mir()->toUrsh()->fallible()) {
          // x >>> 0 can overflow.
          bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}

void CodeGenerator::visitShiftI64(LShiftI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
  LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  if (rhs->isConstant()) {
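    // Only the low six bits of a 64-bit shift count are significant.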
    int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
    switch (lir->bitop()) {
      case JSOp::Lsh:
        if (shift) {
          masm.lshift64(Imm32(shift), ToRegister64(lhs));
        }
        break;
      case JSOp::Rsh:
        if (shift) {
          masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
        }
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.rshift64(Imm32(shift), ToRegister64(lhs));
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
    return;
  }

  switch (lir->bitop()) {
    case JSOp::Lsh:
      masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
      break;
    case JSOp::Rsh:
      masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
      break;
    case JSOp::Ursh:
      masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
      break;
    default:
      MOZ_CRASH("Unexpected shift op");
  }
}

void CodeGenerator::visitRotateI64(LRotateI64* lir) {
  MRotate* mir = lir->mir();
  LAllocation* count = lir->count();

  Register64 input = ToRegister64(lir->input());
  Register64 output = ToOutRegister64(lir);
  Register temp = ToTempRegisterOrInvalid(lir->temp());

#ifdef JS_CODEGEN_MIPS64
  MOZ_ASSERT(input == output);
#endif

  if (count->isConstant()) {
    int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
    if (!c) {
#ifdef JS_CODEGEN_MIPS32
      masm.move64(input, output);
#endif
      return;
    }
    if (mir->isLeftRotate()) {
      masm.rotateLeft64(Imm32(c), input, output, temp);
    } else {
      masm.rotateRight64(Imm32(c), input, output, temp);
    }
  } else {
    if (mir->isLeftRotate()) {
      masm.rotateLeft64(ToRegister(count), input, output, temp);
    } else {
      masm.rotateRight64(ToRegister(count), input, output, temp);
    }
  }
}

void CodeGenerator::visitUrshD(LUrshD* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp());

  const LAllocation* rhs = ins->rhs();
  FloatRegister out = ToFloatRegister(ins->output());

  if (rhs->isConstant()) {
    masm.ma_srl(temp, lhs, Imm32(ToInt32(rhs)));
  } else {
    masm.ma_srl(temp, lhs, ToRegister(rhs));
  }

  masm.convertUInt32ToDouble(temp, out);
}

void CodeGenerator::visitClzI(LClzI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  masm.as_clz(output, input);
}

void CodeGenerator::visitCtzI(LCtzI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  masm.ma_ctz(output, input);
}

void CodeGenerator::visitPopcntI(LPopcntI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());
  Register tmp = ToRegister(ins->temp0());

  masm.popcnt32(input, output, tmp);
}

void CodeGenerator::visitPopcntI64(LPopcntI64* ins) {
  Register64 input = ToRegister64(ins->getInt64Operand(0));
  Register64 output = ToOutRegister64(ins);
  Register tmp = ToRegister(ins->getTemp(0));

  masm.popcnt64(input, output, tmp);
}

void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());

  Label done, skip;

  // Math.pow(-Infinity, 0.5) == Infinity.
  masm.loadConstantDouble(NegativeInfinity<double>(), ScratchDoubleReg);
  masm.ma_bc1d(input, ScratchDoubleReg, &skip,
               Assembler::DoubleNotEqualOrUnordered, ShortJump);
  masm.as_negd(output, ScratchDoubleReg);
  masm.ma_b(&done, ShortJump);

  masm.bind(&skip);
  // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
  // Adding 0 converts any -0 to 0.
  masm.loadConstantDouble(0.0, ScratchDoubleReg);
  masm.as_addd(output, input, ScratchDoubleReg);
  masm.as_sqrtd(output, output);

  masm.bind(&done);
}

MoveOperand CodeGeneratorMIPSShared::toMoveOperand(LAllocation a) const {
  if (a.isGeneralReg()) {
    return MoveOperand(ToRegister(a));
  }
  if (a.isFloatReg()) {
    return MoveOperand(ToFloatRegister(a));
  }
  MoveOperand::Kind kind =
      a.isStackArea() ? MoveOperand::EFFECTIVE_ADDRESS : MoveOperand::MEMORY;
  Address address = ToAddress(a);
  MOZ_ASSERT((address.offset & 3) == 0);
  return MoveOperand(address, kind);
}

void CodeGenerator::visitMathD(LMathD* math) {
  FloatRegister src1 = ToFloatRegister(math->getOperand(0));
  FloatRegister src2 = ToFloatRegister(math->getOperand(1));
  FloatRegister output = ToFloatRegister(math->getDef(0));

  switch (math->jsop()) {
    case JSOp::Add:
      masm.as_addd(output, src1, src2);
      break;
    case JSOp::Sub:
      masm.as_subd(output, src1, src2);
      break;
    case JSOp::Mul:
      masm.as_muld(output, src1, src2);
      break;
    case JSOp::Div:
      masm.as_divd(output, src1, src2);
      break;
    default:
      MOZ_CRASH("unexpected opcode");
  }
}

void CodeGenerator::visitMathF(LMathF* math) {
  FloatRegister src1 = ToFloatRegister(math->getOperand(0));
  FloatRegister src2 = ToFloatRegister(math->getOperand(1));
  FloatRegister output = ToFloatRegister(math->getDef(0));

  switch (math->jsop()) {
    case JSOp::Add:
      masm.as_adds(output, src1, src2);
      break;
    case JSOp::Sub:
      masm.as_subs(output, src1, src2);
      break;
    case JSOp::Mul:
      masm.as_muls(output, src1, src2);
      break;
    case JSOp::Div:
      masm.as_divs(output, src1, src2);
      break;
    default:
      MOZ_CRASH("unexpected opcode");
  }
}

void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
  emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
                     ins->mir());
}

void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
  emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
                      ins->mir());
}

void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
    LWasmBuiltinTruncateDToInt32* lir) {
  emitTruncateDouble(ToFloatRegister(lir->getOperand(0)),
                     ToRegister(lir->getDef(0)), lir->mir());
}

void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
    LWasmBuiltinTruncateFToInt32* lir) {
  emitTruncateFloat32(ToFloatRegister(lir->getOperand(0)),
                      ToRegister(lir->getDef(0)), lir->mir());
}

void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
  auto input = ToFloatRegister(lir->input());
  auto output = ToRegister(lir->output());

  MWasmTruncateToInt32* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);

  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
  addOutOfLineCode(ool, mir);

  Label* oolEntry = ool->entry();
  if (mir->isUnsigned()) {
    if (fromType == MIRType::Double) {
      masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(),
                                      oolEntry);
    } else if (fromType == MIRType::Float32) {
      masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(),
                                       oolEntry);
    } else {
      MOZ_CRASH("unexpected type");
    }

    masm.bind(ool->rejoin());
    return;
  }

  if (fromType == MIRType::Double) {
    masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(),
                                   oolEntry);
  } else if (fromType == MIRType::Float32) {
    masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(),
                                    oolEntry);
  } else {
    MOZ_CRASH("unexpected type");
  }

  masm.bind(ool->rejoin());
}

void CodeGeneratorMIPSShared::visitOutOfLineBailout(OutOfLineBailout* ool) {
  // Push snapshotOffset and make sure stack is aligned.
  masm.subPtr(Imm32(sizeof(Value)), StackPointer);
  masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()),
                Address(StackPointer, 0));

  masm.jump(&deoptLabel_);
}

void CodeGeneratorMIPSShared::visitOutOfLineWasmTruncateCheck(
    OutOfLineWasmTruncateCheck* ool) {
  if (ool->toType() == MIRType::Int32) {
    masm.outOfLineWasmTruncateToInt32Check(
        ool->input(), ool->output(), ool->fromType(), ool->flags(),
        ool->rejoin(), ool->bytecodeOffset());
  } else {
    MOZ_ASSERT(ool->toType() == MIRType::Int64);
    masm.outOfLineWasmTruncateToInt64Check(
        ool->input(), ool->output64(), ool->fromType(), ool->flags(),
        ool->rejoin(), ool->bytecodeOffset());
  }
}

void CodeGenerator::visitCopySignF(LCopySignF* ins) {
  FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
  FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
  FloatRegister output = ToFloatRegister(ins->getDef(0));

  Register lhsi = ToRegister(ins->getTemp(0));
  Register rhsi = ToRegister(ins->getTemp(1));

  masm.moveFromFloat32(lhs, lhsi);
  masm.moveFromFloat32(rhs, rhsi);

  // Combine.
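  // Insert the low 31 bits (the magnitude) of lhs into rhs, keeping only
  // the sign bit of rhs, which is what copysign requires.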
  masm.ma_ins(rhsi, lhsi, 0, 31);

  masm.moveToFloat32(rhsi, output);
}

void CodeGenerator::visitCopySignD(LCopySignD* ins) {
  FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
  FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
  FloatRegister output = ToFloatRegister(ins->getDef(0));

  Register lhsi = ToRegister(ins->getTemp(0));
  Register rhsi = ToRegister(ins->getTemp(1));

  // Manipulate high words of double inputs.
  masm.moveFromDoubleHi(lhs, lhsi);
  masm.moveFromDoubleHi(rhs, rhsi);

  // Combine.
  masm.ma_ins(rhsi, lhsi, 0, 31);

  masm.moveToDoubleHi(rhsi, output);
}

void CodeGenerator::visitValue(LValue* value) {
  const ValueOperand out = ToOutValue(value);

  masm.moveValue(value->value(), out);
}

void CodeGenerator::visitDouble(LDouble* ins) {
  const LDefinition* out = ins->getDef(0);

  masm.loadConstantDouble(ins->value(), ToFloatRegister(out));
}

void CodeGenerator::visitFloat32(LFloat32* ins) {
  const LDefinition* out = ins->getDef(0);
  masm.loadConstantFloat32(ins->value(), ToFloatRegister(out));
}

void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
  FloatRegister input = ToFloatRegister(test->input());

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.loadConstantDouble(0.0, ScratchDoubleReg);
  // If 0, or NaN, the result is false.

  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifTrue,
                  Assembler::DoubleNotEqual);
  } else {
    branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifFalse,
                  Assembler::DoubleEqualOrUnordered);
    jumpToBlock(ifTrue);
  }
}

void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
  FloatRegister input = ToFloatRegister(test->input());

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
  // If 0, or NaN, the result is false.

  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifTrue,
                  Assembler::DoubleNotEqual);
  } else {
    branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifFalse,
                  Assembler::DoubleEqualOrUnordered);
    jumpToBlock(ifTrue);
  }
}

void CodeGenerator::visitCompareD(LCompareD* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());
  Register dest = ToRegister(comp->output());

  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
  masm.ma_cmp_set_double(dest, lhs, rhs, cond);
}

void CodeGenerator::visitCompareF(LCompareF* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());
  Register dest = ToRegister(comp->output());

  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
  masm.ma_cmp_set_float32(dest, lhs, rhs, cond);
}

void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());

  Assembler::DoubleCondition cond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  MBasicBlock* ifTrue = comp->ifTrue();
  MBasicBlock* ifFalse = comp->ifFalse();

  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond);
  } else {
    branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse,
                  Assembler::InvertCondition(cond));
    jumpToBlock(ifTrue);
  }
}

void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());

  Assembler::DoubleCondition cond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  MBasicBlock* ifTrue = comp->ifTrue();
  MBasicBlock* ifFalse = comp->ifFalse();

  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(Assembler::SingleFloat, lhs, rhs, ifTrue, cond);
  } else {
    branchToBlock(Assembler::SingleFloat, lhs, rhs, ifFalse,
                  Assembler::InvertCondition(cond));
    jumpToBlock(ifTrue);
  }
}

void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir) {
  if (lir->right()->isConstant()) {
    masm.ma_and(ScratchRegister, ToRegister(lir->left()),
                Imm32(ToInt32(lir->right())));
  } else {
    masm.as_and(ScratchRegister, ToRegister(lir->left()),
                ToRegister(lir->right()));
  }
  emitBranch(ScratchRegister, ScratchRegister, lir->cond(), lir->ifTrue(),
             lir->ifFalse());
}

void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  masm.convertUInt32ToDouble(ToRegister(lir->input()),
                             ToFloatRegister(lir->output()));
}

void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
  masm.convertUInt32ToFloat32(ToRegister(lir->input()),
                              ToFloatRegister(lir->output()));
}

void CodeGenerator::visitNotI(LNotI* ins) {
  masm.cmp32Set(Assembler::Equal, ToRegister(ins->input()), Imm32(0),
                ToRegister(ins->output()));
}

void CodeGenerator::visitNotD(LNotD* ins) {
  // Since this operation is not, we want to set a bit if
  // the double is falsey, which means 0.0, -0.0 or NaN.
  FloatRegister in = ToFloatRegister(ins->input());
  Register dest = ToRegister(ins->output());

  masm.loadConstantDouble(0.0, ScratchDoubleReg);
  masm.ma_cmp_set_double(dest, in, ScratchDoubleReg,
                         Assembler::DoubleEqualOrUnordered);
}

void CodeGenerator::visitNotF(LNotF* ins) {
  // Since this operation is not, we want to set a bit if
  // the float32 is falsey, which means 0.0, -0.0 or NaN.
  FloatRegister in = ToFloatRegister(ins->input());
  Register dest = ToRegister(ins->output());

  masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
  masm.ma_cmp_set_float32(dest, in, ScratchFloat32Reg,
                          Assembler::DoubleEqualOrUnordered);
}

void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
  masm.memoryBarrier(ins->type());
}

void CodeGeneratorMIPSShared::generateInvalidateEpilogue() {
  // Ensure that there is enough space in the buffer for the OsiPoint
  // patching to occur. Otherwise, we could overwrite the invalidation
  // epilogue.
  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
    masm.nop();
  }

  masm.bind(&invalidate_);

  // Push the return address of the point that we bailed out at to the stack
  masm.Push(ra);

  // Push the Ion script onto the stack (when we determine what that
  // pointer is).
  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));

  // Jump to the invalidator which will replace the current frame.
  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
  masm.jump(thunk);
}

class js::jit::OutOfLineTableSwitch
    : public OutOfLineCodeBase<CodeGeneratorMIPSShared> {
  MTableSwitch* mir_;
  CodeLabel jumpLabel_;

  void accept(CodeGeneratorMIPSShared* codegen) {
    codegen->visitOutOfLineTableSwitch(this);
  }

 public:
  OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}

  MTableSwitch* mir() const { return mir_; }

  CodeLabel* jumpLabel() { return &jumpLabel_; }
};

void CodeGeneratorMIPSShared::visitOutOfLineTableSwitch(
    OutOfLineTableSwitch* ool) {
  MTableSwitch* mir = ool->mir();

  masm.haltingAlign(sizeof(void*));
  masm.bind(ool->jumpLabel());
  masm.addCodeLabel(*ool->jumpLabel());

  for (size_t i = 0; i < mir->numCases(); i++) {
    LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
    Label* caseheader = caseblock->label();
    uint32_t caseoffset = caseheader->offset();

    // The entries of the jump table need to be absolute addresses and thus
    // must be patched after codegen is finished.
    CodeLabel cl;
    masm.writeCodePointer(&cl);
    cl.target()->bind(caseoffset);
    masm.addCodeLabel(cl);
  }
}

void CodeGeneratorMIPSShared::emitTableSwitchDispatch(MTableSwitch* mir,
                                                      Register index,
                                                      Register base) {
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  // Subtract the lowest case value so the index is zero-based.
  if (mir->low() != 0) {
    masm.subPtr(Imm32(mir->low()), index);
  }

  // Jump to default case if input is out of range
  int32_t cases = mir->numCases();
  masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);

  // To fill in the CodeLabels for the case entries, we need to first
  // generate the case entries (we don't yet know their offsets in the
  // instruction stream).
  OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
  addOutOfLineCode(ool, mir);

  // Compute the position where a pointer to the right case stands.
  masm.ma_li(base, ool->jumpLabel());

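  // Each jump-table entry is a code pointer, so the index is scaled by the
  // pointer size when indexing off the table base.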
  BaseIndex pointer(base, index, ScalePointer);

  // Jump to the right case
  masm.branchToComputedAddress(pointer);
}

void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) {
  MOZ_ASSERT(ins->tlsPtr()->isBogus());
  masm.movePtr(HeapReg, ToRegister(ins->output()));
}

template <typename T>
void CodeGeneratorMIPSShared::emitWasmLoad(T* lir) {
  const MWasmLoad* mir = lir->mir();
  SecondScratchRegisterScope scratch2(masm);

  Register ptr = ToRegister(lir->ptr());
  Register ptrScratch = InvalidReg;
  if (!lir->ptrCopy()->isBogusTemp()) {
    ptrScratch = ToRegister(lir->ptrCopy());
  }

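  // A 32-bit base pointer may carry garbage in the upper half of its
  // register; zero-extend it into scratch2 before it is used to form the
  // effective address.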
  if (mir->base()->type() == MIRType::Int32) {
    masm.move32To64ZeroExtend(ptr, Register64(scratch2));
    ptr = scratch2;
    ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
  }

  if (IsUnaligned(mir->access())) {
    if (IsFloatingPointType(mir->type())) {
      masm.wasmUnalignedLoadFP(mir->access(), HeapReg, ptr, ptrScratch,
                               ToFloatRegister(lir->output()),
                               ToRegister(lir->getTemp(1)));
    } else {
      masm.wasmUnalignedLoad(mir->access(), HeapReg, ptr, ptrScratch,
                             ToRegister(lir->output()),
                             ToRegister(lir->getTemp(1)));
    }
  } else {
    masm.wasmLoad(mir->access(), HeapReg, ptr, ptrScratch,
                  ToAnyRegister(lir->output()));
  }
}

void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }

void CodeGenerator::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir) {
  emitWasmLoad(lir);
}

template <typename T>
void CodeGeneratorMIPSShared::emitWasmStore(T* lir) {
  const MWasmStore* mir = lir->mir();
  SecondScratchRegisterScope scratch2(masm);

  Register ptr = ToRegister(lir->ptr());
  Register ptrScratch = InvalidReg;
  if (!lir->ptrCopy()->isBogusTemp()) {
    ptrScratch = ToRegister(lir->ptrCopy());
  }

  if (mir->base()->type() == MIRType::Int32) {
    masm.move32To64ZeroExtend(ptr, Register64(scratch2));
    ptr = scratch2;
    ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
  }

  if (IsUnaligned(mir->access())) {
    if (mir->access().type() == Scalar::Float32 ||
        mir->access().type() == Scalar::Float64) {
      masm.wasmUnalignedStoreFP(mir->access(), ToFloatRegister(lir->value()),
                                HeapReg, ptr, ptrScratch,
                                ToRegister(lir->getTemp(1)));
    } else {
      masm.wasmUnalignedStore(mir->access(), ToRegister(lir->value()), HeapReg,
                              ptr, ptrScratch, ToRegister(lir->getTemp(1)));
    }
  } else {
    masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), HeapReg, ptr,
                   ptrScratch);
  }
}

void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }

void CodeGenerator::visitWasmUnalignedStore(LWasmUnalignedStore* lir) {
  emitWasmStore(lir);
}

void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  const MAsmJSLoadHeap* mir = ins->mir();
  const LAllocation* ptr = ins->ptr();
  const LDefinition* out = ins->output();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  bool isSigned;
  int size;
  bool isFloat = false;
  switch (mir->access().type()) {
    case Scalar::Int8:
      isSigned = true;
      size = 8;
      break;
    case Scalar::Uint8:
      isSigned = false;
      size = 8;
      break;
    case Scalar::Int16:
      isSigned = true;
      size = 16;
      break;
    case Scalar::Uint16:
      isSigned = false;
      size = 16;
      break;
    case Scalar::Int32:
      isSigned = true;
      size = 32;
      break;
    case Scalar::Uint32:
      isSigned = false;
      size = 32;
      break;
    case Scalar::Float64:
      isFloat = true;
      size = 64;
      break;
    case Scalar::Float32:
      isFloat = true;
      size = 32;
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  if (ptr->isConstant()) {
    MOZ_ASSERT(!mir->needsBoundsCheck());
    int32_t ptrImm = ptr->toConstant()->toInt32();
    MOZ_ASSERT(ptrImm >= 0);
    if (isFloat) {
      if (size == 32) {
        masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
      } else {
        masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
      }
    } else {
      masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
                   static_cast<LoadStoreSize>(size),
                   isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  Register ptrReg = ToRegister(ptr);

  if (!mir->needsBoundsCheck()) {
    if (isFloat) {
      if (size == 32) {
        masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
                         ToFloatRegister(out));
      } else {
        masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
                        ToFloatRegister(out));
      }
    } else {
      masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                   static_cast<LoadStoreSize>(size),
                   isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  Label done, outOfRange;
  masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
                         ToRegister(boundsCheckLimit), &outOfRange);
  // Offset is ok, let's load value.
  if (isFloat) {
    if (size == 32) {
      masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
                       ToFloatRegister(out));
    } else {
      masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
                      ToFloatRegister(out));
    }
  } else {
    masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                 static_cast<LoadStoreSize>(size),
                 isSigned ? SignExtend : ZeroExtend);
  }
  masm.ma_b(&done, ShortJump);
  masm.bind(&outOfRange);
  // Offset is out of range. Load default values.
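  // (asm.js loads never trap: out-of-bounds reads are defined to produce
  // NaN for floating-point accesses and 0 for integer accesses.)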
1692 if (isFloat) {
1693 if (size == 32) {
1694 masm.loadConstantFloat32(float(GenericNaN()), ToFloatRegister(out));
1695 } else {
1696 masm.loadConstantDouble(GenericNaN(), ToFloatRegister(out));
1697 }
1698 } else {
1699 masm.move32(Imm32(0), ToRegister(out));
1700 }
1701 masm.bind(&done);
1702 }
1703
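// The store counterpart of the above: an out-of-bounds asm.js store is simply
// ignored, so the bounds check branches past the store and execution falls
// through with no trap and no side effect.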
void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
  const MAsmJSStoreHeap* mir = ins->mir();
  const LAllocation* value = ins->value();
  const LAllocation* ptr = ins->ptr();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  bool isSigned;
  int size;
  bool isFloat = false;
  switch (mir->access().type()) {
    case Scalar::Int8:
      isSigned = true;
      size = 8;
      break;
    case Scalar::Uint8:
      isSigned = false;
      size = 8;
      break;
    case Scalar::Int16:
      isSigned = true;
      size = 16;
      break;
    case Scalar::Uint16:
      isSigned = false;
      size = 16;
      break;
    case Scalar::Int32:
      isSigned = true;
      size = 32;
      break;
    case Scalar::Uint32:
      isSigned = false;
      size = 32;
      break;
    case Scalar::Float64:
      isFloat = true;
      size = 64;
      break;
    case Scalar::Float32:
      isFloat = true;
      size = 32;
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  if (ptr->isConstant()) {
    MOZ_ASSERT(!mir->needsBoundsCheck());
    int32_t ptrImm = ptr->toConstant()->toInt32();
    MOZ_ASSERT(ptrImm >= 0);

    if (isFloat) {
      FloatRegister freg = ToFloatRegister(value);
      Address addr(HeapReg, ptrImm);
      if (size == 32) {
        masm.storeFloat32(freg, addr);
      } else {
        masm.storeDouble(freg, addr);
      }
    } else {
      masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
                    static_cast<LoadStoreSize>(size),
                    isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  Register ptrReg = ToRegister(ptr);
  Address dstAddr(ptrReg, 0);

  if (!mir->needsBoundsCheck()) {
    if (isFloat) {
      FloatRegister freg = ToFloatRegister(value);
      BaseIndex bi(HeapReg, ptrReg, TimesOne);
      if (size == 32) {
        masm.storeFloat32(freg, bi);
      } else {
        masm.storeDouble(freg, bi);
      }
    } else {
      masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
                    static_cast<LoadStoreSize>(size),
                    isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  Label outOfRange;
  masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
                         ToRegister(boundsCheckLimit), &outOfRange);

  // Offset is ok, let's store value.
  if (isFloat) {
    if (size == 32) {
      masm.storeFloat32(ToFloatRegister(value),
                        BaseIndex(HeapReg, ptrReg, TimesOne));
    } else {
      masm.storeDouble(ToFloatRegister(value),
                       BaseIndex(HeapReg, ptrReg, TimesOne));
    }
  } else {
    masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
                  static_cast<LoadStoreSize>(size),
                  isSigned ? SignExtend : ZeroExtend);
  }

  masm.bind(&outOfRange);
}

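// Wasm heap atomics. The valueTemp/offsetTemp/maskTemp registers are only
// needed for sub-word (8- and 16-bit) accesses, which MIPS implements with
// word-sized ll/sc plus shifting and masking; for word-sized accesses the
// temps are bogus and ToTempRegisterOrInvalid yields an invalid register.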
void CodeGenerator::visitWasmCompareExchangeHeap(
    LWasmCompareExchangeHeap* ins) {
  MWasmCompareExchangeHeap* mir = ins->mir();
  Register ptrReg = ToRegister(ins->ptr());
  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  Register oldval = ToRegister(ins->oldValue());
  Register newval = ToRegister(ins->newValue());
  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());

  masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, valueTemp,
                           offsetTemp, maskTemp, ToRegister(ins->output()));
}

void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
  MWasmAtomicExchangeHeap* mir = ins->mir();
  Register ptrReg = ToRegister(ins->ptr());
  Register value = ToRegister(ins->value());
  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());

  masm.wasmAtomicExchange(mir->access(), srcAddr, value, valueTemp, offsetTemp,
                          maskTemp, ToRegister(ins->output()));
}

void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
  MOZ_ASSERT(ins->mir()->hasUses());
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  MWasmAtomicBinopHeap* mir = ins->mir();
  Register ptrReg = ToRegister(ins->ptr());
  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());

  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());

  masm.wasmAtomicFetchOp(mir->access(), mir->operation(),
                         ToRegister(ins->value()), srcAddr, valueTemp,
                         offsetTemp, maskTemp, ToRegister(ins->output()));
}

void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
    LWasmAtomicBinopHeapForEffect* ins) {
  MOZ_ASSERT(!ins->mir()->hasUses());
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  MWasmAtomicBinopHeap* mir = ins->mir();
  Register ptrReg = ToRegister(ins->ptr());
  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());

  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
  masm.wasmAtomicEffectOp(mir->access(), mir->operation(),
                          ToRegister(ins->value()), srcAddr, valueTemp,
                          offsetTemp, maskTemp);
}

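// Wasm call arguments that are passed on the stack are written into the
// outgoing argument area at the offset the ABI assigned to them (spOffset).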
void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
  const MWasmStackArg* mir = ins->mir();
  if (ins->arg()->isConstant()) {
    masm.storePtr(ImmWord(ToInt32(ins->arg())),
                  Address(StackPointer, mir->spOffset()));
  } else {
    if (ins->arg()->isGeneralReg()) {
      masm.storePtr(ToRegister(ins->arg()),
                    Address(StackPointer, mir->spOffset()));
    } else if (mir->input()->type() == MIRType::Double) {
      masm.storeDouble(ToFloatRegister(ins->arg()).doubleOverlay(),
                       Address(StackPointer, mir->spOffset()));
    } else {
      masm.storeFloat32(ToFloatRegister(ins->arg()),
                        Address(StackPointer, mir->spOffset()));
    }
  }
}

void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
  const MWasmStackArg* mir = ins->mir();
  Address dst(StackPointer, mir->spOffset());
  if (IsConstant(ins->arg())) {
    masm.store64(Imm64(ToInt64(ins->arg())), dst);
  } else {
    masm.store64(ToRegister64(ins->arg()), dst);
  }
}

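// Wasm select leans on the MIPS conditional move: movz rd, rs, rt copies rs
// into rd iff rt is zero. The "true" operand is pinned to the output
// register, so a single movz with the condition register overwrites the
// output with the "false" operand exactly when the condition is zero.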
void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
  MIRType mirType = ins->mir()->type();

  Register cond = ToRegister(ins->condExpr());
  const LAllocation* falseExpr = ins->falseExpr();

  if (mirType == MIRType::Int32 || mirType == MIRType::RefOrNull) {
    Register out = ToRegister(ins->output());
    MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
               "true expr input is reused for output");
    if (falseExpr->isRegister()) {
      masm.as_movz(out, ToRegister(falseExpr), cond);
    } else {
      masm.cmp32Load32(Assembler::Zero, cond, cond, ToAddress(falseExpr), out);
    }
    return;
  }

  FloatRegister out = ToFloatRegister(ins->output());
  MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
             "true expr input is reused for output");

  if (falseExpr->isFloatReg()) {
    if (mirType == MIRType::Float32) {
      masm.as_movz(Assembler::SingleFloat, out, ToFloatRegister(falseExpr),
                   cond);
    } else if (mirType == MIRType::Double) {
      masm.as_movz(Assembler::DoubleFloat, out, ToFloatRegister(falseExpr),
                   cond);
    } else {
      MOZ_CRASH("unhandled type in visitWasmSelect!");
    }
  } else {
    Label done;
    masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);

    if (mirType == MIRType::Float32) {
      masm.loadFloat32(ToAddress(falseExpr), out);
    } else if (mirType == MIRType::Double) {
      masm.loadDouble(ToAddress(falseExpr), out);
    } else {
      MOZ_CRASH("unhandled type in visitWasmSelect!");
    }

    masm.bind(&done);
  }
}

// We expect to handle only the case where compare is {U,}Int32 and select is
// {U,}Int32, and the "true" input is reused for the output.
void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
  bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
                    ins->compareType() == MCompare::Compare_UInt32;
  bool selIs32bit = ins->mir()->type() == MIRType::Int32;

  MOZ_RELEASE_ASSERT(
      cmpIs32bit && selIs32bit,
      "CodeGenerator::visitWasmCompareAndSelect: unexpected types");

  Register trueExprAndDest = ToRegister(ins->output());
  MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
             "true expr input is reused for output");

  Assembler::Condition cond = Assembler::InvertCondition(
      JSOpToCondition(ins->compareType(), ins->jsop()));
  const LAllocation* rhs = ins->rightExpr();
  const LAllocation* falseExpr = ins->ifFalseExpr();
  Register lhs = ToRegister(ins->leftExpr());

  masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
                   trueExprAndDest);
}

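// Reinterpret moves raw bits between register files: mfc1/mtc1 copy a 32-bit
// pattern between an FPU register and a GPR without any numeric conversion.
// The 64-bit variants (Double <-> Int64) are handled by a different LIR
// opcode, hence the crash on those cases below.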
void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  MWasmReinterpret* ins = lir->mir();

  MIRType to = ins->type();
  DebugOnly<MIRType> from = ins->input()->type();

  switch (to) {
    case MIRType::Int32:
      MOZ_ASSERT(from == MIRType::Float32);
      masm.as_mfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
      break;
    case MIRType::Float32:
      MOZ_ASSERT(from == MIRType::Int32);
      masm.as_mtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
      break;
    case MIRType::Double:
    case MIRType::Int64:
      MOZ_CRASH("not handled by this LIR opcode");
    default:
      MOZ_CRASH("unexpected WasmReinterpret");
  }
}

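// Unsigned 32-bit division and modulus. Division by zero either traps (wasm),
// produces 0 (truncated JS division), or bails out. A non-truncated division
// bails out if there is a remainder, and any result with the sign bit set
// also bails out, since it is not representable as an int32.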
void CodeGenerator::visitUDivOrMod(LUDivOrMod* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  Label done;

  // Prevent divide by zero.
  if (ins->canBeDivideByZero()) {
    if (ins->mir()->isTruncated()) {
      if (ins->trapOnError()) {
        Label nonZero;
        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->bytecodeOffset());
        masm.bind(&nonZero);
      } else {
        // Infinity|0 == 0
        Label notzero;
        masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
        masm.move32(Imm32(0), output);
        masm.ma_b(&done, ShortJump);
        masm.bind(&notzero);
      }
    } else {
      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
    }
  }

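  // MIPSR6 dropped the HI/LO registers: divu/modu write their result directly
  // to a GPR. Pre-R6, divu leaves the quotient in LO and the remainder in HI,
  // which mflo/mfhi read back.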
#ifdef MIPSR6
  masm.as_modu(output, lhs, rhs);
#else
  masm.as_divu(lhs, rhs);
  masm.as_mfhi(output);
#endif

  // If the remainder is > 0, bailout since this must be a double.
  if (ins->mir()->isDiv()) {
    if (!ins->mir()->toDiv()->canTruncateRemainder()) {
      bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
    }
    // Get quotient
#ifdef MIPSR6
    masm.as_divu(output, lhs, rhs);
#else
    masm.as_mflo(output);
#endif
  }

  if (!ins->mir()->isTruncated()) {
    bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
  }

  masm.bind(&done);
}

void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
  const MEffectiveAddress* mir = ins->mir();
  Register base = ToRegister(ins->base());
  Register index = ToRegister(ins->index());
  Register output = ToRegister(ins->output());

  BaseIndex address(base, index, mir->scale(), mir->displacement());
  masm.computeEffectiveAddress(address, output);
}

void CodeGenerator::visitNegI(LNegI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  masm.ma_negu(output, input);
}

void CodeGenerator::visitNegI64(LNegI64* ins) {
  Register64 input = ToRegister64(ins->getInt64Operand(0));
  MOZ_ASSERT(input == ToOutRegister64(ins));
  masm.neg64(input);
}

void CodeGenerator::visitNegD(LNegD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());

  masm.as_negd(output, input);
}

void CodeGenerator::visitNegF(LNegF* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());

  masm.as_negs(output, input);
}

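// Adding the constant access offset to a wasm pointer must itself be bounds
// checked: a carry out of the add means the effective address wrapped around,
// which is necessarily out of bounds, so it traps.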
void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
  MWasmAddOffset* mir = lir->mir();
  Register base = ToRegister(lir->base());
  Register out = ToRegister(lir->output());

  Label ok;
  masm.ma_add32TestCarry(Assembler::CarryClear, out, base,
                         Imm32(mir->offset()), &ok);
  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
  masm.bind(&ok);
}

void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
  MWasmAddOffset* mir = lir->mir();
  Register64 base = ToRegister64(lir->base());
  Register64 out = ToOutRegister64(lir);

  Label ok;
  masm.ma_addPtrTestCarry(Assembler::CarryClear, out.reg, base.reg,
                          ImmWord(mir->offset()), &ok);
  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
  masm.bind(&ok);
}

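// JS typed-array atomics. All of these use a full barrier
// (Synchronization::Full()) and pick an Address or BaseIndex addressing mode
// depending on whether the element index is a constant.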
void CodeGenerator::visitAtomicTypedArrayElementBinop(
    LAtomicTypedArrayElementBinop* lir) {
  MOZ_ASSERT(!lir->mir()->isForEffect());

  AnyRegister output = ToAnyRegister(lir->output());
  Register elements = ToRegister(lir->elements());
  Register outTemp = ToTempRegisterOrInvalid(lir->temp2());
  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
  Register value = ToRegister(lir->value());

  Scalar::Type arrayType = lir->mir()->arrayType();

  if (lir->index()->isConstant()) {
    Address mem = ToAddress(elements, lir->index(), arrayType);
    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
                         lir->mir()->operation(), value, mem, valueTemp,
                         offsetTemp, maskTemp, outTemp, output);
  } else {
    BaseIndex mem(elements, ToRegister(lir->index()),
                  ScaleFromScalarType(arrayType));
    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
                         lir->mir()->operation(), value, mem, valueTemp,
                         offsetTemp, maskTemp, outTemp, output);
  }
}

void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
    LAtomicTypedArrayElementBinopForEffect* lir) {
  MOZ_ASSERT(lir->mir()->isForEffect());

  Register elements = ToRegister(lir->elements());
  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
  Register value = ToRegister(lir->value());
  Scalar::Type arrayType = lir->mir()->arrayType();

  if (lir->index()->isConstant()) {
    Address mem = ToAddress(elements, lir->index(), arrayType);
    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
                          lir->mir()->operation(), value, mem, valueTemp,
                          offsetTemp, maskTemp);
  } else {
    BaseIndex mem(elements, ToRegister(lir->index()),
                  ScaleFromScalarType(arrayType));
    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
                          lir->mir()->operation(), value, mem, valueTemp,
                          offsetTemp, maskTemp);
  }
}

void CodeGenerator::visitCompareExchangeTypedArrayElement(
    LCompareExchangeTypedArrayElement* lir) {
  Register elements = ToRegister(lir->elements());
  AnyRegister output = ToAnyRegister(lir->output());
  Register outTemp = ToTempRegisterOrInvalid(lir->temp());

  Register oldval = ToRegister(lir->oldval());
  Register newval = ToRegister(lir->newval());
  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());

  Scalar::Type arrayType = lir->mir()->arrayType();

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
                           newval, valueTemp, offsetTemp, maskTemp, outTemp,
                           output);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
                           newval, valueTemp, offsetTemp, maskTemp, outTemp,
                           output);
  }
}

void CodeGenerator::visitAtomicExchangeTypedArrayElement(
    LAtomicExchangeTypedArrayElement* lir) {
  Register elements = ToRegister(lir->elements());
  AnyRegister output = ToAnyRegister(lir->output());
  Register outTemp = ToTempRegisterOrInvalid(lir->temp());

  Register value = ToRegister(lir->value());
  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());

  Scalar::Type arrayType = lir->mir()->arrayType();

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
                          valueTemp, offsetTemp, maskTemp, outTemp, output);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
                          valueTemp, offsetTemp, maskTemp, outTemp, output);
  }
}

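// 64-bit (BigInt-typed) array atomics follow a common pattern: unbox the
// operand BigInt's 64-bit payload with loadBigInt64, perform the atomic
// operation on the raw bits, then rebox the old value as a fresh BigInt via
// emitCreateBigInt.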
void CodeGenerator::visitCompareExchangeTypedArrayElement64(
    LCompareExchangeTypedArrayElement64* lir) {
  Register elements = ToRegister(lir->elements());
  Register oldval = ToRegister(lir->oldval());
  Register newval = ToRegister(lir->newval());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register64 temp2 = ToRegister64(lir->temp2());
  Register out = ToRegister(lir->output());
  Register64 tempOut(out);

  Scalar::Type arrayType = lir->mir()->arrayType();

  masm.loadBigInt64(oldval, temp1);
  masm.loadBigInt64(newval, tempOut);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
                           temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
                           temp2);
  }

  emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
}

void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
    LAtomicExchangeTypedArrayElement64* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register64 temp2 = Register64(ToRegister(lir->temp2()));
  Register out = ToRegister(lir->output());

  Scalar::Type arrayType = lir->mir()->arrayType();

  masm.loadBigInt64(value, temp1);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
  }

  emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
}

void CodeGenerator::visitAtomicTypedArrayElementBinop64(
    LAtomicTypedArrayElementBinop64* lir) {
  MOZ_ASSERT(lir->mir()->hasUses());

  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register64 temp2 = ToRegister64(lir->temp2());
  Register out = ToRegister(lir->output());
  Register64 tempOut = Register64(out);

  Scalar::Type arrayType = lir->mir()->arrayType();
  AtomicOp atomicOp = lir->mir()->operation();

  masm.loadBigInt64(value, temp1);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
                         tempOut, temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
                         tempOut, temp2);
  }

  emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
}

void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
    LAtomicTypedArrayElementBinopForEffect64* lir) {
  MOZ_ASSERT(!lir->mir()->hasUses());

  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register64 temp2 = ToRegister64(lir->temp2());

  Scalar::Type arrayType = lir->mir()->arrayType();
  AtomicOp atomicOp = lir->mir()->operation();

  masm.loadBigInt64(value, temp1);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
                          temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
                          temp2);
  }
}

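// Wasm 64-bit heap atomics fold the access offset into the BaseIndex address.
// On MIPS32 the fetch-op needs a register pair for its 64-bit temporary,
// hence the JS_CODEGEN_MIPS32 split in visitWasmAtomicBinopI64.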
void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
  Register ptr = ToRegister(lir->ptr());
  Register64 oldValue = ToRegister64(lir->oldValue());
  Register64 newValue = ToRegister64(lir->newValue());
  Register64 output = ToOutRegister64(lir);
  uint32_t offset = lir->mir()->access().offset();

  BaseIndex addr(HeapReg, ptr, TimesOne, offset);
  masm.wasmCompareExchange64(lir->mir()->access(), addr, oldValue, newValue,
                             output);
}

void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
  Register ptr = ToRegister(lir->ptr());
  Register64 value = ToRegister64(lir->value());
  Register64 output = ToOutRegister64(lir);
  uint32_t offset = lir->mir()->access().offset();

  BaseIndex addr(HeapReg, ptr, TimesOne, offset);
  masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, output);
}

void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
  Register ptr = ToRegister(lir->ptr());
  Register64 value = ToRegister64(lir->value());
  Register64 output = ToOutRegister64(lir);
#ifdef JS_CODEGEN_MIPS32
  Register64 temp(ToRegister(lir->getTemp(0)), ToRegister(lir->getTemp(1)));
#else
  Register64 temp(ToRegister(lir->getTemp(0)));
#endif
  uint32_t offset = lir->mir()->access().offset();

  BaseIndex addr(HeapReg, ptr, TimesOne, offset);

  masm.wasmAtomicFetchOp64(lir->mir()->access(), lir->mir()->operation(),
                           value, addr, temp, output);
}

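// None of the remaining LIR opcodes are supported on MIPS: nearbyint is not
// yet implemented and there is no SIMD128 support, so the lowering phase must
// never generate them for this target; reaching one is a compiler bug and
// crashes deliberately.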
void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }

void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128WithConstant(
    LWasmBinarySimd128WithConstant* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmVariableShiftSimd128(
    LWasmVariableShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmConstantShiftSimd128(
    LWasmConstantShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmSignReplicationSimd128(
    LWasmSignReplicationSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
    LWasmReplaceInt64LaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceAndBranchSimd128(
    LWasmReduceAndBranchSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128ToInt64(
    LWasmReduceSimd128ToInt64* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}
