1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/arm/CodeGenerator-arm.h"
8
9 #include "mozilla/DebugOnly.h"
10 #include "mozilla/MathAlgorithms.h"
11 #include "mozilla/Maybe.h"
12
13 #include <iterator>
14
15 #include "jsnum.h"
16
17 #include "jit/CodeGenerator.h"
18 #include "jit/InlineScriptTree.h"
19 #include "jit/JitRuntime.h"
20 #include "jit/MIR.h"
21 #include "jit/MIRGraph.h"
22 #include "js/Conversions.h"
23 #include "js/ScalarType.h" // js::Scalar::Type
24 #include "vm/JSContext.h"
25 #include "vm/Realm.h"
26 #include "vm/Shape.h"
27 #include "vm/TraceLogging.h"
28
29 #include "jit/MacroAssembler-inl.h"
30 #include "jit/shared/CodeGenerator-shared-inl.h"
31 #include "vm/JSScript-inl.h"
32
33 using namespace js;
34 using namespace js::jit;
35
36 using JS::GenericNaN;
37 using JS::ToInt32;
38 using mozilla::DebugOnly;
39 using mozilla::FloorLog2;
40 using mozilla::NegativeInfinity;
41
// shared
// Construct the ARM code generator. All state lives in the shared base
// class; this constructor only forwards its arguments.
CodeGeneratorARM::CodeGeneratorARM(MIRGenerator* gen, LIRGraph* graph,
                                   MacroAssembler* masm)
    : CodeGeneratorShared(gen, graph, masm) {}
46
// On this platform an LInt64Allocation is always materialized as a register
// pair, so the "operand or register" conversion degenerates to ToRegister64.
// (Other backends may return a memory operand here — hence the name.)
Register64 CodeGeneratorARM::ToOperandOrRegister64(
    const LInt64Allocation input) {
  return ToRegister64(input);
}
51
emitBranch(Assembler::Condition cond,MBasicBlock * mirTrue,MBasicBlock * mirFalse)52 void CodeGeneratorARM::emitBranch(Assembler::Condition cond,
53 MBasicBlock* mirTrue, MBasicBlock* mirFalse) {
54 if (isNextBlock(mirFalse->lir())) {
55 jumpToBlock(mirTrue, cond);
56 } else {
57 jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
58 jumpToBlock(mirTrue);
59 }
60 }
61
// Visitor dispatch for the out-of-line bailout stub: forward to the ARM
// code generator's handler.
void OutOfLineBailout::accept(CodeGeneratorARM* codegen) {
  codegen->visitOutOfLineBailout(this);
}
65
visitTestIAndBranch(LTestIAndBranch * test)66 void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
67 const LAllocation* opd = test->getOperand(0);
68 MBasicBlock* ifTrue = test->ifTrue();
69 MBasicBlock* ifFalse = test->ifFalse();
70
71 // Test the operand
72 masm.as_cmp(ToRegister(opd), Imm8(0));
73
74 if (isNextBlock(ifFalse->lir())) {
75 jumpToBlock(ifTrue, Assembler::NonZero);
76 } else if (isNextBlock(ifTrue->lir())) {
77 jumpToBlock(ifFalse, Assembler::Zero);
78 } else {
79 jumpToBlock(ifFalse, Assembler::Zero);
80 jumpToBlock(ifTrue);
81 }
82 }
83
// Compare two int32 values and materialize the boolean result (0 or 1)
// into the output register.
void CodeGenerator::visitCompare(LCompare* comp) {
  Assembler::Condition cond =
      JSOpToCondition(comp->mir()->compareType(), comp->jsop());
  const LAllocation* left = comp->getOperand(0);
  const LAllocation* right = comp->getOperand(1);
  const LDefinition* def = comp->getDef(0);

  ScratchRegisterScope scratch(masm);

  // The rhs may be an immediate, a register, or a memory operand; each form
  // uses a different ma_cmp overload (immediates/memory may need scratch
  // registers to synthesize the value).
  if (right->isConstant()) {
    masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)), scratch);
  } else if (right->isRegister()) {
    masm.ma_cmp(ToRegister(left), ToRegister(right));
  } else {
    SecondScratchRegisterScope scratch2(masm);
    masm.ma_cmp(ToRegister(left), Operand(ToAddress(right)), scratch, scratch2);
  }
  // Unconditionally write 0, then conditionally overwrite with 1 when the
  // comparison holds. The first mov carries no SetCC, so the flags set by
  // the cmp above survive to the conditional mov.
  masm.ma_mov(Imm32(0), ToRegister(def));
  masm.ma_mov(Imm32(1), ToRegister(def), cond);
}
104
// Compare two int32 values and branch to one of two successors, fusing the
// compare with the branch instead of materializing a boolean.
void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
  Assembler::Condition cond =
      JSOpToCondition(comp->cmpMir()->compareType(), comp->jsop());
  const LAllocation* left = comp->left();
  const LAllocation* right = comp->right();

  ScratchRegisterScope scratch(masm);

  // Same rhs dispatch as visitCompare: immediate, register, or memory.
  if (right->isConstant()) {
    masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)), scratch);
  } else if (right->isRegister()) {
    masm.ma_cmp(ToRegister(left), ToRegister(right));
  } else {
    SecondScratchRegisterScope scratch2(masm);
    masm.ma_cmp(ToRegister(left), Operand(ToAddress(right)), scratch, scratch2);
  }
  emitBranch(cond, comp->ifTrue(), comp->ifFalse());
}
123
// Emit the shared out-of-line code. On ARM this additionally binds the
// common deoptimization label that all non-table-based bailouts branch to.
// Returns false on OOM.
bool CodeGeneratorARM::generateOutOfLineCode() {
  if (!CodeGeneratorShared::generateOutOfLineCode()) {
    return false;
  }

  if (deoptLabel_.used()) {
    // All non-table-based bailouts will go here.
    masm.bind(&deoptLabel_);

    // Push the frame size, so the handler can recover the IonScript.
    // NOTE(review): the frame size is passed in lr — confirm this matches
    // the generic bailout handler's calling convention.
    masm.ma_mov(Imm32(frameSize()), lr);

    TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
    masm.jump(handler);
  }

  return !masm.oom();
}
142
// Emit a conditional bailout for |snapshot|: use a bailout-table entry when
// one can be assigned, otherwise fall back to a lazy out-of-line bailout
// stub that routes through deoptLabel_.
void CodeGeneratorARM::bailoutIf(Assembler::Condition condition,
                                 LSnapshot* snapshot) {
  encode(snapshot);

  // Though the assembler doesn't track all frame pushes, at least make sure
  // the known value makes sense. We can't use bailout tables if the stack
  // isn't properly aligned to the static frame size.
  MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                frameClass_.frameSize() == masm.framePushed());

  if (assignBailoutId(snapshot)) {
    // Table-based bailout: branch directly into the per-id table entry.
    uint8_t* bailoutTable = Assembler::BailoutTableStart(deoptTable_->value);
    uint8_t* code =
        bailoutTable + snapshot->bailoutId() * BAILOUT_TABLE_ENTRY_SIZE;
    masm.ma_b(code, condition);
    return;
  }

  // We could not use a jump table, either because all bailout IDs were
  // reserved, or a jump table is not optimal for this frame size or
  // platform. Whatever, we will generate a lazy bailout.
  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  OutOfLineBailout* ool =
      new (alloc()) OutOfLineBailout(snapshot, masm.framePushed());

  // All bailout code is associated with the bytecodeSite of the block we are
  // bailing out from.
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  masm.ma_b(ool->entry(), condition);
}
175
// Retarget a previously-emitted (used but unbound) branch |label| to a lazy
// out-of-line bailout stub for |snapshot|.
void CodeGeneratorARM::bailoutFrom(Label* label, LSnapshot* snapshot) {
  // The label must already have branches pointing at it but no binding;
  // (only checked when the assembler hasn't hit OOM).
  MOZ_ASSERT_IF(!masm.oom(), label->used());
  MOZ_ASSERT_IF(!masm.oom(), !label->bound());

  encode(snapshot);

  // Though the assembler doesn't track all frame pushes, at least make sure
  // the known value makes sense. We can't use bailout tables if the stack
  // isn't properly aligned to the static frame size.
  MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                frameClass_.frameSize() == masm.framePushed());

  // On ARM we don't use a bailout table.
  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  OutOfLineBailout* ool =
      new (alloc()) OutOfLineBailout(snapshot, masm.framePushed());

  // All bailout code is associated with the bytecodeSite of the block we are
  // bailing out from.
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  masm.retarget(label, ool->entry());
}
200
bailout(LSnapshot * snapshot)201 void CodeGeneratorARM::bailout(LSnapshot* snapshot) {
202 Label label;
203 masm.ma_b(&label);
204 bailoutFrom(&label, snapshot);
205 }
206
// Out-of-line bailout stub: push the snapshot offset twice (the first push
// fills BailoutStack's padding slot) and jump to the shared deoptimization
// label bound in generateOutOfLineCode().
void CodeGeneratorARM::visitOutOfLineBailout(OutOfLineBailout* ool) {
  ScratchRegisterScope scratch(masm);
  masm.ma_mov(Imm32(ool->snapshot()->snapshotOffset()), scratch);
  masm.ma_push(scratch);  // BailoutStack::padding_
  masm.ma_push(scratch);  // BailoutStack::snapshotOffset_
  masm.ma_b(&deoptLabel_);
}
214
visitMinMaxD(LMinMaxD * ins)215 void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
216 FloatRegister first = ToFloatRegister(ins->first());
217 FloatRegister second = ToFloatRegister(ins->second());
218
219 MOZ_ASSERT(first == ToFloatRegister(ins->output()));
220
221 if (ins->mir()->isMax()) {
222 masm.maxDouble(second, first, true);
223 } else {
224 masm.minDouble(second, first, true);
225 }
226 }
227
visitMinMaxF(LMinMaxF * ins)228 void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
229 FloatRegister first = ToFloatRegister(ins->first());
230 FloatRegister second = ToFloatRegister(ins->second());
231
232 MOZ_ASSERT(first == ToFloatRegister(ins->output()));
233
234 if (ins->mir()->isMax()) {
235 masm.maxFloat32(second, first, true);
236 } else {
237 masm.minFloat32(second, first, true);
238 }
239 }
240
// Int32 addition. Emits an add that sets the condition codes (SetCC); when
// the LIR node carries a snapshot, a signed-overflow bailout follows.
void CodeGenerator::visitAddI(LAddI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  ScratchRegisterScope scratch(masm);

  // rhs may be an immediate, a register, or a memory operand.
  if (rhs->isConstant()) {
    masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch,
                SetCC);
  } else if (rhs->isRegister()) {
    masm.ma_add(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), SetCC);
  } else {
    masm.ma_add(ToRegister(lhs), Operand(ToAddress(rhs)), ToRegister(dest),
                SetCC);
  }

  // Fallible adds bail out on signed overflow (V flag from the SetCC add).
  if (ins->snapshot()) {
    bailoutIf(Assembler::Overflow, ins->snapshot());
  }
}
262
visitAddI64(LAddI64 * lir)263 void CodeGenerator::visitAddI64(LAddI64* lir) {
264 const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
265 const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
266
267 MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
268
269 if (IsConstant(rhs)) {
270 masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
271 return;
272 }
273
274 masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
275 }
276
// Int32 subtraction. Emits a flag-setting sub (SetCC); when the LIR node
// carries a snapshot, a signed-overflow bailout follows.
void CodeGenerator::visitSubI(LSubI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  ScratchRegisterScope scratch(masm);

  // rhs may be an immediate, a register, or a memory operand.
  if (rhs->isConstant()) {
    masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch,
                SetCC);
  } else if (rhs->isRegister()) {
    masm.ma_sub(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), SetCC);
  } else {
    masm.ma_sub(ToRegister(lhs), Operand(ToAddress(rhs)), ToRegister(dest),
                SetCC);
  }

  // Fallible subs bail out on signed overflow.
  if (ins->snapshot()) {
    bailoutIf(Assembler::Overflow, ins->snapshot());
  }
}
298
visitSubI64(LSubI64 * lir)299 void CodeGenerator::visitSubI64(LSubI64* lir) {
300 const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
301 const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
302
303 MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
304
305 if (IsConstant(rhs)) {
306 masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
307 return;
308 }
309
310 masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
311 }
312
// Int32 multiplication. Constant right-hand sides are strength-reduced to
// negation, moves, shifts, or shift+add sequences where possible; otherwise
// a real multiply is emitted. Depending on the MIR node's flags, bailouts
// are emitted for overflow and for a -0 result.
void CodeGenerator::visitMulI(LMulI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);
  MMul* mul = ins->mir();
  // Integer-mode multiplies must have been proven infallible upstream.
  MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
                !mul->canBeNegativeZero() && !mul->canOverflow());

  if (rhs->isConstant()) {
    // Bailout when this condition is met.
    Assembler::Condition c = Assembler::Overflow;
    // Bailout on -0.0
    int32_t constant = ToInt32(rhs);
    if (mul->canBeNegativeZero() && constant <= 0) {
      // constant == 0: result is -0 iff lhs < 0.
      // constant < 0:  result is -0 iff lhs == 0.
      Assembler::Condition bailoutCond =
          (constant == 0) ? Assembler::LessThan : Assembler::Equal;
      masm.as_cmp(ToRegister(lhs), Imm8(0));
      bailoutIf(bailoutCond, ins->snapshot());
    }
    // TODO: move these to ma_mul.
    switch (constant) {
      case -1:
        // x * -1 == 0 - x; rsb sets flags so overflow (INT_MIN) is caught.
        masm.as_rsb(ToRegister(dest), ToRegister(lhs), Imm8(0), SetCC);
        break;
      case 0:
        masm.ma_mov(Imm32(0), ToRegister(dest));
        return;  // Escape overflow check;
      case 1:
        // Nop
        masm.ma_mov(ToRegister(lhs), ToRegister(dest));
        return;  // Escape overflow check;
      case 2:
        masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCC);
        // Overflow is handled later.
        break;
      default: {
        bool handled = false;
        if (constant > 0) {
          // Try shift and add sequences for a positive constant.
          if (!mul->canOverflow()) {
            // If it cannot overflow, we can do lots of optimizations.
            Register src = ToRegister(lhs);
            uint32_t shift = FloorLog2(constant);
            uint32_t rest = constant - (1 << shift);
            // See if the constant has one bit set, meaning it can be
            // encoded as a bitshift.
            if ((1 << shift) == constant) {
              masm.ma_lsl(Imm32(shift), src, ToRegister(dest));
              handled = true;
            } else {
              // If the constant cannot be encoded as (1 << C1), see
              // if it can be encoded as (1 << C1) | (1 << C2), which
              // can be computed using an add and a shift.
              uint32_t shift_rest = FloorLog2(rest);
              if ((1u << shift_rest) == rest) {
                masm.as_add(ToRegister(dest), src,
                            lsl(src, shift - shift_rest));
                if (shift_rest != 0) {
                  masm.ma_lsl(Imm32(shift_rest), ToRegister(dest),
                              ToRegister(dest));
                }
                handled = true;
              }
            }
          } else if (ToRegister(lhs) != ToRegister(dest)) {
            // To stay on the safe side, only optimize things that are a
            // power of 2.

            uint32_t shift = FloorLog2(constant);
            if ((1 << shift) == constant) {
              // dest = lhs * pow(2,shift)
              masm.ma_lsl(Imm32(shift), ToRegister(lhs), ToRegister(dest));
              // At runtime, check (lhs == dest >> shift), if this
              // does not hold, some bits were lost due to overflow,
              // and the computation should be resumed as a double.
              masm.as_cmp(ToRegister(lhs), asr(ToRegister(dest), shift));
              c = Assembler::NotEqual;
              handled = true;
            }
          }
        }

        // No shift/add pattern applied: emit a real multiply, with an
        // overflow check only when required.
        if (!handled) {
          ScratchRegisterScope scratch(masm);
          if (mul->canOverflow()) {
            c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)),
                                  ToRegister(dest), scratch, c);
          } else {
            masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest),
                        scratch);
          }
        }
      }
    }
    // Bailout on overflow.
    if (mul->canOverflow()) {
      bailoutIf(c, ins->snapshot());
    }
  } else {
    Assembler::Condition c = Assembler::Overflow;

    if (mul->canOverflow()) {
      ScratchRegisterScope scratch(masm);
      c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest),
                            scratch, c);
    } else {
      masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));
    }

    // Bailout on overflow.
    if (mul->canOverflow()) {
      bailoutIf(c, ins->snapshot());
    }

    if (mul->canBeNegativeZero()) {
      Label done;
      masm.as_cmp(ToRegister(dest), Imm8(0));
      masm.ma_b(&done, Assembler::NotEqual);

      // Result is -0 if lhs or rhs is negative.
      // cmn (compare-negative) adds the operands; N is set iff their sum is
      // negative, i.e. iff one of the inputs was negative here.
      masm.ma_cmn(ToRegister(lhs), ToRegister(rhs));
      bailoutIf(Assembler::Signed, ins->snapshot());

      masm.bind(&done);
    }
  }
}
440
// 64-bit multiplication on a register pair. Small constants are strength-
// reduced (negate, zero, nop, double, power-of-two shift); everything else
// uses mul64 with an optional temp register.
void CodeGenerator::visitMulI64(LMulI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);

  // The lhs register pair doubles as the output.
  MOZ_ASSERT(ToRegister64(lhs) == ToOutRegister64(lir));

  if (IsConstant(rhs)) {
    int64_t constant = ToInt64(rhs);
    switch (constant) {
      case -1:
        masm.neg64(ToRegister64(lhs));
        return;
      case 0:
        masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
        return;
      case 1:
        // nop
        return;
      case 2:
        masm.add64(ToRegister64(lhs), ToRegister64(lhs));
        return;
      default:
        if (constant > 0) {
          // Use shift if constant is power of 2.
          int32_t shift = mozilla::FloorLog2(constant);
          if (int64_t(1) << shift == constant) {
            masm.lshift64(Imm32(shift), ToRegister64(lhs));
            return;
          }
        }
        Register temp = ToTempRegisterOrInvalid(lir->temp());
        masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
    }
  } else {
    Register temp = ToTempRegisterOrInvalid(lir->temp());
    masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
  }
}
479
// Shared guard code for int32 division: handles INT32_MIN / -1 overflow,
// division by zero, and the negative-zero case before the actual divide is
// emitted. Truncating contexts write the JS-truncated result to |output|
// and jump to |done|; non-truncating contexts bail out instead.
void CodeGeneratorARM::divICommon(MDiv* mir, Register lhs, Register rhs,
                                  Register output, LSnapshot* snapshot,
                                  Label& done) {
  ScratchRegisterScope scratch(masm);

  if (mir->canBeNegativeOverflow()) {
    // Handle INT32_MIN / -1;
    // The integer division will give INT32_MIN, but we want -(double)INT32_MIN.

    // Sets EQ if lhs == INT32_MIN.
    masm.ma_cmp(lhs, Imm32(INT32_MIN), scratch);
    // If EQ (LHS == INT32_MIN), sets EQ if rhs == -1.
    // (The second cmp is conditional, so EQ only survives if both held.)
    masm.ma_cmp(rhs, Imm32(-1), scratch, Assembler::Equal);
    if (mir->canTruncateOverflow()) {
      if (mir->trapOnError()) {
        // Wasm semantics: trap on integer overflow.
        Label ok;
        masm.ma_b(&ok, Assembler::NotEqual);
        masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
        masm.bind(&ok);
      } else {
        // (-INT32_MIN)|0 = INT32_MIN
        Label skip;
        masm.ma_b(&skip, Assembler::NotEqual);
        masm.ma_mov(Imm32(INT32_MIN), output);
        masm.ma_b(&done);
        masm.bind(&skip);
      }
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Equal, snapshot);
    }
  }

  // Handle divide by zero.
  if (mir->canBeDivideByZero()) {
    masm.as_cmp(rhs, Imm8(0));
    if (mir->canTruncateInfinities()) {
      if (mir->trapOnError()) {
        // Wasm semantics: trap on division by zero.
        Label nonZero;
        masm.ma_b(&nonZero, Assembler::NotEqual);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
        masm.bind(&nonZero);
      } else {
        // Infinity|0 == 0
        Label skip;
        masm.ma_b(&skip, Assembler::NotEqual);
        masm.ma_mov(Imm32(0), output);
        masm.ma_b(&done);
        masm.bind(&skip);
      }
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Equal, snapshot);
    }
  }

  // Handle negative 0. (0 / rhs with rhs < 0 would produce -0, which is not
  // representable as an int32, so bail.)
  if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
    Label nonzero;
    masm.as_cmp(lhs, Imm8(0));
    masm.ma_b(&nonzero, Assembler::NotEqual);
    masm.as_cmp(rhs, Imm8(0));
    MOZ_ASSERT(mir->fallible());
    bailoutIf(Assembler::LessThan, snapshot);
    masm.bind(&nonzero);
  }
}
547
// Int32 division using the hardware SDIV instruction. Guards (overflow,
// div-by-zero, -0) are emitted by divICommon. When the remainder must be
// preserved (non-truncating division), a non-zero remainder bails out.
void CodeGenerator::visitDivI(LDivI* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp = ToRegister(ins->getTemp(0));
  Register output = ToRegister(ins->output());
  MDiv* mir = ins->mir();

  Label done;
  divICommon(mir, lhs, rhs, output, ins->snapshot(), done);

  if (mir->canTruncateRemainder()) {
    masm.ma_sdiv(lhs, rhs, output);
  } else {
    {
      // Compute lhs / rhs, multiply back, and compare against lhs: if they
      // differ the division had a remainder and the result is fractional.
      ScratchRegisterScope scratch(masm);
      masm.ma_sdiv(lhs, rhs, temp);
      masm.ma_mul(temp, rhs, scratch);
      masm.ma_cmp(lhs, scratch);
    }
    bailoutIf(Assembler::NotEqual, ins->snapshot());
    masm.ma_mov(temp, output);
  }

  masm.bind(&done);
}
573
// ARM EABI runtime division helpers, used when the CPU lacks SDIV/UDIV.
// Each returns the quotient in r0 and the remainder in r1, modeled here as
// a packed int64_t return value.
extern "C" {
extern MOZ_EXPORT int64_t __aeabi_idivmod(int, int);
extern MOZ_EXPORT int64_t __aeabi_uidivmod(int, int);
}
578
// Int32 division without hardware divide: call the EABI __aeabi_idivmod
// helper. Guards are emitted by divICommon. Wasm callers must preserve
// WasmTlsReg around the ABI call.
void CodeGenerator::visitSoftDivI(LSoftDivI* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  MDiv* mir = ins->mir();

  Label done;
  divICommon(mir, lhs, rhs, output, ins->snapshot(), done);

  if (gen->compilingWasm()) {
    // Save/restore the TLS register across the call; the tls offset tells
    // callWithABI where to find it relative to the frame.
    masm.Push(WasmTlsReg);
    int32_t framePushedAfterTls = masm.framePushed();
    masm.setupWasmABICall();
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
    masm.callWithABI(mir->bytecodeOffset(),
                     wasm::SymbolicAddress::aeabi_idivmod,
                     mozilla::Some(tlsOffset));
    masm.Pop(WasmTlsReg);
  } else {
    using Fn = int64_t (*)(int, int);
    masm.setupAlignedABICall();
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    masm.callWithABI<Fn, __aeabi_idivmod>(
        MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
  }

  // idivmod returns the quotient in r0, and the remainder in r1.
  // Non-truncating division bails out if the remainder is non-zero.
  if (!mir->canTruncateRemainder()) {
    MOZ_ASSERT(mir->fallible());
    masm.as_cmp(r1, Imm8(0));
    bailoutIf(Assembler::NonZero, ins->snapshot());
  }

  masm.bind(&done);
}
617
// Int32 division by a power of two, implemented as an arithmetic shift with
// a rounding adjustment for negative numerators.
void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
  MDiv* mir = ins->mir();
  Register lhs = ToRegister(ins->numerator());
  Register output = ToRegister(ins->output());
  int32_t shift = ins->shift();

  // Division by 2^0 == 1 is a plain move.
  if (shift == 0) {
    masm.ma_mov(lhs, output);
    return;
  }

  if (!mir->isTruncated()) {
    // If the remainder is != 0, bailout since this must be a double.
    // Shifting left by (32 - shift) isolates the low |shift| bits; a
    // non-zero result means the division is inexact.
    {
      // The bailout code also needs the scratch register.
      // Here it is only used as a dummy target to set CC flags.
      ScratchRegisterScope scratch(masm);
      masm.as_mov(scratch, lsl(lhs, 32 - shift), SetCC);
    }
    bailoutIf(Assembler::NonZero, ins->snapshot());
  }

  if (!mir->canBeNegativeDividend()) {
    // Numerator is unsigned, so needs no adjusting. Do the shift.
    masm.as_mov(output, asr(lhs, shift));
    return;
  }

  // Adjust the value so that shifting produces a correctly rounded result
  // when the numerator is negative. See 10-1 "Signed Division by a Known
  // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
  ScratchRegisterScope scratch(masm);

  if (shift > 1) {
    // scratch = lhs >> 31 (all ones if negative); add its top |shift| bits.
    masm.as_mov(scratch, asr(lhs, 31));
    masm.as_add(scratch, lhs, lsr(scratch, 32 - shift));
  } else {
    // shift == 1: the sign bit itself is the adjustment.
    masm.as_add(scratch, lhs, lsr(lhs, 32 - shift));
  }

  // Do the shift.
  masm.as_mov(output, asr(scratch, shift));
}
661
// Shared guard code for int32 modulus: handles X % 0, which must produce
// NaN (JS), 0 (truncated JS), or a trap (wasm) rather than whatever the
// hardware/runtime would do.
void CodeGeneratorARM::modICommon(MMod* mir, Register lhs, Register rhs,
                                  Register output, LSnapshot* snapshot,
                                  Label& done) {
  // X % 0 is bad because it will give garbage (or abort), when it should give
  // NaN.

  if (mir->canBeDivideByZero()) {
    masm.as_cmp(rhs, Imm8(0));
    if (mir->isTruncated()) {
      Label nonZero;
      masm.ma_b(&nonZero, Assembler::NotEqual);
      if (mir->trapOnError()) {
        // Wasm semantics: trap on modulo by zero.
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
      } else {
        // NaN|0 == 0
        masm.ma_mov(Imm32(0), output);
        masm.ma_b(&done);
      }
      masm.bind(&nonZero);
    } else {
      // Non-truncating context: a zero divisor means the result is NaN,
      // which is not an int32, so bail.
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Equal, snapshot);
    }
  }
}
687
// Int32 modulus using SDIV+MLS (via ma_smod). Guards are emitted by
// modICommon; a zero result with a negative dividend (i.e. -0) bails out in
// non-truncating contexts.
void CodeGenerator::visitModI(LModI* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  MMod* mir = ins->mir();

  // Contrary to other architectures (notably x86) INT_MIN % -1 doesn't need to
  // be handled separately. |ma_smod| computes the remainder using the |SDIV|
  // and the |MLS| instructions. On overflow, |SDIV| truncates the result to
  // 32-bit and returns INT_MIN, see ARM Architecture Reference Manual, SDIV
  // instruction.
  //
  // mls(INT_MIN, sdiv(INT_MIN, -1), -1)
  // = INT_MIN - (sdiv(INT_MIN, -1) * -1)
  // = INT_MIN - (INT_MIN * -1)
  // = INT_MIN - INT_MIN
  // = 0
  //
  // And a zero remainder with a negative dividend is already handled below.

  Label done;
  modICommon(mir, lhs, rhs, output, ins->snapshot(), done);

  {
    ScratchRegisterScope scratch(masm);
    masm.ma_smod(lhs, rhs, output, scratch);
  }

  // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0.
  if (mir->canBeNegativeDividend()) {
    if (mir->isTruncated()) {
      // -0.0|0 == 0
    } else {
      MOZ_ASSERT(mir->fallible());
      // See if X < 0
      masm.as_cmp(output, Imm8(0));
      masm.ma_b(&done, Assembler::NotEqual);
      masm.as_cmp(lhs, Imm8(0));
      bailoutIf(Assembler::Signed, ins->snapshot());
    }
  }

  masm.bind(&done);
}
732
// Int32 modulus without hardware divide: call the EABI __aeabi_idivmod
// helper. Handles INT_MIN % -1 explicitly (the EABI leaves it undefined),
// guards via modICommon, and bails out on a -0 result in non-truncating
// contexts.
void CodeGenerator::visitSoftModI(LSoftModI* ins) {
  // Extract the registers from this instruction.
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  Register callTemp = ToRegister(ins->callTemp());
  MMod* mir = ins->mir();
  Label done;

  // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs <
  // 0. (lhs itself may be clobbered by the ABI call below.)
  MOZ_ASSERT(callTemp != lhs);
  MOZ_ASSERT(callTemp != rhs);
  masm.ma_mov(lhs, callTemp);

  // Prevent INT_MIN % -1.
  //
  // |aeabi_idivmod| is allowed to return any arbitrary value when called with
  // |(INT_MIN, -1)|, see "Run-time ABI for the ARM architecture manual". Most
  // implementations perform a non-trapping signed integer division and
  // return the expected result, i.e. INT_MIN. But since we can't rely on this
  // behavior, handle this case separately here.
  if (mir->canBeNegativeDividend()) {
    {
      ScratchRegisterScope scratch(masm);
      // Sets EQ if lhs == INT_MIN
      masm.ma_cmp(lhs, Imm32(INT_MIN), scratch);
      // If EQ (LHS == INT_MIN), sets EQ if rhs == -1
      masm.ma_cmp(rhs, Imm32(-1), scratch, Assembler::Equal);
    }
    if (mir->isTruncated()) {
      // (INT_MIN % -1)|0 == 0
      Label skip;
      masm.ma_b(&skip, Assembler::NotEqual);
      masm.ma_mov(Imm32(0), output);
      masm.ma_b(&done);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Equal, ins->snapshot());
    }
  }

  modICommon(mir, lhs, rhs, output, ins->snapshot(), done);

  if (gen->compilingWasm()) {
    // Save/restore the TLS register across the call; the tls offset tells
    // callWithABI where to find it relative to the frame.
    masm.Push(WasmTlsReg);
    int32_t framePushedAfterTls = masm.framePushed();
    masm.setupWasmABICall();
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
    masm.callWithABI(mir->bytecodeOffset(),
                     wasm::SymbolicAddress::aeabi_idivmod,
                     mozilla::Some(tlsOffset));
    masm.Pop(WasmTlsReg);
  } else {
    using Fn = int64_t (*)(int, int);
    masm.setupAlignedABICall();
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    masm.callWithABI<Fn, __aeabi_idivmod>(
        MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
  }

  // idivmod leaves the remainder in r1; copy it to the output register.
  MOZ_ASSERT(r1 != output);
  masm.move32(r1, output);

  // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
  if (mir->canBeNegativeDividend()) {
    if (mir->isTruncated()) {
      // -0.0|0 == 0
    } else {
      MOZ_ASSERT(mir->fallible());
      // See if X < 0 (the saved copy, since lhs may have been clobbered).
      masm.as_cmp(output, Imm8(0));
      masm.ma_b(&done, Assembler::NotEqual);
      masm.as_cmp(callTemp, Imm8(0));
      bailoutIf(Assembler::Signed, ins->snapshot());
    }
  }

  masm.bind(&done);
}
817
// Int32 modulus by a power of two: negate a negative input, mask off the
// low bits, and negate back, so the result has the sign of the dividend
// (JS semantics). A zero result from a negative dividend (-0) bails out in
// non-truncating contexts.
void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
  Register in = ToRegister(ins->getOperand(0));
  Register out = ToRegister(ins->getDef(0));
  MMod* mir = ins->mir();
  Label fin;
  // bug 739870, jbramley has a different sequence that may help with speed
  // here.

  // Copy and test the input; 0 % 2^k == 0, skip straight to the end.
  masm.ma_mov(in, out, SetCC);
  masm.ma_b(&fin, Assembler::Zero);
  // If negative (Signed), negate so the mask applies to the magnitude.
  masm.as_rsb(out, out, Imm8(0), LeaveCC, Assembler::Signed);
  {
    ScratchRegisterScope scratch(masm);
    masm.ma_and(Imm32((1 << ins->shift()) - 1), out, scratch);
  }
  // Negate back if the input was negative; SetCC so the Zero flag reflects
  // the final result for the -0 bailout below.
  masm.as_rsb(out, out, Imm8(0), SetCC, Assembler::Signed);
  if (mir->canBeNegativeDividend()) {
    if (!mir->isTruncated()) {
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Zero, ins->snapshot());
    } else {
      // -0|0 == 0
    }
  }
  masm.bind(&fin);
}
844
// Int32 modulus by a power of two via the ma_mod_mask helper (which needs
// two temps plus both scratch registers). Bails out on a -0 result in
// non-truncating contexts; ma_mod_mask is assumed to leave the Zero flag
// reflecting the result — confirm in MacroAssembler-arm.
void CodeGenerator::visitModMaskI(LModMaskI* ins) {
  Register src = ToRegister(ins->getOperand(0));
  Register dest = ToRegister(ins->getDef(0));
  Register tmp1 = ToRegister(ins->getTemp(0));
  Register tmp2 = ToRegister(ins->getTemp(1));
  MMod* mir = ins->mir();

  ScratchRegisterScope scratch(masm);
  SecondScratchRegisterScope scratch2(masm);

  masm.ma_mod_mask(src, dest, tmp1, tmp2, scratch, scratch2, ins->shift());

  if (mir->canBeNegativeDividend()) {
    if (!mir->isTruncated()) {
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Zero, ins->snapshot());
    } else {
      // -0|0 == 0
    }
  }
}
866
// Divide two BigInt digits: compute dividend / divisor and allocate a new
// BigInt holding the quotient in |output|. With hardware divide, SDIV is
// used directly; otherwise the EABI helper is called with volatile
// registers preserved. The quotient ends up in |dividend| either way, and
// |divisor| is reused as a temp for the allocation.
void CodeGeneratorARM::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
                                     Register divisor, Register output,
                                     Label* fail) {
  // Callers handle division by zero and integer overflow.

  if (HasIDIV()) {
    masm.ma_sdiv(dividend, divisor, /* result= */ dividend);

    // Create and return the result.
    masm.newGCBigInt(output, divisor, fail, bigIntsCanBeInNursery());
    masm.initializeBigInt(output, dividend);

    return;
  }

  // idivmod returns the quotient in r0, and the remainder in r1.
  MOZ_ASSERT(dividend == r0);
  MOZ_ASSERT(divisor == r1);

  // Preserve everything live across the ABI call except the registers we
  // own (inputs and output).
  LiveRegisterSet volatileRegs = liveVolatileRegs(ins);
  volatileRegs.takeUnchecked(dividend);
  volatileRegs.takeUnchecked(divisor);
  volatileRegs.takeUnchecked(output);

  masm.PushRegsInMask(volatileRegs);

  using Fn = int64_t (*)(int, int);
  masm.setupUnalignedABICall(output);
  masm.passABIArg(dividend);
  masm.passABIArg(divisor);
  masm.callWithABI<Fn, __aeabi_idivmod>(MoveOp::GENERAL,
                                        CheckUnsafeCallWithABI::DontCheckOther);

  masm.PopRegsInMask(volatileRegs);

  // Create and return the result.
  masm.newGCBigInt(output, divisor, fail, bigIntsCanBeInNursery());
  masm.initializeBigInt(output, dividend);
}
906
// Compute the modulus of two BigInt digits and allocate a new BigInt
// holding the remainder in |output|. With hardware divide, SDIV+MLS (via
// ma_smod) leaves the remainder in |dividend|; on the soft path the EABI
// helper leaves it in |divisor| (r1). The register reused as the
// allocation temp differs accordingly between the two paths.
void CodeGeneratorARM::emitBigIntMod(LBigIntMod* ins, Register dividend,
                                     Register divisor, Register output,
                                     Label* fail) {
  // Callers handle division by zero and integer overflow.

  if (HasIDIV()) {
    {
      ScratchRegisterScope scratch(masm);
      masm.ma_smod(dividend, divisor, /* result= */ dividend, scratch);
    }

    // Create and return the result.
    masm.newGCBigInt(output, divisor, fail, bigIntsCanBeInNursery());
    masm.initializeBigInt(output, dividend);

    return;
  }

  // idivmod returns the quotient in r0, and the remainder in r1.
  MOZ_ASSERT(dividend == r0);
  MOZ_ASSERT(divisor == r1);

  // Preserve everything live across the ABI call except the registers we
  // own (inputs and output).
  LiveRegisterSet volatileRegs = liveVolatileRegs(ins);
  volatileRegs.takeUnchecked(dividend);
  volatileRegs.takeUnchecked(divisor);
  volatileRegs.takeUnchecked(output);

  masm.PushRegsInMask(volatileRegs);

  using Fn = int64_t (*)(int, int);
  masm.setupUnalignedABICall(output);
  masm.passABIArg(dividend);
  masm.passABIArg(divisor);
  masm.callWithABI<Fn, __aeabi_idivmod>(MoveOp::GENERAL,
                                        CheckUnsafeCallWithABI::DontCheckOther);

  masm.PopRegsInMask(volatileRegs);

  // Create and return the result. The remainder is in r1 (== divisor), so
  // dividend serves as the allocation temp here.
  masm.newGCBigInt(output, dividend, fail, bigIntsCanBeInNursery());
  masm.initializeBigInt(output, divisor);
}
949
// Int32 bitwise NOT (~x), emitted as a single mvn.
void CodeGenerator::visitBitNotI(LBitNotI* ins) {
  const LAllocation* input = ins->getOperand(0);
  const LDefinition* dest = ins->getDef(0);
  // The input is never a constant here. (Original comment was garbled;
  // presumably: ARM could MVN an imm8m-encodable constant to cover a wider
  // range of values, but constant inputs are folded elsewhere — verify.)
  MOZ_ASSERT(!input->isConstant());

  masm.ma_mvn(ToRegister(input), ToRegister(dest));
}
959
// Int32 bitwise or/xor/and. The rhs is either an immediate (which may need
// the scratch register to synthesize values not encodable as imm8m) or a
// register.
void CodeGenerator::visitBitOpI(LBitOpI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  ScratchRegisterScope scratch(masm);

  // All of these bitops should be either imm32's, or integer registers.
  switch (ins->bitop()) {
    case JSOp::BitOr:
      if (rhs->isConstant()) {
        masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest),
                    scratch);
      } else {
        masm.ma_orr(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
      }
      break;
    case JSOp::BitXor:
      if (rhs->isConstant()) {
        masm.ma_eor(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest),
                    scratch);
      } else {
        masm.ma_eor(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
      }
      break;
    case JSOp::BitAnd:
      if (rhs->isConstant()) {
        masm.ma_and(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest),
                    scratch);
      } else {
        masm.ma_and(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
      }
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}
997
// Emit an int32 shift (<<, >>, >>>). A fallible unsigned right shift bails
// out when the result has the sign bit set, since that value is not
// representable as an int32.
void CodeGenerator::visitShiftI(LShiftI* ins) {
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());

  if (rhs->isConstant()) {
    // Per JS semantics only the low five bits of the shift count are used.
    int32_t shift = ToInt32(rhs) & 0x1F;
    switch (ins->bitop()) {
      case JSOp::Lsh:
        if (shift) {
          masm.ma_lsl(Imm32(shift), lhs, dest);
        } else {
          // Shift-by-zero immediates aren't encodable; emit a plain move.
          masm.ma_mov(lhs, dest);
        }
        break;
      case JSOp::Rsh:
        if (shift) {
          masm.ma_asr(Imm32(shift), lhs, dest);
        } else {
          masm.ma_mov(lhs, dest);
        }
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.ma_lsr(Imm32(shift), lhs, dest);
        } else {
          // x >>> 0 can overflow.
          masm.ma_mov(lhs, dest);
          if (ins->mir()->toUrsh()->fallible()) {
            // Bail if the (unshifted) result would be negative as an int32.
            masm.as_cmp(dest, Imm8(0));
            bailoutIf(Assembler::LessThan, ins->snapshot());
          }
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  } else {
    // The shift amounts should be AND'ed into the 0-31 range since arm
    // shifts by the lower byte of the register (it will attempt to shift by
    // 250 if you ask it to).
    masm.as_and(dest, ToRegister(rhs), Imm8(0x1F));

    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.ma_lsl(dest, lhs, dest);
        break;
      case JSOp::Rsh:
        masm.ma_asr(dest, lhs, dest);
        break;
      case JSOp::Ursh:
        masm.ma_lsr(dest, lhs, dest);
        if (ins->mir()->toUrsh()->fallible()) {
          // x >>> 0 can overflow.
          masm.as_cmp(dest, Imm8(0));
          bailoutIf(Assembler::LessThan, ins->snapshot());
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}
1061
// Unsigned right shift producing a double result: shift into the integer
// temp, then convert the unsigned 32-bit value to a double. Producing a
// double is what makes results with the high bit set representable.
void CodeGenerator::visitUrshD(LUrshD* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp());

  const LAllocation* rhs = ins->rhs();
  FloatRegister out = ToFloatRegister(ins->output());

  if (rhs->isConstant()) {
    // Only the low five bits of the shift count are meaningful.
    int32_t shift = ToInt32(rhs) & 0x1F;
    if (shift) {
      masm.ma_lsr(Imm32(shift), lhs, temp);
    } else {
      masm.ma_mov(lhs, temp);
    }
  } else {
    // Mask the variable count into 0-31; ARM shifts by the low byte of the
    // register otherwise.
    masm.as_and(temp, ToRegister(rhs), Imm8(0x1F));
    masm.ma_lsr(temp, lhs, temp);
  }

  masm.convertUInt32ToDouble(temp, out);
}
1083
visitClzI(LClzI * ins)1084 void CodeGenerator::visitClzI(LClzI* ins) {
1085 Register input = ToRegister(ins->input());
1086 Register output = ToRegister(ins->output());
1087
1088 masm.clz32(input, output, /* knownNotZero = */ false);
1089 }
1090
visitCtzI(LCtzI * ins)1091 void CodeGenerator::visitCtzI(LCtzI* ins) {
1092 Register input = ToRegister(ins->input());
1093 Register output = ToRegister(ins->output());
1094
1095 masm.ctz32(input, output, /* knownNotZero = */ false);
1096 }
1097
visitPopcntI(LPopcntI * ins)1098 void CodeGenerator::visitPopcntI(LPopcntI* ins) {
1099 Register input = ToRegister(ins->input());
1100 Register output = ToRegister(ins->output());
1101
1102 Register tmp = ToRegister(ins->temp());
1103
1104 masm.popcnt32(input, output, tmp);
1105 }
1106
// Math.pow(x, 0.5): sqrt(x) with the two IEEE special cases handled
// explicitly before the square root.
void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());
  ScratchDoubleScope scratch(masm);

  Label done;

  // Math.pow(-Infinity, 0.5) == Infinity.
  // Negating -Infinity conditionally (on Equal) produces +Infinity.
  masm.loadConstantDouble(NegativeInfinity<double>(), scratch);
  masm.compareDouble(input, scratch);
  masm.ma_vneg(scratch, output, Assembler::Equal);
  masm.ma_b(&done, Assembler::Equal);

  // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
  // Adding 0 converts any -0 to 0.
  masm.loadConstantDouble(0.0, scratch);
  masm.ma_vadd(scratch, input, output);
  masm.ma_vsqrt(output, output);

  masm.bind(&done);
}
1128
// Translate an LIR allocation into a MoveOperand for the move resolver.
MoveOperand CodeGeneratorARM::toMoveOperand(LAllocation a) const {
  if (a.isGeneralReg()) {
    return MoveOperand(ToRegister(a));
  }
  if (a.isFloatReg()) {
    return MoveOperand(ToFloatRegister(a));
  }
  // Stack slots: a stack area is moved by effective address, anything else
  // by its memory contents.
  MoveOperand::Kind kind =
      a.isStackArea() ? MoveOperand::EFFECTIVE_ADDRESS : MoveOperand::MEMORY;
  Address addr = ToAddress(a);
  // Stack offsets are expected to be word-aligned.
  MOZ_ASSERT((addr.offset & 3) == 0);
  return MoveOperand(addr, kind);
}
1142
// Out-of-line code that patches the absolute-address entries of a table
// switch's jump table once all case labels have been bound.
class js::jit::OutOfLineTableSwitch
    : public OutOfLineCodeBase<CodeGeneratorARM> {
  MTableSwitch* mir_;
  // One CodeLabel per jump-table entry, in the order the entries were
  // written into the instruction stream by emitTableSwitchDispatch.
  Vector<CodeLabel, 8, JitAllocPolicy> codeLabels_;

  void accept(CodeGeneratorARM* codegen) override {
    codegen->visitOutOfLineTableSwitch(this);
  }

 public:
  OutOfLineTableSwitch(TempAllocator& alloc, MTableSwitch* mir)
      : mir_(mir), codeLabels_(alloc) {}

  MTableSwitch* mir() const { return mir_; }

  bool addCodeLabel(CodeLabel label) { return codeLabels_.append(label); }
  CodeLabel codeLabel(unsigned i) { return codeLabels_[i]; }
};
1161
// Bind each jump-table CodeLabel to its case block's code offset. Note the
// reversed indexing (numCases - 1 - i): the dispatch sequence reverses the
// index with an RSB, so table entry i corresponds to the last-minus-i case.
void CodeGeneratorARM::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool) {
  MTableSwitch* mir = ool->mir();

  size_t numCases = mir->numCases();
  for (size_t i = 0; i < numCases; i++) {
    LBlock* caseblock =
        skipTrivialBlocks(mir->getCase(numCases - 1 - i))->lir();
    Label* caseheader = caseblock->label();
    uint32_t caseoffset = caseheader->offset();

    // The entries of the jump table need to be absolute addresses and thus
    // must be patched after codegen is finished.
    CodeLabel cl = ool->codeLabel(i);
    cl.target()->bind(caseoffset);
    masm.addCodeLabel(cl);
  }
}
1179
void CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch* mir,
                                               Register index, Register base) {
  // The code generated by this is utter hax.
  // The end result looks something like:
  // SUBS index, input, #base
  // RSBSPL index, index, #max
  // LDRPL pc, pc, index lsl 2
  // B default

  // If the range of targets is N through M, we first subtract off the lowest
  // case (N), which both shifts the arguments into the range 0 to (M - N)
  // and sets the Minus flag if the argument was out of range on the low
  // end.

  // Then we do a reverse subtract with the size of the jump table, which will
  // reverse the order of range (It is size through 0, rather than 0 through
  // size). The main purpose of this is that we set the same flag as the lower
  // bound check for the upper bound check. Lastly, we do this conditionally
  // on the previous check succeeding.

  // Then we conditionally load the pc offset by the (reversed) index (times
  // the address size) into the pc, which branches to the correct case. NOTE:
  // when we go to read the pc, the value that we get back is the pc of the
  // current instruction *PLUS 8*. This means that ldr foo, [pc, +0] reads
  // $pc+8. In other words, there is an empty word after the branch into the
  // switch table before the table actually starts. Since the only other
  // unhandled case is the default case (both out of range high and out of
  // range low) I then insert a branch to default case into the extra slot,
  // which ensures we don't attempt to execute the address table.
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  ScratchRegisterScope scratch(masm);

  int32_t cases = mir->numCases();
  // Subtract the lowest case value, setting the condition flags.
  masm.ma_sub(index, Imm32(mir->low()), index, scratch, SetCC);
  masm.ma_rsb(index, Imm32(cases - 1), index, scratch, SetCC,
              Assembler::NotSigned);
  // Inhibit pools within the following sequence because we are indexing into
  // a pc relative table. The region will have one instruction for ma_ldr, one
  // for ma_b, and each table case takes one word.
  AutoForbidPoolsAndNops afp(&masm, 1 + 1 + cases);
  masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset,
              Assembler::NotSigned);
  masm.ma_b(defaultcase);

  // To fill in the CodeLabels for the case entries, we need to first generate
  // the case entries (we don't yet know their offsets in the instruction
  // stream).
  OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(alloc(), mir);
  for (int32_t i = 0; i < cases; i++) {
    CodeLabel cl;
    masm.writeCodePointer(&cl);
    masm.propagateOOM(ool->addCodeLabel(cl));
  }
  addOutOfLineCode(ool, mir);
}
1237
visitMathD(LMathD * math)1238 void CodeGenerator::visitMathD(LMathD* math) {
1239 FloatRegister src1 = ToFloatRegister(math->getOperand(0));
1240 FloatRegister src2 = ToFloatRegister(math->getOperand(1));
1241 FloatRegister output = ToFloatRegister(math->getDef(0));
1242
1243 switch (math->jsop()) {
1244 case JSOp::Add:
1245 masm.ma_vadd(src1, src2, output);
1246 break;
1247 case JSOp::Sub:
1248 masm.ma_vsub(src1, src2, output);
1249 break;
1250 case JSOp::Mul:
1251 masm.ma_vmul(src1, src2, output);
1252 break;
1253 case JSOp::Div:
1254 masm.ma_vdiv(src1, src2, output);
1255 break;
1256 default:
1257 MOZ_CRASH("unexpected opcode");
1258 }
1259 }
1260
visitMathF(LMathF * math)1261 void CodeGenerator::visitMathF(LMathF* math) {
1262 FloatRegister src1 = ToFloatRegister(math->getOperand(0));
1263 FloatRegister src2 = ToFloatRegister(math->getOperand(1));
1264 FloatRegister output = ToFloatRegister(math->getDef(0));
1265
1266 switch (math->jsop()) {
1267 case JSOp::Add:
1268 masm.ma_vadd_f32(src1, src2, output);
1269 break;
1270 case JSOp::Sub:
1271 masm.ma_vsub_f32(src1, src2, output);
1272 break;
1273 case JSOp::Mul:
1274 masm.ma_vmul_f32(src1, src2, output);
1275 break;
1276 case JSOp::Div:
1277 masm.ma_vdiv_f32(src1, src2, output);
1278 break;
1279 default:
1280 MOZ_CRASH("unexpected opcode");
1281 }
1282 }
1283
visitTruncateDToInt32(LTruncateDToInt32 * ins)1284 void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
1285 emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
1286 ins->mir());
1287 }
1288
visitWasmBuiltinTruncateDToInt32(LWasmBuiltinTruncateDToInt32 * ins)1289 void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
1290 LWasmBuiltinTruncateDToInt32* ins) {
1291 emitTruncateDouble(ToFloatRegister(ins->getOperand(0)),
1292 ToRegister(ins->getDef(0)), ins->mir());
1293 }
1294
visitTruncateFToInt32(LTruncateFToInt32 * ins)1295 void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
1296 emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
1297 ins->mir());
1298 }
1299
visitWasmBuiltinTruncateFToInt32(LWasmBuiltinTruncateFToInt32 * ins)1300 void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
1301 LWasmBuiltinTruncateFToInt32* ins) {
1302 emitTruncateFloat32(ToFloatRegister(ins->getOperand(0)),
1303 ToRegister(ins->getDef(0)), ins->mir());
1304 }
1305
1306 static const uint32_t FrameSizes[] = {128, 256, 512, 1024};
1307
FromDepth(uint32_t frameDepth)1308 FrameSizeClass FrameSizeClass::FromDepth(uint32_t frameDepth) {
1309 for (uint32_t i = 0; i < std::size(FrameSizes); i++) {
1310 if (frameDepth < FrameSizes[i]) {
1311 return FrameSizeClass(i);
1312 }
1313 }
1314
1315 return FrameSizeClass::None();
1316 }
1317
ClassLimit()1318 FrameSizeClass FrameSizeClass::ClassLimit() {
1319 return FrameSizeClass(std::size(FrameSizes));
1320 }
1321
// Return the fixed frame size in bytes reserved for this size class.
uint32_t FrameSizeClass::frameSize() const {
  MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
  MOZ_ASSERT(class_ < std::size(FrameSizes));

  return FrameSizes[class_];
}
1328
ToValue(LInstruction * ins,size_t pos)1329 ValueOperand CodeGeneratorARM::ToValue(LInstruction* ins, size_t pos) {
1330 Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
1331 Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
1332 return ValueOperand(typeReg, payloadReg);
1333 }
1334
ToTempValue(LInstruction * ins,size_t pos)1335 ValueOperand CodeGeneratorARM::ToTempValue(LInstruction* ins, size_t pos) {
1336 Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
1337 Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
1338 return ValueOperand(typeReg, payloadReg);
1339 }
1340
visitValue(LValue * value)1341 void CodeGenerator::visitValue(LValue* value) {
1342 const ValueOperand out = ToOutValue(value);
1343
1344 masm.moveValue(value->value(), out);
1345 }
1346
// Box a non-double payload into a Value.
void CodeGenerator::visitBox(LBox* box) {
  const LDefinition* type = box->getDef(TYPE_INDEX);

  MOZ_ASSERT(!box->getOperand(0)->isConstant());

  // On arm, the input operand and the output payload have the same virtual
  // register. All that needs to be written is the type tag for the type
  // definition.
  masm.ma_mov(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
}
1357
visitBoxFloatingPoint(LBoxFloatingPoint * box)1358 void CodeGenerator::visitBoxFloatingPoint(LBoxFloatingPoint* box) {
1359 const AnyRegister in = ToAnyRegister(box->getOperand(0));
1360 const ValueOperand out = ToOutValue(box);
1361
1362 masm.moveValue(TypedOrValueRegister(box->type(), in), out);
1363 }
1364
// Unbox a Value into its payload register, with an optional tag check.
void CodeGenerator::visitUnbox(LUnbox* unbox) {
  // Note that for unbox, the type and payload indexes are switched on the
  // inputs.
  MUnbox* mir = unbox->mir();
  Register type = ToRegister(unbox->type());
  Register payload = ToRegister(unbox->payload());
  Register output = ToRegister(unbox->output());

  // Held in a Maybe so the scratch register can be released before
  // assumeUnreachable below (which may itself need the scratch register).
  mozilla::Maybe<ScratchRegisterScope> scratch;
  scratch.emplace(masm);

  JSValueTag tag = MIRTypeToTag(mir->type());
  if (mir->fallible()) {
    // Bail out if the Value's tag doesn't match the expected type.
    masm.ma_cmp(type, Imm32(tag), *scratch);
    bailoutIf(Assembler::NotEqual, unbox->snapshot());
  } else {
#ifdef DEBUG
    Label ok;
    masm.ma_cmp(type, Imm32(tag), *scratch);
    masm.ma_b(&ok, Assembler::Equal);
    scratch.reset();
    masm.assumeUnreachable("Infallible unbox type mismatch");
    masm.bind(&ok);
#endif
  }

  // Note: If spectreValueMasking is disabled, then this instruction will
  // default to a no-op as long as the lowering allocate the same register for
  // the output and the payload.
  masm.unboxNonDouble(ValueOperand(type, payload), output,
                      ValueTypeFromMIRType(mir->type()));
}
1397
visitDouble(LDouble * ins)1398 void CodeGenerator::visitDouble(LDouble* ins) {
1399 const LDefinition* out = ins->getDef(0);
1400 masm.loadConstantDouble(ins->getDouble(), ToFloatRegister(out));
1401 }
1402
visitFloat32(LFloat32 * ins)1403 void CodeGenerator::visitFloat32(LFloat32* ins) {
1404 const LDefinition* out = ins->getDef(0);
1405 masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out));
1406 }
1407
// On ARM (nunbox32) the type tag already lives in its own register, so no
// extraction work is needed; just check the tag scope aliases the type reg.
void CodeGeneratorARM::splitTagForTest(const ValueOperand& value,
                                       ScratchTagScope& tag) {
  MOZ_ASSERT(value.typeReg() == tag);
}
1412
// Branch on the truthiness of a double: false for 0.0, -0.0 and NaN.
void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
  const LAllocation* opd = test->input();
  // Compare against zero and copy the VFP status flags into the APSR.
  masm.ma_vcmpz(ToFloatRegister(opd));
  masm.as_vmrs(pc);

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();
  // If the compare set the 0 bit, then the result is definitely false.
  jumpToBlock(ifFalse, Assembler::Zero);
  // It is also false if one of the operands is NAN, which is shown as
  // Overflow.
  jumpToBlock(ifFalse, Assembler::Overflow);
  jumpToBlock(ifTrue);
}
1427
// Branch on the truthiness of a float32: false for 0.0, -0.0 and NaN.
void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
  const LAllocation* opd = test->input();
  // Compare against zero and copy the VFP status flags into the APSR.
  masm.ma_vcmpz_f32(ToFloatRegister(opd));
  masm.as_vmrs(pc);

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();
  // If the compare set the 0 bit, then the result is definitely false.
  jumpToBlock(ifFalse, Assembler::Zero);
  // It is also false if one of the operands is NAN, which is shown as
  // Overflow.
  jumpToBlock(ifFalse, Assembler::Overflow);
  jumpToBlock(ifTrue);
}
1442
visitCompareD(LCompareD * comp)1443 void CodeGenerator::visitCompareD(LCompareD* comp) {
1444 FloatRegister lhs = ToFloatRegister(comp->left());
1445 FloatRegister rhs = ToFloatRegister(comp->right());
1446
1447 Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
1448 masm.compareDouble(lhs, rhs);
1449 masm.emitSet(Assembler::ConditionFromDoubleCondition(cond),
1450 ToRegister(comp->output()));
1451 }
1452
visitCompareF(LCompareF * comp)1453 void CodeGenerator::visitCompareF(LCompareF* comp) {
1454 FloatRegister lhs = ToFloatRegister(comp->left());
1455 FloatRegister rhs = ToFloatRegister(comp->right());
1456
1457 Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
1458 masm.compareFloat(lhs, rhs);
1459 masm.emitSet(Assembler::ConditionFromDoubleCondition(cond),
1460 ToRegister(comp->output()));
1461 }
1462
visitCompareDAndBranch(LCompareDAndBranch * comp)1463 void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
1464 FloatRegister lhs = ToFloatRegister(comp->left());
1465 FloatRegister rhs = ToFloatRegister(comp->right());
1466
1467 Assembler::DoubleCondition cond =
1468 JSOpToDoubleCondition(comp->cmpMir()->jsop());
1469 masm.compareDouble(lhs, rhs);
1470 emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(),
1471 comp->ifFalse());
1472 }
1473
visitCompareFAndBranch(LCompareFAndBranch * comp)1474 void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
1475 FloatRegister lhs = ToFloatRegister(comp->left());
1476 FloatRegister rhs = ToFloatRegister(comp->right());
1477
1478 Assembler::DoubleCondition cond =
1479 JSOpToDoubleCondition(comp->cmpMir()->jsop());
1480 masm.compareFloat(lhs, rhs);
1481 emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(),
1482 comp->ifFalse());
1483 }
1484
// TST lhs against rhs (register or imm32) and branch on the fused condition.
void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
  ScratchRegisterScope scratch(masm);
  if (baab->right()->isConstant()) {
    // The scratch register is available in case the immediate doesn't
    // encode directly.
    masm.ma_tst(ToRegister(baab->left()), Imm32(ToInt32(baab->right())),
                scratch);
  } else {
    masm.ma_tst(ToRegister(baab->left()), ToRegister(baab->right()));
  }
  emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
}
1495
// See ../CodeGenerator.cpp for more information.
// Intentionally empty: the result already lives in its designated register.
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {}
1498
visitWasmUint32ToDouble(LWasmUint32ToDouble * lir)1499 void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
1500 masm.convertUInt32ToDouble(ToRegister(lir->input()),
1501 ToFloatRegister(lir->output()));
1502 }
1503
visitWasmUint32ToFloat32(LWasmUint32ToFloat32 * lir)1504 void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
1505 masm.convertUInt32ToFloat32(ToRegister(lir->input()),
1506 ToFloatRegister(lir->output()));
1507 }
1508
visitNotI(LNotI * ins)1509 void CodeGenerator::visitNotI(LNotI* ins) {
1510 // It is hard to optimize !x, so just do it the basic way for now.
1511 masm.as_cmp(ToRegister(ins->input()), Imm8(0));
1512 masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
1513 }
1514
visitNotI64(LNotI64 * lir)1515 void CodeGenerator::visitNotI64(LNotI64* lir) {
1516 Register64 input = ToRegister64(lir->getInt64Operand(0));
1517 Register output = ToRegister(lir->output());
1518
1519 masm.ma_orr(input.low, input.high, output);
1520 masm.as_cmp(output, Imm8(0));
1521 masm.emitSet(Assembler::Equal, output);
1522 }
1523
void CodeGenerator::visitNotD(LNotD* ins) {
  // Since this operation is not, we want to set a bit if the double is
  // falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an input of
  // 0 will set the Z bit (30) and NaN will set the V bit (28) of the APSR.
  FloatRegister opd = ToFloatRegister(ins->input());
  Register dest = ToRegister(ins->output());

  // Do the compare.
  masm.ma_vcmpz(opd);
  // TODO There are three variations here to compare performance-wise.
  bool nocond = true;
  if (nocond) {
    // Branchless variant: extract the V and Z bits from the status word and
    // OR them together into bit 0.
    // Load the value into the dest register.
    masm.as_vmrs(dest);
    masm.ma_lsr(Imm32(28), dest, dest);
    // 28 + 2 = 30: after the first shift V is at bit 0 and Z at bit 2, so
    // shifting right by 2 and ORing folds Z down onto bit 0.
    masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
    masm.as_and(dest, dest, Imm8(1));
  } else {
    // Conditional-move variant (currently unused; kept for the comparison
    // mentioned in the TODO above).
    masm.as_vmrs(pc);
    masm.ma_mov(Imm32(0), dest);
    masm.ma_mov(Imm32(1), dest, Assembler::Equal);
    masm.ma_mov(Imm32(1), dest, Assembler::Overflow);
  }
}
1549
void CodeGenerator::visitNotF(LNotF* ins) {
  // Since this operation is not, we want to set a bit if the double is
  // falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an input of
  // 0 will set the Z bit (30) and NaN will set the V bit (28) of the APSR.
  FloatRegister opd = ToFloatRegister(ins->input());
  Register dest = ToRegister(ins->output());

  // Do the compare.
  masm.ma_vcmpz_f32(opd);
  // TODO There are three variations here to compare performance-wise.
  bool nocond = true;
  if (nocond) {
    // Branchless variant: extract the V and Z bits from the status word and
    // OR them together into bit 0.
    // Load the value into the dest register.
    masm.as_vmrs(dest);
    masm.ma_lsr(Imm32(28), dest, dest);
    // 28 + 2 = 30: after the first shift V is at bit 0 and Z at bit 2, so
    // shifting right by 2 and ORing folds Z down onto bit 0.
    masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
    masm.as_and(dest, dest, Imm8(1));
  } else {
    // Conditional-move variant (currently unused; kept for the comparison
    // mentioned in the TODO above).
    masm.as_vmrs(pc);
    masm.ma_mov(Imm32(0), dest);
    masm.ma_mov(Imm32(1), dest, Assembler::Equal);
    masm.ma_mov(Imm32(1), dest, Assembler::Overflow);
  }
}
1575
// Emit the epilogue jumped to when this script is invalidated.
void CodeGeneratorARM::generateInvalidateEpilogue() {
  // Ensure that there is enough space in the buffer for the OsiPoint patching
  // to occur. Otherwise, we could overwrite the invalidation epilogue.
  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
    masm.nop();
  }

  masm.bind(&invalidate_);

  // Push the return address of the point that we bailed out at onto the stack.
  masm.Push(lr);

  // Push the Ion script onto the stack (when we determine what that pointer
  // is).
  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));

  // Jump to the invalidator which will replace the current frame.
  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
  masm.jump(thunk);
}
1596
// Seqcst compare-exchange on a typed-array element; the element address is
// either a constant index (Address) or a register index (BaseIndex).
void CodeGenerator::visitCompareExchangeTypedArrayElement(
    LCompareExchangeTypedArrayElement* lir) {
  Register elements = ToRegister(lir->elements());
  AnyRegister output = ToAnyRegister(lir->output());
  // The temp is only allocated when needed; InvalidReg otherwise.
  Register temp =
      lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());

  Register oldval = ToRegister(lir->oldval());
  Register newval = ToRegister(lir->newval());

  Scalar::Type arrayType = lir->mir()->arrayType();

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
                           newval, temp, output);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
                           newval, temp, output);
  }
}
1620
// Seqcst atomic exchange on a typed-array element.
void CodeGenerator::visitAtomicExchangeTypedArrayElement(
    LAtomicExchangeTypedArrayElement* lir) {
  Register elements = ToRegister(lir->elements());
  AnyRegister output = ToAnyRegister(lir->output());
  // The temp is only allocated when needed; InvalidReg otherwise.
  Register temp =
      lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());

  Register value = ToRegister(lir->value());

  Scalar::Type arrayType = lir->mir()->arrayType();

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp,
                          output);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp,
                          output);
  }
}
1643
// Seqcst fetch-and-op (add/sub/and/or/xor) on a typed-array element,
// producing the previous value.
void CodeGenerator::visitAtomicTypedArrayElementBinop(
    LAtomicTypedArrayElementBinop* lir) {
  // The result-discarding form is handled by the ForEffect variant below.
  MOZ_ASSERT(!lir->mir()->isForEffect());

  AnyRegister output = ToAnyRegister(lir->output());
  Register elements = ToRegister(lir->elements());
  Register flagTemp = ToRegister(lir->temp1());
  // Second temp only materialized when the array type needs it.
  Register outTemp =
      lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
  Register value = ToRegister(lir->value());

  Scalar::Type arrayType = lir->mir()->arrayType();

  if (lir->index()->isConstant()) {
    Address mem = ToAddress(elements, lir->index(), arrayType);
    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
                         lir->mir()->operation(), value, mem, flagTemp, outTemp,
                         output);
  } else {
    BaseIndex mem(elements, ToRegister(lir->index()),
                  ScaleFromScalarType(arrayType));
    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
                         lir->mir()->operation(), value, mem, flagTemp, outTemp,
                         output);
  }
}
1670
// Seqcst atomic read-modify-write on a typed-array element when the
// previous value is not needed.
void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
    LAtomicTypedArrayElementBinopForEffect* lir) {
  MOZ_ASSERT(lir->mir()->isForEffect());

  Register elements = ToRegister(lir->elements());
  Register flagTemp = ToRegister(lir->flagTemp());
  Register value = ToRegister(lir->value());
  Scalar::Type arrayType = lir->mir()->arrayType();

  if (lir->index()->isConstant()) {
    Address mem = ToAddress(elements, lir->index(), arrayType);
    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
                          lir->mir()->operation(), value, mem, flagTemp);
  } else {
    BaseIndex mem(elements, ToRegister(lir->index()),
                  ScaleFromScalarType(arrayType));
    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
                          lir->mir()->operation(), value, mem, flagTemp);
  }
}
1691
// Atomic 64-bit load from a typed array, boxed into a BigInt result.
void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToRegister(lir->temp());
  Register64 temp64 = ToRegister64(lir->temp64());
  Register out = ToRegister(lir->output());

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  if (lir->index()->isConstant()) {
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.atomicLoad64(Synchronization::Load(), source, temp64);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.atomicLoad64(Synchronization::Load(), source, temp64);
  }

  // Allocate a BigInt for the loaded 64-bit value.
  emitCreateBigInt(lir, storageType, temp64, out, temp);
}
1714
// Atomic 64-bit store to a typed array; the value arrives as a BigInt.
void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register64 temp2 = ToRegister64(lir->temp2());

  Scalar::Type writeType = lir->mir()->writeType();

  // Unbox the BigInt's 64-bit digits into temp1.
  masm.loadBigInt64(value, temp1);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), writeType);
    masm.atomicStore64(Synchronization::Store(), dest, temp1, temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(writeType));
    masm.atomicStore64(Synchronization::Store(), dest, temp1, temp2);
  }
}
1734
// 64-bit compare-exchange on a typed array. BigInt inputs are unboxed into
// temps; the previous value (temp3) is boxed into a fresh BigInt.
void CodeGenerator::visitCompareExchangeTypedArrayElement64(
    LCompareExchangeTypedArrayElement64* lir) {
  Register elements = ToRegister(lir->elements());
  Register oldval = ToRegister(lir->oldval());
  Register newval = ToRegister(lir->newval());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register64 temp2 = ToRegister64(lir->temp2());
  Register64 temp3 = ToRegister64(lir->temp3());
  Register out = ToRegister(lir->output());

  Scalar::Type arrayType = lir->mir()->arrayType();

  masm.loadBigInt64(oldval, temp1);
  masm.loadBigInt64(newval, temp2);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.compareExchange64(Synchronization::Full(), dest, temp1, temp2, temp3);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.compareExchange64(Synchronization::Full(), dest, temp1, temp2, temp3);
  }

  // temp1 is dead here, so one of its halves can serve as the BigInt temp.
  emitCreateBigInt(lir, arrayType, temp3, out, temp1.scratchReg());
}
1761
// 64-bit atomic exchange on a typed array; the previous value is boxed into
// a fresh BigInt.
void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
    LAtomicExchangeTypedArrayElement64* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register out = ToRegister(lir->output());
  // Pair temp2 with the (not yet live) output register to form a 64-bit temp.
  Register64 temp64 = Register64(temp2, out);

  Scalar::Type arrayType = lir->mir()->arrayType();

  masm.loadBigInt64(value, temp64);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicExchange64(Synchronization::Full(), dest, temp64, temp1);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicExchange64(Synchronization::Full(), dest, temp64, temp1);
  }

  emitCreateBigInt(lir, arrayType, temp1, out, temp2);
}
1786
// 64-bit atomic fetch-and-op on a typed array; the previous value (temp3)
// is boxed into a fresh BigInt.
void CodeGenerator::visitAtomicTypedArrayElementBinop64(
    LAtomicTypedArrayElementBinop64* lir) {
  // The result-discarding form is handled by the ForEffect64 variant below.
  MOZ_ASSERT(!lir->mir()->isForEffect());

  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register64 temp2 = ToRegister64(lir->temp2());
  Register64 temp3 = ToRegister64(lir->temp3());
  Register out = ToRegister(lir->output());

  Scalar::Type arrayType = lir->mir()->arrayType();
  AtomicOp atomicOp = lir->mir()->operation();

  masm.loadBigInt64(value, temp1);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest, temp2,
                         temp3);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest, temp2,
                         temp3);
  }

  // temp2 is dead here, so one of its halves can serve as the BigInt temp.
  emitCreateBigInt(lir, arrayType, temp3, out, temp2.scratchReg());
}
1816
// Emit a 64-bit (BigInt) atomic read-modify-write whose result is unused,
// so no BigInt is allocated for the old value.
void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
    LAtomicTypedArrayElementBinopForEffect64* lir) {
  MOZ_ASSERT(lir->mir()->isForEffect());

  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register64 temp2 = ToRegister64(lir->temp2());

  Scalar::Type arrayType = lir->mir()->arrayType();
  AtomicOp atomicOp = lir->mir()->operation();

  // Unbox the BigInt operand: temp1 = rhs of the binop.
  masm.loadBigInt64(value, temp1);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
                          temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
                          temp2);
  }
}
1842
// wasm select: out = cond ? trueExpr : falseExpr. The register allocator
// reuses trueExpr as the output, so only the cond==0 case needs a move,
// emitted as a conditionally-executed instruction after a single compare.
void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
  MIRType mirType = ins->mir()->type();

  Register cond = ToRegister(ins->condExpr());
  // Set flags: Zero iff cond == 0, i.e. iff falseExpr is selected.
  masm.as_cmp(cond, Imm8(0));

  if (mirType == MIRType::Int32 || mirType == MIRType::RefOrNull) {
    Register falseExpr = ToRegister(ins->falseExpr());
    Register out = ToRegister(ins->output());
    MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
               "true expr input is reused for output");
    masm.ma_mov(falseExpr, out, LeaveCC, Assembler::Zero);
    return;
  }

  FloatRegister out = ToFloatRegister(ins->output());
  MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
             "true expr input is reused for output");

  FloatRegister falseExpr = ToFloatRegister(ins->falseExpr());

  if (mirType == MIRType::Double) {
    masm.moveDouble(falseExpr, out, Assembler::Zero);
  } else if (mirType == MIRType::Float32) {
    masm.moveFloat32(falseExpr, out, Assembler::Zero);
  } else {
    MOZ_CRASH("unhandled type in visitWasmSelect!");
  }
}
1872
// Fused compare-and-select; all codegen lives in the shared helper.
void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
  emitWasmCompareAndSelect(ins);
}
1876
// Bitwise reinterpretation between a 32-bit integer and a float32 via a
// core<->VFP register transfer. The 64-bit cases (double/int64) are lowered
// to a different LIR opcode on this platform.
void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  MWasmReinterpret* ins = lir->mir();

  MIRType to = ins->type();
  DebugOnly<MIRType> from = ins->input()->type();

  switch (to) {
    case MIRType::Int32:
      MOZ_ASSERT(static_cast<MIRType>(from) == MIRType::Float32);
      masm.ma_vxfer(ToFloatRegister(lir->input()), ToRegister(lir->output()));
      break;
    case MIRType::Float32:
      MOZ_ASSERT(static_cast<MIRType>(from) == MIRType::Int32);
      masm.ma_vxfer(ToRegister(lir->input()), ToFloatRegister(lir->output()));
      break;
    case MIRType::Double:
    case MIRType::Int64:
      MOZ_CRASH("not handled by this LIR opcode");
    default:
      MOZ_CRASH("unexpected WasmReinterpret");
  }
}
1900
// Load from the asm.js heap. Per asm.js semantics an out-of-bounds load does
// not trap: float loads yield NaN and integer loads yield 0. The bounds check
// is folded into the load via ARM conditional execution: one compare sets the
// flags, the OOB default is written under AboveOrEqual, and the actual load
// executes under Below.
void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  const MAsmJSLoadHeap* mir = ins->mir();

  const LAllocation* ptr = ins->ptr();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  // Decode the access type into transfer width / signedness. Uint32 loads
  // are marked signed: asm.js reads them back as int32 bit patterns.
  bool isSigned;
  int size;
  bool isFloat = false;
  switch (mir->accessType()) {
    case Scalar::Int8:
      isSigned = true;
      size = 8;
      break;
    case Scalar::Uint8:
      isSigned = false;
      size = 8;
      break;
    case Scalar::Int16:
      isSigned = true;
      size = 16;
      break;
    case Scalar::Uint16:
      isSigned = false;
      size = 16;
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
      isSigned = true;
      size = 32;
      break;
    case Scalar::Float64:
      isFloat = true;
      size = 64;
      break;
    case Scalar::Float32:
      isFloat = true;
      size = 32;
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  if (ptr->isConstant()) {
    // Constant pointers were validated at compile time; no bounds check.
    MOZ_ASSERT(!mir->needsBoundsCheck());
    int32_t ptrImm = ptr->toConstant()->toInt32();
    MOZ_ASSERT(ptrImm >= 0);
    if (isFloat) {
      ScratchRegisterScope scratch(masm);
      VFPRegister vd(ToFloatRegister(ins->output()));
      if (size == 32) {
        masm.ma_vldr(Address(HeapReg, ptrImm), vd.singleOverlay(), scratch,
                     Assembler::Always);
      } else {
        masm.ma_vldr(Address(HeapReg, ptrImm), vd, scratch, Assembler::Always);
      }
    } else {
      ScratchRegisterScope scratch(masm);
      masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm),
                            ToRegister(ins->output()), scratch, Offset,
                            Assembler::Always);
    }
  } else {
    Register ptrReg = ToRegister(ptr);
    if (isFloat) {
      FloatRegister output = ToFloatRegister(ins->output());
      if (size == 32) {
        output = output.singleOverlay();
      }

      Assembler::Condition cond = Assembler::Always;
      if (mir->needsBoundsCheck()) {
        Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
        masm.as_cmp(ptrReg, O2Reg(boundsCheckLimitReg));
        // Out-of-bounds float loads produce NaN.
        if (size == 32) {
          masm.ma_vimm_f32(GenericNaN(), output, Assembler::AboveOrEqual);
        } else {
          masm.ma_vimm(GenericNaN(), output, Assembler::AboveOrEqual);
        }
        cond = Assembler::Below;
      }

      ScratchRegisterScope scratch(masm);
      masm.ma_vldr(output, HeapReg, ptrReg, scratch, 0, cond);
    } else {
      Register output = ToRegister(ins->output());

      Assembler::Condition cond = Assembler::Always;
      if (mir->needsBoundsCheck()) {
        Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
        masm.as_cmp(ptrReg, O2Reg(boundsCheckLimitReg));
        // Out-of-bounds integer loads produce 0.
        masm.ma_mov(Imm32(0), output, Assembler::AboveOrEqual);
        cond = Assembler::Below;
      }

      ScratchRegisterScope scratch(masm);
      masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output,
                            scratch, Offset, cond);
    }
  }
}
2002
visitWasmHeapBase(LWasmHeapBase * ins)2003 void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) {
2004 MOZ_ASSERT(ins->tlsPtr()->isBogus());
2005 masm.movePtr(HeapReg, ToRegister(ins->output()));
2006 }
2007
// Shared body for LWasmLoad / LWasmLoadI64.
template <typename T>
void CodeGeneratorARM::emitWasmLoad(T* lir) {
  const MWasmLoad* mir = lir->mir();
  MIRType resultType = mir->type();
  Register ptr;

  // When there is a constant offset to fold in, or the access is a 64-bit
  // load (which may clobber the pointer), lowering provided a writable copy
  // of the pointer; otherwise use the original operand directly.
  if (mir->access().offset() || mir->access().type() == Scalar::Int64) {
    ptr = ToRegister(lir->ptrCopy());
  } else {
    MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
    ptr = ToRegister(lir->ptr());
  }

  if (resultType == MIRType::Int64) {
    masm.wasmLoadI64(mir->access(), HeapReg, ptr, ptr, ToOutRegister64(lir));
  } else {
    masm.wasmLoad(mir->access(), HeapReg, ptr, ptr,
                  ToAnyRegister(lir->output()));
  }
}
2028
// Non-64-bit wasm load; shares emitWasmLoad with the I64 variant.
void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
2030
// 64-bit wasm load; shares emitWasmLoad with the 32-bit variant.
void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) { emitWasmLoad(lir); }
2032
// Add a constant offset to a wasm pointer, trapping out-of-bounds if the
// 32-bit unsigned addition overflows.
void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
  MWasmAddOffset* mir = lir->mir();
  Register base = ToRegister(lir->base());
  Register out = ToRegister(lir->output());

  ScratchRegisterScope scratch(masm);
  // SetCC: the carry flag records unsigned overflow of base + offset.
  masm.ma_add(base, Imm32(mir->offset()), out, scratch, SetCC);

  Label ok;
  masm.ma_b(&ok, Assembler::CarryClear);
  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
  masm.bind(&ok);
}
2046
// Shared body for LWasmStore / LWasmStoreI64.
template <typename T>
void CodeGeneratorARM::emitWasmStore(T* lir) {
  const MWasmStore* mir = lir->mir();
  Scalar::Type accessType = mir->access().type();
  Register ptr;

  // Maybe add the offset. A constant offset, or a 64-bit store (which may
  // clobber the pointer), requires the writable pointer copy provided by
  // lowering; otherwise the original operand is used directly.
  if (mir->access().offset() || accessType == Scalar::Int64) {
    ptr = ToRegister(lir->ptrCopy());
  } else {
    MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
    ptr = ToRegister(lir->ptr());
  }

  if (accessType == Scalar::Int64) {
    masm.wasmStoreI64(mir->access(),
                      ToRegister64(lir->getInt64Operand(lir->ValueIndex)),
                      HeapReg, ptr, ptr);
  } else {
    masm.wasmStore(mir->access(),
                   ToAnyRegister(lir->getOperand(lir->ValueIndex)), HeapReg,
                   ptr, ptr);
  }
}
2071
// Non-64-bit wasm store; shares emitWasmStore with the I64 variant.
void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
2073
// 64-bit wasm store; shares emitWasmStore with the 32-bit variant.
void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
  emitWasmStore(lir);
}
2077
visitAsmJSStoreHeap(LAsmJSStoreHeap * ins)2078 void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
2079 const MAsmJSStoreHeap* mir = ins->mir();
2080
2081 const LAllocation* ptr = ins->ptr();
2082 const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
2083
2084 bool isSigned;
2085 int size;
2086 bool isFloat = false;
2087 switch (mir->accessType()) {
2088 case Scalar::Int8:
2089 case Scalar::Uint8:
2090 isSigned = false;
2091 size = 8;
2092 break;
2093 case Scalar::Int16:
2094 case Scalar::Uint16:
2095 isSigned = false;
2096 size = 16;
2097 break;
2098 case Scalar::Int32:
2099 case Scalar::Uint32:
2100 isSigned = true;
2101 size = 32;
2102 break;
2103 case Scalar::Float64:
2104 isFloat = true;
2105 size = 64;
2106 break;
2107 case Scalar::Float32:
2108 isFloat = true;
2109 size = 32;
2110 break;
2111 default:
2112 MOZ_CRASH("unexpected array type");
2113 }
2114
2115 if (ptr->isConstant()) {
2116 MOZ_ASSERT(!mir->needsBoundsCheck());
2117 int32_t ptrImm = ptr->toConstant()->toInt32();
2118 MOZ_ASSERT(ptrImm >= 0);
2119 if (isFloat) {
2120 VFPRegister vd(ToFloatRegister(ins->value()));
2121 Address addr(HeapReg, ptrImm);
2122 if (size == 32) {
2123 masm.storeFloat32(vd, addr);
2124 } else {
2125 masm.storeDouble(vd, addr);
2126 }
2127 } else {
2128 ScratchRegisterScope scratch(masm);
2129 masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm),
2130 ToRegister(ins->value()), scratch, Offset,
2131 Assembler::Always);
2132 }
2133 } else {
2134 Register ptrReg = ToRegister(ptr);
2135
2136 Assembler::Condition cond = Assembler::Always;
2137 if (mir->needsBoundsCheck()) {
2138 Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
2139 masm.as_cmp(ptrReg, O2Reg(boundsCheckLimitReg));
2140 cond = Assembler::Below;
2141 }
2142
2143 if (isFloat) {
2144 ScratchRegisterScope scratch(masm);
2145 FloatRegister value = ToFloatRegister(ins->value());
2146 if (size == 32) {
2147 value = value.singleOverlay();
2148 }
2149
2150 masm.ma_vstr(value, HeapReg, ptrReg, scratch, 0, Assembler::Below);
2151 } else {
2152 ScratchRegisterScope scratch(masm);
2153 Register value = ToRegister(ins->value());
2154 masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value,
2155 scratch, Offset, cond);
2156 }
2157 }
2158 }
2159
// wasm cmpxchg on the heap; `out` receives the value observed in memory.
void CodeGenerator::visitWasmCompareExchangeHeap(
    LWasmCompareExchangeHeap* ins) {
  MWasmCompareExchangeHeap* mir = ins->mir();

  const LAllocation* ptr = ins->ptr();
  Register ptrReg = ToRegister(ptr);
  // Constant access offset is folded into the effective address.
  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());

  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  Register oldval = ToRegister(ins->oldValue());
  Register newval = ToRegister(ins->newValue());
  Register out = ToRegister(ins->output());

  masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, out);
}
2176
// wasm atomic exchange on the heap; `output` receives the previous value.
void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
  MWasmAtomicExchangeHeap* mir = ins->mir();

  Register ptrReg = ToRegister(ins->ptr());
  Register value = ToRegister(ins->value());
  Register output = ToRegister(ins->output());
  // Constant access offset is folded into the effective address.
  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  masm.wasmAtomicExchange(mir->access(), srcAddr, value, output);
}
2188
// wasm atomic read-modify-write whose old value is used; `output` receives
// the value fetched from memory before the operation.
void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
  MWasmAtomicBinopHeap* mir = ins->mir();
  MOZ_ASSERT(mir->hasUses());

  Register ptrReg = ToRegister(ins->ptr());
  Register flagTemp = ToRegister(ins->flagTemp());
  Register output = ToRegister(ins->output());
  const LAllocation* value = ins->value();
  AtomicOp op = mir->operation();
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  // Constant access offset is folded into the effective address.
  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
  masm.wasmAtomicFetchOp(mir->access(), op, ToRegister(value), srcAddr,
                         flagTemp, output);
}
2204
// wasm atomic read-modify-write whose old value is discarded, so no output
// register is produced.
void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
    LWasmAtomicBinopHeapForEffect* ins) {
  MWasmAtomicBinopHeap* mir = ins->mir();
  MOZ_ASSERT(!mir->hasUses());

  Register ptrReg = ToRegister(ins->ptr());
  Register flagTemp = ToRegister(ins->flagTemp());
  const LAllocation* value = ins->value();
  AtomicOp op = mir->operation();
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  // Constant access offset is folded into the effective address.
  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
  masm.wasmAtomicEffectOp(mir->access(), op, ToRegister(value), srcAddr,
                          flagTemp);
}
2220
// Spill a wasm call argument to its outgoing stack slot (sp + spOffset).
void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
  const MWasmStackArg* mir = ins->mir();
  Address dst(StackPointer, mir->spOffset());
  ScratchRegisterScope scratch(masm);
  SecondScratchRegisterScope scratch2(masm);

  if (ins->arg()->isConstant()) {
    // Materialize the constant in a scratch register before storing; the
    // store itself may need the second scratch for its addressing.
    masm.ma_mov(Imm32(ToInt32(ins->arg())), scratch);
    masm.ma_str(scratch, dst, scratch2);
  } else {
    if (ins->arg()->isGeneralReg()) {
      masm.ma_str(ToRegister(ins->arg()), dst, scratch);
    } else {
      masm.ma_vstr(ToFloatRegister(ins->arg()), dst, scratch);
    }
  }
}
2238
// Unsigned 32-bit division using the hardware UDIV instruction. Bails out
// when the result cannot be represented as an int32 (i.e. looks negative
// when reinterpreted signed) or when a non-zero remainder would be lost.
void CodeGenerator::visitUDiv(LUDiv* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());

  Label done;
  // Emits the divide-by-zero trap/bailout/zero-result handling; may jump
  // to `done` with output already set to 0.
  generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), ins->mir());

  masm.ma_udiv(lhs, rhs, output);

  // Check for large unsigned result - represent as double.
  if (!ins->mir()->isTruncated()) {
    MOZ_ASSERT(ins->mir()->fallible());
    masm.as_cmp(output, Imm8(0));
    bailoutIf(Assembler::LessThan, ins->snapshot());
  }

  // Check for non-zero remainder if not truncating to int.
  if (!ins->mir()->canTruncateRemainder()) {
    MOZ_ASSERT(ins->mir()->fallible());
    {
      ScratchRegisterScope scratch(masm);
      // Recompute quotient * rhs and compare against lhs: equal iff the
      // division was exact.
      masm.ma_mul(rhs, output, scratch);
      masm.ma_cmp(scratch, lhs);
    }
    bailoutIf(Assembler::NotEqual, ins->snapshot());
  }

  if (done.used()) {
    masm.bind(&done);
  }
}
2271
// Unsigned 32-bit modulus (udiv + mls sequence inside ma_umod). Bails out
// when the result cannot be represented as an int32.
void CodeGenerator::visitUMod(LUMod* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());

  Label done;
  // Emits the divide-by-zero trap/bailout/zero-result handling; may jump
  // to `done` with output already set to 0.
  generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), ins->mir());

  {
    ScratchRegisterScope scratch(masm);
    masm.ma_umod(lhs, rhs, output, scratch);
  }

  // Check for large unsigned result - represent as double.
  if (!ins->mir()->isTruncated()) {
    MOZ_ASSERT(ins->mir()->fallible());
    masm.as_cmp(output, Imm8(0));
    bailoutIf(Assembler::LessThan, ins->snapshot());
  }

  if (done.used()) {
    masm.bind(&done);
  }
}
2296
// Emit the divide-by-zero handling shared by unsigned div/mod codegen.
// Depending on the MIR node: wasm traps, JS truncating division produces 0
// (and jumps to `done`), and non-truncating division bails out. `mir` may
// be null (caller had no div/mod node of this kind), in which case nothing
// is emitted.
template <class T>
void CodeGeneratorARM::generateUDivModZeroCheck(Register rhs, Register output,
                                                Label* done,
                                                LSnapshot* snapshot, T* mir) {
  if (!mir) {
    return;
  }
  if (mir->canBeDivideByZero()) {
    masm.as_cmp(rhs, Imm8(0));
    if (mir->isTruncated()) {
      if (mir->trapOnError()) {
        // wasm semantics: divide by zero traps.
        Label nonZero;
        masm.ma_b(&nonZero, Assembler::NotEqual);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
        masm.bind(&nonZero);
      } else {
        Label skip;
        masm.ma_b(&skip, Assembler::NotEqual);
        // Infinity|0 == 0
        masm.ma_mov(Imm32(0), output);
        masm.ma_b(done);
        masm.bind(&skip);
      }
    } else {
      // Bailout for divide by zero
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Equal, snapshot);
    }
  }
}
2327
// Unsigned div/mod via the __aeabi_uidivmod runtime call, used when the
// CPU lacks the UDIV instruction. The EABI routine takes (lhs, rhs) in
// r0/r1 and returns quotient in r0 and remainder in r1 (hence the fixed
// register assertions below).
void CodeGenerator::visitSoftUDivOrMod(LSoftUDivOrMod* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());

  MOZ_ASSERT(lhs == r0);
  MOZ_ASSERT(rhs == r1);
  MOZ_ASSERT(output == r0);

  Label done;
  // Exactly one of div/mod is non-null, matching the MIR node kind.
  MDiv* div = ins->mir()->isDiv() ? ins->mir()->toDiv() : nullptr;
  MMod* mod = !div ? ins->mir()->toMod() : nullptr;

  generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), div);
  generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), mod);

  if (gen->compilingWasm()) {
    // Preserve the TLS register across the builtin call.
    masm.Push(WasmTlsReg);
    int32_t framePushedAfterTls = masm.framePushed();
    masm.setupWasmABICall();
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    wasm::BytecodeOffset bytecodeOffset =
        (div ? div->bytecodeOffset() : mod->bytecodeOffset());
    int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
    masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::aeabi_uidivmod,
                     mozilla::Some(tlsOffset));
    masm.Pop(WasmTlsReg);
  } else {
    using Fn = int64_t (*)(int, int);
    masm.setupAlignedABICall();
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    masm.callWithABI<Fn, __aeabi_uidivmod>(
        MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
  }

  // For mod, the interesting result is the remainder, delivered in r1;
  // move it into the (r0) output register.
  if (mod) {
    MOZ_ASSERT(output == r0, "output should not be r1 for mod");
    masm.move32(r1, output);
  }

  // uidivmod returns the quotient in r0, and the remainder in r1.
  if (div && !div->canTruncateRemainder()) {
    MOZ_ASSERT(div->fallible());
    masm.as_cmp(r1, Imm8(0));
    bailoutIf(Assembler::NonZero, ins->snapshot());
  }

  // Bailout for big unsigned results
  if ((div && !div->isTruncated()) || (mod && !mod->isTruncated())) {
    DebugOnly<bool> isFallible =
        (div && div->fallible()) || (mod && mod->fallible());
    MOZ_ASSERT(isFallible);
    masm.as_cmp(output, Imm8(0));
    bailoutIf(Assembler::LessThan, ins->snapshot());
  }

  masm.bind(&done);
}
2388
// Compute base + (index << scale) + displacement without touching memory.
void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
  const MEffectiveAddress* mir = ins->mir();
  Register base = ToRegister(ins->base());
  Register index = ToRegister(ins->index());
  Register output = ToRegister(ins->output());

  ScratchRegisterScope scratch(masm);

  // The shifted index folds into the ADD's shifter operand; only the
  // displacement may need the scratch register to be materialized.
  masm.as_add(output, base, lsl(index, mir->scale()));
  masm.ma_add(Imm32(mir->displacement()), output, scratch);
}
2400
visitNegI(LNegI * ins)2401 void CodeGenerator::visitNegI(LNegI* ins) {
2402 Register input = ToRegister(ins->input());
2403 masm.ma_neg(input, ToRegister(ins->output()));
2404 }
2405
visitNegI64(LNegI64 * ins)2406 void CodeGenerator::visitNegI64(LNegI64* ins) {
2407 Register64 input = ToRegister64(ins->getInt64Operand(0));
2408 MOZ_ASSERT(input == ToOutRegister64(ins));
2409 masm.neg64(input);
2410 }
2411
visitNegD(LNegD * ins)2412 void CodeGenerator::visitNegD(LNegD* ins) {
2413 FloatRegister input = ToFloatRegister(ins->input());
2414 masm.ma_vneg(input, ToFloatRegister(ins->output()));
2415 }
2416
visitNegF(LNegF * ins)2417 void CodeGenerator::visitNegF(LNegF* ins) {
2418 FloatRegister input = ToFloatRegister(ins->input());
2419 masm.ma_vneg_f32(input, ToFloatRegister(ins->output()));
2420 }
2421
visitMemoryBarrier(LMemoryBarrier * ins)2422 void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
2423 masm.memoryBarrier(ins->type());
2424 }
2425
// wasm float/double -> int32 truncation. Saturating truncation is handled
// inline by the masm helper; non-saturating truncation gets an out-of-line
// check that distinguishes overflow from NaN and raises the proper trap.
void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
  auto input = ToFloatRegister(lir->input());
  auto output = ToRegister(lir->output());

  MWasmTruncateToInt32* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  OutOfLineWasmTruncateCheck* ool = nullptr;
  Label* oolEntry = nullptr;
  if (!lir->mir()->isSaturating()) {
    ool = new (alloc())
        OutOfLineWasmTruncateCheck(mir, input, Register::Invalid());
    addOutOfLineCode(ool, mir);
    oolEntry = ool->entry();
  }

  masm.wasmTruncateToInt32(input, output, fromType, mir->isUnsigned(),
                           mir->isSaturating(), oolEntry);

  if (!lir->mir()->isSaturating()) {
    masm.bind(ool->rejoin());
  }
}
2449
// wasm float/double -> int64 truncation via a builtin call (no 64-bit
// integer truncation instruction on ARM32). Float32 inputs are widened to
// double first; the non-saturating builtins signal failure with the
// sentinel 0x8000000000000000, which is checked after the call.
void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  MOZ_ASSERT(ToRegister(lir->tls()) == WasmTlsReg);
  // Preserve the TLS register across the builtin call.
  masm.Push(WasmTlsReg);
  int32_t framePushedAfterTls = masm.framePushed();

  FloatRegister input = ToFloatRegister(lir->input());
  FloatRegister inputDouble = input;
  Register64 output = ToOutRegister64(lir);

  MWasmBuiltinTruncateToInt64* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  OutOfLineWasmTruncateCheck* ool = nullptr;
  if (!lir->mir()->isSaturating()) {
    ool = new (alloc())
        OutOfLineWasmTruncateCheck(mir, input, Register64::Invalid());
    addOutOfLineCode(ool, mir);
  }

  // The builtins all take a double; convert float32 inputs first.
  ScratchDoubleScope fpscratch(masm);
  if (fromType == MIRType::Float32) {
    inputDouble = fpscratch;
    masm.convertFloat32ToDouble(input, inputDouble);
  }

  // Preserve the (original) input across the call so the out-of-line check
  // can inspect it.
  masm.Push(input);

  masm.setupWasmABICall();
  masm.passABIArg(inputDouble, MoveOp::DOUBLE);

  int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
  if (lir->mir()->isSaturating()) {
    if (lir->mir()->isUnsigned()) {
      masm.callWithABI(mir->bytecodeOffset(),
                       wasm::SymbolicAddress::SaturatingTruncateDoubleToUint64,
                       mozilla::Some(tlsOffset));
    } else {
      masm.callWithABI(mir->bytecodeOffset(),
                       wasm::SymbolicAddress::SaturatingTruncateDoubleToInt64,
                       mozilla::Some(tlsOffset));
    }
  } else {
    if (lir->mir()->isUnsigned()) {
      masm.callWithABI(mir->bytecodeOffset(),
                       wasm::SymbolicAddress::TruncateDoubleToUint64,
                       mozilla::Some(tlsOffset));
    } else {
      masm.callWithABI(mir->bytecodeOffset(),
                       wasm::SymbolicAddress::TruncateDoubleToInt64,
                       mozilla::Some(tlsOffset));
    }
  }

  masm.Pop(input);
  masm.Pop(WasmTlsReg);

  // TruncateDoubleTo{UI,I}nt64 returns 0x8000000000000000 to indicate
  // exceptional results, so check for that and produce the appropriate
  // traps. The Saturating form always returns a normal value and never
  // needs traps.
  if (!lir->mir()->isSaturating()) {
    ScratchRegisterScope scratch(masm);
    // Compare both halves; the second compare is predicated on the first
    // being Equal, so Equal at the branch means the full 64-bit sentinel.
    masm.ma_cmp(output.high, Imm32(0x80000000), scratch);
    masm.as_cmp(output.low, Imm8(0x00000000), Assembler::Equal);
    masm.ma_b(ool->entry(), Assembler::Equal);

    masm.bind(ool->rejoin());
  }

  MOZ_ASSERT(ReturnReg64 == output);
}
2522
// Out-of-line path for a failed non-saturating wasm truncation: classify
// the input (overflow vs. NaN) and emit the corresponding trap.
void CodeGeneratorARM::visitOutOfLineWasmTruncateCheck(
    OutOfLineWasmTruncateCheck* ool) {
  // On ARM, saturating truncation codegen handles saturating itself rather
  // than relying on out-of-line fixup code.
  if (ool->isSaturating()) {
    return;
  }

  masm.outOfLineWasmTruncateToIntCheck(ool->input(), ool->fromType(),
                                       ool->toType(), ool->isUnsigned(),
                                       ool->rejoin(), ool->bytecodeOffset());
}
2535
// int64/uint64 -> float32/double conversion via a builtin call (no direct
// instruction on ARM32). The 64-bit operand is passed as a high/low register
// pair per the wasm ABI; the result arrives in the FP return register.
void CodeGenerator::visitInt64ToFloatingPointCall(
    LInt64ToFloatingPointCall* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  MOZ_ASSERT(ToRegister(lir->getOperand(LInt64ToFloatingPointCall::Tls)) ==
             WasmTlsReg);
  // Preserve the TLS register across the builtin call.
  masm.Push(WasmTlsReg);
  int32_t framePushedAfterTls = masm.framePushed();

  Register64 input = ToRegister64(lir->getInt64Operand(0));

  MBuiltinInt64ToFloatingPoint* mir = lir->mir();
  MIRType toType = mir->type();

  masm.setupWasmABICall();
  masm.passABIArg(input.high);
  masm.passABIArg(input.low);

  // Select the builtin by destination type and signedness.
  bool isUnsigned = mir->isUnsigned();
  wasm::SymbolicAddress callee =
      toType == MIRType::Float32
          ? (isUnsigned ? wasm::SymbolicAddress::Uint64ToFloat32
                        : wasm::SymbolicAddress::Int64ToFloat32)
          : (isUnsigned ? wasm::SymbolicAddress::Uint64ToDouble
                        : wasm::SymbolicAddress::Int64ToDouble);

  int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
  MoveOp::Type result =
      toType == MIRType::Float32 ? MoveOp::FLOAT32 : MoveOp::DOUBLE;
  masm.callWithABI(mir->bytecodeOffset(), callee, mozilla::Some(tlsOffset),
                   result);

  DebugOnly<FloatRegister> output(ToFloatRegister(lir->output()));
  MOZ_ASSERT_IF(toType == MIRType::Double, output.value == ReturnDoubleReg);
  MOZ_ASSERT_IF(toType == MIRType::Float32, output.value == ReturnFloat32Reg);

  masm.Pop(WasmTlsReg);
}
2573
// float32 copysign(lhs, rhs): combine lhs's magnitude with rhs's sign by
// bit manipulation in core registers.
void CodeGenerator::visitCopySignF(LCopySignF* ins) {
  FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
  FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
  FloatRegister output = ToFloatRegister(ins->getDef(0));

  Register lhsi = ToRegister(ins->getTemp(0));
  Register rhsi = ToRegister(ins->getTemp(1));

  // Move both float bit patterns into core registers.
  masm.ma_vxfer(lhs, lhsi);
  masm.ma_vxfer(rhs, rhsi);

  ScratchRegisterScope scratch(masm);

  // Clear lhs's sign.
  masm.ma_and(Imm32(INT32_MAX), lhsi, lhsi, scratch);

  // Keep rhs's sign.
  masm.ma_and(Imm32(INT32_MIN), rhsi, rhsi, scratch);

  // Combine.
  masm.ma_orr(lhsi, rhsi, rhsi);

  // Transfer the combined bit pattern back to the FP output register.
  masm.ma_vxfer(rhsi, output);
}
2598
// double copysign(lhs, rhs): only the high 32-bit word carries the sign
// bit, so the sign splice is done on the high words; the low word of lhs
// passes through unchanged.
void CodeGenerator::visitCopySignD(LCopySignD* ins) {
  FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
  FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
  FloatRegister output = ToFloatRegister(ins->getDef(0));

  Register lhsi = ToRegister(ins->getTemp(0));
  Register rhsi = ToRegister(ins->getTemp(1));

  // Manipulate high words of double inputs.
  masm.as_vxfer(lhsi, InvalidReg, lhs, Assembler::FloatToCore,
                Assembler::Always, 1);
  masm.as_vxfer(rhsi, InvalidReg, rhs, Assembler::FloatToCore,
                Assembler::Always, 1);

  ScratchRegisterScope scratch(masm);

  // Clear lhs's sign.
  masm.ma_and(Imm32(INT32_MAX), lhsi, lhsi, scratch);

  // Keep rhs's sign.
  masm.ma_and(Imm32(INT32_MIN), rhsi, rhsi, scratch);

  // Combine.
  masm.ma_orr(lhsi, rhsi, rhsi);

  // Reconstruct the output: fetch lhs's low word, then move the
  // (low, combined-high) pair into the double output register.
  masm.as_vxfer(lhsi, InvalidReg, lhs, Assembler::FloatToCore,
                Assembler::Always, 0);
  masm.ma_vxfer(lhsi, rhsi, output);
}
2629
visitWrapInt64ToInt32(LWrapInt64ToInt32 * lir)2630 void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
2631 const LInt64Allocation& input = lir->getInt64Operand(0);
2632 Register output = ToRegister(lir->output());
2633
2634 if (lir->mir()->bottomHalf()) {
2635 masm.move32(ToRegister(input.low()), output);
2636 } else {
2637 masm.move32(ToRegister(input.high()), output);
2638 }
2639 }
2640
// Widen an int32 to int64. The input is allocated in the output's low
// register, so only the high word needs to be produced: zero for unsigned
// extension, a 31-bit arithmetic shift of the low word for signed.
void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
  Register64 output = ToOutRegister64(lir);
  MOZ_ASSERT(ToRegister(lir->input()) == output.low);

  if (lir->mir()->isUnsigned()) {
    masm.ma_mov(Imm32(0), output.high);
  } else {
    masm.ma_asr(Imm32(31), output.low, output.high);
  }
}
2651
// Sign-extend the low 8/16/32 bits of an int64 to the full 64 bits: extend
// within the low word first, then replicate its sign bit into the high word.
void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register64 output = ToOutRegister64(lir);
  switch (lir->mode()) {
    case MSignExtendInt64::Byte:
      masm.move8SignExtend(input.low, output.low);
      break;
    case MSignExtendInt64::Half:
      masm.move16SignExtend(input.low, output.low);
      break;
    case MSignExtendInt64::Word:
      masm.move32(input.low, output.low);
      break;
  }
  // High word = sign bit of the (extended) low word.
  masm.ma_asr(Imm32(31), output.low, output.high);
}
2668
// 64-bit memory indices do not exist on 32-bit ARM; this LIR node is never
// generated here.
void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index*) {
  MOZ_CRASH("64-bit only");
}
2672
// 64-bit memory indices do not exist on 32-bit ARM; this LIR node is never
// generated here.
void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index*) {
  MOZ_CRASH("64-bit only");
}
2676
// Signed 64-bit wasm div/mod via a builtin call (no 64-bit divide on
// ARM32). Divide-by-zero and INT64_MIN / -1 overflow are handled inline
// before the call: both trap, except INT64_MIN % -1 which is defined as 0.
void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  MOZ_ASSERT(ToRegister(lir->getOperand(LDivOrModI64::Tls)) == WasmTlsReg);
  masm.Push(WasmTlsReg);
  int32_t framePushedAfterTls = masm.framePushed();

  Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
  Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
  Register64 output = ToOutRegister64(lir);

  MOZ_ASSERT(output == ReturnReg64);

  Label done;

  // Handle divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    // We can use WasmTlsReg as temp register because we preserved it before.
    masm.branchTest64(Assembler::NonZero, rhs, rhs, WasmTlsReg, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

  auto* mir = lir->mir();

  // Handle an integer overflow exception from INT64_MIN / -1.
  if (lir->canBeNegativeOverflow()) {
    Label notmin;
    masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notmin);
    masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notmin);
    if (mir->isWasmBuiltinModI64()) {
      // INT64_MIN % -1 == 0 by definition; skip the call entirely.
      masm.xor64(output, output);
    } else {
      masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
    }
    masm.jump(&done);
    masm.bind(&notmin);
  }

  // Pass the two int64 operands as high/low register pairs.
  masm.setupWasmABICall();
  masm.passABIArg(lhs.high);
  masm.passABIArg(lhs.low);
  masm.passABIArg(rhs.high);
  masm.passABIArg(rhs.low);

  int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
  if (mir->isWasmBuiltinModI64()) {
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::ModI64,
                     mozilla::Some(tlsOffset));
  } else {
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::DivI64,
                     mozilla::Some(tlsOffset));
  }

  MOZ_ASSERT(ReturnReg64 == output);

  masm.bind(&done);
  masm.Pop(WasmTlsReg);
}
2736
// Emit a call to the unsigned 64-bit division/modulo builtin.  Unlike the
// signed case, unsigned division cannot overflow, so only divide-by-zero
// needs an inline guard.
void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  // LUDivOrModI64 shares its operand layout with LDivOrModI64.
  MOZ_ASSERT(ToRegister(lir->getOperand(LDivOrModI64::Tls)) == WasmTlsReg);
  // Preserve Tls across the ABI call; the callee reloads it from this slot.
  masm.Push(WasmTlsReg);
  int32_t framePushedAfterTls = masm.framePushed();

  Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
  Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));

  MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);

  // Prevent divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    // We can use WasmTlsReg as temp register because we preserved it before.
    masm.branchTest64(Assembler::NonZero, rhs, rhs, WasmTlsReg, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

  // Pass the int64 operands as four 32-bit ABI arguments, high word first.
  masm.setupWasmABICall();
  masm.passABIArg(lhs.high);
  masm.passABIArg(lhs.low);
  masm.passABIArg(rhs.high);
  masm.passABIArg(rhs.low);

  MDefinition* mir = lir->mir();
  // Offset from the current frame to the saved Tls slot pushed above.
  int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
  if (mir->isWasmBuiltinModI64()) {
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UModI64,
                     mozilla::Some(tlsOffset));
  } else {
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UDivI64,
                     mozilla::Some(tlsOffset));
  }
  masm.Pop(WasmTlsReg);
}
2774
visitCompareI64(LCompareI64 * lir)2775 void CodeGenerator::visitCompareI64(LCompareI64* lir) {
2776 MCompare* mir = lir->mir();
2777 MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
2778 mir->compareType() == MCompare::Compare_UInt64);
2779
2780 const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
2781 const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
2782 Register64 lhsRegs = ToRegister64(lhs);
2783 Register output = ToRegister(lir->output());
2784
2785 bool isSigned = mir->compareType() == MCompare::Compare_Int64;
2786 Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
2787 Label done;
2788
2789 masm.move32(Imm32(1), output);
2790
2791 if (IsConstant(rhs)) {
2792 Imm64 imm = Imm64(ToInt64(rhs));
2793 masm.branch64(condition, lhsRegs, imm, &done);
2794 } else {
2795 Register64 rhsRegs = ToRegister64(rhs);
2796 masm.branch64(condition, lhsRegs, rhsRegs, &done);
2797 }
2798
2799 masm.move32(Imm32(0), output);
2800 masm.bind(&done);
2801 }
2802
visitCompareI64AndBranch(LCompareI64AndBranch * lir)2803 void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
2804 MCompare* mir = lir->cmpMir();
2805 MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
2806 mir->compareType() == MCompare::Compare_UInt64);
2807
2808 const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
2809 const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
2810 Register64 lhsRegs = ToRegister64(lhs);
2811
2812 bool isSigned = mir->compareType() == MCompare::Compare_Int64;
2813 Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
2814
2815 Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
2816 Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());
2817
2818 if (isNextBlock(lir->ifFalse()->lir())) {
2819 falseLabel = nullptr;
2820 } else if (isNextBlock(lir->ifTrue()->lir())) {
2821 condition = Assembler::InvertCondition(condition);
2822 trueLabel = falseLabel;
2823 falseLabel = nullptr;
2824 }
2825
2826 if (IsConstant(rhs)) {
2827 Imm64 imm = Imm64(ToInt64(rhs));
2828 masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
2829 } else {
2830 Register64 rhsRegs = ToRegister64(rhs);
2831 masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
2832 }
2833 }
2834
visitShiftI64(LShiftI64 * lir)2835 void CodeGenerator::visitShiftI64(LShiftI64* lir) {
2836 const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
2837 LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
2838
2839 MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
2840
2841 if (rhs->isConstant()) {
2842 int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
2843 switch (lir->bitop()) {
2844 case JSOp::Lsh:
2845 if (shift) {
2846 masm.lshift64(Imm32(shift), ToRegister64(lhs));
2847 }
2848 break;
2849 case JSOp::Rsh:
2850 if (shift) {
2851 masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
2852 }
2853 break;
2854 case JSOp::Ursh:
2855 if (shift) {
2856 masm.rshift64(Imm32(shift), ToRegister64(lhs));
2857 }
2858 break;
2859 default:
2860 MOZ_CRASH("Unexpected shift op");
2861 }
2862 return;
2863 }
2864
2865 switch (lir->bitop()) {
2866 case JSOp::Lsh:
2867 masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
2868 break;
2869 case JSOp::Rsh:
2870 masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
2871 break;
2872 case JSOp::Ursh:
2873 masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
2874 break;
2875 default:
2876 MOZ_CRASH("Unexpected shift op");
2877 }
2878 }
2879
visitBitOpI64(LBitOpI64 * lir)2880 void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
2881 const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
2882 const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
2883
2884 MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
2885
2886 switch (lir->bitop()) {
2887 case JSOp::BitOr:
2888 if (IsConstant(rhs)) {
2889 masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
2890 } else {
2891 masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
2892 }
2893 break;
2894 case JSOp::BitXor:
2895 if (IsConstant(rhs)) {
2896 masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
2897 } else {
2898 masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
2899 }
2900 break;
2901 case JSOp::BitAnd:
2902 if (IsConstant(rhs)) {
2903 masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
2904 } else {
2905 masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
2906 }
2907 break;
2908 default:
2909 MOZ_CRASH("unexpected binary opcode");
2910 }
2911 }
2912
visitRotateI64(LRotateI64 * lir)2913 void CodeGenerator::visitRotateI64(LRotateI64* lir) {
2914 MRotate* mir = lir->mir();
2915 LAllocation* count = lir->count();
2916
2917 Register64 input = ToRegister64(lir->input());
2918 Register64 output = ToOutRegister64(lir);
2919 Register temp = ToTempRegisterOrInvalid(lir->temp());
2920
2921 if (count->isConstant()) {
2922 int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
2923 if (!c) {
2924 masm.move64(input, output);
2925 return;
2926 }
2927 if (mir->isLeftRotate()) {
2928 masm.rotateLeft64(Imm32(c), input, output, temp);
2929 } else {
2930 masm.rotateRight64(Imm32(c), input, output, temp);
2931 }
2932 } else {
2933 if (mir->isLeftRotate()) {
2934 masm.rotateLeft64(ToRegister(count), input, output, temp);
2935 } else {
2936 masm.rotateRight64(ToRegister(count), input, output, temp);
2937 }
2938 }
2939 }
2940
visitWasmStackArgI64(LWasmStackArgI64 * ins)2941 void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
2942 const MWasmStackArg* mir = ins->mir();
2943 Address dst(StackPointer, mir->spOffset());
2944 if (IsConstant(ins->arg())) {
2945 masm.store64(Imm64(ToInt64(ins->arg())), dst);
2946 } else {
2947 masm.store64(ToRegister64(ins->arg()), dst);
2948 }
2949 }
2950
// Branchless 64-bit select using ARM predicated instructions: the true value
// already occupies the output pair and is conditionally overwritten with the
// false value when the condition is zero.
void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
  Register cond = ToRegister(lir->condExpr());
  const LInt64Allocation falseExpr = lir->falseExpr();

  Register64 out = ToOutRegister64(lir);
  MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
             "true expr is reused for input");

  // Set flags once; all following moves/loads execute only if cond == 0.
  masm.as_cmp(cond, Imm8(0));
  if (falseExpr.low().isRegister()) {
    masm.ma_mov(ToRegister(falseExpr.low()), out.low, LeaveCC,
                Assembler::Equal);
    masm.ma_mov(ToRegister(falseExpr.high()), out.high, LeaveCC,
                Assembler::Equal);
  } else {
    // False value lives in memory: conditionally load both halves.
    ScratchRegisterScope scratch(masm);
    masm.ma_ldr(ToAddress(falseExpr.low()), out.low, scratch, Offset,
                Assembler::Equal);
    masm.ma_ldr(ToAddress(falseExpr.high()), out.high, scratch, Offset,
                Assembler::Equal);
  }
}
2973
visitWasmReinterpretFromI64(LWasmReinterpretFromI64 * lir)2974 void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
2975 MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
2976 MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
2977 Register64 input = ToRegister64(lir->getInt64Operand(0));
2978 FloatRegister output = ToFloatRegister(lir->output());
2979
2980 masm.ma_vxfer(input.low, input.high, output);
2981 }
2982
visitWasmReinterpretToI64(LWasmReinterpretToI64 * lir)2983 void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
2984 MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
2985 MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
2986 FloatRegister input = ToFloatRegister(lir->getOperand(0));
2987 Register64 output = ToOutRegister64(lir);
2988
2989 masm.ma_vxfer(input, output.low, output.high);
2990 }
2991
visitPopcntI64(LPopcntI64 * lir)2992 void CodeGenerator::visitPopcntI64(LPopcntI64* lir) {
2993 Register64 input = ToRegister64(lir->getInt64Operand(0));
2994 Register64 output = ToOutRegister64(lir);
2995 Register temp = ToRegister(lir->getTemp(0));
2996
2997 masm.popcnt64(input, output, temp);
2998 }
2999
visitClzI64(LClzI64 * lir)3000 void CodeGenerator::visitClzI64(LClzI64* lir) {
3001 Register64 input = ToRegister64(lir->getInt64Operand(0));
3002 Register64 output = ToOutRegister64(lir);
3003
3004 masm.clz64(input, output.low);
3005 masm.move32(Imm32(0), output.high);
3006 }
3007
visitCtzI64(LCtzI64 * lir)3008 void CodeGenerator::visitCtzI64(LCtzI64* lir) {
3009 Register64 input = ToRegister64(lir->getInt64Operand(0));
3010 Register64 output = ToOutRegister64(lir);
3011
3012 masm.ctz64(input, output.low);
3013 masm.move32(Imm32(0), output.high);
3014 }
3015
// Branch on the truthiness of a 64-bit value: it is non-zero iff either
// 32-bit half is non-zero.
void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));

  // A non-zero high word already decides the test.
  masm.as_cmp(input.high, Imm8(0));
  jumpToBlock(lir->ifTrue(), Assembler::NonZero);
  // Otherwise the low word determines the outcome.
  masm.as_cmp(input.low, Imm8(0));
  emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
}
3024
visitWasmAtomicLoadI64(LWasmAtomicLoadI64 * lir)3025 void CodeGenerator::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* lir) {
3026 Register ptr = ToRegister(lir->ptr());
3027 Register64 output = ToOutRegister64(lir);
3028 Register64 tmp(InvalidReg, InvalidReg);
3029
3030 BaseIndex addr(HeapReg, ptr, TimesOne, lir->mir()->access().offset());
3031 masm.wasmAtomicLoad64(lir->mir()->access(), addr, tmp, output);
3032 }
3033
visitWasmAtomicStoreI64(LWasmAtomicStoreI64 * lir)3034 void CodeGenerator::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* lir) {
3035 Register ptr = ToRegister(lir->ptr());
3036 Register64 value = ToRegister64(lir->value());
3037 Register64 tmp(ToRegister(lir->tmpHigh()), ToRegister(lir->tmpLow()));
3038
3039 BaseIndex addr(HeapReg, ptr, TimesOne, lir->mir()->access().offset());
3040 masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, tmp);
3041 }
3042
visitWasmCompareExchangeI64(LWasmCompareExchangeI64 * lir)3043 void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
3044 Register ptr = ToRegister(lir->ptr());
3045 Register64 expected = ToRegister64(lir->expected());
3046 Register64 replacement = ToRegister64(lir->replacement());
3047 Register64 out = ToOutRegister64(lir);
3048
3049 BaseIndex addr(HeapReg, ptr, TimesOne, lir->mir()->access().offset());
3050 masm.wasmCompareExchange64(lir->mir()->access(), addr, expected, replacement,
3051 out);
3052 }
3053
visitWasmAtomicBinopI64(LWasmAtomicBinopI64 * lir)3054 void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
3055 Register ptr = ToRegister(lir->ptr());
3056 Register64 value = ToRegister64(lir->value());
3057 Register64 out = ToOutRegister64(lir);
3058
3059 BaseIndex addr(HeapReg, ptr, TimesOne, lir->access().offset());
3060 Register64 tmp(ToRegister(lir->tmpHigh()), ToRegister(lir->tmpLow()));
3061 masm.wasmAtomicFetchOp64(lir->access(), lir->operation(), value, addr, tmp,
3062 out);
3063 }
3064
visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64 * lir)3065 void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
3066 Register ptr = ToRegister(lir->ptr());
3067 Register64 value = ToRegister64(lir->value());
3068 Register64 out = ToOutRegister64(lir);
3069
3070 BaseIndex addr(HeapReg, ptr, TimesOne, lir->access().offset());
3071 masm.wasmAtomicExchange64(lir->access(), addr, value, out);
3072 }
3073
// Rounding with a dynamic mode is not yet implemented on ARM.
void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH("NYI"); }

void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH("NYI"); }

// Wasm SIMD is not supported on ARM32: none of the SIMD LIR nodes below may
// reach code generation on this target.
void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }

void CodeGenerator::visitWasmBitselectSimd128(LWasmBitselectSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128WithConstant(
    LWasmBinarySimd128WithConstant* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmVariableShiftSimd128(
    LWasmVariableShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmConstantShiftSimd128(
    LWasmConstantShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmSignReplicationSimd128(
    LWasmSignReplicationSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
    LWasmReplaceInt64LaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceAndBranchSimd128(
    LWasmReduceAndBranchSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128ToInt64(
    LWasmReduceSimd128ToInt64* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}
3158