/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips64/CodeGenerator-mips64.h"

#include "mozilla/MathAlgorithms.h"

#include "jit/CodeGenerator.h"
#include "jit/JitFrames.h"
#include "jit/JitRealm.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "vm/Shape.h"
#include "vm/TraceLogging.h"

#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"

using namespace js;
using namespace js::jit;
25
ToValue(LInstruction * ins,size_t pos)26 ValueOperand CodeGeneratorMIPS64::ToValue(LInstruction* ins, size_t pos) {
27 return ValueOperand(ToRegister(ins->getOperand(pos)));
28 }
29
ToTempValue(LInstruction * ins,size_t pos)30 ValueOperand CodeGeneratorMIPS64::ToTempValue(LInstruction* ins, size_t pos) {
31 return ValueOperand(ToRegister(ins->getTemp(pos)));
32 }
33
visitBox(LBox * box)34 void CodeGenerator::visitBox(LBox* box) {
35 const LAllocation* in = box->getOperand(0);
36 ValueOperand result = ToOutValue(box);
37
38 masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
39 }
40
// Unbox a Value into its typed payload register.
//
// Three code paths:
//  1. Fallible unbox: the input's dynamic type may not match, so use the
//     fallibleUnbox* helpers, which branch to |bail| on a tag mismatch and
//     trigger a bailout via the instruction's snapshot.
//  2. Infallible unbox with the input in a register.
//  3. Infallible unbox with the input spilled to memory (a stack slot).
void CodeGenerator::visitUnbox(LUnbox* unbox) {
  MUnbox* mir = unbox->mir();

  Register result = ToRegister(unbox->output());

  if (mir->fallible()) {
    const ValueOperand value = ToValue(unbox, LUnbox::Input);
    Label bail;
    switch (mir->type()) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(value, result, &bail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(value, result, &bail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(value, result, &bail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(value, result, &bail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(value, result, &bail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(value, result, &bail);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
    bailoutFrom(&bail, unbox->snapshot());
    return;
  }

  // Infallible path: the type is statically known, so strip the tag
  // unconditionally.
  LAllocation* input = unbox->getOperand(LUnbox::Input);
  if (input->isRegister()) {
    Register inputReg = ToRegister(input);
    switch (mir->type()) {
      case MIRType::Int32:
        masm.unboxInt32(inputReg, result);
        break;
      case MIRType::Boolean:
        masm.unboxBoolean(inputReg, result);
        break;
      case MIRType::Object:
        masm.unboxObject(inputReg, result);
        break;
      case MIRType::String:
        masm.unboxString(inputReg, result);
        break;
      case MIRType::Symbol:
        masm.unboxSymbol(inputReg, result);
        break;
      case MIRType::BigInt:
        masm.unboxBigInt(inputReg, result);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
    return;
  }

  // Input is in memory: unbox directly from the address.
  Address inputAddr = ToAddress(input);
  switch (mir->type()) {
    case MIRType::Int32:
      masm.unboxInt32(inputAddr, result);
      break;
    case MIRType::Boolean:
      masm.unboxBoolean(inputAddr, result);
      break;
    case MIRType::Object:
      masm.unboxObject(inputAddr, result);
      break;
    case MIRType::String:
      masm.unboxString(inputAddr, result);
      break;
    case MIRType::Symbol:
      masm.unboxSymbol(inputAddr, result);
      break;
    case MIRType::BigInt:
      masm.unboxBigInt(inputAddr, result);
      break;
    default:
      MOZ_CRASH("Given MIRType cannot be unboxed.");
  }
}
127
splitTagForTest(const ValueOperand & value,ScratchTagScope & tag)128 void CodeGeneratorMIPS64::splitTagForTest(const ValueOperand& value,
129 ScratchTagScope& tag) {
130 masm.splitTag(value.valueReg(), tag);
131 }
132
visitCompareB(LCompareB * lir)133 void CodeGenerator::visitCompareB(LCompareB* lir) {
134 MCompare* mir = lir->mir();
135
136 const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
137 const LAllocation* rhs = lir->rhs();
138 const Register output = ToRegister(lir->output());
139
140 MOZ_ASSERT(mir->jsop() == JSOp::StrictEq || mir->jsop() == JSOp::StrictNe);
141 Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
142
143 // Load boxed boolean in ScratchRegister.
144 if (rhs->isConstant()) {
145 masm.moveValue(rhs->toConstant()->toJSValue(),
146 ValueOperand(ScratchRegister));
147 } else {
148 masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), ScratchRegister);
149 }
150
151 // Perform the comparison.
152 masm.cmpPtrSet(cond, lhs.valueReg(), ScratchRegister, output);
153 }
154
visitCompareBAndBranch(LCompareBAndBranch * lir)155 void CodeGenerator::visitCompareBAndBranch(LCompareBAndBranch* lir) {
156 MCompare* mir = lir->cmpMir();
157 const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
158 const LAllocation* rhs = lir->rhs();
159
160 MOZ_ASSERT(mir->jsop() == JSOp::StrictEq || mir->jsop() == JSOp::StrictNe);
161
162 // Load boxed boolean in ScratchRegister.
163 if (rhs->isConstant()) {
164 masm.moveValue(rhs->toConstant()->toJSValue(),
165 ValueOperand(ScratchRegister));
166 } else {
167 masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), ScratchRegister);
168 }
169
170 // Perform the comparison.
171 Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
172 emitBranch(lhs.valueReg(), ScratchRegister, cond, lir->ifTrue(),
173 lir->ifFalse());
174 }
175
visitCompareBitwise(LCompareBitwise * lir)176 void CodeGenerator::visitCompareBitwise(LCompareBitwise* lir) {
177 MCompare* mir = lir->mir();
178 Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
179 const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
180 const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
181 const Register output = ToRegister(lir->output());
182
183 MOZ_ASSERT(IsEqualityOp(mir->jsop()));
184
185 masm.cmpPtrSet(cond, lhs.valueReg(), rhs.valueReg(), output);
186 }
187
visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch * lir)188 void CodeGenerator::visitCompareBitwiseAndBranch(
189 LCompareBitwiseAndBranch* lir) {
190 MCompare* mir = lir->cmpMir();
191 Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
192 const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
193 const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);
194
195 MOZ_ASSERT(mir->jsop() == JSOp::Eq || mir->jsop() == JSOp::StrictEq ||
196 mir->jsop() == JSOp::Ne || mir->jsop() == JSOp::StrictNe);
197
198 emitBranch(lhs.valueReg(), rhs.valueReg(), cond, lir->ifTrue(),
199 lir->ifFalse());
200 }
201
visitCompareI64(LCompareI64 * lir)202 void CodeGenerator::visitCompareI64(LCompareI64* lir) {
203 MCompare* mir = lir->mir();
204 MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
205 mir->compareType() == MCompare::Compare_UInt64);
206
207 const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
208 const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
209 Register lhsReg = ToRegister64(lhs).reg;
210 Register output = ToRegister(lir->output());
211 Register rhsReg;
212 ScratchRegisterScope scratch(masm);
213
214 if (IsConstant(rhs)) {
215 rhsReg = scratch;
216 masm.ma_li(rhsReg, ImmWord(ToInt64(rhs)));
217 } else if (rhs.value().isGeneralReg()) {
218 rhsReg = ToRegister64(rhs).reg;
219 } else {
220 rhsReg = scratch;
221 masm.loadPtr(ToAddress(rhs.value()), rhsReg);
222 }
223
224 bool isSigned = mir->compareType() == MCompare::Compare_Int64;
225 masm.cmpPtrSet(JSOpToCondition(lir->jsop(), isSigned), lhsReg, rhsReg,
226 output);
227 }
228
visitCompareI64AndBranch(LCompareI64AndBranch * lir)229 void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
230 MCompare* mir = lir->cmpMir();
231 MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
232 mir->compareType() == MCompare::Compare_UInt64);
233
234 const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
235 const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
236 Register lhsReg = ToRegister64(lhs).reg;
237 Register rhsReg;
238 ScratchRegisterScope scratch(masm);
239
240 if (IsConstant(rhs)) {
241 rhsReg = scratch;
242 masm.ma_li(rhsReg, ImmWord(ToInt64(rhs)));
243 } else if (rhs.value().isGeneralReg()) {
244 rhsReg = ToRegister64(rhs).reg;
245 } else {
246 rhsReg = scratch;
247 masm.loadPtr(ToAddress(rhs.value()), rhsReg);
248 }
249
250 bool isSigned = mir->compareType() == MCompare::Compare_Int64;
251 Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
252 emitBranch(lhsReg, rhsReg, cond, lir->ifTrue(), lir->ifFalse());
253 }
254
visitDivOrModI64(LDivOrModI64 * lir)255 void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
256 Register lhs = ToRegister(lir->lhs());
257 Register rhs = ToRegister(lir->rhs());
258 Register output = ToRegister(lir->output());
259
260 Label done;
261
262 // Handle divide by zero.
263 if (lir->canBeDivideByZero()) {
264 Label nonZero;
265 masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
266 masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
267 masm.bind(&nonZero);
268 }
269
270 // Handle an integer overflow exception from INT64_MIN / -1.
271 if (lir->canBeNegativeOverflow()) {
272 Label notOverflow;
273 masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), ¬Overflow);
274 masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), ¬Overflow);
275 if (lir->mir()->isMod()) {
276 masm.ma_xor(output, output);
277 } else {
278 masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
279 }
280 masm.jump(&done);
281 masm.bind(¬Overflow);
282 }
283
284 #ifdef MIPSR6
285 if (lir->mir()->isMod()) {
286 masm.as_dmod(output, lhs, rhs);
287 } else {
288 masm.as_ddiv(output, lhs, rhs);
289 }
290 #else
291 masm.as_ddiv(lhs, rhs);
292 if (lir->mir()->isMod()) {
293 masm.as_mfhi(output);
294 } else {
295 masm.as_mflo(output);
296 }
297 #endif
298 masm.bind(&done);
299 }
300
// Unsigned 64-bit wasm division or modulus.
//
// Only a divide-by-zero check is required: unsigned division cannot
// overflow, so there is no INT64_MIN / -1 case (contrast visitDivOrModI64).
void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());
  Register output = ToRegister(lir->output());

  Label done;

  // Prevent divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

#ifdef MIPSR6
  // R6 provides three-operand unsigned div/mod instructions.
  if (lir->mir()->isMod()) {
    masm.as_dmodu(output, lhs, rhs);
  } else {
    masm.as_ddivu(output, lhs, rhs);
  }
#else
  // Pre-R6: ddivu leaves the quotient in LO and the remainder in HI.
  masm.as_ddivu(lhs, rhs);
  if (lir->mir()->isMod()) {
    masm.as_mfhi(output);
  } else {
    masm.as_mflo(output);
  }
#endif
  masm.bind(&done);
}
332
333 template <typename T>
emitWasmLoadI64(T * lir)334 void CodeGeneratorMIPS64::emitWasmLoadI64(T* lir) {
335 const MWasmLoad* mir = lir->mir();
336
337 Register ptrScratch = InvalidReg;
338 if (!lir->ptrCopy()->isBogusTemp()) {
339 ptrScratch = ToRegister(lir->ptrCopy());
340 }
341
342 if (IsUnaligned(mir->access())) {
343 masm.wasmUnalignedLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()),
344 ptrScratch, ToOutRegister64(lir),
345 ToRegister(lir->getTemp(1)));
346 } else {
347 masm.wasmLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()), ptrScratch,
348 ToOutRegister64(lir));
349 }
350 }
351
// Aligned 64-bit wasm load; shares its implementation with the unaligned
// variant via emitWasmLoadI64.
void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
  emitWasmLoadI64(lir);
}
355
// Unaligned 64-bit wasm load; emitWasmLoadI64 dispatches on the access's
// alignment to pick the right masm helper.
void CodeGenerator::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir) {
  emitWasmLoadI64(lir);
}
359
360 template <typename T>
emitWasmStoreI64(T * lir)361 void CodeGeneratorMIPS64::emitWasmStoreI64(T* lir) {
362 const MWasmStore* mir = lir->mir();
363
364 Register ptrScratch = InvalidReg;
365 if (!lir->ptrCopy()->isBogusTemp()) {
366 ptrScratch = ToRegister(lir->ptrCopy());
367 }
368
369 if (IsUnaligned(mir->access())) {
370 masm.wasmUnalignedStoreI64(mir->access(), ToRegister64(lir->value()),
371 HeapReg, ToRegister(lir->ptr()), ptrScratch,
372 ToRegister(lir->getTemp(1)));
373 } else {
374 masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
375 ToRegister(lir->ptr()), ptrScratch);
376 }
377 }
378
// Aligned 64-bit wasm store; shares its implementation with the unaligned
// variant via emitWasmStoreI64.
void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
  emitWasmStoreI64(lir);
}
382
// Unaligned 64-bit wasm store; emitWasmStoreI64 dispatches on the access's
// alignment to pick the right masm helper.
void CodeGenerator::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir) {
  emitWasmStoreI64(lir);
}
386
visitWasmSelectI64(LWasmSelectI64 * lir)387 void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
388 MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
389
390 Register cond = ToRegister(lir->condExpr());
391 const LInt64Allocation falseExpr = lir->falseExpr();
392
393 Register64 out = ToOutRegister64(lir);
394 MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
395 "true expr is reused for input");
396
397 if (falseExpr.value().isRegister()) {
398 masm.as_movz(out.reg, ToRegister(falseExpr.value()), cond);
399 } else {
400 Label done;
401 masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
402 masm.loadPtr(ToAddress(falseExpr.value()), out.reg);
403 masm.bind(&done);
404 }
405 }
406
visitWasmReinterpretFromI64(LWasmReinterpretFromI64 * lir)407 void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
408 MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
409 MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
410 masm.as_dmtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
411 }
412
visitWasmReinterpretToI64(LWasmReinterpretToI64 * lir)413 void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
414 MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
415 MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
416 masm.as_dmfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
417 }
418
visitExtendInt32ToInt64(LExtendInt32ToInt64 * lir)419 void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
420 const LAllocation* input = lir->getOperand(0);
421 Register output = ToRegister(lir->output());
422
423 if (lir->mir()->isUnsigned()) {
424 masm.ma_dext(output, ToRegister(input), Imm32(0), Imm32(32));
425 } else {
426 masm.ma_sll(output, ToRegister(input), Imm32(0));
427 }
428 }
429
visitWrapInt64ToInt32(LWrapInt64ToInt32 * lir)430 void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
431 const LAllocation* input = lir->getOperand(0);
432 Register output = ToRegister(lir->output());
433
434 if (lir->mir()->bottomHalf()) {
435 if (input->isMemory()) {
436 masm.load32(ToAddress(input), output);
437 } else {
438 masm.ma_sll(output, ToRegister(input), Imm32(0));
439 }
440 } else {
441 MOZ_CRASH("Not implemented.");
442 }
443 }
444
visitSignExtendInt64(LSignExtendInt64 * lir)445 void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
446 Register64 input = ToRegister64(lir->getInt64Operand(0));
447 Register64 output = ToOutRegister64(lir);
448 switch (lir->mode()) {
449 case MSignExtendInt64::Byte:
450 masm.move32To64SignExtend(input.reg, output);
451 masm.move8SignExtend(output.reg, output.reg);
452 break;
453 case MSignExtendInt64::Half:
454 masm.move32To64SignExtend(input.reg, output);
455 masm.move16SignExtend(output.reg, output.reg);
456 break;
457 case MSignExtendInt64::Word:
458 masm.move32To64SignExtend(input.reg, output);
459 break;
460 }
461 }
462
visitClzI64(LClzI64 * lir)463 void CodeGenerator::visitClzI64(LClzI64* lir) {
464 Register64 input = ToRegister64(lir->getInt64Operand(0));
465 Register64 output = ToOutRegister64(lir);
466 masm.clz64(input, output.reg);
467 }
468
visitCtzI64(LCtzI64 * lir)469 void CodeGenerator::visitCtzI64(LCtzI64* lir) {
470 Register64 input = ToRegister64(lir->getInt64Operand(0));
471 Register64 output = ToOutRegister64(lir);
472 masm.ctz64(input, output.reg);
473 }
474
visitNotI64(LNotI64 * lir)475 void CodeGenerator::visitNotI64(LNotI64* lir) {
476 Register64 input = ToRegister64(lir->getInt64Operand(0));
477 Register output = ToRegister(lir->output());
478
479 masm.cmp64Set(Assembler::Equal, input.reg, Imm32(0), output);
480 }
481
visitWasmTruncateToInt64(LWasmTruncateToInt64 * lir)482 void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
483 FloatRegister input = ToFloatRegister(lir->input());
484 Register64 output = ToOutRegister64(lir);
485
486 MWasmTruncateToInt64* mir = lir->mir();
487 MIRType fromType = mir->input()->type();
488
489 MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
490
491 auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
492 addOutOfLineCode(ool, mir);
493
494 Label* oolEntry = ool->entry();
495 Label* oolRejoin = ool->rejoin();
496 bool isSaturating = mir->isSaturating();
497
498 if (fromType == MIRType::Double) {
499 if (mir->isUnsigned()) {
500 masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
501 oolRejoin, InvalidFloatReg);
502 } else {
503 masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
504 oolRejoin, InvalidFloatReg);
505 }
506 } else {
507 if (mir->isUnsigned()) {
508 masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
509 oolRejoin, InvalidFloatReg);
510 } else {
511 masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
512 oolRejoin, InvalidFloatReg);
513 }
514 }
515 }
516
visitInt64ToFloatingPoint(LInt64ToFloatingPoint * lir)517 void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
518 Register64 input = ToRegister64(lir->getInt64Operand(0));
519 FloatRegister output = ToFloatRegister(lir->output());
520
521 MIRType outputType = lir->mir()->type();
522 MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
523
524 if (outputType == MIRType::Double) {
525 if (lir->mir()->isUnsigned()) {
526 masm.convertUInt64ToDouble(input, output, Register::Invalid());
527 } else {
528 masm.convertInt64ToDouble(input, output);
529 }
530 } else {
531 if (lir->mir()->isUnsigned()) {
532 masm.convertUInt64ToFloat32(input, output, Register::Invalid());
533 } else {
534 masm.convertInt64ToFloat32(input, output);
535 }
536 }
537 }
538
visitTestI64AndBranch(LTestI64AndBranch * lir)539 void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
540 Register64 input = ToRegister64(lir->getInt64Operand(0));
541 MBasicBlock* ifTrue = lir->ifTrue();
542 MBasicBlock* ifFalse = lir->ifFalse();
543
544 emitBranch(input.reg, Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
545 }
546