1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/codegen/assembler-inl.h"
6 #include "src/codegen/callable.h"
7 #include "src/codegen/macro-assembler.h"
8 #include "src/codegen/optimized-compilation-info.h"
9 #include "src/compiler/backend/code-generator-impl.h"
10 #include "src/compiler/backend/code-generator.h"
11 #include "src/compiler/backend/gap-resolver.h"
12 #include "src/compiler/node-matchers.h"
13 #include "src/compiler/osr.h"
14 #include "src/heap/heap-inl.h" // crbug.com/v8/8499
15 #include "src/wasm/wasm-code-manager.h"
16
17 namespace v8 {
18 namespace internal {
19 namespace compiler {
20
#define __ tasm()->

// TODO(plind): consider renaming these macros.
// Debug-print helpers: emit a message tagged with the enclosing function name
// and source line so unimplemented or unexpected paths can be located quickly.
#define TRACE_MSG(msg)                                                      \
  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
         __LINE__)

#define TRACE_UNIMPL()                                                       \
  PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
         __LINE__)
31
// Adds Mips-specific methods to convert InstructionOperands.
class MipsOperandConverter final : public InstructionOperandConverter {
 public:
  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  // Output operand |index| viewed as a single-precision FPU register.
  FloatRegister OutputSingleRegister(size_t index = 0) {
    return ToSingleRegister(instr_->OutputAt(index));
  }

  // Input operand |index| viewed as a single-precision FPU register.
  FloatRegister InputSingleRegister(size_t index) {
    return ToSingleRegister(instr_->InputAt(index));
  }

  FloatRegister ToSingleRegister(InstructionOperand* op) {
    // Single (Float) and Double register namespace is same on MIPS,
    // both are typedefs of FPURegister.
    return ToDoubleRegister(op);
  }

  // Input |index| as a general register; an immediate input (which must be 0)
  // is mapped to the hardware zero register, so instructions can uniformly
  // take either form.
  Register InputOrZeroRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) {
      DCHECK_EQ(0, InputInt32(index));
      return zero_reg;
    }
    return InputRegister(index);
  }

  // Input |index| as a double register; an immediate input selects
  // kDoubleRegZero. NOTE(review): unlike InputOrZeroRegister there is no
  // DCHECK that the immediate is zero — presumably guaranteed by the
  // instruction selector; confirm against callers.
  DoubleRegister InputOrZeroDoubleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputDoubleRegister(index);
  }

  // Single-precision analogue of InputOrZeroDoubleRegister (same register
  // file on MIPS, see ToSingleRegister).
  DoubleRegister InputOrZeroSingleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputSingleRegister(index);
  }

  // Converts a constant input into an assembler Operand. Only constant kinds
  // that can be expressed as a MIPS immediate or an embedded number/string
  // are handled; the remaining kinds fall through to UNREACHABLE.
  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand::EmbeddedNumber(constant.ToFloat32());
      case Constant::kFloat64:
        return Operand::EmbeddedNumber(constant.ToFloat64().value());
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kCompressedHeapObject:
      case Constant::kHeapObject:
        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
        //    maybe not done on arm due to const pool ??
        break;
      case Constant::kDelayedStringConstant:
        return Operand::EmbeddedStringConstant(
            constant.ToDelayedStringConstant());
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
        break;
    }
    UNREACHABLE();
  }

  // Register input -> register Operand; anything else is treated as an
  // immediate via InputImmediate.
  Operand InputOperand(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return InputImmediate(index);
  }

  // Decodes the instruction's addressing mode and builds a MemOperand from
  // the inputs starting at *first_index, advancing *first_index past the
  // inputs consumed.
  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        // Base register + int32 displacement.
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        // TODO(plind): r6 address mode, to be implemented ...
        UNREACHABLE();
    }
    UNREACHABLE();
  }

  MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }

  // Converts a (FP) stack-slot operand into an sp/fp-relative MemOperand.
  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};
134
HasRegisterInput(Instruction * instr,size_t index)135 static inline bool HasRegisterInput(Instruction* instr, size_t index) {
136 return instr->InputAt(index)->IsRegister();
137 }
138
139 namespace {
140
// Out-of-line slow path for kArchStoreWithWriteBarrier: performs the
// generational/incremental-marking write barrier for a store of |value|
// into the slot at |object| + |index|.
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode, StubCallMode stub_mode)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        stub_mode_(stub_mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        zone_(gen->zone()) {}

  void Generate() final {
    // Smi values never need a barrier; skip straight to the exit.
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    // Skip the barrier unless the page containing |value| is flagged as
    // "pointers to here are interesting".
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    // scratch1_ = address of the written slot (object + index).
    __ Addu(scratch1_, object_, index_);
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore ra if the frame was elided.
      __ Push(ra);
    }

    if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
      __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
    } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      // A direct call to a wasm runtime stub defined in this module.
      // Just encode the stub index. This will be patched when the code
      // is added to the native module and copied into wasm code space.
      __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                             save_fp_mode, wasm::WasmCode::kRecordWrite);
    } else {
      __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                             save_fp_mode);
    }
    if (must_save_lr_) {
      __ Pop(ra);
    }
  }

 private:
  Register const object_;
  Register const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  StubCallMode const stub_mode_;
  bool must_save_lr_;
  Zone* zone_;
};
203
// Defines an out-of-line stub class that forwards a three-operand
// floating-point operation (dst, src1, src2) to the corresponding slow-path
// TurboAssembler helper |tasm_ool_name|.
#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T)                 \
  class ool_name final : public OutOfLineCode {                      \
   public:                                                           \
    ool_name(CodeGenerator* gen, T dst, T src1, T src2)              \
        : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
                                                                     \
    void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); }  \
                                                                     \
   private:                                                          \
    T const dst_;                                                    \
    T const src1_;                                                   \
    T const src2_;                                                   \
  }

// Out-of-line paths for the float32/float64 Max/Min instructions.
CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, DoubleRegister);
CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, DoubleRegister);

#undef CREATE_OOL_CLASS
224
// Maps a TurboFan FlagsCondition to the MIPS branch condition used for
// integer compares. Unordered (floating-point) conditions have no integer
// equivalent and fall through to UNREACHABLE.
Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
    default:
      break;
  }
  UNREACHABLE();
}
255
FlagsConditionToConditionTst(FlagsCondition condition)256 Condition FlagsConditionToConditionTst(FlagsCondition condition) {
257 switch (condition) {
258 case kNotEqual:
259 return ne;
260 case kEqual:
261 return eq;
262 default:
263 break;
264 }
265 UNREACHABLE();
266 }
267
// Maps a FlagsCondition to a MIPS FPU compare condition. FPU compares only
// exist in one polarity, so *predicate reports whether the caller should
// branch when the compare sets the condition (true) or when it does not
// (false) — e.g. kNotEqual becomes "EQ with inverted predicate".
FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
                                             FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      *predicate = true;
      return EQ;
    case kNotEqual:
      *predicate = false;
      return EQ;
    case kUnsignedLessThan:
      *predicate = true;
      return OLT;
    case kUnsignedGreaterThanOrEqual:
      *predicate = false;
      return OLT;
    case kUnsignedLessThanOrEqual:
      *predicate = true;
      return OLE;
    case kUnsignedGreaterThan:
      *predicate = false;
      return OLE;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      *predicate = true;
      break;
    default:
      *predicate = true;
      break;
  }
  UNREACHABLE();
}
299
// Reports an instruction/condition combination this backend cannot emit,
// then aborts via UNIMPLEMENTED().
#define UNSUPPORTED_COND(opcode, condition)                                   \
  StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
                 << "\"";                                                      \
  UNIMPLEMENTED();
304
EmitWordLoadPoisoningIfNeeded(CodeGenerator * codegen,InstructionCode opcode,Instruction * instr,MipsOperandConverter const & i)305 void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
306 InstructionCode opcode, Instruction* instr,
307 MipsOperandConverter const& i) {
308 const MemoryAccessMode access_mode =
309 static_cast<MemoryAccessMode>(MiscField::decode(opcode));
310 if (access_mode == kMemoryAccessPoisoned) {
311 Register value = i.OutputRegister();
312 codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
313 }
314 }
315
316 } // namespace
317
// Sequentially-consistent atomic load: plain load followed by a full barrier.
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)          \
  do {                                                   \
    __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
    __ sync();                                           \
  } while (0)

// Sequentially-consistent atomic store: barrier, store, barrier.
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)               \
  do {                                                         \
    __ sync();                                                 \
    __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
    __ sync();                                                 \
  } while (0)

// Word-sized atomic read-modify-write via an LL/SC retry loop.
// Address = input0 + input1; operand = input2; old value -> output0.
// Sc writes 0 to its register on failure, hence the retry branch on eq zero.
#define ASSEMBLE_ATOMIC_BINOP(bin_instr)                                 \
  do {                                                                   \
    Label binop;                                                         \
    __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));  \
    __ sync();                                                           \
    __ bind(&binop);                                                     \
    __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));        \
    __ bin_instr(i.TempRegister(1), i.OutputRegister(0),                 \
                 Operand(i.InputRegister(2)));                           \
    __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));          \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));    \
    __ sync();                                                           \
  } while (0)

// 64-bit atomic logic op on 32-bit MIPS. On r6 a paired ll/llx + sc/scx
// loop updates both halves; otherwise the operation is delegated to the C
// runtime function |external| (a0 = address, caller-saved regs preserved
// except v0/v1 which carry the result).
#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(bin_instr, external)                   \
  do {                                                                       \
    if (IsMipsArchVariant(kMips32r6)) {                                      \
      Label binop;                                                           \
      Register oldval_low =                                                  \
          instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); \
      Register oldval_high =                                                 \
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2); \
      __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));    \
      __ sync();                                                             \
      __ bind(&binop);                                                       \
      __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));                 \
      __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));                   \
      __ bin_instr(i.TempRegister(1), i.TempRegister(2), oldval_low,         \
                   oldval_high, i.InputRegister(2), i.InputRegister(3));     \
      __ scx(i.TempRegister(2), MemOperand(i.TempRegister(0), 4));           \
      __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));            \
      __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));      \
      __ sync();                                                             \
    } else {                                                                 \
      FrameScope scope(tasm(), StackFrame::MANUAL);                          \
      __ Addu(a0, i.InputRegister(0), i.InputRegister(1));                   \
      __ PushCallerSaved(kDontSaveFPRegs, v0, v1);                           \
      __ PrepareCallCFunction(3, 0, kScratchReg);                            \
      __ CallCFunction(ExternalReference::external(), 3, 0);                 \
      __ PopCallerSaved(kDontSaveFPRegs, v0, v1);                            \
    }                                                                        \
  } while (0)

// Same structure as ASSEMBLE_ATOMIC64_LOGIC_BINOP, but the arithmetic
// bin_instr additionally needs two scratch registers for carry handling.
#define ASSEMBLE_ATOMIC64_ARITH_BINOP(bin_instr, external)                   \
  do {                                                                       \
    if (IsMipsArchVariant(kMips32r6)) {                                      \
      Label binop;                                                           \
      Register oldval_low =                                                  \
          instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); \
      Register oldval_high =                                                 \
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2); \
      __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));    \
      __ sync();                                                             \
      __ bind(&binop);                                                       \
      __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));                 \
      __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));                   \
      __ bin_instr(i.TempRegister(1), i.TempRegister(2), oldval_low,         \
                   oldval_high, i.InputRegister(2), i.InputRegister(3),      \
                   kScratchReg, kScratchReg2);                               \
      __ scx(i.TempRegister(2), MemOperand(i.TempRegister(0), 4));           \
      __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));            \
      __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));      \
      __ sync();                                                             \
    } else {                                                                 \
      FrameScope scope(tasm(), StackFrame::MANUAL);                          \
      __ Addu(a0, i.InputRegister(0), i.InputRegister(1));                   \
      __ PushCallerSaved(kDontSaveFPRegs, v0, v1);                           \
      __ PrepareCallCFunction(3, 0, kScratchReg);                            \
      __ CallCFunction(ExternalReference::external(), 3, 0);                 \
      __ PopCallerSaved(kDontSaveFPRegs, v0, v1);                            \
    }                                                                        \
  } while (0)

// Sub-word (8/16-bit) atomic read-modify-write. LL/SC only operate on
// aligned words, so the address is split into an aligned word address
// (temp0) and a bit shift (temp3 = byte offset * 8); the field is extracted,
// modified, and re-inserted into the word before the Sc.
#define ASSEMBLE_ATOMIC_BINOP_EXT(sign_extend, size, bin_instr)               \
  do {                                                                        \
    Label binop;                                                              \
    __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ andi(i.TempRegister(3), i.TempRegister(0), 0x3);                       \
    __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(3))); \
    __ sll(i.TempRegister(3), i.TempRegister(3), 3);                          \
    __ sync();                                                                \
    __ bind(&binop);                                                          \
    __ Ll(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));               \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \
                   size, sign_extend);                                        \
    __ bin_instr(i.TempRegister(2), i.OutputRegister(0),                      \
                 Operand(i.InputRegister(2)));                                \
    __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3),    \
                  size);                                                      \
    __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));               \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));         \
    __ sync();                                                                \
  } while (0)

// Word-sized atomic exchange via LL/SC: old value -> output0, input2 stored.
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER()                              \
  do {                                                                  \
    Label exchange;                                                     \
    __ sync();                                                          \
    __ bind(&exchange);                                                 \
    __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));       \
    __ mov(i.TempRegister(1), i.InputRegister(2));                      \
    __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));         \
    __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \
    __ sync();                                                          \
  } while (0)

// Sub-word atomic exchange; same align-and-shift scheme as
// ASSEMBLE_ATOMIC_BINOP_EXT (temp1 holds the bit shift here).
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(sign_extend, size)               \
  do {                                                                        \
    Label exchange;                                                           \
    __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                       \
    __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(1))); \
    __ sll(i.TempRegister(1), i.TempRegister(1), 3);                          \
    __ sync();                                                                \
    __ bind(&exchange);                                                       \
    __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));               \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
                   size, sign_extend);                                        \
    __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1),   \
                  size);                                                      \
    __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));               \
    __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg));      \
    __ sync();                                                                \
  } while (0)
456
// Word-sized atomic compare-exchange via LL/SC. Inputs: address = input0 +
// input1, expected = input2, new value = input3; the observed old value is
// left in output0. Exits early (without storing) when the loaded value does
// not match the expected one.
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER()                      \
  do {                                                                  \
    Label compareExchange;                                              \
    Label exit;                                                         \
    __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    __ sync();                                                          \
    __ bind(&compareExchange);                                          \
    __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));       \
    __ BranchShort(&exit, ne, i.InputRegister(2),                       \
                   Operand(i.OutputRegister(0)));                       \
    __ mov(i.TempRegister(2), i.InputRegister(3));                      \
    __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));         \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2),             \
                   Operand(zero_reg));                                  \
    __ bind(&exit);                                                     \
    __ sync();                                                          \
  } while (0)

// Sub-word atomic compare-exchange; align-and-shift scheme as in the other
// _EXT macros. Note the expected value (input2) is normalized in place by
// the second ExtractBits so the comparison matches the extracted field.
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(sign_extend, size)       \
  do {                                                                        \
    Label compareExchange;                                                    \
    Label exit;                                                               \
    __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                       \
    __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(1))); \
    __ sll(i.TempRegister(1), i.TempRegister(1), 3);                          \
    __ sync();                                                                \
    __ bind(&compareExchange);                                                \
    __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));               \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
                   size, sign_extend);                                        \
    __ ExtractBits(i.InputRegister(2), i.InputRegister(2), i.TempRegister(1), \
                   size, sign_extend);                                        \
    __ BranchShort(&exit, ne, i.InputRegister(2),                             \
                   Operand(i.OutputRegister(0)));                             \
    __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1),   \
                  size);                                                      \
    __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));               \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2),                   \
                   Operand(zero_reg));                                        \
    __ bind(&exit);                                                          \
    __ sync();                                                                \
  } while (0)

// Calls the C implementation of a two-argument ieee754 math function
// (0 integer args, 2 double args) and moves the result into the output
// double register.
#define ASSEMBLE_IEEE754_BINOP(name)                                        \
  do {                                                                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 2, kScratchReg);                             \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                       \
                            i.InputDoubleRegister(1));                      \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
    /* Move the result in the double result register. */                    \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
  } while (0)

// Single-argument variant of ASSEMBLE_IEEE754_BINOP.
#define ASSEMBLE_IEEE754_UNOP(name)                                         \
  do {                                                                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 1, kScratchReg);                             \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                       \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
    /* Move the result in the double result register. */                    \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
  } while (0)

// Emits a two-operand SIMD (f64x2) arithmetic instruction.
#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op)                     \
  do {                                                          \
    __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
          i.InputSimd128Register(1));                           \
  } while (0)
527
// Tears down the current frame: restore sp to the frame base, then pop the
// saved return address and caller frame pointer pushed at frame construction.
void CodeGenerator::AssembleDeconstructFrame() {
  __ mov(sp, fp);
  __ Pop(ra, fp);
}
532
// Restores the caller's ra/fp (if a frame was built) so a tail call leaves
// the stack exactly as the caller set it up; afterwards frame slots are
// addressed relative to sp.
void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}
540
// If the current frame is an arguments adaptor frame, drops it and prepares
// for a tail call with the caller's (actual) argument count; otherwise does
// nothing. Used by kArchTailCallCodeObjectFromJSFunction.
void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ lw(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ Branch(&done, ne, scratch1,
            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ lw(caller_args_count_reg,
        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
  __ bind(&done);
}
563
564 namespace {
565
AdjustStackPointerForTailCall(TurboAssembler * tasm,FrameAccessState * state,int new_slot_above_sp,bool allow_shrinkage=true)566 void AdjustStackPointerForTailCall(TurboAssembler* tasm,
567 FrameAccessState* state,
568 int new_slot_above_sp,
569 bool allow_shrinkage = true) {
570 int current_sp_offset = state->GetSPToFPSlotCount() +
571 StandardFrameConstants::kFixedSlotCountAboveFp;
572 int stack_slot_delta = new_slot_above_sp - current_sp_offset;
573 if (stack_slot_delta > 0) {
574 tasm->Subu(sp, sp, stack_slot_delta * kSystemPointerSize);
575 state->IncreaseSPDelta(stack_slot_delta);
576 } else if (allow_shrinkage && stack_slot_delta < 0) {
577 tasm->Addu(sp, sp, -stack_slot_delta * kSystemPointerSize);
578 state->IncreaseSPDelta(stack_slot_delta);
579 }
580 }
581
582 } // namespace
583
// Grows the stack (if needed) before the tail-call gap moves are emitted;
// shrinking is deferred to AssembleTailCallAfterGap.
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_stack_slot, false);
}
589
// Completes the sp adjustment after the tail-call gap moves, allowing the
// stack to shrink to its final size.
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_stack_slot);
}
595
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
  // Recompute this code object's start address and assert that the caller
  // passed the same value in kJavaScriptCallCodeStartRegister.
  __ ComputeCodeStartAddress(kScratchReg);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
            kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
}
602
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
//    1. read from memory the word that contains that bit, which can be found in
//       the flags in the referenced {CodeDataContainer} object;
//    2. test kMarkedForDeoptimizationBit in those flags; and
//    3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
  // Load the CodeDataContainer from the current code object.
  __ lw(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
  __ lw(kScratchReg,
        FieldMemOperand(kScratchReg,
                        CodeDataContainer::kKindSpecificFlagsOffset));
  __ And(kScratchReg, kScratchReg,
         Operand(1 << Code::kMarkedForDeoptimizationBit));
  // Tail-call the lazy-deoptimization builtin when the bit is set.
  __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
          RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
621
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
  // Calculate a mask which has all bits set in the normal case, but has all
  // bits cleared if we are speculatively executing the wrong PC.
  //    difference = (current - expected) | (expected - current)
  //    poison = ~(difference >> (kBitsPerSystemPointer - 1))
  __ ComputeCodeStartAddress(kScratchReg);
  __ Move(kSpeculationPoisonRegister, kScratchReg);
  __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
          kJavaScriptCallCodeStartRegister);
  __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
          kScratchReg);
  __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kJavaScriptCallCodeStartRegister);
  // Arithmetic right shift smears the sign bit of the difference: result is
  // 0 when the addresses matched, all ones otherwise.
  __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kBitsPerSystemPointer - 1);
  // nor of a value with itself is bitwise NOT, producing the final mask.
  __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kSpeculationPoisonRegister);
}
640
// Masks the incoming JS function, context, and stack pointer with the
// speculation poison mask so that, on a mispredicted call target, these
// registers become zero and cannot leak data speculatively.
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
  __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
  __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
  __ And(sp, sp, kSpeculationPoisonRegister);
}
646
647 // Assembles an instruction after register allocation, producing machine code.
AssembleArchInstruction(Instruction * instr)648 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
649 Instruction* instr) {
650 MipsOperandConverter i(this, instr);
651 InstructionCode opcode = instr->opcode();
652 ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
653 switch (arch_opcode) {
654 case kArchCallCodeObject: {
655 if (instr->InputAt(0)->IsImmediate()) {
656 __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
657 } else {
658 Register reg = i.InputRegister(0);
659 DCHECK_IMPLIES(
660 HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
661 reg == kJavaScriptCallCodeStartRegister);
662 __ Call(reg, reg, Code::kHeaderSize - kHeapObjectTag);
663 }
664 RecordCallPosition(instr);
665 frame_access_state()->ClearSPDelta();
666 break;
667 }
668 case kArchCallBuiltinPointer: {
669 DCHECK(!instr->InputAt(0)->IsImmediate());
670 Register builtin_index = i.InputRegister(0);
671 __ CallBuiltinByIndex(builtin_index);
672 RecordCallPosition(instr);
673 frame_access_state()->ClearSPDelta();
674 break;
675 }
676 case kArchCallWasmFunction: {
677 if (instr->InputAt(0)->IsImmediate()) {
678 Constant constant = i.ToConstant(instr->InputAt(0));
679 Address wasm_code = static_cast<Address>(constant.ToInt32());
680 __ Call(wasm_code, constant.rmode());
681 } else {
682 __ Call(i.InputRegister(0));
683 }
684 RecordCallPosition(instr);
685 frame_access_state()->ClearSPDelta();
686 break;
687 }
688 case kArchTailCallCodeObjectFromJSFunction:
689 case kArchTailCallCodeObject: {
690 if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
691 AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
692 i.TempRegister(0), i.TempRegister(1),
693 i.TempRegister(2));
694 }
695 if (instr->InputAt(0)->IsImmediate()) {
696 __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
697 } else {
698 Register reg = i.InputRegister(0);
699 DCHECK_IMPLIES(
700 HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
701 reg == kJavaScriptCallCodeStartRegister);
702 __ Addu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
703 __ Jump(reg);
704 }
705 frame_access_state()->ClearSPDelta();
706 frame_access_state()->SetFrameAccessToDefault();
707 break;
708 }
709 case kArchTailCallWasm: {
710 if (instr->InputAt(0)->IsImmediate()) {
711 Constant constant = i.ToConstant(instr->InputAt(0));
712 Address wasm_code = static_cast<Address>(constant.ToInt32());
713 __ Jump(wasm_code, constant.rmode());
714 } else {
715 __ Jump(i.InputRegister(0));
716 }
717 frame_access_state()->ClearSPDelta();
718 frame_access_state()->SetFrameAccessToDefault();
719 break;
720 }
721 case kArchTailCallAddress: {
722 CHECK(!instr->InputAt(0)->IsImmediate());
723 Register reg = i.InputRegister(0);
724 DCHECK_IMPLIES(
725 HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
726 reg == kJavaScriptCallCodeStartRegister);
727 __ Jump(reg);
728 frame_access_state()->ClearSPDelta();
729 frame_access_state()->SetFrameAccessToDefault();
730 break;
731 }
732 case kArchCallJSFunction: {
733 Register func = i.InputRegister(0);
734 if (FLAG_debug_code) {
735 // Check the function's context matches the context argument.
736 __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
737 __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
738 Operand(kScratchReg));
739 }
740 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
741 __ lw(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
742 __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
743 __ Call(a2);
744 RecordCallPosition(instr);
745 frame_access_state()->ClearSPDelta();
746 frame_access_state()->SetFrameAccessToDefault();
747 break;
748 }
749 case kArchPrepareCallCFunction: {
750 int const num_parameters = MiscField::decode(instr->opcode());
751 __ PrepareCallCFunction(num_parameters, kScratchReg);
752 // Frame alignment requires using FP-relative frame addressing.
753 frame_access_state()->SetFrameAccessToFP();
754 break;
755 }
756 case kArchSaveCallerRegisters: {
757 fp_mode_ =
758 static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
759 DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
760 // kReturnRegister0 should have been saved before entering the stub.
761 int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
762 DCHECK(IsAligned(bytes, kSystemPointerSize));
763 DCHECK_EQ(0, frame_access_state()->sp_delta());
764 frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
765 DCHECK(!caller_registers_saved_);
766 caller_registers_saved_ = true;
767 break;
768 }
769 case kArchRestoreCallerRegisters: {
770 DCHECK(fp_mode_ ==
771 static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
772 DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
773 // Don't overwrite the returned value.
774 int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
775 frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
776 DCHECK_EQ(0, frame_access_state()->sp_delta());
777 DCHECK(caller_registers_saved_);
778 caller_registers_saved_ = false;
779 break;
780 }
781 case kArchPrepareTailCall:
782 AssemblePrepareTailCall();
783 break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      Label start_call;
      bool isWasmCapiFunction =
          linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
      // Byte distance from start_call to the return address of the C call.
      // It must match the code emitted below exactly; the CHECK_EQ after the
      // call verifies this.
      // from start_call to return address.
      int offset = __ root_array_available() ? 68 : 80;
#if V8_HOST_ARCH_MIPS
      if (__ emit_debug_code()) {
        offset += 16;
      }
#endif
      if (isWasmCapiFunction) {
        // Put the return address in a stack slot.
        // nal puts the address of the following instruction into ra; the Addu
        // then adjusts it to point at the instruction after the C call, so
        // the stack walker sees a valid calling PC for this exit frame.
        __ mov(kScratchReg, ra);
        __ bind(&start_call);
        __ nal();
        __ nop();
        __ Addu(ra, ra, offset - 8);  // 8 = nop + nal
        __ sw(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
        __ mov(ra, kScratchReg);
      }
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      if (isWasmCapiFunction) {
        CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
        RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
      }

      frame_access_state()->SetFrameAccessToDefault();
      // Ideally, we should decrement SP delta to match the change of stack
      // pointer in CallCFunction. However, for certain architectures (e.g.
      // ARM), there may be more strict alignment requirement, causing old SP
      // to be saved on the stack. In those cases, we can not calculate the SP
      // delta statically.
      frame_access_state()->ClearSPDelta();
      if (caller_registers_saved_) {
        // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
        // Here, we assume the sequence to be:
        //   kArchSaveCallerRegisters;
        //   kArchCallCFunction;
        //   kArchRestoreCallerRegisters;
        int bytes =
            __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
        frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
      }
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchBinarySearchSwitch:
      AssembleArchBinarySearchSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchAbortCSAAssert:
      DCHECK(i.InputRegister(0) == a0);
      {
        // We don't actually want to generate a pile of code for this, so just
        // claim there is a stack frame, without generating one.
        FrameScope scope(tasm(), StackFrame::NONE);
        __ Call(
            isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
            RelocInfo::CODE_TARGET);
      }
      __ stop();
      break;
    case kArchDebugBreak:
      __ DebugBreak();
      break;
    case kArchComment:
      // The comment string is passed as an address smuggled in an int32 input.
      __ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      DeoptimizationExit* exit =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      CodeGenResult result = AssembleDeoptimizerCall(exit);
      if (result != kSuccess) return result;
      break;
    }
    case kArchRet:
      AssembleReturn(instr->InputAt(0));
      break;
    case kArchStackPointerGreaterThan:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kArchStackCheckOffset:
      __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        // The caller's fp was saved at offset 0 of the current frame.
        __ lw(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ mov(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                           i.InputDoubleRegister(0), DetermineStubCallMode());
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone())
          OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
                               mode, DetermineStubCallMode());
      // Perform the store, then branch to the out-of-line record-write stub
      // only when the page flags say the store may need a write barrier.
      __ Addu(kScratchReg, object, index);
      __ sw(value, MemOperand(kScratchReg));
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      Register base_reg = offset.from_stack_pointer() ? sp : fp;
      __ Addu(i.OutputRegister(), base_reg, Operand(offset.offset()));
      int alignment = i.InputInt32(1);
      DCHECK(alignment == 0 || alignment == 4 || alignment == 8 ||
             alignment == 16);
      if (FLAG_debug_code && alignment > 0) {
        // Verify that the output_register is properly aligned
        __ And(kScratchReg, i.OutputRegister(),
               Operand(kSystemPointerSize - 1));
        __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
                  Operand(zero_reg));
      }

      if (alignment == 2 * kSystemPointerSize) {
        // 8-byte alignment: if the slot address is odd (pointer-wise), bump
        // the result up by one pointer slot.
        Label done;
        __ Addu(kScratchReg, base_reg, Operand(offset.offset()));
        __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
        __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
        __ Addu(i.OutputRegister(), i.OutputRegister(), kSystemPointerSize);
        __ bind(&done);
      } else if (alignment > 2 * kSystemPointerSize) {
        // Larger alignments: round the address up by (alignment - remainder).
        Label done;
        __ Addu(kScratchReg, base_reg, Operand(offset.offset()));
        __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
        __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
        __ li(kScratchReg2, alignment);
        __ Subu(kScratchReg2, kScratchReg2, Operand(kScratchReg));
        __ Addu(i.OutputRegister(), i.OutputRegister(), kScratchReg2);
        __ bind(&done);
      }
      break;
    }
    case kArchWordPoisonOnSpeculation:
      // Mask the value with the speculation poison register to neutralize
      // loads on mispredicted paths (Spectre mitigation).
      __ And(i.OutputRegister(), i.InputRegister(0),
             kSpeculationPoisonRegister);
      break;
    // The IEEE754 cases below all call out to the corresponding C library
    // routine via the ASSEMBLE_IEEE754_* helper macros.
    case kIeee754Float64Acos:
      ASSEMBLE_IEEE754_UNOP(acos);
      break;
    case kIeee754Float64Acosh:
      ASSEMBLE_IEEE754_UNOP(acosh);
      break;
    case kIeee754Float64Asin:
      ASSEMBLE_IEEE754_UNOP(asin);
      break;
    case kIeee754Float64Asinh:
      ASSEMBLE_IEEE754_UNOP(asinh);
      break;
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cosh:
      ASSEMBLE_IEEE754_UNOP(cosh);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Pow:
      ASSEMBLE_IEEE754_BINOP(pow);
      break;
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Sinh:
      ASSEMBLE_IEEE754_UNOP(sinh);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kIeee754Float64Tanh:
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    // 32-bit integer arithmetic. The *Ovf variants leave an overflow
    // indication in kScratchReg for the following branch/flags consumer.
    case kMipsAdd:
      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsAddOvf:
      __ AddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                     kScratchReg);
      break;
    case kMipsSub:
      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsSubOvf:
      __ SubOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                     kScratchReg);
      break;
    case kMipsMul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsMulOvf:
      __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                     kScratchReg);
      break;
    case kMipsMulHigh:
      // Signed high 32 bits of the 64-bit product.
      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsMulHighU:
      // Unsigned high 32 bits of the 64-bit product.
      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
1046 case kMipsDiv:
1047 __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1048 if (IsMipsArchVariant(kMips32r6)) {
1049 __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1050 } else {
1051 __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
1052 }
1053 break;
1054 case kMipsDivU:
1055 __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1056 if (IsMipsArchVariant(kMips32r6)) {
1057 __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1058 } else {
1059 __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
1060 }
1061 break;
    case kMipsMod:
      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsModU:
      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsAnd:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsOr:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsNor:
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        // The selector only emits an immediate operand of 0 for Nor, which
        // makes it a plain bitwise-not of input 0.
        DCHECK_EQ(0, i.InputOperand(1).immediate());
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMipsXor:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsClz:
      // Count leading zeros.
      __ Clz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMipsCtz: {
      // Count trailing zeros.
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Ctz(dst, src);
    } break;
    case kMipsPopcnt: {
      // Population count (number of set bits).
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Popcnt(dst, src);
    } break;
    // Shifts: register-amount and immediate-amount forms use different
    // native instructions (sllv/sll etc.).
    case kMipsShl:
      if (instr->InputAt(1)->IsRegister()) {
        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ sll(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMipsShr:
      if (instr->InputAt(1)->IsRegister()) {
        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ srl(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMipsSar:
      if (instr->InputAt(1)->IsRegister()) {
        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ sra(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    // 64-bit shifts implemented on register pairs (low, high). If the high
    // word of the result is unused, a temp register is substituted so the
    // helper still has somewhere to write.
    case kMipsShlPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsRegister()) {
        __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), i.InputRegister(2), kScratchReg,
                   kScratchReg2);
      } else {
        uint32_t imm = i.InputOperand(2).immediate();
        __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), imm, kScratchReg);
      }
    } break;
    case kMipsShrPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsRegister()) {
        __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), i.InputRegister(2), kScratchReg,
                   kScratchReg2);
      } else {
        uint32_t imm = i.InputOperand(2).immediate();
        __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), imm, kScratchReg);
      }
    } break;
    case kMipsSarPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsRegister()) {
        __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), i.InputRegister(2), kScratchReg,
                   kScratchReg2);
      } else {
        uint32_t imm = i.InputOperand(2).immediate();
        __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                   i.InputRegister(1), imm, kScratchReg);
      }
    } break;
    case kMipsExt:
      // Bit-field extract: (pos, size) from inputs 1 and 2.
      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
             i.InputInt8(2));
      break;
    case kMipsIns:
      // Bit-field insert of input 0 into the output at (pos, size).
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
               i.InputInt8(2));
      }
      break;
    case kMipsRor:
      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsTst:
      // Result goes into kScratchReg for the following flags consumer.
      __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsCmp:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMipsMov:
      // TODO(plind): Should we combine mov/li like this, or use separate instr?
      //    - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
      if (HasRegisterInput(instr, 0)) {
        __ mov(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ li(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kMipsLsa:
      // out = input1 + (input0 << shift); shift must be an immediate.
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputInt8(2));
      break;
    case kMipsCmpS: {
      FPURegister left = i.InputOrZeroSingleRegister(0);
      FPURegister right = i.InputOrZeroSingleRegister(1);
      bool predicate;
      FPUCondition cc =
          FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());

      // Lazily materialize the FP zero register if either operand is the
      // zero-constant placeholder.
      if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
          !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }

      __ CompareF32(cc, left, right);
    } break;
    case kMipsAddS:
      // TODO(plind): add special case: combine mult & add.
      __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsSubS:
      __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsMulS:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsDivS:
      __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsModS: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(tasm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(i.OutputSingleRegister());
      break;
    }
    case kMipsAbsS:
      __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMipsSqrtS: {
      __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsMaxS:
      __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsMinS:
      __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsCmpD: {
      FPURegister left = i.InputOrZeroDoubleRegister(0);
      FPURegister right = i.InputOrZeroDoubleRegister(1);
      bool predicate;
      FPUCondition cc =
          FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
      if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
          !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ CompareF64(cc, left, right);
    } break;
    // 64-bit integer add/sub/mul on (low, high) register pairs.
    case kMipsAddPair:
      __ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
                 i.InputRegister(1), i.InputRegister(2), i.InputRegister(3),
                 kScratchReg, kScratchReg2);
      break;
    case kMipsSubPair:
      __ SubPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
                 i.InputRegister(1), i.InputRegister(2), i.InputRegister(3),
                 kScratchReg, kScratchReg2);
      break;
    case kMipsMulPair: {
      __ MulPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
                 i.InputRegister(1), i.InputRegister(2), i.InputRegister(3),
                 kScratchReg, kScratchReg2);
    } break;
    case kMipsAddD:
      // TODO(plind): add special case: combine mult & add.
      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsSubD:
      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    // Fused multiply-add / multiply-subtract helpers; kScratchDoubleReg is
    // used as a temporary on variants without a native fused instruction.
    case kMipsMaddS:
      __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
                i.InputFloatRegister(1), i.InputFloatRegister(2),
                kScratchDoubleReg);
      break;
    case kMipsMaddD:
      __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
                kScratchDoubleReg);
      break;
    case kMipsMsubS:
      __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
                i.InputFloatRegister(1), i.InputFloatRegister(2),
                kScratchDoubleReg);
      break;
    case kMipsMsubD:
      __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
                kScratchDoubleReg);
      break;
    case kMipsMulD:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsDivD:
      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsModD: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(tasm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      break;
    }
    case kMipsAbsD:
      __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMipsNegS:
      __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMipsNegD:
      __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMipsSqrtD: {
      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsMaxD:
      __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsMinD:
      __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    // FP rounding to integral values (result stays in FP format).
    case kMipsFloat64RoundDown: {
      __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsFloat32RoundDown: {
      __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMipsFloat64RoundTruncate: {
      __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsFloat32RoundTruncate: {
      __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMipsFloat64RoundUp: {
      __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsFloat32RoundUp: {
      __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMipsFloat64RoundTiesEven: {
      __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsFloat32RoundTiesEven: {
      __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    // Min/max with IEEE NaN handling delegated to out-of-line code when the
    // fast path cannot produce the correct result.
    case kMipsFloat32Max: {
      FPURegister dst = i.OutputSingleRegister();
      FPURegister src1 = i.InputSingleRegister(0);
      FPURegister src2 = i.InputSingleRegister(1);
      auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2);
      __ Float32Max(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMipsFloat64Max: {
      DoubleRegister dst = i.OutputDoubleRegister();
      DoubleRegister src1 = i.InputDoubleRegister(0);
      DoubleRegister src2 = i.InputDoubleRegister(1);
      auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2);
      __ Float64Max(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMipsFloat32Min: {
      FPURegister dst = i.OutputSingleRegister();
      FPURegister src1 = i.InputSingleRegister(0);
      FPURegister src2 = i.InputSingleRegister(1);
      auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2);
      __ Float32Min(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMipsFloat64Min: {
      DoubleRegister dst = i.OutputDoubleRegister();
      DoubleRegister src1 = i.InputDoubleRegister(0);
      DoubleRegister src2 = i.InputDoubleRegister(1);
      auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2);
      __ Float64Min(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    // FP format conversions. Single and double share the FPU register file
    // on MIPS, hence the mixed Single/Double accessors below.
    case kMipsCvtSD: {
      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsCvtDS: {
      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMipsCvtDW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMipsCvtSW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_s_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMipsCvtSUw: {
      // uint32 -> float32 via an exact uint32 -> float64 conversion.
      FPURegister scratch = kScratchDoubleReg;
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
      __ cvt_s_d(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    }
    case kMipsCvtDUw: {
      FPURegister scratch = kScratchDoubleReg;
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
      break;
    }
    // FP -> int32 conversions with the four rounding modes.
    case kMipsFloorWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ Floor_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsCeilWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ Ceil_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsRoundWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ Round_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsTruncWD: {
      FPURegister scratch = kScratchDoubleReg;
      // Other arches use round to zero here, so we follow.
      __ Trunc_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsFloorWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsCeilWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsRoundWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsTruncWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ trunc_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
      // because INT32_MIN allows easier out-of-bounds detection.
      // The Addu/Slt pair detects result == INT32_MAX (the hardware
      // saturation value) via signed-overflow of result + 1.
      __ Addu(kScratchReg, i.OutputRegister(), 1);
      __ Slt(kScratchReg2, kScratchReg, i.OutputRegister());
      __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
      break;
    }
    case kMipsTruncUwD: {
      FPURegister scratch = kScratchDoubleReg;
      __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
      break;
    }
    case kMipsTruncUwS: {
      FPURegister scratch = kScratchDoubleReg;
      __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
      // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
      // because 0 allows easier out-of-bounds detection.
      __ Addu(kScratchReg, i.OutputRegister(), 1);
      __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
      break;
    }
    // Raw 32-bit word moves between GPRs and halves of an FP double.
    case kMipsFloat64ExtractLowWord32:
      __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMipsFloat64ExtractHighWord32:
      __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMipsFloat64InsertLowWord32:
      __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    case kMipsFloat64InsertHighWord32:
      __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    case kMipsFloat64SilenceNaN:
      __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;

    // ... more basic instructions ...
    // Memory access. The U-prefixed variants handle unaligned addresses.
    // Stores take their value from input index 2 (after base/offset).
    case kMipsSeb:
      __ Seb(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMipsSeh:
      __ Seh(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMipsLbu:
      __ lbu(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kMipsLb:
      __ lb(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kMipsSb:
      __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMipsLhu:
      __ lhu(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kMipsUlhu:
      __ Ulhu(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kMipsLh:
      __ lh(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kMipsUlh:
      __ Ulh(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kMipsSh:
      __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMipsUsh:
      __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
      break;
    case kMipsLw:
      __ lw(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kMipsUlw:
      __ Ulw(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kMipsSw:
      __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMipsUsw:
      __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMipsLwc1: {
      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
      break;
    }
    case kMipsUlwc1: {
      __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
      break;
    }
    case kMipsSwc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroSingleRegister(index);
      // Materialize the FP zero register lazily when storing a zero constant.
      if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ swc1(ft, operand);
      break;
    }
    case kMipsUswc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroSingleRegister(index);
      if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Uswc1(ft, operand, kScratchReg);
      break;
    }
    case kMipsLdc1:
      __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kMipsUldc1:
      __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
      break;
    case kMipsSdc1: {
      FPURegister ft = i.InputOrZeroDoubleRegister(2);
      if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Sdc1(ft, i.MemoryOperand());
      break;
    }
    case kMipsUsdc1: {
      FPURegister ft = i.InputOrZeroDoubleRegister(2);
      if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
      break;
    }
    case kMipsSync: {
      // Full memory barrier.
      __ sync();
      break;
    }
    case kMipsPush:
      // Push a GP or FP value; the SP delta bookkeeping must mirror the
      // actual stack pointer adjustment in pointer-size units.
      if (instr->InputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
        switch (op->representation()) {
          case MachineRepresentation::kFloat32:
            // Store below SP first, then decrement SP.
            __ swc1(i.InputFloatRegister(0), MemOperand(sp, -kFloatSize));
            __ Subu(sp, sp, Operand(kFloatSize));
            frame_access_state()->IncreaseSPDelta(kFloatSize /
                                                  kSystemPointerSize);
            break;
          case MachineRepresentation::kFloat64:
            __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
            __ Subu(sp, sp, Operand(kDoubleSize));
            frame_access_state()->IncreaseSPDelta(kDoubleSize /
                                                  kSystemPointerSize);
            break;
          default: {
            UNREACHABLE();
            break;
          }
        }
      } else {
        __ Push(i.InputRegister(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      break;
    case kMipsPeek: {
      // Load a caller-pushed value back out of the frame by reverse slot
      // index (counted from the end of the frame).
      // The incoming value is 0-based, but we need a 1-based value.
      int reverse_slot = i.InputInt32(0) + 1;
      int offset =
          FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
      if (instr->OutputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
        } else {
          DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
          __ lwc1(i.OutputSingleRegister(0), MemOperand(fp, offset));
        }
      } else {
        __ lw(i.OutputRegister(0), MemOperand(fp, offset));
      }
      break;
    }
    case kMipsStackClaim: {
      __ Subu(sp, sp, Operand(i.InputInt32(0)));
      frame_access_state()->IncreaseSPDelta(i.InputInt32(0) /
                                            kSystemPointerSize);
      break;
    }
    case kMipsStoreToStackSlot: {
      if (instr->InputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
        } else if (op->representation() == MachineRepresentation::kFloat32) {
          __ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1)));
        } else {
          DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
          CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
          __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1)));
        }
      } else {
        __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
      }
      break;
    }
    case kMipsByteSwap32: {
      __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
      break;
    }
    // MSA (SIMD) load-splat: load one lane-sized element and replicate it
    // across all lanes of the destination vector.
    case kMipsS8x16LoadSplat: {
      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
      __ lb(kScratchReg, i.MemoryOperand());
      __ fill_b(i.OutputSimd128Register(), kScratchReg);
      break;
    }
    case kMipsS16x8LoadSplat: {
      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
      __ lh(kScratchReg, i.MemoryOperand());
      __ fill_h(i.OutputSimd128Register(), kScratchReg);
      break;
    }
    case kMipsS32x4LoadSplat: {
      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
      __ Lw(kScratchReg, i.MemoryOperand());
      __ fill_w(i.OutputSimd128Register(), kScratchReg);
      break;
    }
    case kMipsS64x2LoadSplat: {
      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
      // No 64-bit GP loads on MIPS32: assemble the 64-bit lane from two
      // 32-bit word loads, then interleave low/high into both lanes.
      Simd128Register dst = i.OutputSimd128Register();
      MemOperand memLow = i.MemoryOperand();
      MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
      __ Lw(kScratchReg, memLow);
      __ fill_w(dst, kScratchReg);
      __ Lw(kScratchReg, memHigh);
      __ fill_w(kSimd128ScratchReg, kScratchReg);
      __ ilvr_w(dst, kSimd128ScratchReg, dst);
      break;
    }
    // Load-extend: load 8 bytes and widen each element to the next lane
    // size, sign-extending (clti_s + ilvr with the sign mask) or
    // zero-extending (ilvr with the zero register).
    case kMipsI16x8Load8x8S: {
      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      MemOperand memLow = i.MemoryOperand();
      MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
      __ Lw(kScratchReg, memLow);
      __ fill_w(dst, kScratchReg);
      __ Lw(kScratchReg, memHigh);
      __ fill_w(kSimd128ScratchReg, kScratchReg);
      __ ilvr_w(dst, kSimd128ScratchReg, dst);
      __ clti_s_b(kSimd128ScratchReg, dst, 0);
      __ ilvr_b(dst, kSimd128ScratchReg, dst);
      break;
    }
    case kMipsI16x8Load8x8U: {
      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      MemOperand memLow = i.MemoryOperand();
      MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
      __ Lw(kScratchReg, memLow);
      __ fill_w(dst, kScratchReg);
      __ Lw(kScratchReg, memHigh);
      __ fill_w(kSimd128ScratchReg, kScratchReg);
      __ ilvr_w(dst, kSimd128ScratchReg, dst);
      __ ilvr_b(dst, kSimd128RegZero, dst);
      break;
    }
    case kMipsI32x4Load16x4S: {
      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      MemOperand memLow = i.MemoryOperand();
      MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
      __ Lw(kScratchReg, memLow);
      __ fill_w(dst, kScratchReg);
      __ Lw(kScratchReg, memHigh);
      __ fill_w(kSimd128ScratchReg, kScratchReg);
      __ ilvr_w(dst, kSimd128ScratchReg, dst);
      __ clti_s_h(kSimd128ScratchReg, dst, 0);
      __ ilvr_h(dst, kSimd128ScratchReg, dst);
      break;
    }
    case kMipsI32x4Load16x4U: {
      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      MemOperand memLow = i.MemoryOperand();
      MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
      __ Lw(kScratchReg, memLow);
      __ fill_w(dst, kScratchReg);
      __ Lw(kScratchReg, memHigh);
      __ fill_w(kSimd128ScratchReg, kScratchReg);
      __ ilvr_w(dst, kSimd128ScratchReg, dst);
      __ ilvr_h(dst, kSimd128RegZero, dst);
      break;
    }
1800 case kMipsI64x2Load32x2S: {
1801 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1802 Simd128Register dst = i.OutputSimd128Register();
1803 MemOperand memLow = i.MemoryOperand();
1804 MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
1805 __ Lw(kScratchReg, memLow);
1806 __ fill_w(dst, kScratchReg);
1807 __ Lw(kScratchReg, memHigh);
1808 __ fill_w(kSimd128ScratchReg, kScratchReg);
1809 __ ilvr_w(dst, kSimd128ScratchReg, dst);
1810 __ clti_s_w(kSimd128ScratchReg, dst, 0);
1811 __ ilvr_w(dst, kSimd128ScratchReg, dst);
1812 break;
1813 }
1814 case kMipsI64x2Load32x2U: {
1815 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1816 Simd128Register dst = i.OutputSimd128Register();
1817 MemOperand memLow = i.MemoryOperand();
1818 MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
1819 __ Lw(kScratchReg, memLow);
1820 __ fill_w(dst, kScratchReg);
1821 __ Lw(kScratchReg, memHigh);
1822 __ fill_w(kSimd128ScratchReg, kScratchReg);
1823 __ ilvr_w(dst, kSimd128ScratchReg, dst);
1824 __ ilvr_w(dst, kSimd128RegZero, dst);
1825 break;
1826 }
1827 case kWord32AtomicLoadInt8:
1828 ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
1829 break;
1830 case kWord32AtomicLoadUint8:
1831 ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
1832 break;
1833 case kWord32AtomicLoadInt16:
1834 ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
1835 break;
1836 case kWord32AtomicLoadUint16:
1837 ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
1838 break;
1839 case kWord32AtomicLoadWord32:
1840 ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
1841 break;
1842 case kWord32AtomicStoreWord8:
1843 ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
1844 break;
1845 case kWord32AtomicStoreWord16:
1846 ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
1847 break;
1848 case kWord32AtomicStoreWord32:
1849 ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
1850 break;
1851 case kWord32AtomicExchangeInt8:
1852 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
1853 break;
1854 case kWord32AtomicExchangeUint8:
1855 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
1856 break;
1857 case kWord32AtomicExchangeInt16:
1858 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
1859 break;
1860 case kWord32AtomicExchangeUint16:
1861 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
1862 break;
1863 case kWord32AtomicExchangeWord32:
1864 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
1865 break;
1866 case kWord32AtomicCompareExchangeInt8:
1867 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
1868 break;
1869 case kWord32AtomicCompareExchangeUint8:
1870 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
1871 break;
1872 case kWord32AtomicCompareExchangeInt16:
1873 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
1874 break;
1875 case kWord32AtomicCompareExchangeUint16:
1876 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
1877 break;
1878 case kWord32AtomicCompareExchangeWord32:
1879 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
1880 break;
1881 #define ATOMIC_BINOP_CASE(op, inst) \
1882 case kWord32Atomic##op##Int8: \
1883 ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \
1884 break; \
1885 case kWord32Atomic##op##Uint8: \
1886 ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \
1887 break; \
1888 case kWord32Atomic##op##Int16: \
1889 ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \
1890 break; \
1891 case kWord32Atomic##op##Uint16: \
1892 ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \
1893 break; \
1894 case kWord32Atomic##op##Word32: \
1895 ASSEMBLE_ATOMIC_BINOP(inst); \
1896 break;
1897 ATOMIC_BINOP_CASE(Add, Addu)
1898 ATOMIC_BINOP_CASE(Sub, Subu)
1899 ATOMIC_BINOP_CASE(And, And)
1900 ATOMIC_BINOP_CASE(Or, Or)
1901 ATOMIC_BINOP_CASE(Xor, Xor)
1902 #undef ATOMIC_BINOP_CASE
    case kMipsWord32AtomicPairLoad: {
      if (IsMipsArchVariant(kMips32r6)) {
        // r6: the paired load-linked (ll + llx) reads the 64-bit value
        // atomically. If no output is live there is nothing to do.
        if (instr->OutputCount() > 0) {
          // When only one half of the pair is used, the dead half is routed
          // into a temp register instead of a second output.
          Register second_output = instr->OutputCount() == 2
                                       ? i.OutputRegister(1)
                                       : i.TempRegister(1);
          __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
          __ llx(second_output, MemOperand(a0, 4));  // word at offset 4
          __ ll(i.OutputRegister(0), MemOperand(a0, 0));  // word at offset 0
          __ sync();
        }
      } else {
        // Pre-r6 has no llx, so fall back to a C runtime helper. The result
        // comes back in v0/v1, which are therefore excluded from the
        // caller-saved push/pop.
        FrameScope scope(tasm(), StackFrame::MANUAL);
        __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
        __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
        __ PrepareCallCFunction(1, 0, kScratchReg);
        __ CallCFunction(ExternalReference::atomic_pair_load_function(), 1, 0);
        __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
      }
      break;
    }
    case kMipsWord32AtomicPairStore: {
      if (IsMipsArchVariant(kMips32r6)) {
        // r6: ll/llx + sc/scx retry loop implements a 64-bit atomic store.
        Label store;
        __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
        __ sync();
        __ bind(&store);
        // The paired load-linked is needed even though the loaded values are
        // discarded: it establishes the link for the conditional store.
        __ llx(i.TempRegister(2), MemOperand(a0, 4));
        __ ll(i.TempRegister(1), MemOperand(a0, 0));
        // The low input is staged in a temp because sc overwrites its source
        // register with the success flag, and the inputs must survive a
        // retry.
        __ Move(i.TempRegister(1), i.InputRegister(2));
        __ scx(i.InputRegister(3), MemOperand(a0, 4));
        __ sc(i.TempRegister(1), MemOperand(a0, 0));
        // sc leaves 0 in its register on failure -> loop and retry.
        __ BranchShort(&store, eq, i.TempRegister(1), Operand(zero_reg));
        __ sync();
      } else {
        // Pre-r6: delegate to the C runtime helper (3 word-sized arguments;
        // address is materialized in a0, value words are presumably placed
        // by the instruction's register constraints — confirm against the
        // instruction selector).
        FrameScope scope(tasm(), StackFrame::MANUAL);
        __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
        __ PushCallerSaved(kDontSaveFPRegs);
        __ PrepareCallCFunction(3, 0, kScratchReg);
        __ CallCFunction(ExternalReference::atomic_pair_store_function(), 3, 0);
        __ PopCallerSaved(kDontSaveFPRegs);
      }
      break;
    }
1947 #define ATOMIC64_BINOP_ARITH_CASE(op, instr, external) \
1948 case kMipsWord32AtomicPair##op: \
1949 ASSEMBLE_ATOMIC64_ARITH_BINOP(instr, external); \
1950 break;
1951 ATOMIC64_BINOP_ARITH_CASE(Add, AddPair, atomic_pair_add_function)
1952 ATOMIC64_BINOP_ARITH_CASE(Sub, SubPair, atomic_pair_sub_function)
1953 #undef ATOMIC64_BINOP_ARITH_CASE
1954 #define ATOMIC64_BINOP_LOGIC_CASE(op, instr, external) \
1955 case kMipsWord32AtomicPair##op: \
1956 ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr, external); \
1957 break;
1958 ATOMIC64_BINOP_LOGIC_CASE(And, AndPair, atomic_pair_and_function)
1959 ATOMIC64_BINOP_LOGIC_CASE(Or, OrPair, atomic_pair_or_function)
1960 ATOMIC64_BINOP_LOGIC_CASE(Xor, XorPair, atomic_pair_xor_function)
1961 #undef ATOMIC64_BINOP_LOGIC_CASE
    case kMipsWord32AtomicPairExchange:
      if (IsMipsArchVariant(kMips32r6)) {
        // r6: ll/llx + sc/scx retry loop; yields the previous 64-bit value.
        Label binop;
        // Either half of the old value may be dead; dead halves are routed
        // into temps so the loop structure stays the same.
        Register oldval_low =
            instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1);
        Register oldval_high =
            instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2);
        __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
        __ sync();
        __ bind(&binop);
        __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));
        __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));
        // sc destroys its source register (success flag), so the new low
        // word is staged in a temp; inputs 2/3 hold the new value.
        __ Move(i.TempRegister(1), i.InputRegister(2));
        __ scx(i.InputRegister(3), MemOperand(i.TempRegister(0), 4));
        __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));
        // sc leaves 0 on failure -> retry.
        __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));
        __ sync();
      } else {
        // Pre-r6: C helper; the result is returned in v0/v1, which are
        // excluded from the caller-saved save/restore.
        FrameScope scope(tasm(), StackFrame::MANUAL);
        __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
        __ PrepareCallCFunction(3, 0, kScratchReg);
        __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
        __ CallCFunction(ExternalReference::atomic_pair_exchange_function(), 3,
                         0);
        __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
      }
      break;
    case kMipsWord32AtomicPairCompareExchange: {
      if (IsMipsArchVariant(kMips32r6)) {
        Label compareExchange, exit;
        // Dead output halves fall back to scratch registers.
        Register oldval_low =
            instr->OutputCount() >= 1 ? i.OutputRegister(0) : kScratchReg;
        Register oldval_high =
            instr->OutputCount() >= 2 ? i.OutputRegister(1) : kScratchReg2;
        __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
        __ sync();
        __ bind(&compareExchange);
        __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));
        __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));
        // Expected value is in inputs 2 (low) / 3 (high); bail out without
        // storing if either half differs from the loaded value.
        __ BranchShort(&exit, ne, i.InputRegister(2), Operand(oldval_low));
        __ BranchShort(&exit, ne, i.InputRegister(3), Operand(oldval_high));
        // New value is in inputs 4 (low) / 5 (high). sc clobbers its source
        // register with the success flag, so the low word goes through
        // kScratchReg.
        __ mov(kScratchReg, i.InputRegister(4));
        __ scx(i.InputRegister(5), MemOperand(i.TempRegister(0), 4));
        __ sc(kScratchReg, MemOperand(i.TempRegister(0), 0));
        // sc leaves 0 on failure -> retry the whole compare-exchange.
        __ BranchShort(&compareExchange, eq, kScratchReg, Operand(zero_reg));
        __ bind(&exit);
        __ sync();
      } else {
        // Pre-r6: C helper with 5 word arguments; the 5th (input 5) does not
        // fit in a0-a3 and is stored into the outgoing stack argument slot.
        FrameScope scope(tasm(), StackFrame::MANUAL);
        __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
        __ PrepareCallCFunction(5, 0, kScratchReg);
        __ addu(a0, i.InputRegister(0), i.InputRegister(1));
        __ sw(i.InputRegister(5), MemOperand(sp, 16));
        __ CallCFunction(
            ExternalReference::atomic_pair_compare_exchange_function(), 5, 0);
        __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
      }
      break;
    }
2021 case kMipsS128Zero: {
2022 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2023 __ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
2024 i.OutputSimd128Register());
2025 break;
2026 }
2027 case kMipsI32x4Splat: {
2028 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2029 __ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
2030 break;
2031 }
2032 case kMipsI32x4ExtractLane: {
2033 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2034 __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
2035 i.InputInt8(1));
2036 break;
2037 }
2038 case kMipsI32x4ReplaceLane: {
2039 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2040 Simd128Register src = i.InputSimd128Register(0);
2041 Simd128Register dst = i.OutputSimd128Register();
2042 if (src != dst) {
2043 __ move_v(dst, src);
2044 }
2045 __ insert_w(dst, i.InputInt8(1), i.InputRegister(2));
2046 break;
2047 }
2048 case kMipsI32x4Add: {
2049 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2050 __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2051 i.InputSimd128Register(1));
2052 break;
2053 }
2054 case kMipsI32x4Sub: {
2055 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2056 __ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2057 i.InputSimd128Register(1));
2058 break;
2059 }
2060 case kMipsF64x2Abs: {
2061 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2062 __ bclri_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
2063 break;
2064 }
2065 case kMipsF64x2Neg: {
2066 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2067 __ bnegi_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
2068 break;
2069 }
2070 case kMipsF64x2Sqrt: {
2071 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2072 __ fsqrt_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
2073 break;
2074 }
2075 case kMipsF64x2Add: {
2076 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2077 ASSEMBLE_F64X2_ARITHMETIC_BINOP(fadd_d);
2078 break;
2079 }
2080 case kMipsF64x2Sub: {
2081 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2082 ASSEMBLE_F64X2_ARITHMETIC_BINOP(fsub_d);
2083 break;
2084 }
2085 case kMipsF64x2Mul: {
2086 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2087 ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmul_d);
2088 break;
2089 }
2090 case kMipsF64x2Div: {
2091 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2092 ASSEMBLE_F64X2_ARITHMETIC_BINOP(fdiv_d);
2093 break;
2094 }
2095 case kMipsF64x2Min: {
2096 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2097 ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmin_d);
2098 break;
2099 }
2100 case kMipsF64x2Max: {
2101 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2102 ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmax_d);
2103 break;
2104 }
2105 case kMipsF64x2Eq: {
2106 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2107 __ fceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2108 i.InputSimd128Register(1));
2109 break;
2110 }
2111 case kMipsF64x2Ne: {
2112 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2113 __ fcne_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2114 i.InputSimd128Register(1));
2115 break;
2116 }
2117 case kMipsF64x2Lt: {
2118 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2119 __ fclt_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2120 i.InputSimd128Register(1));
2121 break;
2122 }
2123 case kMipsF64x2Le: {
2124 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2125 __ fcle_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2126 i.InputSimd128Register(1));
2127 break;
2128 }
2129 case kMipsF64x2Splat: {
2130 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2131 Simd128Register dst = i.OutputSimd128Register();
2132 __ FmoveLow(kScratchReg, i.InputDoubleRegister(0));
2133 __ insert_w(dst, 0, kScratchReg);
2134 __ insert_w(dst, 2, kScratchReg);
2135 __ FmoveHigh(kScratchReg, i.InputDoubleRegister(0));
2136 __ insert_w(dst, 1, kScratchReg);
2137 __ insert_w(dst, 3, kScratchReg);
2138 break;
2139 }
2140 case kMipsF64x2ExtractLane: {
2141 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2142 __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1) * 2);
2143 __ FmoveLow(i.OutputDoubleRegister(), kScratchReg);
2144 __ copy_u_w(kScratchReg, i.InputSimd128Register(0),
2145 i.InputInt8(1) * 2 + 1);
2146 __ FmoveHigh(i.OutputDoubleRegister(), kScratchReg);
2147 break;
2148 }
2149 case kMipsF64x2ReplaceLane: {
2150 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2151 Simd128Register src = i.InputSimd128Register(0);
2152 Simd128Register dst = i.OutputSimd128Register();
2153 if (src != dst) {
2154 __ move_v(dst, src);
2155 }
2156 __ FmoveLow(kScratchReg, i.InputDoubleRegister(2));
2157 __ insert_w(dst, i.InputInt8(1) * 2, kScratchReg);
2158 __ FmoveHigh(kScratchReg, i.InputDoubleRegister(2));
2159 __ insert_w(dst, i.InputInt8(1) * 2 + 1, kScratchReg);
2160 break;
2161 }
2162 case kMipsI64x2Add: {
2163 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2164 __ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2165 i.InputSimd128Register(1));
2166 break;
2167 }
2168 case kMipsI64x2Sub: {
2169 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2170 __ subv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2171 i.InputSimd128Register(1));
2172 break;
2173 }
2174 case kMipsI64x2Mul: {
2175 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2176 __ mulv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2177 i.InputSimd128Register(1));
2178 break;
2179 }
2180 case kMipsI64x2Neg: {
2181 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2182 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2183 __ subv_d(i.OutputSimd128Register(), kSimd128RegZero,
2184 i.InputSimd128Register(0));
2185 break;
2186 }
2187 case kMipsI64x2Shl: {
2188 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2189 __ slli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2190 i.InputInt6(1));
2191 break;
2192 }
2193 case kMipsI64x2ShrS: {
2194 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2195 __ srai_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2196 i.InputInt6(1));
2197 break;
2198 }
2199 case kMipsI64x2ShrU: {
2200 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2201 __ srli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2202 i.InputInt6(1));
2203 break;
2204 }
2205 case kMipsF32x4Splat: {
2206 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2207 __ FmoveLow(kScratchReg, i.InputSingleRegister(0));
2208 __ fill_w(i.OutputSimd128Register(), kScratchReg);
2209 break;
2210 }
2211 case kMipsF32x4ExtractLane: {
2212 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2213 __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
2214 __ FmoveLow(i.OutputSingleRegister(), kScratchReg);
2215 break;
2216 }
2217 case kMipsF32x4ReplaceLane: {
2218 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2219 Simd128Register src = i.InputSimd128Register(0);
2220 Simd128Register dst = i.OutputSimd128Register();
2221 if (src != dst) {
2222 __ move_v(dst, src);
2223 }
2224 __ FmoveLow(kScratchReg, i.InputSingleRegister(2));
2225 __ insert_w(dst, i.InputInt8(1), kScratchReg);
2226 break;
2227 }
2228 case kMipsF32x4SConvertI32x4: {
2229 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2230 __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2231 break;
2232 }
2233 case kMipsF32x4UConvertI32x4: {
2234 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2235 __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2236 break;
2237 }
2238 case kMipsI32x4Mul: {
2239 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2240 __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2241 i.InputSimd128Register(1));
2242 break;
2243 }
2244 case kMipsI32x4MaxS: {
2245 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2246 __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2247 i.InputSimd128Register(1));
2248 break;
2249 }
2250 case kMipsI32x4MinS: {
2251 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2252 __ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2253 i.InputSimd128Register(1));
2254 break;
2255 }
2256 case kMipsI32x4Eq: {
2257 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2258 __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2259 i.InputSimd128Register(1));
2260 break;
2261 }
2262 case kMipsI32x4Ne: {
2263 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2264 Simd128Register dst = i.OutputSimd128Register();
2265 __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
2266 __ nor_v(dst, dst, dst);
2267 break;
2268 }
2269 case kMipsI32x4Shl: {
2270 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2271 __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2272 i.InputInt5(1));
2273 break;
2274 }
2275 case kMipsI32x4ShrS: {
2276 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2277 __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2278 i.InputInt5(1));
2279 break;
2280 }
2281 case kMipsI32x4ShrU: {
2282 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2283 __ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2284 i.InputInt5(1));
2285 break;
2286 }
2287 case kMipsI32x4MaxU: {
2288 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2289 __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2290 i.InputSimd128Register(1));
2291 break;
2292 }
2293 case kMipsI32x4MinU: {
2294 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2295 __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2296 i.InputSimd128Register(1));
2297 break;
2298 }
    case kMipsS128Select: {
      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
      // bsel_v uses the bits already in the destination as the mask (the
      // instruction selector pins the mask, input 0, to the output register):
      // mask bit 0 selects from the first source (input 2), mask bit 1 from
      // the second (input 1) — hence the reversed operand order below.
      DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0));
      __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
                i.InputSimd128Register(1));
      break;
    }
2306 case kMipsS128AndNot: {
2307 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2308 Simd128Register dst = i.OutputSimd128Register();
2309 __ nor_v(dst, i.InputSimd128Register(1), i.InputSimd128Register(1));
2310 __ and_v(dst, dst, i.InputSimd128Register(0));
2311 break;
2312 }
2313 case kMipsF32x4Abs: {
2314 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2315 __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
2316 break;
2317 }
2318 case kMipsF32x4Neg: {
2319 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2320 __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
2321 break;
2322 }
2323 case kMipsF32x4Sqrt: {
2324 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2325 __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2326 break;
2327 }
2328 case kMipsF32x4RecipApprox: {
2329 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2330 __ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2331 break;
2332 }
2333 case kMipsF32x4RecipSqrtApprox: {
2334 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2335 __ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2336 break;
2337 }
2338 case kMipsF32x4Add: {
2339 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2340 __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2341 i.InputSimd128Register(1));
2342 break;
2343 }
2344 case kMipsF32x4Sub: {
2345 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2346 __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2347 i.InputSimd128Register(1));
2348 break;
2349 }
2350 case kMipsF32x4Mul: {
2351 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2352 __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2353 i.InputSimd128Register(1));
2354 break;
2355 }
2356 case kMipsF32x4Div: {
2357 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2358 __ fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2359 i.InputSimd128Register(1));
2360 break;
2361 }
2362 case kMipsF32x4Max: {
2363 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2364 __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2365 i.InputSimd128Register(1));
2366 break;
2367 }
2368 case kMipsF32x4Min: {
2369 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2370 __ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2371 i.InputSimd128Register(1));
2372 break;
2373 }
2374 case kMipsF32x4Eq: {
2375 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2376 __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2377 i.InputSimd128Register(1));
2378 break;
2379 }
2380 case kMipsF32x4Ne: {
2381 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2382 __ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2383 i.InputSimd128Register(1));
2384 break;
2385 }
2386 case kMipsF32x4Lt: {
2387 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2388 __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2389 i.InputSimd128Register(1));
2390 break;
2391 }
2392 case kMipsF32x4Le: {
2393 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2394 __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2395 i.InputSimd128Register(1));
2396 break;
2397 }
2398 case kMipsI32x4SConvertF32x4: {
2399 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2400 __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2401 break;
2402 }
2403 case kMipsI32x4UConvertF32x4: {
2404 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2405 __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2406 break;
2407 }
2408 case kMipsI32x4Neg: {
2409 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2410 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2411 __ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
2412 i.InputSimd128Register(0));
2413 break;
2414 }
2415 case kMipsI32x4GtS: {
2416 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2417 __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2418 i.InputSimd128Register(0));
2419 break;
2420 }
2421 case kMipsI32x4GeS: {
2422 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2423 __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2424 i.InputSimd128Register(0));
2425 break;
2426 }
2427 case kMipsI32x4GtU: {
2428 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2429 __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2430 i.InputSimd128Register(0));
2431 break;
2432 }
2433 case kMipsI32x4GeU: {
2434 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2435 __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2436 i.InputSimd128Register(0));
2437 break;
2438 }
2439 case kMipsI32x4Abs: {
2440 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2441 __ asub_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2442 kSimd128RegZero);
2443 break;
2444 }
2445 case kMipsI16x8Splat: {
2446 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2447 __ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
2448 break;
2449 }
2450 case kMipsI16x8ExtractLaneU: {
2451 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2452 __ copy_u_h(i.OutputRegister(), i.InputSimd128Register(0),
2453 i.InputInt8(1));
2454 break;
2455 }
2456 case kMipsI16x8ExtractLaneS: {
2457 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2458 __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
2459 i.InputInt8(1));
2460 break;
2461 }
2462 case kMipsI16x8ReplaceLane: {
2463 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2464 Simd128Register src = i.InputSimd128Register(0);
2465 Simd128Register dst = i.OutputSimd128Register();
2466 if (src != dst) {
2467 __ move_v(dst, src);
2468 }
2469 __ insert_h(dst, i.InputInt8(1), i.InputRegister(2));
2470 break;
2471 }
2472 case kMipsI16x8Neg: {
2473 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2474 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2475 __ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
2476 i.InputSimd128Register(0));
2477 break;
2478 }
2479 case kMipsI16x8Shl: {
2480 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2481 __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2482 i.InputInt4(1));
2483 break;
2484 }
2485 case kMipsI16x8ShrS: {
2486 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2487 __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2488 i.InputInt4(1));
2489 break;
2490 }
2491 case kMipsI16x8ShrU: {
2492 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2493 __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2494 i.InputInt4(1));
2495 break;
2496 }
2497 case kMipsI16x8Add: {
2498 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2499 __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2500 i.InputSimd128Register(1));
2501 break;
2502 }
2503 case kMipsI16x8AddSaturateS: {
2504 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2505 __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2506 i.InputSimd128Register(1));
2507 break;
2508 }
2509 case kMipsI16x8Sub: {
2510 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2511 __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2512 i.InputSimd128Register(1));
2513 break;
2514 }
2515 case kMipsI16x8SubSaturateS: {
2516 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2517 __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2518 i.InputSimd128Register(1));
2519 break;
2520 }
2521 case kMipsI16x8Mul: {
2522 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2523 __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2524 i.InputSimd128Register(1));
2525 break;
2526 }
2527 case kMipsI16x8MaxS: {
2528 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2529 __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2530 i.InputSimd128Register(1));
2531 break;
2532 }
2533 case kMipsI16x8MinS: {
2534 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2535 __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2536 i.InputSimd128Register(1));
2537 break;
2538 }
2539 case kMipsI16x8Eq: {
2540 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2541 __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2542 i.InputSimd128Register(1));
2543 break;
2544 }
2545 case kMipsI16x8Ne: {
2546 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2547 Simd128Register dst = i.OutputSimd128Register();
2548 __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
2549 __ nor_v(dst, dst, dst);
2550 break;
2551 }
2552 case kMipsI16x8GtS: {
2553 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2554 __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2555 i.InputSimd128Register(0));
2556 break;
2557 }
2558 case kMipsI16x8GeS: {
2559 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2560 __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2561 i.InputSimd128Register(0));
2562 break;
2563 }
2564 case kMipsI16x8AddSaturateU: {
2565 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2566 __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2567 i.InputSimd128Register(1));
2568 break;
2569 }
2570 case kMipsI16x8SubSaturateU: {
2571 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2572 __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2573 i.InputSimd128Register(1));
2574 break;
2575 }
2576 case kMipsI16x8MaxU: {
2577 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2578 __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2579 i.InputSimd128Register(1));
2580 break;
2581 }
2582 case kMipsI16x8MinU: {
2583 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2584 __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2585 i.InputSimd128Register(1));
2586 break;
2587 }
2588 case kMipsI16x8GtU: {
2589 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2590 __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2591 i.InputSimd128Register(0));
2592 break;
2593 }
2594 case kMipsI16x8GeU: {
2595 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2596 __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2597 i.InputSimd128Register(0));
2598 break;
2599 }
2600 case kMipsI16x8RoundingAverageU: {
2601 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2602 __ aver_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2603 i.InputSimd128Register(0));
2604 break;
2605 }
2606 case kMipsI16x8Abs: {
2607 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2608 __ asub_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2609 kSimd128RegZero);
2610 break;
2611 }
2612 case kMipsI8x16Splat: {
2613 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2614 __ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
2615 break;
2616 }
2617 case kMipsI8x16ExtractLaneU: {
2618 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2619 __ copy_u_b(i.OutputRegister(), i.InputSimd128Register(0),
2620 i.InputInt8(1));
2621 break;
2622 }
2623 case kMipsI8x16ExtractLaneS: {
2624 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2625 __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
2626 i.InputInt8(1));
2627 break;
2628 }
2629 case kMipsI8x16ReplaceLane: {
2630 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2631 Simd128Register src = i.InputSimd128Register(0);
2632 Simd128Register dst = i.OutputSimd128Register();
2633 if (src != dst) {
2634 __ move_v(dst, src);
2635 }
2636 __ insert_b(dst, i.InputInt8(1), i.InputRegister(2));
2637 break;
2638 }
2639 case kMipsI8x16Neg: {
2640 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2641 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2642 __ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
2643 i.InputSimd128Register(0));
2644 break;
2645 }
2646 case kMipsI8x16Shl: {
2647 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2648 __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2649 i.InputInt3(1));
2650 break;
2651 }
2652 case kMipsI8x16ShrS: {
2653 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2654 __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2655 i.InputInt3(1));
2656 break;
2657 }
2658 case kMipsI8x16Add: {
2659 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2660 __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2661 i.InputSimd128Register(1));
2662 break;
2663 }
2664 case kMipsI8x16AddSaturateS: {
2665 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2666 __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2667 i.InputSimd128Register(1));
2668 break;
2669 }
2670 case kMipsI8x16Sub: {
2671 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2672 __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2673 i.InputSimd128Register(1));
2674 break;
2675 }
2676 case kMipsI8x16SubSaturateS: {
2677 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2678 __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2679 i.InputSimd128Register(1));
2680 break;
2681 }
2682 case kMipsI8x16Mul: {
2683 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2684 __ mulv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2685 i.InputSimd128Register(1));
2686 break;
2687 }
2688 case kMipsI8x16MaxS: {
2689 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2690 __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2691 i.InputSimd128Register(1));
2692 break;
2693 }
2694 case kMipsI8x16MinS: {
2695 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2696 __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2697 i.InputSimd128Register(1));
2698 break;
2699 }
2700 case kMipsI8x16Eq: {
2701 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2702 __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2703 i.InputSimd128Register(1));
2704 break;
2705 }
2706 case kMipsI8x16Ne: {
2707 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2708 Simd128Register dst = i.OutputSimd128Register();
2709 __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
2710 __ nor_v(dst, dst, dst);
2711 break;
2712 }
2713 case kMipsI8x16GtS: {
2714 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2715 __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
2716 i.InputSimd128Register(0));
2717 break;
2718 }
2719 case kMipsI8x16GeS: {
2720 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2721 __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
2722 i.InputSimd128Register(0));
2723 break;
2724 }
2725 case kMipsI8x16ShrU: {
2726 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2727 __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2728 i.InputInt3(1));
2729 break;
2730 }
2731 case kMipsI8x16AddSaturateU: {
2732 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2733 __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2734 i.InputSimd128Register(1));
2735 break;
2736 }
2737 case kMipsI8x16SubSaturateU: {
2738 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2739 __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2740 i.InputSimd128Register(1));
2741 break;
2742 }
2743 case kMipsI8x16MaxU: {
2744 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2745 __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2746 i.InputSimd128Register(1));
2747 break;
2748 }
2749 case kMipsI8x16MinU: {
2750 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2751 __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2752 i.InputSimd128Register(1));
2753 break;
2754 }
2755 case kMipsI8x16GtU: {
2756 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2757 __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
2758 i.InputSimd128Register(0));
2759 break;
2760 }
2761 case kMipsI8x16GeU: {
2762 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2763 __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
2764 i.InputSimd128Register(0));
2765 break;
2766 }
2767 case kMipsI8x16RoundingAverageU: {
2768 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2769 __ aver_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
2770 i.InputSimd128Register(0));
2771 break;
2772 }
2773 case kMipsI8x16Abs: {
2774 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2775 __ asub_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2776 kSimd128RegZero);
2777 break;
2778 }
2779 case kMipsS128And: {
2780 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2781 __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
2782 i.InputSimd128Register(1));
2783 break;
2784 }
2785 case kMipsS128Or: {
2786 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2787 __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
2788 i.InputSimd128Register(1));
2789 break;
2790 }
2791 case kMipsS128Xor: {
2792 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2793 __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
2794 i.InputSimd128Register(1));
2795 break;
2796 }
2797 case kMipsS128Not: {
2798 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2799 __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
2800 i.InputSimd128Register(0));
2801 break;
2802 }
2803 case kMipsS1x4AnyTrue:
2804 case kMipsS1x8AnyTrue:
2805 case kMipsS1x16AnyTrue: {
2806 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2807 Register dst = i.OutputRegister();
2808 Label all_false;
2809
2810 __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
2811 i.InputSimd128Register(0), USE_DELAY_SLOT);
2812 __ li(dst, 0); // branch delay slot
2813 __ li(dst, -1);
2814 __ bind(&all_false);
2815 break;
2816 }
2817 case kMipsS1x4AllTrue: {
2818 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2819 Register dst = i.OutputRegister();
2820 Label all_true;
2821 __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
2822 i.InputSimd128Register(0), USE_DELAY_SLOT);
2823 __ li(dst, -1); // branch delay slot
2824 __ li(dst, 0);
2825 __ bind(&all_true);
2826 break;
2827 }
2828 case kMipsS1x8AllTrue: {
2829 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2830 Register dst = i.OutputRegister();
2831 Label all_true;
2832 __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
2833 i.InputSimd128Register(0), USE_DELAY_SLOT);
2834 __ li(dst, -1); // branch delay slot
2835 __ li(dst, 0);
2836 __ bind(&all_true);
2837 break;
2838 }
2839 case kMipsS1x16AllTrue: {
2840 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2841 Register dst = i.OutputRegister();
2842 Label all_true;
2843 __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
2844 i.InputSimd128Register(0), USE_DELAY_SLOT);
2845 __ li(dst, -1); // branch delay slot
2846 __ li(dst, 0);
2847 __ bind(&all_true);
2848 break;
2849 }
2850 case kMipsMsaLd: {
2851 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2852 __ ld_b(i.OutputSimd128Register(), i.MemoryOperand());
2853 break;
2854 }
2855 case kMipsMsaSt: {
2856 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2857 __ st_b(i.InputSimd128Register(2), i.MemoryOperand());
2858 break;
2859 }
2860 case kMipsS32x4InterleaveRight: {
2861 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2862 Simd128Register dst = i.OutputSimd128Register(),
2863 src0 = i.InputSimd128Register(0),
2864 src1 = i.InputSimd128Register(1);
2865 // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2866 // dst = [5, 1, 4, 0]
2867 __ ilvr_w(dst, src1, src0);
2868 break;
2869 }
2870 case kMipsS32x4InterleaveLeft: {
2871 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2872 Simd128Register dst = i.OutputSimd128Register(),
2873 src0 = i.InputSimd128Register(0),
2874 src1 = i.InputSimd128Register(1);
2875 // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2876 // dst = [7, 3, 6, 2]
2877 __ ilvl_w(dst, src1, src0);
2878 break;
2879 }
2880 case kMipsS32x4PackEven: {
2881 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2882 Simd128Register dst = i.OutputSimd128Register(),
2883 src0 = i.InputSimd128Register(0),
2884 src1 = i.InputSimd128Register(1);
2885 // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2886 // dst = [6, 4, 2, 0]
2887 __ pckev_w(dst, src1, src0);
2888 break;
2889 }
2890 case kMipsS32x4PackOdd: {
2891 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2892 Simd128Register dst = i.OutputSimd128Register(),
2893 src0 = i.InputSimd128Register(0),
2894 src1 = i.InputSimd128Register(1);
2895 // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2896 // dst = [7, 5, 3, 1]
2897 __ pckod_w(dst, src1, src0);
2898 break;
2899 }
2900 case kMipsS32x4InterleaveEven: {
2901 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2902 Simd128Register dst = i.OutputSimd128Register(),
2903 src0 = i.InputSimd128Register(0),
2904 src1 = i.InputSimd128Register(1);
2905 // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2906 // dst = [6, 2, 4, 0]
2907 __ ilvev_w(dst, src1, src0);
2908 break;
2909 }
2910 case kMipsS32x4InterleaveOdd: {
2911 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2912 Simd128Register dst = i.OutputSimd128Register(),
2913 src0 = i.InputSimd128Register(0),
2914 src1 = i.InputSimd128Register(1);
2915 // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2916 // dst = [7, 3, 5, 1]
2917 __ ilvod_w(dst, src1, src0);
2918 break;
2919 }
2920 case kMipsS32x4Shuffle: {
2921 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2922 Simd128Register dst = i.OutputSimd128Register(),
2923 src0 = i.InputSimd128Register(0),
2924 src1 = i.InputSimd128Register(1);
2925
2926 int32_t shuffle = i.InputInt32(2);
2927
2928 if (src0 == src1) {
2929 // Unary S32x4 shuffles are handled with shf.w instruction
2930 unsigned lane = shuffle & 0xFF;
2931 if (FLAG_debug_code) {
2932 // range of all four lanes, for unary instruction,
2933 // should belong to the same range, which can be one of these:
2934 // [0, 3] or [4, 7]
2935 if (lane >= 4) {
2936 int32_t shuffle_helper = shuffle;
2937 for (int i = 0; i < 4; ++i) {
2938 lane = shuffle_helper & 0xFF;
2939 CHECK_GE(lane, 4);
2940 shuffle_helper >>= 8;
2941 }
2942 }
2943 }
2944 uint32_t i8 = 0;
2945 for (int i = 0; i < 4; i++) {
2946 lane = shuffle & 0xFF;
2947 if (lane >= 4) {
2948 lane -= 4;
2949 }
2950 DCHECK_GT(4, lane);
2951 i8 |= lane << (2 * i);
2952 shuffle >>= 8;
2953 }
2954 __ shf_w(dst, src0, i8);
2955 } else {
2956 // For binary shuffles use vshf.w instruction
2957 if (dst == src0) {
2958 __ move_v(kSimd128ScratchReg, src0);
2959 src0 = kSimd128ScratchReg;
2960 } else if (dst == src1) {
2961 __ move_v(kSimd128ScratchReg, src1);
2962 src1 = kSimd128ScratchReg;
2963 }
2964
2965 __ li(kScratchReg, i.InputInt32(2));
2966 __ insert_w(dst, 0, kScratchReg);
2967 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2968 __ ilvr_b(dst, kSimd128RegZero, dst);
2969 __ ilvr_h(dst, kSimd128RegZero, dst);
2970 __ vshf_w(dst, src1, src0);
2971 }
2972 break;
2973 }
2974 case kMipsS16x8InterleaveRight: {
2975 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2976 Simd128Register dst = i.OutputSimd128Register(),
2977 src0 = i.InputSimd128Register(0),
2978 src1 = i.InputSimd128Register(1);
2979 // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2980 // dst = [11, 3, 10, 2, 9, 1, 8, 0]
2981 __ ilvr_h(dst, src1, src0);
2982 break;
2983 }
2984 case kMipsS16x8InterleaveLeft: {
2985 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2986 Simd128Register dst = i.OutputSimd128Register(),
2987 src0 = i.InputSimd128Register(0),
2988 src1 = i.InputSimd128Register(1);
2989 // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2990 // dst = [15, 7, 14, 6, 13, 5, 12, 4]
2991 __ ilvl_h(dst, src1, src0);
2992 break;
2993 }
2994 case kMipsS16x8PackEven: {
2995 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2996 Simd128Register dst = i.OutputSimd128Register(),
2997 src0 = i.InputSimd128Register(0),
2998 src1 = i.InputSimd128Register(1);
2999 // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3000 // dst = [14, 12, 10, 8, 6, 4, 2, 0]
3001 __ pckev_h(dst, src1, src0);
3002 break;
3003 }
3004 case kMipsS16x8PackOdd: {
3005 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3006 Simd128Register dst = i.OutputSimd128Register(),
3007 src0 = i.InputSimd128Register(0),
3008 src1 = i.InputSimd128Register(1);
3009 // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3010 // dst = [15, 13, 11, 9, 7, 5, 3, 1]
3011 __ pckod_h(dst, src1, src0);
3012 break;
3013 }
3014 case kMipsS16x8InterleaveEven: {
3015 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3016 Simd128Register dst = i.OutputSimd128Register(),
3017 src0 = i.InputSimd128Register(0),
3018 src1 = i.InputSimd128Register(1);
3019 // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3020 // dst = [14, 6, 12, 4, 10, 2, 8, 0]
3021 __ ilvev_h(dst, src1, src0);
3022 break;
3023 }
3024 case kMipsS16x8InterleaveOdd: {
3025 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3026 Simd128Register dst = i.OutputSimd128Register(),
3027 src0 = i.InputSimd128Register(0),
3028 src1 = i.InputSimd128Register(1);
3029 // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3030 // dst = [15, 7, ... 11, 3, 9, 1]
3031 __ ilvod_h(dst, src1, src0);
3032 break;
3033 }
3034 case kMipsS16x4Reverse: {
3035 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3036 // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3]
3037 // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
3038 __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
3039 break;
3040 }
3041 case kMipsS16x2Reverse: {
3042 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3043 // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 3, 2, 0, 1]
3044 // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
3045 __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
3046 break;
3047 }
3048 case kMipsS8x16InterleaveRight: {
3049 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3050 Simd128Register dst = i.OutputSimd128Register(),
3051 src0 = i.InputSimd128Register(0),
3052 src1 = i.InputSimd128Register(1);
3053 // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3054 // dst = [23, 7, ... 17, 1, 16, 0]
3055 __ ilvr_b(dst, src1, src0);
3056 break;
3057 }
3058 case kMipsS8x16InterleaveLeft: {
3059 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3060 Simd128Register dst = i.OutputSimd128Register(),
3061 src0 = i.InputSimd128Register(0),
3062 src1 = i.InputSimd128Register(1);
3063 // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3064 // dst = [31, 15, ... 25, 9, 24, 8]
3065 __ ilvl_b(dst, src1, src0);
3066 break;
3067 }
3068 case kMipsS8x16PackEven: {
3069 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3070 Simd128Register dst = i.OutputSimd128Register(),
3071 src0 = i.InputSimd128Register(0),
3072 src1 = i.InputSimd128Register(1);
3073 // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3074 // dst = [30, 28, ... 6, 4, 2, 0]
3075 __ pckev_b(dst, src1, src0);
3076 break;
3077 }
3078 case kMipsS8x16PackOdd: {
3079 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3080 Simd128Register dst = i.OutputSimd128Register(),
3081 src0 = i.InputSimd128Register(0),
3082 src1 = i.InputSimd128Register(1);
3083 // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3084 // dst = [31, 29, ... 7, 5, 3, 1]
3085 __ pckod_b(dst, src1, src0);
3086 break;
3087 }
3088 case kMipsS8x16InterleaveEven: {
3089 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3090 Simd128Register dst = i.OutputSimd128Register(),
3091 src0 = i.InputSimd128Register(0),
3092 src1 = i.InputSimd128Register(1);
3093 // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3094 // dst = [30, 14, ... 18, 2, 16, 0]
3095 __ ilvev_b(dst, src1, src0);
3096 break;
3097 }
3098 case kMipsS8x16InterleaveOdd: {
3099 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3100 Simd128Register dst = i.OutputSimd128Register(),
3101 src0 = i.InputSimd128Register(0),
3102 src1 = i.InputSimd128Register(1);
3103 // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3104 // dst = [31, 15, ... 19, 3, 17, 1]
3105 __ ilvod_b(dst, src1, src0);
3106 break;
3107 }
3108 case kMipsS8x16Concat: {
3109 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3110 Simd128Register dst = i.OutputSimd128Register();
3111 DCHECK(dst == i.InputSimd128Register(0));
3112 __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
3113 break;
3114 }
3115 case kMipsS8x16Shuffle: {
3116 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3117 Simd128Register dst = i.OutputSimd128Register(),
3118 src0 = i.InputSimd128Register(0),
3119 src1 = i.InputSimd128Register(1);
3120
3121 if (dst == src0) {
3122 __ move_v(kSimd128ScratchReg, src0);
3123 src0 = kSimd128ScratchReg;
3124 } else if (dst == src1) {
3125 __ move_v(kSimd128ScratchReg, src1);
3126 src1 = kSimd128ScratchReg;
3127 }
3128
3129 __ li(kScratchReg, i.InputInt32(2));
3130 __ insert_w(dst, 0, kScratchReg);
3131 __ li(kScratchReg, i.InputInt32(3));
3132 __ insert_w(dst, 1, kScratchReg);
3133 __ li(kScratchReg, i.InputInt32(4));
3134 __ insert_w(dst, 2, kScratchReg);
3135 __ li(kScratchReg, i.InputInt32(5));
3136 __ insert_w(dst, 3, kScratchReg);
3137 __ vshf_b(dst, src1, src0);
3138 break;
3139 }
3140 case kMipsS8x16Swizzle: {
3141 Simd128Register dst = i.OutputSimd128Register(),
3142 tbl = i.InputSimd128Register(0),
3143 ctl = i.InputSimd128Register(1);
3144 DCHECK(dst != ctl && dst != tbl);
3145 Simd128Register zeroReg = i.TempSimd128Register(0);
3146 __ fill_w(zeroReg, zero_reg);
3147 __ move_v(dst, ctl);
3148 __ vshf_b(dst, tbl, zeroReg);
3149 break;
3150 }
3151 case kMipsS8x8Reverse: {
3152 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3153 // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
3154 // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7]
3155 // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1
3156 // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm: 00011011 = 0x1B
3157 __ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1);
3158 __ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B);
3159 break;
3160 }
3161 case kMipsS8x4Reverse: {
3162 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3163 // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3]
3164 // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
3165 __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
3166 break;
3167 }
3168 case kMipsS8x2Reverse: {
3169 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3170 // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 2, 3, 0, 1]
3171 // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
3172 __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
3173 break;
3174 }
3175 case kMipsI32x4SConvertI16x8Low: {
3176 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3177 Simd128Register dst = i.OutputSimd128Register();
3178 Simd128Register src = i.InputSimd128Register(0);
3179 __ ilvr_h(kSimd128ScratchReg, src, src);
3180 __ slli_w(dst, kSimd128ScratchReg, 16);
3181 __ srai_w(dst, dst, 16);
3182 break;
3183 }
3184 case kMipsI32x4SConvertI16x8High: {
3185 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3186 Simd128Register dst = i.OutputSimd128Register();
3187 Simd128Register src = i.InputSimd128Register(0);
3188 __ ilvl_h(kSimd128ScratchReg, src, src);
3189 __ slli_w(dst, kSimd128ScratchReg, 16);
3190 __ srai_w(dst, dst, 16);
3191 break;
3192 }
3193 case kMipsI32x4UConvertI16x8Low: {
3194 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3195 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3196 __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
3197 i.InputSimd128Register(0));
3198 break;
3199 }
3200 case kMipsI32x4UConvertI16x8High: {
3201 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3202 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3203 __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
3204 i.InputSimd128Register(0));
3205 break;
3206 }
3207 case kMipsI16x8SConvertI8x16Low: {
3208 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3209 Simd128Register dst = i.OutputSimd128Register();
3210 Simd128Register src = i.InputSimd128Register(0);
3211 __ ilvr_b(kSimd128ScratchReg, src, src);
3212 __ slli_h(dst, kSimd128ScratchReg, 8);
3213 __ srai_h(dst, dst, 8);
3214 break;
3215 }
3216 case kMipsI16x8SConvertI8x16High: {
3217 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3218 Simd128Register dst = i.OutputSimd128Register();
3219 Simd128Register src = i.InputSimd128Register(0);
3220 __ ilvl_b(kSimd128ScratchReg, src, src);
3221 __ slli_h(dst, kSimd128ScratchReg, 8);
3222 __ srai_h(dst, dst, 8);
3223 break;
3224 }
3225 case kMipsI16x8SConvertI32x4: {
3226 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3227 Simd128Register dst = i.OutputSimd128Register();
3228 Simd128Register src0 = i.InputSimd128Register(0);
3229 Simd128Register src1 = i.InputSimd128Register(1);
3230 __ sat_s_w(kSimd128ScratchReg, src0, 15);
3231 __ sat_s_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
3232 __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
3233 break;
3234 }
3235 case kMipsI16x8UConvertI32x4: {
3236 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3237 Simd128Register dst = i.OutputSimd128Register();
3238 Simd128Register src0 = i.InputSimd128Register(0);
3239 Simd128Register src1 = i.InputSimd128Register(1);
3240 __ sat_u_w(kSimd128ScratchReg, src0, 15);
3241 __ sat_u_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
3242 __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
3243 break;
3244 }
3245 case kMipsI16x8UConvertI8x16Low: {
3246 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3247 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3248 __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
3249 i.InputSimd128Register(0));
3250 break;
3251 }
3252 case kMipsI16x8UConvertI8x16High: {
3253 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3254 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3255 __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
3256 i.InputSimd128Register(0));
3257 break;
3258 }
3259 case kMipsI8x16SConvertI16x8: {
3260 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3261 Simd128Register dst = i.OutputSimd128Register();
3262 Simd128Register src0 = i.InputSimd128Register(0);
3263 Simd128Register src1 = i.InputSimd128Register(1);
3264 __ sat_s_h(kSimd128ScratchReg, src0, 7);
3265 __ sat_s_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
3266 __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
3267 break;
3268 }
3269 case kMipsI8x16UConvertI16x8: {
3270 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3271 Simd128Register dst = i.OutputSimd128Register();
3272 Simd128Register src0 = i.InputSimd128Register(0);
3273 Simd128Register src1 = i.InputSimd128Register(1);
3274 __ sat_u_h(kSimd128ScratchReg, src0, 7);
3275 __ sat_u_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
3276 __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
3277 break;
3278 }
3279 case kMipsF32x4AddHoriz: {
3280 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3281 Simd128Register src0 = i.InputSimd128Register(0);
3282 Simd128Register src1 = i.InputSimd128Register(1);
3283 Simd128Register dst = i.OutputSimd128Register();
3284 __ shf_w(kSimd128ScratchReg, src0, 0xB1); // 2 3 0 1 : 10110001 : 0xB1
3285 __ shf_w(kSimd128RegZero, src1, 0xB1); // kSimd128RegZero as scratch
3286 __ fadd_w(kSimd128ScratchReg, kSimd128ScratchReg, src0);
3287 __ fadd_w(kSimd128RegZero, kSimd128RegZero, src1);
3288 __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
3289 break;
3290 }
3291 case kMipsI32x4AddHoriz: {
3292 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3293 Simd128Register src0 = i.InputSimd128Register(0);
3294 Simd128Register src1 = i.InputSimd128Register(1);
3295 Simd128Register dst = i.OutputSimd128Register();
3296 __ hadd_s_d(kSimd128ScratchReg, src0, src0);
3297 __ hadd_s_d(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
3298 __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
3299 break;
3300 }
3301 case kMipsI16x8AddHoriz: {
3302 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3303 Simd128Register src0 = i.InputSimd128Register(0);
3304 Simd128Register src1 = i.InputSimd128Register(1);
3305 Simd128Register dst = i.OutputSimd128Register();
3306 __ hadd_s_w(kSimd128ScratchReg, src0, src0);
3307 __ hadd_s_w(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
3308 __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
3309 break;
3310 }
3311 }
3312 return kSuccess;
3313 } // NOLINT(readability/fn_size)
3314
// Emits the actual compare-and-branch for a preceding compare pseudo-op.
// Branches to `tlabel` when `condition` holds; if `fallthru` is false, an
// unconditional branch to `flabel` is emitted for the false case, otherwise
// control falls through to it.
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
                            Instruction* instr, FlagsCondition condition,
                            Label* tlabel, Label* flabel, bool fallthru) {
#undef __
#define __ tasm->

  Condition cc = kNoCondition;
  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on the other arch's. The compare operations
  // emit mips pseudo-instructions, which are handled here by branch
  // instructions that do the actual comparison. Essential that the input
  // registers to compare pseudo-op are not modified before this branch op, as
  // they are tested here.

  MipsOperandConverter i(gen, instr);
  if (instr->arch_opcode() == kMipsTst) {
    // The Tst pseudo-op left its result in kScratchReg; branch on it.
    cc = FlagsConditionToConditionTst(condition);
    __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg));
  } else if (instr->arch_opcode() == kMipsAddOvf ||
             instr->arch_opcode() == kMipsSubOvf) {
    // Overflow occurs if overflow register is negative
    switch (condition) {
      case kOverflow:
        __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg));
        break;
      case kNotOverflow:
        __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg));
        break;
      default:
        UNSUPPORTED_COND(instr->arch_opcode(), condition);
        break;
    }
  } else if (instr->arch_opcode() == kMipsMulOvf) {
    // Overflow occurs if overflow register is not zero
    switch (condition) {
      case kOverflow:
        __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg));
        break;
      case kNotOverflow:
        __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg));
        break;
      default:
        UNSUPPORTED_COND(kMipsMulOvf, condition);
        break;
    }
  } else if (instr->arch_opcode() == kMipsCmp) {
    // Integer compare: branch directly on the original operands.
    cc = FlagsConditionToConditionCmp(condition);
    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
  } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
    cc = FlagsConditionToConditionCmp(condition);
    Register lhs_register = sp;
    uint32_t offset;
    if (gen->ShouldApplyOffsetToStackCheck(instr, &offset)) {
      // Compare against sp minus the pending frame offset instead of sp.
      lhs_register = i.TempRegister(0);
      __ Subu(lhs_register, sp, offset);
    }
    __ Branch(tlabel, cc, lhs_register, Operand(i.InputRegister(0)));
  } else if (instr->arch_opcode() == kMipsCmpS ||
             instr->arch_opcode() == kMipsCmpD) {
    // FPU compares set the FPU condition flag; branch on that flag.
    bool predicate;
    FlagsConditionToConditionCmpFPU(&predicate, condition);
    if (predicate) {
      __ BranchTrueF(tlabel);
    } else {
      __ BranchFalseF(tlabel);
    }
  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    UNIMPLEMENTED();
  }
  if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
#undef __
#define __ tasm()->
}
3390
3391 // Assembles branches after an instruction.
AssembleArchBranch(Instruction * instr,BranchInfo * branch)3392 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
3393 Label* tlabel = branch->true_label;
3394 Label* flabel = branch->false_label;
3395 AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
3396 branch->fallthru);
3397 }
3398
// Conditionally zeroes kSpeculationPoisonRegister: the condition is negated,
// so the poison register is cleared on the path where the branch condition
// does NOT hold — presumably to mask values observed under misspeculation
// (Spectre-style mitigation; confirm against the poison-register users).
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
                                            Instruction* instr) {
  // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
  if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
    return;
  }

  MipsOperandConverter i(this, instr);
  condition = NegateFlagsCondition(condition);

  switch (instr->arch_opcode()) {
    case kMipsCmp: {
      // Compare the original operands again and clear the poison register
      // when the (negated) condition holds.
      __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
                             i.InputOperand(1),
                             FlagsConditionToConditionCmp(condition));
    }
      return;
    case kMipsTst: {
      // The Tst pseudo-op left its result in kScratchReg.
      switch (condition) {
        case kEqual:
          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
          break;
        case kNotEqual:
          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
                                        kScratchReg);
          break;
        default:
          UNREACHABLE();
      }
    }
      return;
    case kMipsAddOvf:
    case kMipsSubOvf: {
      // Overflow occurs if overflow register is negative
      __ Slt(kScratchReg2, kScratchReg, zero_reg);
      switch (condition) {
        case kOverflow:
          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
                                        kScratchReg2);
          break;
        case kNotOverflow:
          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
          break;
        default:
          UNSUPPORTED_COND(instr->arch_opcode(), condition);
      }
    }
      return;
    case kMipsMulOvf: {
      // Overflow occurs if overflow register is not zero
      switch (condition) {
        case kOverflow:
          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
                                        kScratchReg);
          break;
        case kNotOverflow:
          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
          break;
        default:
          UNSUPPORTED_COND(instr->arch_opcode(), condition);
      }
    }
      return;
    case kMipsCmpS:
    case kMipsCmpD: {
      // FPU compares set the FPU condition flag; test it directly.
      bool predicate;
      FlagsConditionToConditionCmpFPU(&predicate, condition);
      if (predicate) {
        __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
      } else {
        __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
      }
    }
      return;
    default:
      UNREACHABLE();
  }
}
3477
// Deoptimization branches need no special handling on MIPS; emit a regular
// architecture branch.
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
                                            BranchInfo* branch) {
  AssembleArchBranch(instr, branch);
}
3482
AssembleArchJump(RpoNumber target)3483 void CodeGenerator::AssembleArchJump(RpoNumber target) {
3484 if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
3485 }
3486
// Emits a conditional branch to an out-of-line wasm trap sequence. The trap
// id is encoded as the instruction's last input operand.
void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {
  class OutOfLineTrap final : public OutOfLineCode {
   public:
    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
        : OutOfLineCode(gen), instr_(instr), gen_(gen) {}

    void Generate() final {
      MipsOperandConverter i(gen_, instr_);
      // The trap id rides along as the final input of the instruction.
      TrapId trap_id =
          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
      GenerateCallToTrap(trap_id);
    }

   private:
    void GenerateCallToTrap(TrapId trap_id) {
      if (trap_id == TrapId::kInvalid) {
        // We cannot test calls to the runtime in cctest/test-run-wasm.
        // Therefore we emit a call to C here instead of a call to the runtime.
        // We use the context register as the scratch register, because we do
        // not have a context here.
        __ PrepareCallCFunction(0, 0, cp);
        __ CallCFunction(
            ExternalReference::wasm_call_trap_callback_for_testing(), 0);
        __ LeaveFrame(StackFrame::WASM_COMPILED);
        // Pop the incoming stack parameters before returning to the caller.
        auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
        int pop_count =
            static_cast<int>(call_descriptor->StackParameterCount());
        __ Drop(pop_count);
        __ Ret();
      } else {
        gen_->AssembleSourcePosition(instr_);
        // A direct call to a wasm runtime stub defined in this module.
        // Just encode the stub index. This will be patched when the code
        // is added to the native module and copied into wasm code space.
        __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
        ReferenceMap* reference_map =
            new (gen_->zone()) ReferenceMap(gen_->zone());
        gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
        if (FLAG_debug_code) {
          // The trap stub should not return; halt if control reaches here.
          __ stop();
        }
      }
    }

    Instruction* instr_;
    CodeGenerator* gen_;
  };
  auto ool = new (zone()) OutOfLineTrap(this, instr);
  Label* tlabel = ool->entry();
  // Branch to the OOL trap when `condition` holds; fallthru=true means no
  // branch is emitted for the false (non-trapping) path.
  AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
}
3539
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  MipsOperandConverter i(this, instr);

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register result = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = kNoCondition;
  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on the other arch's. The compare operations
  // emit mips pseudo-instructions, which are checked and handled here.

  if (instr->arch_opcode() == kMipsTst) {
    // NOTE(review): kScratchReg presumably holds the AND of the test operands,
    // set by the kMipsTst handling earlier in this file — confirm.
    cc = FlagsConditionToConditionTst(condition);
    if (cc == eq) {
      // result = (kScratchReg == 0): unsigned "< 1" is true only for zero.
      __ Sltu(result, kScratchReg, 1);
    } else {
      // result = (kScratchReg != 0): unsigned "0 < kScratchReg".
      __ Sltu(result, zero_reg, kScratchReg);
    }
    return;
  } else if (instr->arch_opcode() == kMipsAddOvf ||
             instr->arch_opcode() == kMipsSubOvf) {
    // Overflow occurs if overflow register is negative
    __ slt(result, kScratchReg, zero_reg);
  } else if (instr->arch_opcode() == kMipsMulOvf) {
    // Overflow occurs if overflow register is not zero
    __ Sgtu(result, kScratchReg, zero_reg);
  } else if (instr->arch_opcode() == kMipsCmp) {
    cc = FlagsConditionToConditionCmp(condition);
    switch (cc) {
      case eq:
      case ne: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        if (instr->InputAt(1)->IsImmediate()) {
          if (is_int16(-right.immediate())) {
            if (right.immediate() == 0) {
              // Comparing against zero: test the register directly.
              if (cc == eq) {
                __ Sltu(result, left, 1);
              } else {
                __ Sltu(result, zero_reg, left);
              }
            } else {
              // Compute left - right as an addition of the negated immediate,
              // then test the difference against zero.
              __ Addu(result, left, -right.immediate());
              if (cc == eq) {
                __ Sltu(result, result, 1);
              } else {
                __ Sltu(result, zero_reg, result);
              }
            }
          } else {
            // XOR of the operands is zero iff they are equal. Immediates that
            // fit in 16 unsigned bits go directly into the xori encoding;
            // larger ones are materialized in kScratchReg first.
            if (is_uint16(right.immediate())) {
              __ Xor(result, left, right);
            } else {
              __ li(kScratchReg, right);
              __ Xor(result, left, kScratchReg);
            }
            if (cc == eq) {
              __ Sltu(result, result, 1);
            } else {
              __ Sltu(result, zero_reg, result);
            }
          }
        } else {
          // Register-register case: XOR then test against zero.
          __ Xor(result, left, right);
          if (cc == eq) {
            __ Sltu(result, result, 1);
          } else {
            __ Sltu(result, zero_reg, result);
          }
        }
      } break;
      case lt:
      case ge: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        __ Slt(result, left, right);
        if (cc == ge) {
          // ge is the negation of lt.
          __ xori(result, result, 1);
        }
      } break;
      case gt:
      case le: {
        // gt/le are lt/ge with the operands swapped.
        Register left = i.InputRegister(1);
        Operand right = i.InputOperand(0);
        __ Slt(result, left, right);
        if (cc == le) {
          __ xori(result, result, 1);
        }
      } break;
      case lo:
      case hs: {
        // Unsigned below / above-or-same.
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        __ Sltu(result, left, right);
        if (cc == hs) {
          __ xori(result, result, 1);
        }
      } break;
      case hi:
      case ls: {
        // Unsigned above / below-or-same: operands swapped, then negated.
        Register left = i.InputRegister(1);
        Operand right = i.InputOperand(0);
        __ Sltu(result, left, right);
        if (cc == ls) {
          __ xori(result, result, 1);
        }
      } break;
      default:
        UNREACHABLE();
    }
    return;
  } else if (instr->arch_opcode() == kMipsCmpD ||
             instr->arch_opcode() == kMipsCmpS) {
    FPURegister left = i.InputOrZeroDoubleRegister(0);
    FPURegister right = i.InputOrZeroDoubleRegister(1);
    if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
        !__ IsDoubleZeroRegSet()) {
      // Lazily materialize +0.0 in kDoubleRegZero when one operand is the
      // zero register and it has not been set up yet.
      __ Move(kDoubleRegZero, 0.0);
    }
    bool predicate;
    FlagsConditionToConditionCmpFPU(&predicate, condition);
    if (!IsMipsArchVariant(kMips32r6)) {
      // Pre-r6: the FP compare set an FP condition flag; select 1/0 with a
      // conditional move keyed on that flag.
      __ li(result, Operand(1));
      if (predicate) {
        __ Movf(result, zero_reg);
      } else {
        __ Movt(result, zero_reg);
      }
    } else {
      // r6: the compare wrote all-ones/all-zeros to kDoubleCompareReg; move
      // it to the GPR and normalize to 1/0.
      __ mfc1(result, kDoubleCompareReg);
      if (predicate) {
        __ And(result, result, 1);  // cmp returns all 1's/0's, use only LSB.
      } else {
        __ Addu(result, result, 1);  // Toggle result for not equal.
      }
    }
    return;
  } else {
    PrintF("AssembleArchBoolean Unimplemented arch_opcode is : %d\n",
           instr->arch_opcode());
    TRACE_UNIMPL();
    UNIMPLEMENTED();
  }
}
3687
AssembleArchBinarySearchSwitch(Instruction * instr)3688 void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
3689 MipsOperandConverter i(this, instr);
3690 Register input = i.InputRegister(0);
3691 std::vector<std::pair<int32_t, Label*>> cases;
3692 for (size_t index = 2; index < instr->InputCount(); index += 2) {
3693 cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
3694 }
3695 AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
3696 cases.data() + cases.size());
3697 }
3698
AssembleArchTableSwitch(Instruction * instr)3699 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
3700 MipsOperandConverter i(this, instr);
3701 Register input = i.InputRegister(0);
3702 size_t const case_count = instr->InputCount() - 2;
3703 __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
3704 __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
3705 return GetLabel(i.InputRpo(index + 2));
3706 });
3707 }
3708
FinishFrame(Frame * frame)3709 void CodeGenerator::FinishFrame(Frame* frame) {
3710 auto call_descriptor = linkage()->GetIncomingDescriptor();
3711
3712 const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
3713 if (saves_fpu != 0) {
3714 frame->AlignSavedCalleeRegisterSlots();
3715 }
3716
3717 if (saves_fpu != 0) {
3718 int count = base::bits::CountPopulation(saves_fpu);
3719 DCHECK_EQ(kNumCalleeSavedFPU, count);
3720 frame->AllocateSavedCalleeRegisterSlots(count *
3721 (kDoubleSize / kSystemPointerSize));
3722 }
3723
3724 const RegList saves = call_descriptor->CalleeSavedRegisters();
3725 if (saves != 0) {
3726 int count = base::bits::CountPopulation(saves);
3727 DCHECK_EQ(kNumCalleeSaved, count + 1);
3728 frame->AllocateSavedCalleeRegisterSlots(count);
3729 }
3730 }
3731
// Builds the stack frame for the current code object: emits the appropriate
// prologue for the frame type, performs a stack-overflow check for large wasm
// frames, allocates the local/spill slots, and saves callee-saved registers.
void CodeGenerator::AssembleConstructFrame() {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (call_descriptor->IsCFunctionCall()) {
      if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
        __ StubPrologue(StackFrame::C_WASM_ENTRY);
        // Reserve stack space for saving the c_entry_fp later.
        __ Subu(sp, sp, Operand(kSystemPointerSize));
      } else {
        // Plain C frame: push return address and caller fp, set up fp.
        __ Push(ra, fp);
        __ mov(fp, sp);
      }
    } else if (call_descriptor->IsJSFunctionCall()) {
      __ Prologue();
      if (call_descriptor->PushArgumentCount()) {
        __ Push(kJavaScriptCallArgCountRegister);
      }
    } else {
      __ StubPrologue(info()->GetOutputStackFrameType());
      if (call_descriptor->IsWasmFunctionCall()) {
        __ Push(kWasmInstanceRegister);
      } else if (call_descriptor->IsWasmImportWrapper() ||
                 call_descriptor->IsWasmCapiFunction()) {
        // Wasm import wrappers are passed a tuple in the place of the instance.
        // Unpack the tuple into the instance and the target callable.
        // This must be done here in the codegen because it cannot be expressed
        // properly in the graph.
        __ lw(kJSFunctionRegister,
              FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
        __ lw(kWasmInstanceRegister,
              FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
        __ Push(kWasmInstanceRegister);
        if (call_descriptor->IsWasmCapiFunction()) {
          // Reserve space for saving the PC later.
          __ Subu(sp, sp, Operand(kSystemPointerSize));
        }
      }
    }
  }

  // Slots still to be allocated beyond the fixed frame header.
  int required_slots =
      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    required_slots -= osr_helper()->UnoptimizedFrameSlots();
    ResetSpeculationPoison();
  }

  const RegList saves = call_descriptor->CalleeSavedRegisters();
  const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();

  if (required_slots > 0) {
    DCHECK(frame_access_state()->has_frame());
    if (info()->IsWasm() && required_slots > 128) {
      // For WebAssembly functions with big frames we have to do the stack
      // overflow check before we construct the frame. Otherwise we may not
      // have enough space on the stack to call the runtime for the stack
      // overflow.
      Label done;

      // If the frame is bigger than the stack, we throw the stack overflow
      // exception unconditionally. Thereby we can avoid the integer overflow
      // check in the condition code.
      if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
        // Load the real stack limit (double indirection through the
        // instance), add the frame size, and skip the throw if sp still fits.
        __ Lw(
            kScratchReg,
            FieldMemOperand(kWasmInstanceRegister,
                            WasmInstanceObject::kRealStackLimitAddressOffset));
        __ Lw(kScratchReg, MemOperand(kScratchReg));
        __ Addu(kScratchReg, kScratchReg,
                Operand(required_slots * kSystemPointerSize));
        __ Branch(&done, uge, sp, Operand(kScratchReg));
      }

      __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
      // We come from WebAssembly, there are no references for the GC.
      ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
      RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
      if (FLAG_debug_code) {
        // The stub does not return; trap if it ever does.
        __ stop();
      }

      __ bind(&done);
    }
  }

  const int returns = frame()->GetReturnSlotCount();

  // Skip callee-saved and return slots, which are pushed below.
  required_slots -= base::bits::CountPopulation(saves);
  required_slots -= 2 * base::bits::CountPopulation(saves_fpu);
  required_slots -= returns;
  if (required_slots > 0) {
    __ Subu(sp, sp, Operand(required_slots * kSystemPointerSize));
  }

  // Save callee-saved FPU registers.
  if (saves_fpu != 0) {
    __ MultiPushFPU(saves_fpu);
  }

  if (saves != 0) {
    // Save callee-saved registers.
    __ MultiPush(saves);
    DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
  }

  if (returns != 0) {
    // Create space for returns.
    __ Subu(sp, sp, Operand(returns * kSystemPointerSize));
  }
}
3853
// Assembles the function epilogue: frees return slots, restores callee-saved
// registers, tears down the frame, and returns, popping `pop` (an immediate
// count or a register) stack parameters plus the descriptor's own count.
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  int pop_count = static_cast<int>(call_descriptor->StackParameterCount());

  // Free the return-value slots allocated in AssembleConstructFrame.
  const int returns = frame()->GetReturnSlotCount();
  if (returns != 0) {
    __ Addu(sp, sp, Operand(returns * kSystemPointerSize));
  }

  // Restore GP registers.
  const RegList saves = call_descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore FPU registers.
  const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    __ MultiPopFPU(saves_fpu);
  }

  MipsOperandConverter g(this, nullptr);
  if (call_descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have an variable
    // number of stack slot pops.
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      // Zero-pop returns share one return sequence: branch to return_label_
      // if it is already bound, otherwise bind it here.
      if (return_label_.is_bound()) {
        __ Branch(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }
  // An immediate pop count folds into DropAndRet below; a dynamic count is
  // added to sp here, scaled to bytes.
  if (pop->IsImmediate()) {
    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
    pop_count += g.ToConstant(pop).ToInt32();
  } else {
    Register pop_reg = g.ToRegister(pop);
    __ sll(pop_reg, pop_reg, kSystemPointerSizeLog2);
    __ Addu(sp, sp, Operand(pop_reg));
  }
  if (pop_count != 0) {
    __ DropAndRet(pop_count);
  } else {
    __ Ret();
  }
}
3907
// No per-architecture code finalization is needed on MIPS.
void CodeGenerator::FinishCode() {}
3909
// Deoptimization exits require no up-front preparation on MIPS.
void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
3911
// Moves a value between two instruction operands (GP/FP/SIMD register, stack
// slot, or constant), using only the designated scratch registers for
// intermediate storage.
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // GP register -> GP register or stack slot.
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ sw(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    // Stack slot -> GP register, or stack slot -> stack slot via kScratchReg.
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ lw(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ lw(temp, src);
      __ sw(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      // Materialize the constant into the destination register, or into
      // kScratchReg followed by a spill when the destination is a slot.
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          if (RelocInfo::IsWasmReference(src.rmode())) {
            // Preserve the reloc mode so the embedded value can be patched.
            __ li(dst, Operand(src.ToInt32(), src.rmode()));
          } else {
            __ li(dst, Operand(src.ToInt32()));
          }
          break;
        case Constant::kFloat32:
          __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
          break;
        case Constant::kInt64:
          // 64-bit integer constants do not occur on 32-bit MIPS.
          UNREACHABLE();
          break;
        case Constant::kFloat64:
          __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
          break;
        case Constant::kExternalReference:
          __ li(dst, src.ToExternalReference());
          break;
        case Constant::kDelayedStringConstant:
          __ li(dst, src.ToDelayedStringConstant());
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          RootIndex index;
          if (IsMaterializableFromRoot(src_object, &index)) {
            // Well-known objects are loaded from the roots table instead of
            // embedding a handle.
            __ LoadRoot(dst, index);
          } else {
            __ li(dst, src_object);
          }
          break;
        }
        case Constant::kCompressedHeapObject:
          UNREACHABLE();
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips.
          break;
      }
      if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsFPStackSlot()) {
        MemOperand dst = g.ToMemOperand(destination);
        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
          // A zero bit pattern can be stored straight from zero_reg.
          __ sw(zero_reg, dst);
        } else {
          __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
          __ sw(kScratchReg, dst);
        }
      } else {
        DCHECK(destination->IsFPRegister());
        FloatRegister dst = g.ToSingleRegister(destination);
        __ Move(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      // Materialize the double in the destination register, or in the FP
      // scratch register followed by a spill to the stack slot.
      DoubleRegister dst = destination->IsFPRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      __ Move(dst, src.ToFloat64().value());
      if (destination->IsFPStackSlot()) {
        __ Sdc1(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsFPRegister()) {
    // FP/SIMD register -> FP/SIMD register or stack slot.
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kSimd128) {
      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
      MSARegister src = g.ToSimd128Register(source);
      if (destination->IsSimd128Register()) {
        MSARegister dst = g.ToSimd128Register(destination);
        __ move_v(dst, src);
      } else {
        DCHECK(destination->IsSimd128StackSlot());
        __ st_b(src, g.ToMemOperand(destination));
      }
    } else {
      FPURegister src = g.ToDoubleRegister(source);
      if (destination->IsFPRegister()) {
        FPURegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, src);
      } else {
        DCHECK(destination->IsFPStackSlot());
        MachineRepresentation rep =
            LocationOperand::cast(source)->representation();
        if (rep == MachineRepresentation::kFloat64) {
          __ Sdc1(src, g.ToMemOperand(destination));
        } else if (rep == MachineRepresentation::kFloat32) {
          __ swc1(src, g.ToMemOperand(destination));
        } else {
          UNREACHABLE();
        }
      }
    }
  } else if (source->IsFPStackSlot()) {
    // FP/SIMD stack slot -> FP/SIMD register, or slot -> slot via the FP/MSA
    // scratch register.
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source);
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (destination->IsFPRegister()) {
      if (rep == MachineRepresentation::kFloat64) {
        __ Ldc1(g.ToDoubleRegister(destination), src);
      } else if (rep == MachineRepresentation::kFloat32) {
        __ lwc1(g.ToDoubleRegister(destination), src);
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
        CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
        __ ld_b(g.ToSimd128Register(destination), src);
      }
    } else {
      FPURegister temp = kScratchDoubleReg;
      if (rep == MachineRepresentation::kFloat64) {
        __ Ldc1(temp, src);
        __ Sdc1(temp, g.ToMemOperand(destination));
      } else if (rep == MachineRepresentation::kFloat32) {
        __ lwc1(temp, src);
        __ swc1(temp, g.ToMemOperand(destination));
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
        CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
        MSARegister temp = kSimd128ScratchReg;
        __ ld_b(temp, src);
        __ st_b(temp, g.ToMemOperand(destination));
      }
    }
  } else {
    // No other source kinds can occur.
    UNREACHABLE();
  }
}
4068
// Swaps the contents of two instruction operands in place, using the GP, FP,
// and MSA scratch registers as temporaries.
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      // Register <-> stack slot: save the register, load the slot into the
      // register, store the saved value to the slot.
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ lw(src, dst);
      __ sw(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    // Stack slot <-> stack slot using two GP scratch registers.
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    Register temp_1 = kScratchReg2;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ lw(temp_0, src);
    __ lw(temp_1, dst);
    __ sw(temp_0, dst);
    __ sw(temp_1, src);
  } else if (source->IsFPRegister()) {
    if (destination->IsFPRegister()) {
      // FP/SIMD register <-> FP/SIMD register via the matching scratch reg.
      MachineRepresentation rep =
          LocationOperand::cast(source)->representation();
      if (rep == MachineRepresentation::kSimd128) {
        CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
        MSARegister temp = kSimd128ScratchReg;
        MSARegister src = g.ToSimd128Register(source);
        MSARegister dst = g.ToSimd128Register(destination);
        __ move_v(temp, src);
        __ move_v(src, dst);
        __ move_v(dst, temp);
      } else {
        FPURegister temp = kScratchDoubleReg;
        FPURegister src = g.ToDoubleRegister(source);
        FPURegister dst = g.ToDoubleRegister(destination);
        __ Move(temp, src);
        __ Move(src, dst);
        __ Move(dst, temp);
      }
    } else {
      // FP/SIMD register <-> FP/SIMD stack slot: save the register, load the
      // slot into the register, store the saved value to the slot.
      DCHECK(destination->IsFPStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      MachineRepresentation rep =
          LocationOperand::cast(source)->representation();
      if (rep == MachineRepresentation::kFloat64) {
        FPURegister temp = kScratchDoubleReg;
        FPURegister src = g.ToDoubleRegister(source);
        __ Move(temp, src);
        __ Ldc1(src, dst);
        __ Sdc1(temp, dst);
      } else if (rep == MachineRepresentation::kFloat32) {
        FPURegister temp = kScratchDoubleReg;
        FPURegister src = g.ToFloatRegister(source);
        __ Move(temp, src);
        __ lwc1(src, dst);
        __ swc1(temp, dst);
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
        CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
        MSARegister temp = kSimd128ScratchReg;
        MSARegister src = g.ToSimd128Register(source);
        __ move_v(temp, src);
        __ ld_b(src, dst);
        __ st_b(temp, dst);
      }
    }
  } else if (source->IsFPStackSlot()) {
    // FP/SIMD stack slot <-> FP/SIMD stack slot: the destination is saved in
    // an FP/MSA scratch register, the source is copied over word by word with
    // a GP scratch register, and the saved value is stored back to the source.
    DCHECK(destination->IsFPStackSlot());
    Register temp_0 = kScratchReg;
    FPURegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand dst0 = g.ToMemOperand(destination);
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kFloat64) {
      // Doubles span two 32-bit words on MIPS32.
      MemOperand src1(src0.rm(), src0.offset() + kIntSize);
      MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
      __ Ldc1(temp_1, dst0);  // Save destination in temp_1.
      __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
      __ sw(temp_0, dst0);
      __ lw(temp_0, src1);
      __ sw(temp_0, dst1);
      __ Sdc1(temp_1, src0);
    } else if (rep == MachineRepresentation::kFloat32) {
      __ lwc1(temp_1, dst0);  // Save destination in temp_1.
      __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
      __ sw(temp_0, dst0);
      __ swc1(temp_1, src0);
    } else {
      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
      // SIMD values span four 32-bit words.
      MemOperand src1(src0.rm(), src0.offset() + kIntSize);
      MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
      MemOperand src2(src0.rm(), src0.offset() + 2 * kIntSize);
      MemOperand dst2(dst0.rm(), dst0.offset() + 2 * kIntSize);
      MemOperand src3(src0.rm(), src0.offset() + 3 * kIntSize);
      MemOperand dst3(dst0.rm(), dst0.offset() + 3 * kIntSize);
      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
      MSARegister temp_1 = kSimd128ScratchReg;
      __ ld_b(temp_1, dst0);  // Save destination in temp_1.
      __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
      __ sw(temp_0, dst0);
      __ lw(temp_0, src1);
      __ sw(temp_0, dst1);
      __ lw(temp_0, src2);
      __ sw(temp_0, dst2);
      __ lw(temp_0, src3);
      __ sw(temp_0, dst3);
      __ st_b(temp_1, src0);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
4194
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 32-bit MIPS we emit the jump tables inline (see
  // AssembleArchTableSwitch), so this out-of-line path must never be reached.
  UNREACHABLE();
}
4199
4200 #undef __
4201
4202 } // namespace compiler
4203 } // namespace internal
4204 } // namespace v8
4205