1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/base/bits.h"
6 #include "src/base/platform/wrappers.h"
7 #include "src/codegen/machine-type.h"
8 #include "src/compiler/backend/instruction-selector-impl.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/compiler/node-properties.h"
11
12 namespace v8 {
13 namespace internal {
14 namespace compiler {
15
16 #define TRACE_UNIMPL() \
17 PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
18
19 #define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
20
21 // Adds Mips-specific methods for generating InstructionOperands.
class Mips64OperandGenerator final : public OperandGenerator {
 public:
  explicit Mips64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  // Returns an immediate operand if |node| is a constant that fits the
  // immediate field of |opcode|, otherwise assigns a register.
  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the zero register if the node has the immediate value zero, otherwise
  // assign a register.
  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
    // Float zero is detected via its bit pattern, so -0.0 (which has a
    // non-zero bit pattern) is not mapped to the zero register.
    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
        (IsFloatConstant(node) &&
         (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // True for 32-bit and 64-bit integer constant nodes.
  bool IsIntegerConstant(Node* node) {
    return (node->opcode() == IrOpcode::kInt32Constant) ||
           (node->opcode() == IrOpcode::kInt64Constant);
  }

  // Extracts the constant's value; 32-bit constants are widened to int64_t.
  int64_t GetIntegerConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kInt32Constant) {
      return OpParameter<int32_t>(node->op());
    }
    DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
    return OpParameter<int64_t>(node->op());
  }

  // True for float32 and float64 constant nodes.
  bool IsFloatConstant(Node* node) {
    return (node->opcode() == IrOpcode::kFloat32Constant) ||
           (node->opcode() == IrOpcode::kFloat64Constant);
  }

  // Extracts the constant's value; float32 constants are widened to double.
  double GetFloatConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kFloat32Constant) {
      return OpParameter<float>(node->op());
    }
    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
    return OpParameter<double>(node->op());
  }

  // True if |node| is an integer constant whose value fits the immediate
  // field of |mode|'s architecture opcode.
  bool CanBeImmediate(Node* node, InstructionCode mode) {
    return IsIntegerConstant(node) &&
           CanBeImmediate(GetIntegerConstantValue(node), mode);
  }

  // Per-opcode immediate range check: 32-bit shifts take an unsigned 5-bit
  // amount, 64-bit shifts an unsigned 6-bit amount, ALU immediates 16 bits,
  // and load/store offsets anything representable as int32.
  bool CanBeImmediate(int64_t value, InstructionCode opcode) {
    switch (ArchOpcodeField::decode(opcode)) {
      case kMips64Shl:
      case kMips64Sar:
      case kMips64Shr:
        return is_uint5(value);
      case kMips64Dshl:
      case kMips64Dsar:
      case kMips64Dshr:
        return is_uint6(value);
      case kMips64Add:
      case kMips64And32:
      case kMips64And:
      case kMips64Dadd:
      case kMips64Or32:
      case kMips64Or:
      case kMips64Tst:
      case kMips64Xor:
        return is_uint16(value);
      case kMips64Lb:
      case kMips64Lbu:
      case kMips64Sb:
      case kMips64Lh:
      case kMips64Lhu:
      case kMips64Sh:
      case kMips64Lw:
      case kMips64Sw:
      case kMips64Ld:
      case kMips64Sd:
      case kMips64Lwc1:
      case kMips64Swc1:
      case kMips64Ldc1:
      case kMips64Sdc1:
        return is_int32(value);
      default:
        // Everything else takes a signed 16-bit immediate.
        return is_int16(value);
    }
  }

 private:
  // Not used on MIPS64; kept to mirror other ports' operand generators.
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};
121
VisitRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)122 static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
123 Node* node) {
124 Mips64OperandGenerator g(selector);
125 selector->Emit(opcode, g.DefineAsRegister(node),
126 g.UseRegister(node->InputAt(0)));
127 }
128
VisitRRI(InstructionSelector * selector,ArchOpcode opcode,Node * node)129 static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
130 Node* node) {
131 Mips64OperandGenerator g(selector);
132 int32_t imm = OpParameter<int32_t>(node->op());
133 selector->Emit(opcode, g.DefineAsRegister(node),
134 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
135 }
136
VisitSimdShift(InstructionSelector * selector,ArchOpcode opcode,Node * node)137 static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
138 Node* node) {
139 Mips64OperandGenerator g(selector);
140 if (g.IsIntegerConstant(node->InputAt(1))) {
141 selector->Emit(opcode, g.DefineAsRegister(node),
142 g.UseRegister(node->InputAt(0)),
143 g.UseImmediate(node->InputAt(1)));
144 } else {
145 selector->Emit(opcode, g.DefineAsRegister(node),
146 g.UseRegister(node->InputAt(0)),
147 g.UseRegister(node->InputAt(1)));
148 }
149 }
150
VisitRRIR(InstructionSelector * selector,ArchOpcode opcode,Node * node)151 static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
152 Node* node) {
153 Mips64OperandGenerator g(selector);
154 int32_t imm = OpParameter<int32_t>(node->op());
155 selector->Emit(opcode, g.DefineAsRegister(node),
156 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
157 g.UseRegister(node->InputAt(1)));
158 }
159
VisitRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)160 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
161 Node* node) {
162 Mips64OperandGenerator g(selector);
163 selector->Emit(opcode, g.DefineAsRegister(node),
164 g.UseRegister(node->InputAt(0)),
165 g.UseRegister(node->InputAt(1)));
166 }
167
VisitUniqueRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)168 static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
169 Node* node) {
170 Mips64OperandGenerator g(selector);
171 selector->Emit(opcode, g.DefineAsRegister(node),
172 g.UseUniqueRegister(node->InputAt(0)),
173 g.UseUniqueRegister(node->InputAt(1)));
174 }
175
VisitRRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)176 void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
177 Mips64OperandGenerator g(selector);
178 selector->Emit(
179 opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
180 g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
181 }
182
VisitRRO(InstructionSelector * selector,ArchOpcode opcode,Node * node)183 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
184 Node* node) {
185 Mips64OperandGenerator g(selector);
186 selector->Emit(opcode, g.DefineAsRegister(node),
187 g.UseRegister(node->InputAt(0)),
188 g.UseOperand(node->InputAt(1), opcode));
189 }
190
// Recognizes the pattern Word64Sar(Load(base, offset), 32) so that it can be
// replaced by a single 32-bit load of the upper word. Accessors are only
// valid when Matches() is true.
struct ExtendingLoadMatcher {
  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
    Initialize(node);
  }

  bool Matches() const { return matches_; }

  // Base address node of the replacement load.
  Node* base() const {
    DCHECK(Matches());
    return base_;
  }
  // Immediate offset of the replacement load.
  int64_t immediate() const {
    DCHECK(Matches());
    return immediate_;
  }
  // Architecture opcode of the replacement load (kMips64Lw).
  ArchOpcode opcode() const {
    DCHECK(Matches());
    return opcode_;
  }

 private:
  bool matches_;
  InstructionSelector* selector_;
  Node* base_;
  int64_t immediate_;
  ArchOpcode opcode_;

  void Initialize(Node* node) {
    Int64BinopMatcher m(node);
    // When loading a 64-bit value and shifting by 32, we should
    // just load and sign-extend the interesting 4 bytes instead.
    // This happens, for example, when we're loading and untagging SMIs.
    DCHECK(m.IsWord64Sar());
    if (m.left().IsLoad() && m.right().Is(32) &&
        selector_->CanCover(m.node(), m.left().node())) {
      // The load must not be separated from the shift by any effectful node,
      // otherwise folding them would reorder effects.
      DCHECK_EQ(selector_->GetEffectLevel(node),
                selector_->GetEffectLevel(m.left().node()));
      MachineRepresentation rep =
          LoadRepresentationOf(m.left().node()->op()).representation();
      DCHECK_EQ(3, ElementSizeLog2Of(rep));
      // Only full 64-bit (or tagged, which is 64-bit here) loads qualify.
      if (rep != MachineRepresentation::kTaggedSigned &&
          rep != MachineRepresentation::kTaggedPointer &&
          rep != MachineRepresentation::kTagged &&
          rep != MachineRepresentation::kWord64) {
        return;
      }

      Mips64OperandGenerator g(selector_);
      Node* load = m.left().node();
      Node* offset = load->InputAt(1);
      base_ = load->InputAt(0);
      opcode_ = kMips64Lw;
      if (g.CanBeImmediate(offset, opcode_)) {
        // The shift keeps bits 32..63, i.e. the word at offset+4 on
        // little-endian targets and at the original offset on big-endian.
#if defined(V8_TARGET_LITTLE_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset) + 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset);
#endif
        // The adjusted offset must still fit the load's immediate field.
        matches_ = g.CanBeImmediate(immediate_, kMips64Lw);
      }
    }
  }
};
255
TryEmitExtendingLoad(InstructionSelector * selector,Node * node,Node * output_node)256 bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
257 Node* output_node) {
258 ExtendingLoadMatcher m(node, selector);
259 Mips64OperandGenerator g(selector);
260 if (m.Matches()) {
261 InstructionOperand inputs[2];
262 inputs[0] = g.UseRegister(m.base());
263 InstructionCode opcode =
264 m.opcode() | AddressingModeField::encode(kMode_MRI);
265 DCHECK(is_int32(m.immediate()));
266 inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
267 InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
268 selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
269 inputs);
270 return true;
271 }
272 return false;
273 }
274
TryMatchImmediate(InstructionSelector * selector,InstructionCode * opcode_return,Node * node,size_t * input_count_return,InstructionOperand * inputs)275 bool TryMatchImmediate(InstructionSelector* selector,
276 InstructionCode* opcode_return, Node* node,
277 size_t* input_count_return, InstructionOperand* inputs) {
278 Mips64OperandGenerator g(selector);
279 if (g.CanBeImmediate(node, *opcode_return)) {
280 *opcode_return |= AddressingModeField::encode(kMode_MRI);
281 inputs[0] = g.UseImmediate(node);
282 *input_count_return = 1;
283 return true;
284 }
285 return false;
286 }
287
// Shared lowering for binary operations. Tries to encode the right operand
// as an immediate; if that fails and |has_reverse_opcode| is set (i.e. the
// operation commutes or has a reversed form), tries the left operand with
// |reverse_opcode|; otherwise both operands go into registers.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode,
                       FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  // Int32BinopMatcher is used for 64-bit binops as well; only the structural
  // left()/right() node accessors are consulted here.
  Int32BinopMatcher m(node);
  InstructionOperand inputs[2];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  // TryMatchImmediate fills inputs[1] and sets input_count to 1 on success;
  // the register operand is then added as inputs[0].
  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
                        &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (has_reverse_opcode &&
             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
  }

  outputs[output_count++] = g.DefineAsRegister(node);

  DCHECK_NE(0u, input_count);
  DCHECK_EQ(1u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
324
// Convenience overload without a flags continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
}
331
// Convenience overload for non-commutable operations with a continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  VisitBinop(selector, node, opcode, false, kArchNop, cont);
}
336
// Convenience overload for non-commutable operations without a continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  VisitBinop(selector, node, opcode, false, kArchNop);
}
341
VisitStackSlot(Node * node)342 void InstructionSelector::VisitStackSlot(Node* node) {
343 StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
344 int alignment = rep.alignment();
345 int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
346 OperandGenerator g(this);
347
348 Emit(kArchStackSlot, g.DefineAsRegister(node),
349 sequence()->AddImmediate(Constant(slot)), 0, nullptr);
350 }
351
VisitAbortCSADcheck(Node * node)352 void InstructionSelector::VisitAbortCSADcheck(Node* node) {
353 Mips64OperandGenerator g(this);
354 Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
355 }
356
EmitLoad(InstructionSelector * selector,Node * node,InstructionCode opcode,Node * output=nullptr)357 void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
358 Node* output = nullptr) {
359 Mips64OperandGenerator g(selector);
360 Node* base = node->InputAt(0);
361 Node* index = node->InputAt(1);
362
363 if (g.CanBeImmediate(index, opcode)) {
364 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
365 g.DefineAsRegister(output == nullptr ? node : output),
366 g.UseRegister(base), g.UseImmediate(index));
367 } else {
368 InstructionOperand addr_reg = g.TempRegister();
369 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
370 addr_reg, g.UseRegister(base), g.UseRegister(index));
371 // Emit desired load opcode, using temp addr_reg.
372 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
373 g.DefineAsRegister(output == nullptr ? node : output),
374 addr_reg, g.TempImmediate(0));
375 }
376 }
377
378 namespace {
EmitAddBeforeS128LoadStore(InstructionSelector * selector,Node * node,InstructionCode * opcode)379 InstructionOperand EmitAddBeforeS128LoadStore(InstructionSelector* selector,
380 Node* node,
381 InstructionCode* opcode) {
382 Mips64OperandGenerator g(selector);
383 Node* base = node->InputAt(0);
384 Node* index = node->InputAt(1);
385 InstructionOperand addr_reg = g.TempRegister();
386 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
387 addr_reg, g.UseRegister(base), g.UseRegister(index));
388 *opcode |= AddressingModeField::encode(kMode_MRI);
389 return addr_reg;
390 }
391
392 } // namespace
393
VisitStoreLane(Node * node)394 void InstructionSelector::VisitStoreLane(Node* node) {
395 StoreLaneParameters params = StoreLaneParametersOf(node->op());
396 LoadStoreLaneParams f(params.rep, params.laneidx);
397 InstructionCode opcode = kMips64S128StoreLane;
398 opcode |= MiscField::encode(f.sz);
399
400 Mips64OperandGenerator g(this);
401 InstructionOperand addr = EmitAddBeforeS128LoadStore(this, node, &opcode);
402 InstructionOperand inputs[4] = {
403 g.UseRegister(node->InputAt(2)),
404 g.UseImmediate(f.laneidx),
405 addr,
406 g.TempImmediate(0),
407 };
408 Emit(opcode, 0, nullptr, 4, inputs);
409 }
410
VisitLoadLane(Node * node)411 void InstructionSelector::VisitLoadLane(Node* node) {
412 LoadLaneParameters params = LoadLaneParametersOf(node->op());
413 LoadStoreLaneParams f(params.rep.representation(), params.laneidx);
414 InstructionCode opcode = kMips64S128LoadLane;
415 opcode |= MiscField::encode(f.sz);
416
417 Mips64OperandGenerator g(this);
418 InstructionOperand addr = EmitAddBeforeS128LoadStore(this, node, &opcode);
419 Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(2)),
420 g.UseImmediate(f.laneidx), addr, g.TempImmediate(0));
421 }
422
// Maps a SIMD load-transform (splat / extend / zero-extend load) to the
// corresponding MIPS64 MSA opcode, then emits it as an ordinary load.
void InstructionSelector::VisitLoadTransform(Node* node) {
  LoadTransformParameters params = LoadTransformParametersOf(node->op());

  InstructionCode opcode = kArchNop;
  switch (params.transformation) {
    // Splat loads: the element size is carried in the MiscField.
    case LoadTransformation::kS128Load8Splat:
      opcode = kMips64S128LoadSplat;
      opcode |= MiscField::encode(MSASize::MSA_B);
      break;
    case LoadTransformation::kS128Load16Splat:
      opcode = kMips64S128LoadSplat;
      opcode |= MiscField::encode(MSASize::MSA_H);
      break;
    case LoadTransformation::kS128Load32Splat:
      opcode = kMips64S128LoadSplat;
      opcode |= MiscField::encode(MSASize::MSA_W);
      break;
    case LoadTransformation::kS128Load64Splat:
      opcode = kMips64S128LoadSplat;
      opcode |= MiscField::encode(MSASize::MSA_D);
      break;
    // Extending loads: each has a dedicated opcode.
    case LoadTransformation::kS128Load8x8S:
      opcode = kMips64S128Load8x8S;
      break;
    case LoadTransformation::kS128Load8x8U:
      opcode = kMips64S128Load8x8U;
      break;
    case LoadTransformation::kS128Load16x4S:
      opcode = kMips64S128Load16x4S;
      break;
    case LoadTransformation::kS128Load16x4U:
      opcode = kMips64S128Load16x4U;
      break;
    case LoadTransformation::kS128Load32x2S:
      opcode = kMips64S128Load32x2S;
      break;
    case LoadTransformation::kS128Load32x2U:
      opcode = kMips64S128Load32x2U;
      break;
    // Zero-extending loads of a single 32/64-bit element.
    case LoadTransformation::kS128Load32Zero:
      opcode = kMips64S128Load32Zero;
      break;
    case LoadTransformation::kS128Load64Zero:
      opcode = kMips64S128Load64Zero;
      break;
    default:
      UNIMPLEMENTED();
  }

  EmitLoad(this, node, opcode);
}
474
// Selects the MIPS64 load opcode for the node's machine representation and
// emits the load via EmitLoad.
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());

  InstructionCode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Lwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Ldc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Sub-word loads pick the signed or unsigned variant.
      opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
      break;
    case MachineRepresentation::kWord32:
      opcode = kMips64Lw;
      break;
    // Tagged values are full 64-bit words on this port.
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Ld;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMips64MsaLd;
      break;
    // Pointer compression and map packing are not used on MIPS64.
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kMapWord:            // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  EmitLoad(this, node, opcode);
}
514
// Trap-handler-protected loads are not implemented for MIPS64.
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
519
// Lowers a store. Stores of tagged values that need a write barrier are
// emitted as a kArchStoreWithWriteBarrier pseudo-instruction; all other
// stores pick a plain store opcode based on the stored representation.
void InstructionSelector::VisitStore(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // Testing flag: force the barrier path for every tagged-pointer store.
  if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
    write_barrier_kind = kFullWriteBarrier;
  }

  // TODO(mips): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
    DCHECK(CanBeTaggedPointer(rep));
    // All three operands are placed in unique registers for the barrier
    // sequence, plus two scratch registers.
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode =
        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    // Plain store: pick the opcode matching the stored representation.
    ArchOpcode opcode;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kMips64Swc1;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kMips64Sdc1;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kMips64Sb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kMips64Sh;
        break;
      case MachineRepresentation::kWord32:
        opcode = kMips64Sw;
        break;
      // Tagged values are full 64-bit words on this port.
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kMips64Sd;
        break;
      case MachineRepresentation::kSimd128:
        opcode = kMips64MsaSt;
        break;
      // Pointer compression and map packing are not used on MIPS64.
      case MachineRepresentation::kCompressedPointer:  // Fall through.
      case MachineRepresentation::kCompressed:         // Fall through.
      case MachineRepresentation::kMapWord:            // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
    }

    if (g.CanBeImmediate(index, opcode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index),
           g.UseRegisterOrImmediateZero(value));
    } else {
      // Index does not fit the immediate field: compute base + index into a
      // temporary and store through it with a zero offset.
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
    }
  }
}
598
// Trap-handler-protected stores are not implemented for MIPS64.
void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
603
// Lowers Word32And. Recognizes And(Shr(x, imm), contiguous_low_mask) as a
// single Ext bit-extract, and And(x, mask-that-clears-low-bits) as an Ins of
// zeros; falls back to the generic And binop otherwise.
void InstructionSelector::VisitWord32And(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasResolvedValue()) {
    uint32_t mask = m.right().ResolvedValue();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasResolvedValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;

        // Ext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Ext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasResolvedValue()) {
    uint32_t mask = m.right().ResolvedValue();
    // ~mask must be a contiguous run of `shift` low bits for Ins to apply.
    uint32_t shift = base::bits::CountPopulation(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask.
      Emit(kMips64Ins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  // kMips64And32 is its own reverse opcode since And is commutative.
  VisitBinop(this, node, kMips64And32, true, kMips64And32);
}
652
// Lowers Word64And. Mirrors VisitWord32And using the 64-bit Dext/Dins
// instructions; falls back to the generic And binop otherwise.
void InstructionSelector::VisitWord64And(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasResolvedValue()) {
    uint64_t mask = m.right().ResolvedValue();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasResolvedValue()) {
        // Any shift value can match; int64 shifts use `value % 64`.
        uint32_t lsb =
            static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);

        // Dext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Dext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

        if (lsb == 0 && mask_width == 64) {
          // Degenerate case: the And is a no-op, just forward the input.
          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
        } else {
          Emit(kMips64Dext, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
               g.TempImmediate(static_cast<int32_t>(mask_width)));
        }
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasResolvedValue()) {
    uint64_t mask = m.right().ResolvedValue();
    // ~mask must be a contiguous run of `shift` low bits for Dins to apply.
    uint32_t shift = base::bits::CountPopulation(~mask);
    uint32_t msb = base::bits::CountLeadingZeros64(~mask);
    if (shift != 0 && shift < 32 && msb + shift == 64) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask. Dins cannot insert bits
      // past word size, so shifts smaller than 32 are covered.
      Emit(kMips64Dins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  // kMips64And is its own reverse opcode since And is commutative.
  VisitBinop(this, node, kMips64And, true, kMips64And);
}
707
// Or is commutative, so kMips64Or32 also serves as its own reverse opcode.
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMips64Or32, true, kMips64Or32);
}
711
// Or is commutative, so kMips64Or also serves as its own reverse opcode.
void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kMips64Or, true, kMips64Or);
}
715
VisitWord32Xor(Node * node)716 void InstructionSelector::VisitWord32Xor(Node* node) {
717 Int32BinopMatcher m(node);
718 if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
719 m.right().Is(-1)) {
720 Int32BinopMatcher mleft(m.left().node());
721 if (!mleft.right().HasResolvedValue()) {
722 Mips64OperandGenerator g(this);
723 Emit(kMips64Nor32, g.DefineAsRegister(node),
724 g.UseRegister(mleft.left().node()),
725 g.UseRegister(mleft.right().node()));
726 return;
727 }
728 }
729 if (m.right().Is(-1)) {
730 // Use Nor for bit negation and eliminate constant loading for xori.
731 Mips64OperandGenerator g(this);
732 Emit(kMips64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
733 g.TempImmediate(0));
734 return;
735 }
736 VisitBinop(this, node, kMips64Xor32, true, kMips64Xor32);
737 }
738
VisitWord64Xor(Node * node)739 void InstructionSelector::VisitWord64Xor(Node* node) {
740 Int64BinopMatcher m(node);
741 if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
742 m.right().Is(-1)) {
743 Int64BinopMatcher mleft(m.left().node());
744 if (!mleft.right().HasResolvedValue()) {
745 Mips64OperandGenerator g(this);
746 Emit(kMips64Nor, g.DefineAsRegister(node),
747 g.UseRegister(mleft.left().node()),
748 g.UseRegister(mleft.right().node()));
749 return;
750 }
751 }
752 if (m.right().Is(-1)) {
753 // Use Nor for bit negation and eliminate constant loading for xori.
754 Mips64OperandGenerator g(this);
755 Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
756 g.TempImmediate(0));
757 return;
758 }
759 VisitBinop(this, node, kMips64Xor, true, kMips64Xor);
760 }
761
// Lowers Word32Shl. If the input is an And with a contiguous mask that the
// shift pushes entirely out of the top, the And is redundant and only the
// shift is emitted; otherwise the generic shift lowering applies.
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    Mips64OperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
    if (mleft.right().HasResolvedValue()) {
      uint32_t mask = mleft.right().ResolvedValue();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().ResolvedValue();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Shl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Shl, node);
}
791
// Lowers Word32Shr. Shr(And(x, mask), imm) with a mask that survives the
// shift as a contiguous run is selected as a single Ext bit-extract;
// otherwise the generic shift lowering applies.
void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
    // int32 shifts use `value % 32`.
    uint32_t lsb = m.right().ResolvedValue() & 0x1F;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasResolvedValue() &&
        mleft.right().ResolvedValue() != 0) {
      // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      // Drop mask bits below the shift amount: they are shifted out anyway.
      uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Shr, node);
}
816
// Lowers Word32Sar. Sar(Shl(x, K), K) for K == 16/24 is a sign extension of
// a half-word/byte and maps to Seh/Seb; for K == 32 a shift-by-zero is
// emitted instead; otherwise the generic shift lowering applies.
void InstructionSelector::VisitWord32Sar(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
      Mips64OperandGenerator g(this);
      uint32_t sar = m.right().ResolvedValue();
      uint32_t shl = mleft.right().ResolvedValue();
      if ((sar == shl) && (sar == 16)) {
        // Sign-extend halfword.
        Emit(kMips64Seh, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        // Sign-extend byte.
        Emit(kMips64Seb, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 32)) {
        // Shift amount 0 — presumably relies on sll's implicit sign
        // extension of the low 32 bits on MIPS64; confirm against the
        // assembler's Shl implementation.
        Emit(kMips64Shl, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Sar, node);
}
842
// Lowers Word64Shl with two peepholes: (1) drop a 32->64 extension when the
// shift discards the upper 32 bits anyway, and (2) drop a contiguous-mask
// And when the shift pushes the whole mask out of the top.
void InstructionSelector::VisitWord64Shl(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kMips64Dshl, g.DefineAsRegister(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 63)) {
    // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
    // contiguous, and the shift immediate non-zero.
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasResolvedValue()) {
      uint64_t mask = mleft.right().ResolvedValue();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
        // Mask is contiguous and anchored at bit 0.
        uint64_t shift = m.right().ResolvedValue();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
        DCHECK_NE(0u, shift);

        if ((shift + mask_width) >= 64) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Dshl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Dshl, node);
}
882
// Lowers Word64Shr. Matches Shr(And(x, mask), imm) to a single Dext
// (64-bit bit-field extract) when the surviving mask bits form a contiguous
// field reaching bit 63; otherwise emits a plain Dshr.
void InstructionSelector::VisitWord64Shr(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
    uint32_t lsb = m.right().ResolvedValue() & 0x3F;
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasResolvedValue() &&
        mleft.right().ResolvedValue() != 0) {
      // Select Dext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint64_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        // Mask is contiguous and starts exactly at bit `lsb`.
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kMips64Dext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Dshr, node);
}
907
// Lowers Word64Sar; first tries to fold the arithmetic shift into a
// preceding load (emitting a narrower sign-extending load), otherwise emits
// a generic Dsar.
void InstructionSelector::VisitWord64Sar(Node* node) {
  if (TryEmitExtendingLoad(this, node, node)) return;
  VisitRRO(this, kMips64Dsar, node);
}
912
// Rotate-left is not selected on this target; the compiler canonicalizes
// rotates before instruction selection, so this visitor is never reached.
void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
914
// 64-bit rotate-left: not selected on this target (see VisitWord32Rol).
void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
916
// 32-bit rotate-right: generic register/register-or-immediate lowering.
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kMips64Ror, node);
}
920
// Count leading zeros of a 32-bit value.
void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kMips64Clz, node);
}
924
// Bit reversal is not offered by this backend; the operation is never
// selected here.
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
926
// 64-bit bit reversal: not offered by this backend (see 32-bit variant).
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
928
VisitWord64ReverseBytes(Node * node)929 void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
930 Mips64OperandGenerator g(this);
931 Emit(kMips64ByteSwap64, g.DefineAsRegister(node),
932 g.UseRegister(node->InputAt(0)));
933 }
934
VisitWord32ReverseBytes(Node * node)935 void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
936 Mips64OperandGenerator g(this);
937 Emit(kMips64ByteSwap32, g.DefineAsRegister(node),
938 g.UseRegister(node->InputAt(0)));
939 }
940
// SIMD byte reversal is not supported by this backend.
void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
  UNREACHABLE();
}
944
VisitWord32Ctz(Node * node)945 void InstructionSelector::VisitWord32Ctz(Node* node) {
946 Mips64OperandGenerator g(this);
947 Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
948 }
949
VisitWord64Ctz(Node * node)950 void InstructionSelector::VisitWord64Ctz(Node* node) {
951 Mips64OperandGenerator g(this);
952 Emit(kMips64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
953 }
954
VisitWord32Popcnt(Node * node)955 void InstructionSelector::VisitWord32Popcnt(Node* node) {
956 Mips64OperandGenerator g(this);
957 Emit(kMips64Popcnt, g.DefineAsRegister(node),
958 g.UseRegister(node->InputAt(0)));
959 }
960
VisitWord64Popcnt(Node * node)961 void InstructionSelector::VisitWord64Popcnt(Node* node) {
962 Mips64OperandGenerator g(this);
963 Emit(kMips64Dpopcnt, g.DefineAsRegister(node),
964 g.UseRegister(node->InputAt(0)));
965 }
966
// 64-bit rotate-right: generic register/register-or-immediate lowering.
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kMips64Dror, node);
}
970
// Count leading zeros of a 64-bit value.
void InstructionSelector::VisitWord64Clz(Node* node) {
  VisitRR(this, kMips64Dclz, node);
}
974
// Lowers Int32Add. On r6, tries to fuse an adjacent shift-left into a
// single Lsa (left-shift-add) in either operand position; otherwise falls
// back to the generic add with immediate support and overflow-variant reuse.
void InstructionSelector::VisitInt32Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  if (kArchVariant == kMips64r6) {
    // Select Lsa for (left + (left_of_right << imm)).
    if (m.right().opcode() == IrOpcode::kWord32Shl &&
        CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
      Int32BinopMatcher mright(m.right().node());
      if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
        int32_t shift_value =
            static_cast<int32_t>(mright.right().ResolvedValue());
        // Lsa encodes shift amounts 1..31 only.
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMips64Lsa, g.DefineAsRegister(node),
               g.UseRegister(m.left().node()),
               g.UseRegister(mright.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }

    // Select Lsa for ((left_of_left << imm) + right).
    if (m.left().opcode() == IrOpcode::kWord32Shl &&
        CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
        int32_t shift_value =
            static_cast<int32_t>(mleft.right().ResolvedValue());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMips64Lsa, g.DefineAsRegister(node),
               g.UseRegister(m.right().node()),
               g.UseRegister(mleft.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }
  }

  VisitBinop(this, node, kMips64Add, true, kMips64Add);
}
1017
// Lowers Int64Add. Mirror of VisitInt32Add: on r6, fuses an adjacent
// 64-bit shift-left into a single Dlsa in either operand position.
void InstructionSelector::VisitInt64Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  if (kArchVariant == kMips64r6) {
    // Select Dlsa for (left + (left_of_right << imm)).
    if (m.right().opcode() == IrOpcode::kWord64Shl &&
        CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
      Int64BinopMatcher mright(m.right().node());
      if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
        int32_t shift_value =
            static_cast<int32_t>(mright.right().ResolvedValue());
        // Dlsa encodes shift amounts 1..31 only.
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMips64Dlsa, g.DefineAsRegister(node),
               g.UseRegister(m.left().node()),
               g.UseRegister(mright.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }

    // Select Dlsa for ((left_of_left << imm) + right).
    if (m.left().opcode() == IrOpcode::kWord64Shl &&
        CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
        int32_t shift_value =
            static_cast<int32_t>(mleft.right().ResolvedValue());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMips64Dlsa, g.DefineAsRegister(node),
               g.UseRegister(m.right().node()),
               g.UseRegister(mleft.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }
  }

  VisitBinop(this, node, kMips64Dadd, true, kMips64Dadd);
}
1060
// 32-bit subtraction: generic binop lowering (immediate support included).
void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMips64Sub);
}
1064
// 64-bit subtraction: generic binop lowering (immediate support included).
void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitBinop(this, node, kMips64Dsub);
}
1068
// Lowers Int32Mul with strength reduction for constant right operands:
//   x * 2^k         -> shift left by k
//   x * (2^k + 1)   -> Lsa (r6, k in 1..31)
//   x * (2^k - 1)   -> shift + subtract
// Also fuses two Smi-untagging shifts (Word64Sar by 32) feeding the multiply
// into a single DMulHigh.
void InstructionSelector::VisitInt32Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
    uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
    if (base::bits::IsPowerOfTwo(value)) {
      // x * 2^k  ->  x << k
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value - 1) && kArchVariant == kMips64r6 &&
        value - 1 > 0 && value - 1 <= 31) {
      // x * (2^k + 1)  ->  Lsa(x, x, k)
      Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      // x * (2^k - 1)  ->  (x << k) - x
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
      Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher leftInput(left), rightInput(right);
      if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
        // Combine untagging shifts with Dmul high.
        Emit(kMips64DMulHigh, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  VisitRRR(this, kMips64Mul, node);
}
1114
// High 32 bits of the signed 32x32->64 multiply.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHigh, node);
}
1118
// High 32 bits of the unsigned 32x32->64 multiply.
void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHighU, node);
}
1122
// Lowers Int64Mul with the same constant strength reduction as the 32-bit
// variant (2^k -> Dshl, 2^k+1 -> Dlsa on r6, 2^k-1 -> Dshl + Dsub).
void InstructionSelector::VisitInt64Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(dusmil): Add optimization for shifts larger than 32.
  if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
    // NOTE(review): the constant is narrowed to uint32_t here, so the
    // power-of-two paths only trigger for constants that fit in 32 bits
    // (see the TODO above).
    uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
    if (base::bits::IsPowerOfTwo(value)) {
      // x * 2^k  ->  x << k
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value - 1) && kArchVariant == kMips64r6 &&
        value - 1 > 0 && value - 1 <= 31) {
      // x * (2^k + 1)  ->  Dlsa(x, x, k)
      // Dlsa macro will handle the shifting value out of bound cases.
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      // x * (2^k - 1)  ->  (x << k) - x
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
      Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
1156
// Lowers Int32Div. When both operands are Smi-untagging shifts
// (Word64Sar by 32) that this node fully covers, folds the shifts away and
// divides the original 64-bit values with Ddiv instead.
void InstructionSelector::VisitInt32Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Ddiv.
        Emit(kMips64Ddiv, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
1178
VisitUint32Div(Node * node)1179 void InstructionSelector::VisitUint32Div(Node* node) {
1180 Mips64OperandGenerator g(this);
1181 Int32BinopMatcher m(node);
1182 Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1183 g.UseRegister(m.right().node()));
1184 }
1185
// Lowers Int32Mod. Like VisitInt32Div, folds a pair of covered Smi-untagging
// shifts (Word64Sar by 32) into a single 64-bit Dmod.
void InstructionSelector::VisitInt32Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Dmod.
        Emit(kMips64Dmod, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
1207
VisitUint32Mod(Node * node)1208 void InstructionSelector::VisitUint32Mod(Node* node) {
1209 Mips64OperandGenerator g(this);
1210 Int32BinopMatcher m(node);
1211 Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1212 g.UseRegister(m.right().node()));
1213 }
1214
VisitInt64Div(Node * node)1215 void InstructionSelector::VisitInt64Div(Node* node) {
1216 Mips64OperandGenerator g(this);
1217 Int64BinopMatcher m(node);
1218 Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1219 g.UseRegister(m.right().node()));
1220 }
1221
VisitUint64Div(Node * node)1222 void InstructionSelector::VisitUint64Div(Node* node) {
1223 Mips64OperandGenerator g(this);
1224 Int64BinopMatcher m(node);
1225 Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1226 g.UseRegister(m.right().node()));
1227 }
1228
VisitInt64Mod(Node * node)1229 void InstructionSelector::VisitInt64Mod(Node* node) {
1230 Mips64OperandGenerator g(this);
1231 Int64BinopMatcher m(node);
1232 Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1233 g.UseRegister(m.right().node()));
1234 }
1235
VisitUint64Mod(Node * node)1236 void InstructionSelector::VisitUint64Mod(Node* node) {
1237 Mips64OperandGenerator g(this);
1238 Int64BinopMatcher m(node);
1239 Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1240 g.UseRegister(m.right().node()));
1241 }
1242
// Widen float32 to float64 (cvt.d.s).
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDS, node);
}
1246
// Convert signed int32 to float32 (cvt.s.w).
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSW, node);
}
1250
// Convert unsigned int32 to float32.
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUw, node);
}
1254
// Convert signed int32 to float64 (cvt.d.w).
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDW, node);
}
1258
// Convert signed int64 to float64 (cvt.d.l).
void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDL, node);
}
1262
// Convert unsigned int32 to float64.
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUw, node);
}
1266
VisitTruncateFloat32ToInt32(Node * node)1267 void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
1268 Mips64OperandGenerator g(this);
1269 InstructionCode opcode = kMips64TruncWS;
1270 TruncateKind kind = OpParameter<TruncateKind>(node->op());
1271 if (kind == TruncateKind::kSetOverflowToMin) {
1272 opcode |= MiscField::encode(true);
1273 }
1274 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
1275 }
1276
VisitTruncateFloat32ToUint32(Node * node)1277 void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
1278 Mips64OperandGenerator g(this);
1279 InstructionCode opcode = kMips64TruncUwS;
1280 TruncateKind kind = OpParameter<TruncateKind>(node->op());
1281 if (kind == TruncateKind::kSetOverflowToMin) {
1282 opcode |= MiscField::encode(true);
1283 }
1284 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
1285 }
1286
// Lowers ChangeFloat64ToInt32, fusing a covered Float64Round* producer into
// the rounding-convert instructions (floor/ceil/round/trunc.w.d), and
// likewise collapsing a float32->float64->int32 chain into a single
// float32 conversion.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMips64FloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMips64CeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMips64RoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMips64TruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMips64FloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMips64CeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMips64RoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            // No fusible rounding op: truncate the float32 value directly,
            // skipping the widen-to-float64 step.
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMips64TruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  VisitRR(this, kMips64TruncWD, node);
}
1349
// Truncating float64 -> int64 conversion (trunc.l.d).
void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
  VisitRR(this, kMips64TruncLD, node);
}
1353
// Truncating float64 -> uint32 conversion.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}
1357
// Truncating float64 -> uint64 conversion.
void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
  VisitRR(this, kMips64TruncUlD, node);
}
1361
// Truncating float64 -> uint32; same lowering as ChangeFloat64ToUint32.
void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}
1365
VisitTruncateFloat64ToInt64(Node * node)1366 void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
1367 Mips64OperandGenerator g(this);
1368 InstructionCode opcode = kMips64TruncLD;
1369 TruncateKind kind = OpParameter<TruncateKind>(node->op());
1370 if (kind == TruncateKind::kSetOverflowToMin) {
1371 opcode |= MiscField::encode(true);
1372 }
1373 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
1374 }
1375
VisitTryTruncateFloat32ToInt64(Node * node)1376 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
1377 Mips64OperandGenerator g(this);
1378 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1379 InstructionOperand outputs[2];
1380 size_t output_count = 0;
1381 outputs[output_count++] = g.DefineAsRegister(node);
1382
1383 Node* success_output = NodeProperties::FindProjection(node, 1);
1384 if (success_output) {
1385 outputs[output_count++] = g.DefineAsRegister(success_output);
1386 }
1387
1388 this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
1389 }
1390
VisitTryTruncateFloat64ToInt64(Node * node)1391 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
1392 Mips64OperandGenerator g(this);
1393 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1394 InstructionOperand outputs[2];
1395 size_t output_count = 0;
1396 outputs[output_count++] = g.DefineAsRegister(node);
1397
1398 Node* success_output = NodeProperties::FindProjection(node, 1);
1399 if (success_output) {
1400 outputs[output_count++] = g.DefineAsRegister(success_output);
1401 }
1402
1403 Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
1404 }
1405
VisitTryTruncateFloat32ToUint64(Node * node)1406 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
1407 Mips64OperandGenerator g(this);
1408 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1409 InstructionOperand outputs[2];
1410 size_t output_count = 0;
1411 outputs[output_count++] = g.DefineAsRegister(node);
1412
1413 Node* success_output = NodeProperties::FindProjection(node, 1);
1414 if (success_output) {
1415 outputs[output_count++] = g.DefineAsRegister(success_output);
1416 }
1417
1418 Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
1419 }
1420
VisitTryTruncateFloat64ToUint64(Node * node)1421 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
1422 Mips64OperandGenerator g(this);
1423
1424 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1425 InstructionOperand outputs[2];
1426 size_t output_count = 0;
1427 outputs[output_count++] = g.DefineAsRegister(node);
1428
1429 Node* success_output = NodeProperties::FindProjection(node, 1);
1430 if (success_output) {
1431 outputs[output_count++] = g.DefineAsRegister(success_output);
1432 }
1433
1434 Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
1435 }
1436
// BitcastWord32ToWord64 is not expected to reach this backend.
void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
  UNIMPLEMENTED();
}
1440
// Lowers ChangeInt32ToInt64. Normally a no-op (EmitIdentity), since MIPS64
// keeps int32 values sign-extended in 64-bit registers; under the simulator,
// call results must be explicitly re-extended (a 32-bit shift by 0 does it).
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  // On MIPS64, int32 values should all be sign-extended to 64-bit, so
  // no need to sign-extend them here.
  // But when call to a host function in simulator, if the function return an
  // int32 value, the simulator do not sign-extend to int64, because in
  // simulator we do not know the function whether return an int32 or int64.
#ifdef USE_SIMULATOR
  Node* value = node->InputAt(0);
  if (value->opcode() == IrOpcode::kCall) {
    Mips64OperandGenerator g(this);
    // sll with shift amount 0 sign-extends the low word.
    Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(value),
         g.TempImmediate(0));
    return;
  }
#endif
  EmitIdentity(node);
}
1458
// Returns true when `node` is known to produce a value whose upper 32 bits
// are already zero in its 64-bit register, so ChangeUint32ToUint64 can be
// elided. Conservative: anything unrecognized returns false.
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
  DCHECK_NE(node->opcode(), IrOpcode::kPhi);
  switch (node->opcode()) {
    // Comparisons only emit 0/1, so the upper 32 bits must be zero.
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
      return true;
    case IrOpcode::kWord32And: {
      Int32BinopMatcher m(node);
      if (m.right().HasResolvedValue()) {
        uint32_t mask = m.right().ResolvedValue();
        // A mask that fits in 31 bits clears bit 31, so the (sign-extended)
        // result has zero upper bits.
        return is_uint31(mask);
      }
      return false;
    }
    case IrOpcode::kWord32Shr: {
      Int32BinopMatcher m(node);
      if (m.right().HasResolvedValue()) {
        uint8_t sa = m.right().ResolvedValue() & 0x1f;
        // A non-zero logical right shift clears bit 31 of the result.
        return sa > 0;
      }
      return false;
    }
    case IrOpcode::kLoad:
    case IrOpcode::kLoadImmutable: {
      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
      if (load_rep.IsUnsigned()) {
        switch (load_rep.representation()) {
          // Unsigned byte/halfword loads zero the upper bits.
          case MachineRepresentation::kWord8:
          case MachineRepresentation::kWord16:
            return true;
          default:
            return false;
        }
      }
      return false;
    }
    default:
      return false;
  }
}
1503
// Lowers ChangeUint32ToUint64: fold into a zero-extending 32-bit load when
// possible, elide entirely when the producer already zero-extends, else emit
// an explicit Dext of the low 32 bits.
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  IrOpcode::Value opcode = value->opcode();

  if (opcode == IrOpcode::kLoad || opcode == IrOpcode::kUnalignedLoad) {
    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
    ArchOpcode arch_opcode =
        opcode == IrOpcode::kUnalignedLoad ? kMips64Ulwu : kMips64Lwu;
    if (load_rep.IsUnsigned() &&
        load_rep.representation() == MachineRepresentation::kWord32) {
      // Replace the load+extend pair with a single zero-extending load.
      EmitLoad(this, value, arch_opcode, node);
      return;
    }
  }

  if (ZeroExtendsWord32ToWord64(value)) {
    // Upper 32 bits are already zero; no code needed.
    EmitIdentity(node);
    return;
  }

  Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0), g.TempImmediate(32));
}
1528
// Lowers TruncateInt64ToInt32. Tries to fold a covered Word64Sar producer
// into an extending load or a single Dsar (Smi untagging); otherwise emits
// a 32-bit shift by zero, which sign-extends the low word into canonical
// int32 register form.
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar: {
        if (CanCoverTransitively(node, value, value->InputAt(0)) &&
            TryEmitExtendingLoad(this, value, node)) {
          return;
        } else {
          Int64BinopMatcher m(value);
          if (m.right().IsInRange(32, 63)) {
            // After smi untagging no need for truncate. Combine sequence.
            Emit(kMips64Dsar, g.DefineAsRegister(node),
                 g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()));
            return;
          }
        }
        break;
      }
      default:
        break;
    }
  }
  Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0));
}
1557
// Lowers TruncateFloat64ToFloat32, converting int32 -> float64 -> float32
// chains directly to a single int32 -> float32 conversion.
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
  // instruction.
  if (CanCover(node, value) &&
      value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
    Emit(kMips64CvtSW, g.DefineAsRegister(node),
         g.UseRegister(value->InputAt(0)));
    return;
  }
  VisitRR(this, kMips64CvtSD, node);
}
1571
// JavaScript-style float64 -> int32 truncation (architecture-independent
// opcode with slow-path handling).
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}
1575
// Round float64 to int32 via truncation (trunc.w.d).
void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kMips64TruncWD, node);
}
1579
// Convert signed int64 to float32 (cvt.s.l).
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSL, node);
}
1583
// Convert signed int64 to float64 (cvt.d.l).
void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDL, node);
}
1587
// Convert unsigned int64 to float32.
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUl, node);
}
1591
// Convert unsigned int64 to float64.
void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUl, node);
}
1595
// Reinterpret float32 bits as int32; reuses the float64 low-word extract
// opcode for the FPR -> GPR move.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
1599
// Reinterpret float64 bits as int64 (FPR -> GPR move).
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kMips64BitcastDL, node);
}
1603
// Reinterpret int32 bits as float32 by inserting them into the low word of
// an FP register. The raw inline immediate 0 stands in for the unused
// "existing double" operand of Float64InsertLowWord32.
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE_INT32, 0),
       g.UseRegister(node->InputAt(0)));
}
1610
// Reinterpret int64 bits as float64 (GPR -> FPR move).
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64BitcastLD, node);
}
1614
// Single-precision addition.
void InstructionSelector::VisitFloat32Add(Node* node) {
  // Optimization with Madd.S(z, x, y) is intentionally removed.
  // See explanation for madd_s in assembler-mips64.cc.
  VisitRRR(this, kMips64AddS, node);
}
1620
// Double-precision addition.
void InstructionSelector::VisitFloat64Add(Node* node) {
  // Optimization with Madd.D(z, x, y) is intentionally removed.
  // See explanation for madd_d in assembler-mips64.cc.
  VisitRRR(this, kMips64AddD, node);
}
1626
// Single-precision subtraction.
void InstructionSelector::VisitFloat32Sub(Node* node) {
  // Optimization with Msub.S(z, x, y) is intentionally removed.
  // See explanation for madd_s in assembler-mips64.cc.
  VisitRRR(this, kMips64SubS, node);
}
1632
// Double-precision subtraction.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  // Optimization with Msub.D(z, x, y) is intentionally removed.
  // See explanation for madd_d in assembler-mips64.cc.
  VisitRRR(this, kMips64SubD, node);
}
1638
// Single-precision multiplication.
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMips64MulS, node);
}
1642
// Double-precision multiplication.
void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMips64MulD, node);
}
1646
// Single-precision division.
void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMips64DivS, node);
}
1650
VisitFloat64Div(Node * node)1651 void InstructionSelector::VisitFloat64Div(Node* node) {
1652 VisitRRR(this, kMips64DivD, node);
1653 }
1654
// Float64 modulus has no machine instruction; it is lowered to a call into
// the C runtime (hence MarkAsCall). The operands are pinned to the FP
// argument/return registers used by that call: result in f0, inputs in
// f12 and f14.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ModD, g.DefineAsFixed(node, f0),
       g.UseFixed(node->InputAt(0), f12), g.UseFixed(node->InputAt(1), f14))
      ->MarkAsCall();
}
1661
VisitFloat32Max(Node * node)1662 void InstructionSelector::VisitFloat32Max(Node* node) {
1663 Mips64OperandGenerator g(this);
1664 Emit(kMips64Float32Max, g.DefineAsRegister(node),
1665 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1666 }
1667
VisitFloat64Max(Node * node)1668 void InstructionSelector::VisitFloat64Max(Node* node) {
1669 Mips64OperandGenerator g(this);
1670 Emit(kMips64Float64Max, g.DefineAsRegister(node),
1671 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1672 }
1673
VisitFloat32Min(Node * node)1674 void InstructionSelector::VisitFloat32Min(Node* node) {
1675 Mips64OperandGenerator g(this);
1676 Emit(kMips64Float32Min, g.DefineAsRegister(node),
1677 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1678 }
1679
VisitFloat64Min(Node * node)1680 void InstructionSelector::VisitFloat64Min(Node* node) {
1681 Mips64OperandGenerator g(this);
1682 Emit(kMips64Float64Min, g.DefineAsRegister(node),
1683 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1684 }
1685
// Unary floating-point operations (abs, sqrt, rounding modes, negation).
// Each delegates to the shared two-operand helper with the matching opcode.

void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMips64AbsS, node);
}

void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMips64AbsD, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtS, node);
}

void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtD, node);
}

void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMips64Float32RoundDown, node);
}

void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMips64Float64RoundDown, node);
}

void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMips64Float32RoundUp, node);
}

void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMips64Float64RoundUp, node);
}

void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float32RoundTruncate, node);
}

void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float64RoundTruncate, node);
}

void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  // Round-ties-away is not lowered on MIPS64 — presumably the corresponding
  // machine operator is never enabled for this architecture, so this visitor
  // must never be reached. Confirm in the supported-operator flags.
  UNREACHABLE();
}

void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float32RoundTiesEven, node);
}

void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float64RoundTiesEven, node);
}

void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kMips64NegS, node);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kMips64NegD, node);
}
1745
// IEEE-754 binary math functions (pow etc.) are lowered to runtime calls
// (MarkAsCall). Operands are pinned to fixed FP registers: result in f0,
// inputs in f2 and f4.
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  Mips64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
       g.UseFixed(node->InputAt(1), f4))
      ->MarkAsCall();
}
1753
// IEEE-754 unary math functions (sin, log, ...) are lowered to runtime calls
// (MarkAsCall). Result is pinned to f0 and the input to f12.
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  Mips64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
      ->MarkAsCall();
}
1760
// Moves outgoing call arguments into place. For C calls, argument slots are
// reserved by kArchPrepareCallCFunction and every argument is poked into its
// slot; for other calls, stack space is claimed first and only arguments
// that occupy a slot are stored.
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  Mips64OperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    // Stack pokes start after the kCArgSlotCount reserved argument slots.
    int slot = kCArgSlotCount;
    for (PushParameter input : (*arguments)) {
      Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
           g.TempImmediate(slot << kSystemPointerSizeLog2));
      ++slot;
    }
  } else {
    int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
    if (push_count > 0) {
      // Calculate needed space. Holes (null nodes) contribute no slots.
      int stack_size = 0;
      for (PushParameter input : (*arguments)) {
        if (input.node) {
          stack_size += input.location.GetSizeInPointers();
        }
      }
      Emit(kMips64StackClaim, g.NoOutput(),
           g.TempImmediate(stack_size << kSystemPointerSizeLog2));
    }
    // Store each argument at its index-derived offset in the claimed area.
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node) {
        Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
      }
    }
  }
}
1801
EmitPrepareResults(ZoneVector<PushParameter> * results,const CallDescriptor * call_descriptor,Node * node)1802 void InstructionSelector::EmitPrepareResults(
1803 ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
1804 Node* node) {
1805 Mips64OperandGenerator g(this);
1806
1807 for (PushParameter output : *results) {
1808 if (!output.location.IsCallerFrameSlot()) continue;
1809 // Skip any alignment holes in nodes.
1810 if (output.node != nullptr) {
1811 DCHECK(!call_descriptor->IsCFunctionCall());
1812 if (output.location.GetType() == MachineType::Float32()) {
1813 MarkAsFloat32(output.node);
1814 } else if (output.location.GetType() == MachineType::Float64()) {
1815 MarkAsFloat64(output.node);
1816 } else if (output.location.GetType() == MachineType::Simd128()) {
1817 MarkAsSimd128(output.node);
1818 }
1819 int offset = call_descriptor->GetOffsetToReturns();
1820 int reverse_slot = -output.location.GetLocation() - offset;
1821 Emit(kMips64Peek, g.DefineAsRegister(output.node),
1822 g.UseImmediate(reverse_slot));
1823 }
1824 }
1825 }
1826
// Tail-call target addresses are never encoded as immediates on MIPS64;
// they are always materialized in a register.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
1828
// Selects an unaligned load. Byte loads need no special handling (Lb/Lbu);
// wider types use the unaligned-load opcodes (Ulh/Ulw/Uld/Ulwc1/Uldc1).
// If the index does not fit the opcode's immediate field, the address is
// computed into a temp register first.
void InstructionSelector::VisitUnalignedLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Ulwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Uldc1;
      break;
    case MachineRepresentation::kWord8:
      // Single bytes are always "aligned"; pick signed/unsigned byte load.
      opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
      break;
    case MachineRepresentation::kWord32:
      opcode = kMips64Ulw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      // Tagged values are full pointers here, so they load as 64-bit words.
      opcode = kMips64Uld;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMips64MsaLd;
      break;
    case MachineRepresentation::kBit:                // Fall through.
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kMapWord:            // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
1881
// Selects an unaligned store. Mirrors VisitUnalignedLoad: byte stores use
// the plain Sb, wider types the unaligned-store opcodes, and out-of-range
// indices go through a temp address register. A zero value can be stored
// directly from the zero register (UseRegisterOrImmediateZero).
void InstructionSelector::VisitUnalignedStore(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
  ArchOpcode opcode;
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Uswc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Usdc1;
      break;
    case MachineRepresentation::kWord8:
      opcode = kMips64Sb;
      break;
    case MachineRepresentation::kWord16:
      opcode = kMips64Ush;
      break;
    case MachineRepresentation::kWord32:
      opcode = kMips64Usw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Usd;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMips64MsaSt;
      break;
    case MachineRepresentation::kBit:                // Fall through.
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kMapWord:            // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}
1936
1937 namespace {
1938
1939 // Shared routine for multiple compare operations.
// Emits a compare of `left` against `right` whose result is consumed by the
// flags continuation (branch, set, deopt, ...).
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}
1945
1946 // Shared routine for multiple float32 compare operations.
VisitFloat32Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1947 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1948 FlagsContinuation* cont) {
1949 Mips64OperandGenerator g(selector);
1950 Float32BinopMatcher m(node);
1951 InstructionOperand lhs, rhs;
1952
1953 lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1954 : g.UseRegister(m.left().node());
1955 rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1956 : g.UseRegister(m.right().node());
1957 VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
1958 }
1959
1960 // Shared routine for multiple float64 compare operations.
VisitFloat64Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1961 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1962 FlagsContinuation* cont) {
1963 Mips64OperandGenerator g(selector);
1964 Float64BinopMatcher m(node);
1965 InstructionOperand lhs, rhs;
1966
1967 lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1968 : g.UseRegister(m.left().node());
1969 rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1970 : g.UseRegister(m.right().node());
1971 VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
1972 }
1973
1974 // Shared routine for multiple word compare operations.
// Shared routine for multiple word compare operations. Tries to use an
// immediate operand when one side is a suitable constant; when only the left
// side is a constant, the operands are swapped and the continuation's
// condition is commuted (unless the operation is commutative anyway).
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative) {
  Mips64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, opcode)) {
    if (opcode == kMips64Tst) {
      VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          // NOTE(review): for (in)equality only the materializing ("set")
          // form takes an immediate; the fused-branch form keeps both
          // operands in registers — confirm against the code generator.
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseImmediate(right), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseRegister(right), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          // Relational conditions can always use the immediate form.
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseImmediate(right), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseRegister(right), cont);
      }
    }
  } else if (g.CanBeImmediate(left, opcode)) {
    // The constant is on the left: swap operand roles; commute the condition
    // unless operand order does not matter.
    if (!commutative) cont->Commute();
    if (opcode == kMips64Tst) {
      VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseImmediate(left), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseRegister(left), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseImmediate(left), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseRegister(left), cont);
      }
    }
  } else {
    // Neither side is an encodable constant: register-register compare.
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}
2045
IsNodeUnsigned(Node * n)2046 bool IsNodeUnsigned(Node* n) {
2047 NodeMatcher m(n);
2048
2049 if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad()) {
2050 LoadRepresentation load_rep = LoadRepresentationOf(n->op());
2051 return load_rep.IsUnsigned();
2052 } else if (m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
2053 AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(n->op());
2054 LoadRepresentation load_rep = atomic_load_params.representation();
2055 return load_rep.IsUnsigned();
2056 } else {
2057 return m.IsUint32Div() || m.IsUint32LessThan() ||
2058 m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
2059 m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() ||
2060 m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
2061 }
2062 }
2063
2064 // Shared routine for multiple word compare operations.
// Shared routine for multiple word compare operations.
// Performs a compare that only depends on the low 32 bits of each operand:
// both operands are shifted left by 32 so that any (possibly inconsistent)
// upper-half contents are discarded before the 64-bit compare.
void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
                            InstructionCode opcode, FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  InstructionOperand leftOp = g.TempRegister();
  InstructionOperand rightOp = g.TempRegister();

  selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
                 g.TempImmediate(32));
  selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
                 g.TempImmediate(32));

  VisitCompare(selector, opcode, leftOp, rightOp, cont);
}
2078
// Emits the optimized 32-bit compare, which relies on operands being properly
// sign-extended in their 64-bit registers. With FLAG_debug_code it first
// computes both the optimized result and the full (shifted) result and emits
// an assertion that they agree, catching violations of the sign-extension
// invariant at runtime.
void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
                                 InstructionCode opcode,
                                 FlagsContinuation* cont) {
  if (FLAG_debug_code) {
    Mips64OperandGenerator g(selector);
    InstructionOperand leftOp = g.TempRegister();
    InstructionOperand rightOp = g.TempRegister();
    InstructionOperand optimizedResult = g.TempRegister();
    InstructionOperand fullResult = g.TempRegister();
    FlagsCondition condition = cont->condition();
    // Materialize the condition as a boolean ("set" mode) so the two results
    // can be compared directly.
    InstructionCode testOpcode = opcode |
                                 FlagsConditionField::encode(condition) |
                                 FlagsModeField::encode(kFlags_set);

    selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
                   g.UseRegister(node->InputAt(1)));

    // Recompute with both operands shifted so only the low 32 bits matter.
    selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
                   g.TempImmediate(32));
    selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
                   g.TempImmediate(32));
    selector->Emit(testOpcode, fullResult, leftOp, rightOp);

    selector->Emit(
        kMips64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
        g.TempImmediate(
            static_cast<int>(AbortReason::kUnsupportedNonPrimitiveCompare)));
  }

  VisitWordCompare(selector, node, opcode, cont, false);
}
2110
void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  // MIPS64 doesn't support Word32 compare instructions. Instead it relies
  // on the values in registers being correctly sign-extended and uses a
  // Word64 comparison instead. This behavior is correct in most cases,
  // but doesn't work when comparing signed with unsigned operands.
  // We could simulate a full Word32 compare in all cases but this would
  // create unnecessary overhead since unsigned integers are rarely
  // used in JavaScript.
  // The solution proposed here tries to match a comparison of signed
  // with unsigned operand, and performs a full Word32Compare only
  // in those cases. Unfortunately, the solution is not complete because
  // it might skip cases where a Word32 full compare is needed, so
  // basically it is a hack.
  // When calling a host function in the simulator, if the function returns
  // an int32 value, the simulator does not sign-extend it to int64 because
  // the simulator does not know whether the function returns an int32 or an
  // int64. So we need to do a full Word32 compare in this case.
#ifndef USE_SIMULATOR
  if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
#else
  if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) ||
      node->InputAt(0)->opcode() == IrOpcode::kCall ||
      node->InputAt(1)->opcode() == IrOpcode::kCall ) {
#endif
    VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
  } else {
    VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
  }
}
2141
// 64-bit compares map directly onto the native compare; no sign-extension
// concerns apply.
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}
2146
// Compares `value` against the immediate zero and hands the result to the
// flags continuation.
void EmitWordCompareZero(InstructionSelector* selector, Node* value,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  selector->EmitWithContinuation(kMips64Cmp, g.UseRegister(value),
                                 g.TempImmediate(0), cont);
}
2153
// Selects an atomic load for the given width. Chooses the opcode from the
// load representation, then emits either the immediate-index form or a
// temp-register address computation, mirroring the non-atomic load path.
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
                     AtomicWidth width) {
  Mips64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  // The memory order is ignored.
  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
  LoadRepresentation load_rep = atomic_load_params.representation();
  InstructionCode code;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      // Signed sub-word loads only exist for the 32-bit atomic width.
      DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
      code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
      code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      code = kAtomicLoadWord32;
      break;
    case MachineRepresentation::kWord64:
      code = kMips64Word64AtomicLoadUint64;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:
      // Tagged values are uncompressed 64-bit words on this architecture.
      DCHECK_EQ(kTaggedSize, 8);
      code = kMips64Word64AtomicLoadUint64;
      break;
    default:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, code)) {
    selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.DefineAsRegister(node), g.UseRegister(base),
                   g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
2204
2205 void VisitAtomicStore(InstructionSelector* selector, Node* node,
2206 AtomicWidth width) {
2207 Mips64OperandGenerator g(selector);
2208 Node* base = node->InputAt(0);
2209 Node* index = node->InputAt(1);
2210 Node* value = node->InputAt(2);
2211
2212 // The memory order is ignored.
2213 AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
2214 WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
2215 MachineRepresentation rep = store_params.representation();
2216
2217 if (FLAG_enable_unconditional_write_barriers &&
2218 CanBeTaggedOrCompressedPointer(rep)) {
2219 write_barrier_kind = kFullWriteBarrier;
2220 }
2221
2222 InstructionCode code;
2223
2224 if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
2225 DCHECK(CanBeTaggedPointer(rep));
2226 DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
2227
2228 InstructionOperand inputs[3];
2229 size_t input_count = 0;
2230 inputs[input_count++] = g.UseUniqueRegister(base);
2231 inputs[input_count++] = g.UseUniqueRegister(index);
2232 inputs[input_count++] = g.UseUniqueRegister(value);
2233 RecordWriteMode record_write_mode =
2234 WriteBarrierKindToRecordWriteMode(write_barrier_kind);
2235 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
2236 size_t const temp_count = arraysize(temps);
2237 code = kArchAtomicStoreWithWriteBarrier;
2238 code |= MiscField::encode(static_cast<int>(record_write_mode));
2239 selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
2240 } else {
2241 switch (rep) {
2242 case MachineRepresentation::kWord8:
2243 code = kAtomicStoreWord8;
2244 break;
2245 case MachineRepresentation::kWord16:
2246 code = kAtomicStoreWord16;
2247 break;
2248 case MachineRepresentation::kWord32:
2249 code = kAtomicStoreWord32;
2250 break;
2251 case MachineRepresentation::kWord64:
2252 DCHECK_EQ(width, AtomicWidth::kWord64);
2253 code = kMips64Word64AtomicStoreWord64;
2254 break;
2255 case MachineRepresentation::kTaggedSigned: // Fall through.
2256 case MachineRepresentation::kTaggedPointer: // Fall through.
2257 case MachineRepresentation::kTagged:
2258 DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
2259 code = kMips64StoreCompressTagged;
2260 break;
2261 default:
2262 UNREACHABLE();
2263 }
2264 code |= AtomicWidthField::encode(width);
2265
2266 if (g.CanBeImmediate(index, code)) {
2267 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
2268 AtomicWidthField::encode(width),
2269 g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
2270 g.UseRegisterOrImmediateZero(value));
2271 } else {
2272 InstructionOperand addr_reg = g.TempRegister();
2273 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
2274 addr_reg, g.UseRegister(index), g.UseRegister(base));
2275 // Emit desired store opcode, using temp addr_reg.
2276 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
2277 AtomicWidthField::encode(width),
2278 g.NoOutput(), addr_reg, g.TempImmediate(0),
2279 g.UseRegisterOrImmediateZero(value));
2280 }
2281 }
2282 }
2283
2284 void VisitAtomicExchange(InstructionSelector* selector, Node* node,
2285 ArchOpcode opcode, AtomicWidth width) {
2286 Mips64OperandGenerator g(selector);
2287 Node* base = node->InputAt(0);
2288 Node* index = node->InputAt(1);
2289 Node* value = node->InputAt(2);
2290
2291 AddressingMode addressing_mode = kMode_MRI;
2292 InstructionOperand inputs[3];
2293 size_t input_count = 0;
2294 inputs[input_count++] = g.UseUniqueRegister(base);
2295 inputs[input_count++] = g.UseUniqueRegister(index);
2296 inputs[input_count++] = g.UseUniqueRegister(value);
2297 InstructionOperand outputs[1];
2298 outputs[0] = g.UseUniqueRegister(node);
2299 InstructionOperand temp[3];
2300 temp[0] = g.TempRegister();
2301 temp[1] = g.TempRegister();
2302 temp[2] = g.TempRegister();
2303 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
2304 AtomicWidthField::encode(width);
2305 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
2306 }
2307
2308 void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
2309 ArchOpcode opcode, AtomicWidth width) {
2310 Mips64OperandGenerator g(selector);
2311 Node* base = node->InputAt(0);
2312 Node* index = node->InputAt(1);
2313 Node* old_value = node->InputAt(2);
2314 Node* new_value = node->InputAt(3);
2315
2316 AddressingMode addressing_mode = kMode_MRI;
2317 InstructionOperand inputs[4];
2318 size_t input_count = 0;
2319 inputs[input_count++] = g.UseUniqueRegister(base);
2320 inputs[input_count++] = g.UseUniqueRegister(index);
2321 inputs[input_count++] = g.UseUniqueRegister(old_value);
2322 inputs[input_count++] = g.UseUniqueRegister(new_value);
2323 InstructionOperand outputs[1];
2324 outputs[0] = g.UseUniqueRegister(node);
2325 InstructionOperand temp[3];
2326 temp[0] = g.TempRegister();
2327 temp[1] = g.TempRegister();
2328 temp[2] = g.TempRegister();
2329 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
2330 AtomicWidthField::encode(width);
2331 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
2332 }
2333
2334 void VisitAtomicBinop(InstructionSelector* selector, Node* node,
2335 ArchOpcode opcode, AtomicWidth width) {
2336 Mips64OperandGenerator g(selector);
2337 Node* base = node->InputAt(0);
2338 Node* index = node->InputAt(1);
2339 Node* value = node->InputAt(2);
2340
2341 AddressingMode addressing_mode = kMode_MRI;
2342 InstructionOperand inputs[3];
2343 size_t input_count = 0;
2344 inputs[input_count++] = g.UseUniqueRegister(base);
2345 inputs[input_count++] = g.UseUniqueRegister(index);
2346 inputs[input_count++] = g.UseUniqueRegister(value);
2347 InstructionOperand outputs[1];
2348 outputs[0] = g.UseUniqueRegister(node);
2349 InstructionOperand temps[4];
2350 temps[0] = g.TempRegister();
2351 temps[1] = g.TempRegister();
2352 temps[2] = g.TempRegister();
2353 temps[3] = g.TempRegister();
2354 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
2355 AtomicWidthField::encode(width);
2356 selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
2357 }
2358
2359 } // namespace
2360
// Emits the stack-overflow check: compares the stack pointer against the
// limit in `value` and feeds the result to the flags continuation. Function
// entry checks get an extra temp and unique registers because an offset is
// applied there.
void InstructionSelector::VisitStackPointerGreaterThan(
    Node* node, FlagsContinuation* cont) {
  StackCheckKind kind = StackCheckKindOf(node->op());
  InstructionCode opcode =
      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));

  Mips64OperandGenerator g(this);

  // No outputs.
  InstructionOperand* const outputs = nullptr;
  const int output_count = 0;

  // TempRegister(0) is used to store the comparison result.
  // Applying an offset to this stack check requires a temp register. Offsets
  // are only applied to the first stack check. If applying an offset, we must
  // ensure the input and temp registers do not alias, thus kUniqueRegister.
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
  const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1);
  const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
                                 ? OperandGenerator::kUniqueRegister
                                 : OperandGenerator::kRegister;

  Node* const value = node->InputAt(0);
  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
  static constexpr int input_count = arraysize(inputs);

  EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
                       temp_count, temps, cont);
}
2390
// Shared routine for word comparisons against zero. Tries to fold the
// producing comparison (or flag-setting operation) of |value| into the
// continuation |cont| instead of materializing a boolean and re-testing it.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  // Each Word32Equal/Word64Equal-with-zero peeled off here is a logical
  // negation of the value beneath it, so the continuation is negated once
  // per peeled node.
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else {
      break;
    }

    cont->Negate();
  }

  // If the (possibly unwrapped) value is a coverable comparison, overwrite
  // the continuation's condition and emit the compare directly.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kWord64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      // Note: floating-point "<"/"<=" are mapped to the unsigned condition
      // codes here.
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64Dadd, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64Dsub, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64MulOvf, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64DaddOvf, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64DsubOvf, cont);
              default:
                break;
            }
          }
        }
        break;
      // A bitwise AND feeding the zero test is emitted as a test
      // instruction.
      case IrOpcode::kWord32And:
      case IrOpcode::kWord64And:
        return VisitWordCompare(this, value, kMips64Tst, cont, true);
      case IrOpcode::kStackPointerGreaterThan:
        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
        return VisitStackPointerGreaterThan(value, cont);
      default:
        break;
    }
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  EmitWordCompareZero(this, value, cont);
}
2511
// Lowers a multi-way switch, choosing between a jump table and a binary
// search tree of conditional branches via a simple space/time cost model.
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  Mips64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    // Rough instruction-count estimates for each strategy; the comparison
    // below weights time 3x relative to space.
    size_t table_space_cost = 10 + 2 * sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 2 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      // Bias the switch value so the table is indexed from zero.
      if (sw.min_value()) {
        index_operand = g.TempRegister();
        Emit(kMips64Sub, index_operand, value_operand,
             g.TempImmediate(sw.min_value()));
      }
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(sw, value_operand);
}
2542
VisitWord32Equal(Node * const node)2543 void InstructionSelector::VisitWord32Equal(Node* const node) {
2544 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2545 Int32BinopMatcher m(node);
2546 if (m.right().Is(0)) {
2547 return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2548 }
2549
2550 VisitWord32Compare(this, node, &cont);
2551 }
2552
VisitInt32LessThan(Node * node)2553 void InstructionSelector::VisitInt32LessThan(Node* node) {
2554 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2555 VisitWord32Compare(this, node, &cont);
2556 }
2557
VisitInt32LessThanOrEqual(Node * node)2558 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
2559 FlagsContinuation cont =
2560 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2561 VisitWord32Compare(this, node, &cont);
2562 }
2563
VisitUint32LessThan(Node * node)2564 void InstructionSelector::VisitUint32LessThan(Node* node) {
2565 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2566 VisitWord32Compare(this, node, &cont);
2567 }
2568
VisitUint32LessThanOrEqual(Node * node)2569 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
2570 FlagsContinuation cont =
2571 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2572 VisitWord32Compare(this, node, &cont);
2573 }
2574
VisitInt32AddWithOverflow(Node * node)2575 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
2576 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2577 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2578 return VisitBinop(this, node, kMips64Dadd, &cont);
2579 }
2580 FlagsContinuation cont;
2581 VisitBinop(this, node, kMips64Dadd, &cont);
2582 }
2583
VisitInt32SubWithOverflow(Node * node)2584 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
2585 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2586 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2587 return VisitBinop(this, node, kMips64Dsub, &cont);
2588 }
2589 FlagsContinuation cont;
2590 VisitBinop(this, node, kMips64Dsub, &cont);
2591 }
2592
VisitInt32MulWithOverflow(Node * node)2593 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
2594 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2595 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2596 return VisitBinop(this, node, kMips64MulOvf, &cont);
2597 }
2598 FlagsContinuation cont;
2599 VisitBinop(this, node, kMips64MulOvf, &cont);
2600 }
2601
VisitInt64AddWithOverflow(Node * node)2602 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
2603 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2604 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2605 return VisitBinop(this, node, kMips64DaddOvf, &cont);
2606 }
2607 FlagsContinuation cont;
2608 VisitBinop(this, node, kMips64DaddOvf, &cont);
2609 }
2610
VisitInt64SubWithOverflow(Node * node)2611 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
2612 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2613 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2614 return VisitBinop(this, node, kMips64DsubOvf, &cont);
2615 }
2616 FlagsContinuation cont;
2617 VisitBinop(this, node, kMips64DsubOvf, &cont);
2618 }
2619
VisitWord64Equal(Node * const node)2620 void InstructionSelector::VisitWord64Equal(Node* const node) {
2621 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2622 Int64BinopMatcher m(node);
2623 if (m.right().Is(0)) {
2624 return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2625 }
2626
2627 VisitWord64Compare(this, node, &cont);
2628 }
2629
VisitInt64LessThan(Node * node)2630 void InstructionSelector::VisitInt64LessThan(Node* node) {
2631 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2632 VisitWord64Compare(this, node, &cont);
2633 }
2634
VisitInt64LessThanOrEqual(Node * node)2635 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
2636 FlagsContinuation cont =
2637 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2638 VisitWord64Compare(this, node, &cont);
2639 }
2640
VisitUint64LessThan(Node * node)2641 void InstructionSelector::VisitUint64LessThan(Node* node) {
2642 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2643 VisitWord64Compare(this, node, &cont);
2644 }
2645
VisitUint64LessThanOrEqual(Node * node)2646 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
2647 FlagsContinuation cont =
2648 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2649 VisitWord64Compare(this, node, &cont);
2650 }
2651
VisitFloat32Equal(Node * node)2652 void InstructionSelector::VisitFloat32Equal(Node* node) {
2653 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2654 VisitFloat32Compare(this, node, &cont);
2655 }
2656
VisitFloat32LessThan(Node * node)2657 void InstructionSelector::VisitFloat32LessThan(Node* node) {
2658 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2659 VisitFloat32Compare(this, node, &cont);
2660 }
2661
VisitFloat32LessThanOrEqual(Node * node)2662 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
2663 FlagsContinuation cont =
2664 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2665 VisitFloat32Compare(this, node, &cont);
2666 }
2667
VisitFloat64Equal(Node * node)2668 void InstructionSelector::VisitFloat64Equal(Node* node) {
2669 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2670 VisitFloat64Compare(this, node, &cont);
2671 }
2672
VisitFloat64LessThan(Node * node)2673 void InstructionSelector::VisitFloat64LessThan(Node* node) {
2674 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2675 VisitFloat64Compare(this, node, &cont);
2676 }
2677
VisitFloat64LessThanOrEqual(Node * node)2678 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
2679 FlagsContinuation cont =
2680 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2681 VisitFloat64Compare(this, node, &cont);
2682 }
2683
// Extracts the low 32 bits of a float64 into a general-purpose register.
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
2687
// Extracts the high 32 bits of a float64 into a general-purpose register.
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  VisitRR(this, kMips64Float64ExtractHighWord32, node);
}
2691
// Emits the float64 NaN-silencing operation (quiets signaling NaNs).
void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  VisitRR(this, kMips64Float64SilenceNaN, node);
}
2695
// Replaces the low 32 bits of a float64 (input 0) with a word32 (input 1).
// The output is constrained to input 0's register (DefineSameAsFirst), so
// only the low word needs to be rewritten.
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kMips64Float64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}
2703
// Replaces the high 32 bits of a float64 (input 0) with a word32 (input 1).
// The output is constrained to input 0's register (DefineSameAsFirst), so
// only the high word needs to be rewritten.
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kMips64Float64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}
2711
// Emits a MIPS "sync" instruction as a full memory barrier; it produces no
// value.
void InstructionSelector::VisitMemoryBarrier(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Sync, g.NoOutput());
}
2716
// 32-bit atomic load; defers to the shared helper with word32 width.
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
  VisitAtomicLoad(this, node, AtomicWidth::kWord32);
}
2720
// 32-bit atomic store; defers to the shared helper with word32 width.
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
  VisitAtomicStore(this, node, AtomicWidth::kWord32);
}
2724
// 64-bit atomic load; defers to the shared helper with word64 width.
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
  VisitAtomicLoad(this, node, AtomicWidth::kWord64);
}
2728
// 64-bit atomic store; defers to the shared helper with word64 width.
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  VisitAtomicStore(this, node, AtomicWidth::kWord64);
}
2732
// Lowers a 32-bit atomic exchange: selects the opcode from the access
// MachineType (8- and 16-bit accesses have distinct signed/unsigned
// variants) and defers to the shared helper. Any other type is a bug.
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else {
    UNREACHABLE();
  }

  VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
2752
// Lowers a 64-bit atomic exchange. Only unsigned access types occur here;
// narrow accesses reuse the 32-bit opcodes while the width stays word64.
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else if (type == MachineType::Uint64()) {
    opcode = kMips64Word64AtomicExchangeUint64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
2769
// Lowers a 32-bit atomic compare-and-exchange: selects the opcode from the
// access MachineType (distinct signed/unsigned narrow variants) and defers
// to the shared helper.
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
  }

  VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
2789
// Lowers a 64-bit atomic compare-and-exchange. Only unsigned access types
// occur here; narrow accesses reuse the 32-bit opcodes at word64 width.
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else if (type == MachineType::Uint64()) {
    opcode = kMips64Word64AtomicCompareExchangeUint64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
// Shared dispatch for the Word32 atomic binops (Add/Sub/And/Or/Xor): picks
// the caller-supplied opcode matching the access MachineType and emits it
// at 32-bit atomic width.
void InstructionSelector::VisitWord32AtomicBinaryOperation(
    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = int8_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Int16()) {
    opcode = int16_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = word32_op;
  } else {
    UNREACHABLE();
  }

  VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
2827
// Stamps out VisitWord32AtomicAdd/Sub/And/Or/Xor, which all share the
// MachineType dispatch in VisitWord32AtomicBinaryOperation.
#define VISIT_ATOMIC_BINOP(op)                                           \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {          \
    VisitWord32AtomicBinaryOperation(                                    \
        node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
        kAtomic##op##Uint16, kAtomic##op##Word32);                       \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
2840
// Shared dispatch for the Word64 atomic binops: picks the caller-supplied
// opcode matching the access MachineType (unsigned types only) and emits it
// at 64-bit atomic width.
void InstructionSelector::VisitWord64AtomicBinaryOperation(
    Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
    ArchOpcode uint64_op) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Uint32()) {
    opcode = uint32_op;
  } else if (type == MachineType::Uint64()) {
    opcode = uint64_op;
  } else {
    UNREACHABLE();
  }
  VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
2859
// Stamps out VisitWord64AtomicAdd/Sub/And/Or/Xor. Narrow and 32-bit
// accesses use the generic kAtomic* opcodes; only Uint64 needs the
// dedicated MIPS64 opcode.
#define VISIT_ATOMIC_BINOP(op)                                                 \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) {                \
    VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8,                 \
                                     kAtomic##op##Uint16, kAtomic##op##Word32, \
                                     kMips64Word64Atomic##op##Uint64);         \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
2872
// Int32AbsWithOverflow has no MIPS64 lowering; the selector must never be
// asked for it.
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}
2876
// Int64AbsWithOverflow has no MIPS64 lowering; the selector must never be
// asked for it.
void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  UNREACHABLE();
}
2880
// Vector shapes used to stamp out the per-type SIMD visitors below.
#define SIMD_TYPE_LIST(V) \
  V(F64x2)                \
  V(F32x4)                \
  V(I64x2)                \
  V(I32x4)                \
  V(I16x8)                \
  V(I8x16)
2888
// (IrOpcode name, MIPS64 opcode) pairs for one-input SIMD operations,
// expanded into VisitRR-based visitors by SIMD_VISIT_UNOP below.
#define SIMD_UNOP_LIST(V)                                  \
  V(F64x2Abs, kMips64F64x2Abs)                             \
  V(F64x2Neg, kMips64F64x2Neg)                             \
  V(F64x2Sqrt, kMips64F64x2Sqrt)                           \
  V(F64x2Ceil, kMips64F64x2Ceil)                           \
  V(F64x2Floor, kMips64F64x2Floor)                         \
  V(F64x2Trunc, kMips64F64x2Trunc)                         \
  V(F64x2NearestInt, kMips64F64x2NearestInt)               \
  V(I64x2Neg, kMips64I64x2Neg)                             \
  V(I64x2BitMask, kMips64I64x2BitMask)                     \
  V(F64x2ConvertLowI32x4S, kMips64F64x2ConvertLowI32x4S)   \
  V(F64x2ConvertLowI32x4U, kMips64F64x2ConvertLowI32x4U)   \
  V(F64x2PromoteLowF32x4, kMips64F64x2PromoteLowF32x4)     \
  V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4)         \
  V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4)         \
  V(F32x4Abs, kMips64F32x4Abs)                             \
  V(F32x4Neg, kMips64F32x4Neg)                             \
  V(F32x4Sqrt, kMips64F32x4Sqrt)                           \
  V(F32x4RecipApprox, kMips64F32x4RecipApprox)             \
  V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox)     \
  V(F32x4Ceil, kMips64F32x4Ceil)                           \
  V(F32x4Floor, kMips64F32x4Floor)                         \
  V(F32x4Trunc, kMips64F32x4Trunc)                         \
  V(F32x4NearestInt, kMips64F32x4NearestInt)               \
  V(F32x4DemoteF64x2Zero, kMips64F32x4DemoteF64x2Zero)     \
  V(I64x2Abs, kMips64I64x2Abs)                             \
  V(I64x2SConvertI32x4Low, kMips64I64x2SConvertI32x4Low)   \
  V(I64x2SConvertI32x4High, kMips64I64x2SConvertI32x4High) \
  V(I64x2UConvertI32x4Low, kMips64I64x2UConvertI32x4Low)   \
  V(I64x2UConvertI32x4High, kMips64I64x2UConvertI32x4High) \
  V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4)         \
  V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4)         \
  V(I32x4Neg, kMips64I32x4Neg)                             \
  V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low)   \
  V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
  V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low)   \
  V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
  V(I32x4Abs, kMips64I32x4Abs)                             \
  V(I32x4BitMask, kMips64I32x4BitMask)                     \
  V(I32x4TruncSatF64x2SZero, kMips64I32x4TruncSatF64x2SZero) \
  V(I32x4TruncSatF64x2UZero, kMips64I32x4TruncSatF64x2UZero) \
  V(I16x8Neg, kMips64I16x8Neg)                             \
  V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low)   \
  V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
  V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low)   \
  V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
  V(I16x8Abs, kMips64I16x8Abs)                             \
  V(I16x8BitMask, kMips64I16x8BitMask)                     \
  V(I8x16Neg, kMips64I8x16Neg)                             \
  V(I8x16Abs, kMips64I8x16Abs)                             \
  V(I8x16Popcnt, kMips64I8x16Popcnt)                       \
  V(I8x16BitMask, kMips64I8x16BitMask)                     \
  V(S128Not, kMips64S128Not)                               \
  V(I64x2AllTrue, kMips64I64x2AllTrue)                     \
  V(I32x4AllTrue, kMips64I32x4AllTrue)                     \
  V(I16x8AllTrue, kMips64I16x8AllTrue)                     \
  V(I8x16AllTrue, kMips64I8x16AllTrue)                     \
  V(V128AnyTrue, kMips64V128AnyTrue)
2947
// SIMD shift operations; SIMD_VISIT_SHIFT_OP below derives the MIPS64
// opcode directly from the name.
#define SIMD_SHIFT_OP_LIST(V) \
  V(I64x2Shl)                 \
  V(I64x2ShrS)                \
  V(I64x2ShrU)                \
  V(I32x4Shl)                 \
  V(I32x4ShrS)                \
  V(I32x4ShrU)                \
  V(I16x8Shl)                 \
  V(I16x8ShrS)                \
  V(I16x8ShrU)                \
  V(I8x16Shl)                 \
  V(I8x16ShrS)                \
  V(I8x16ShrU)
2961
// (IrOpcode name, MIPS64 opcode) pairs for two-input SIMD operations,
// expanded into VisitRRR-based visitors by SIMD_VISIT_BINOP below.
#define SIMD_BINOP_LIST(V)                               \
  V(F64x2Add, kMips64F64x2Add)                           \
  V(F64x2Sub, kMips64F64x2Sub)                           \
  V(F64x2Mul, kMips64F64x2Mul)                           \
  V(F64x2Div, kMips64F64x2Div)                           \
  V(F64x2Min, kMips64F64x2Min)                           \
  V(F64x2Max, kMips64F64x2Max)                           \
  V(F64x2Eq, kMips64F64x2Eq)                             \
  V(F64x2Ne, kMips64F64x2Ne)                             \
  V(F64x2Lt, kMips64F64x2Lt)                             \
  V(F64x2Le, kMips64F64x2Le)                             \
  V(I64x2Eq, kMips64I64x2Eq)                             \
  V(I64x2Ne, kMips64I64x2Ne)                             \
  V(I64x2Add, kMips64I64x2Add)                           \
  V(I64x2Sub, kMips64I64x2Sub)                           \
  V(I64x2Mul, kMips64I64x2Mul)                           \
  V(I64x2GtS, kMips64I64x2GtS)                           \
  V(I64x2GeS, kMips64I64x2GeS)                           \
  V(F32x4Add, kMips64F32x4Add)                           \
  V(F32x4Sub, kMips64F32x4Sub)                           \
  V(F32x4Mul, kMips64F32x4Mul)                           \
  V(F32x4Div, kMips64F32x4Div)                           \
  V(F32x4Max, kMips64F32x4Max)                           \
  V(F32x4Min, kMips64F32x4Min)                           \
  V(F32x4Eq, kMips64F32x4Eq)                             \
  V(F32x4Ne, kMips64F32x4Ne)                             \
  V(F32x4Lt, kMips64F32x4Lt)                             \
  V(F32x4Le, kMips64F32x4Le)                             \
  V(I32x4Add, kMips64I32x4Add)                           \
  V(I32x4Sub, kMips64I32x4Sub)                           \
  V(I32x4Mul, kMips64I32x4Mul)                           \
  V(I32x4MaxS, kMips64I32x4MaxS)                         \
  V(I32x4MinS, kMips64I32x4MinS)                         \
  V(I32x4MaxU, kMips64I32x4MaxU)                         \
  V(I32x4MinU, kMips64I32x4MinU)                         \
  V(I32x4Eq, kMips64I32x4Eq)                             \
  V(I32x4Ne, kMips64I32x4Ne)                             \
  V(I32x4GtS, kMips64I32x4GtS)                           \
  V(I32x4GeS, kMips64I32x4GeS)                           \
  V(I32x4GtU, kMips64I32x4GtU)                           \
  V(I32x4GeU, kMips64I32x4GeU)                           \
  V(I32x4DotI16x8S, kMips64I32x4DotI16x8S)               \
  V(I16x8Add, kMips64I16x8Add)                           \
  V(I16x8AddSatS, kMips64I16x8AddSatS)                   \
  V(I16x8AddSatU, kMips64I16x8AddSatU)                   \
  V(I16x8Sub, kMips64I16x8Sub)                           \
  V(I16x8SubSatS, kMips64I16x8SubSatS)                   \
  V(I16x8SubSatU, kMips64I16x8SubSatU)                   \
  V(I16x8Mul, kMips64I16x8Mul)                           \
  V(I16x8MaxS, kMips64I16x8MaxS)                         \
  V(I16x8MinS, kMips64I16x8MinS)                         \
  V(I16x8MaxU, kMips64I16x8MaxU)                         \
  V(I16x8MinU, kMips64I16x8MinU)                         \
  V(I16x8Eq, kMips64I16x8Eq)                             \
  V(I16x8Ne, kMips64I16x8Ne)                             \
  V(I16x8GtS, kMips64I16x8GtS)                           \
  V(I16x8GeS, kMips64I16x8GeS)                           \
  V(I16x8GtU, kMips64I16x8GtU)                           \
  V(I16x8GeU, kMips64I16x8GeU)                           \
  V(I16x8RoundingAverageU, kMips64I16x8RoundingAverageU) \
  V(I16x8SConvertI32x4, kMips64I16x8SConvertI32x4)       \
  V(I16x8UConvertI32x4, kMips64I16x8UConvertI32x4)       \
  V(I16x8Q15MulRSatS, kMips64I16x8Q15MulRSatS)           \
  V(I8x16Add, kMips64I8x16Add)                           \
  V(I8x16AddSatS, kMips64I8x16AddSatS)                   \
  V(I8x16AddSatU, kMips64I8x16AddSatU)                   \
  V(I8x16Sub, kMips64I8x16Sub)                           \
  V(I8x16SubSatS, kMips64I8x16SubSatS)                   \
  V(I8x16SubSatU, kMips64I8x16SubSatU)                   \
  V(I8x16MaxS, kMips64I8x16MaxS)                         \
  V(I8x16MinS, kMips64I8x16MinS)                         \
  V(I8x16MaxU, kMips64I8x16MaxU)                         \
  V(I8x16MinU, kMips64I8x16MinU)                         \
  V(I8x16Eq, kMips64I8x16Eq)                             \
  V(I8x16Ne, kMips64I8x16Ne)                             \
  V(I8x16GtS, kMips64I8x16GtS)                           \
  V(I8x16GeS, kMips64I8x16GeS)                           \
  V(I8x16GtU, kMips64I8x16GtU)                           \
  V(I8x16GeU, kMips64I8x16GeU)                           \
  V(I8x16RoundingAverageU, kMips64I8x16RoundingAverageU) \
  V(I8x16SConvertI16x8, kMips64I8x16SConvertI16x8)       \
  V(I8x16UConvertI16x8, kMips64I8x16UConvertI16x8)       \
  V(S128And, kMips64S128And)                             \
  V(S128Or, kMips64S128Or)                               \
  V(S128Xor, kMips64S128Xor)                             \
  V(S128AndNot, kMips64S128AndNot)
3048
// Materializes a 128-bit SIMD constant, using the cheaper all-zeros /
// all-ones instructions when the immediate allows it.
void InstructionSelector::VisitS128Const(Node* node) {
  Mips64OperandGenerator g(this);
  static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
  uint32_t val[kUint32Immediates];
  memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
  // If all bytes are zeros or ones, avoid emitting code for generic constants
  bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
  bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
                  val[2] == UINT32_MAX && val[3] == UINT32_MAX;
  InstructionOperand dst = g.DefineAsRegister(node);
  if (all_zeros) {
    Emit(kMips64S128Zero, dst);
  } else if (all_ones) {
    Emit(kMips64S128AllOnes, dst);
  } else {
    // Generic case: pass the constant as four 32-bit immediates.
    Emit(kMips64S128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
         g.UseImmediate(val[2]), g.UseImmediate(val[3]));
  }
}
3068
VisitS128Zero(Node * node)3069 void InstructionSelector::VisitS128Zero(Node* node) {
3070 Mips64OperandGenerator g(this);
3071 Emit(kMips64S128Zero, g.DefineAsRegister(node));
3072 }
3073
// Splat: broadcast a scalar into every lane of the given vector shape.
#define SIMD_VISIT_SPLAT(Type)                               \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    VisitRR(this, kMips64##Type##Splat, node);               \
  }
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT

// Extract lane: narrow integer lanes come in signed (S) and unsigned (U)
// flavors; wider lanes have a single, unsuffixed variant.
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)                              \
  void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
    VisitRRI(this, kMips64##Type##ExtractLane##Sign, node);              \
  }
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE

// Replace lane: vector input, lane immediate, replacement scalar register.
#define SIMD_VISIT_REPLACE_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    VisitRRIR(this, kMips64##Type##ReplaceLane, node);             \
  }
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE

// One-input SIMD operations from SIMD_UNOP_LIST.
#define SIMD_VISIT_UNOP(Name, instruction)            \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, instruction, node);                 \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP

// SIMD shifts from SIMD_SHIFT_OP_LIST.
#define SIMD_VISIT_SHIFT_OP(Name)                     \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitSimdShift(this, kMips64##Name, node);        \
  }
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP

// Two-input SIMD operations from SIMD_BINOP_LIST.
#define SIMD_VISIT_BINOP(Name, instruction)           \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, instruction, node);                \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
3122
// Lowers S128Select (three vector inputs, one output) to kMips64S128Select.
void InstructionSelector::VisitS128Select(Node* node) {
  VisitRRRR(this, kMips64S128Select, node);
}
3126
3127 #if V8_ENABLE_WEBASSEMBLY
namespace {

// Maps a canonical 8x16 shuffle byte pattern to the dedicated MIPS64
// instruction that implements it.
struct ShuffleEntry {
  uint8_t shuffle[kSimd128Size];
  ArchOpcode opcode;
};

// Shuffle patterns with single-instruction lowerings. Indices 0-15 select
// bytes from the first input, 16-31 from the second.
static const ShuffleEntry arch_shuffles[] = {
    {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
     kMips64S32x4InterleaveRight},
    {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
     kMips64S32x4InterleaveLeft},
    {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
     kMips64S32x4PackEven},
    {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
     kMips64S32x4PackOdd},
    {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
     kMips64S32x4InterleaveEven},
    {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
     kMips64S32x4InterleaveOdd},

    {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
     kMips64S16x8InterleaveRight},
    {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
     kMips64S16x8InterleaveLeft},
    {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
     kMips64S16x8PackEven},
    {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
     kMips64S16x8PackOdd},
    {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
     kMips64S16x8InterleaveEven},
    {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
     kMips64S16x8InterleaveOdd},
    {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
     kMips64S16x4Reverse},
    {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
     kMips64S16x2Reverse},

    {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
     kMips64S8x16InterleaveRight},
    {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
     kMips64S8x16InterleaveLeft},
    {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
     kMips64S8x16PackEven},
    {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
     kMips64S8x16PackOdd},
    {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
     kMips64S8x16InterleaveEven},
    {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
     kMips64S8x16InterleaveOdd},
    {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
     kMips64S8x8Reverse},
    {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
     kMips64S8x4Reverse},
    {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
     kMips64S8x2Reverse}};

// Returns true and sets *opcode if |shuffle| matches an entry in |table|.
// For swizzles (single input) lane indices are compared modulo 16
// (mask 0x0F) so the input-selection bit is ignored.
bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
                         size_t num_entries, bool is_swizzle,
                         ArchOpcode* opcode) {
  uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
  for (size_t i = 0; i < num_entries; ++i) {
    const ShuffleEntry& entry = table[i];
    int j = 0;
    for (; j < kSimd128Size; ++j) {
      if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
        break;
      }
    }
    if (j == kSimd128Size) {
      *opcode = entry.opcode;
      return true;
    }
  }
  return false;
}

}  // namespace
3206
VisitI8x16Shuffle(Node * node)3207 void InstructionSelector::VisitI8x16Shuffle(Node* node) {
3208 uint8_t shuffle[kSimd128Size];
3209 bool is_swizzle;
3210 CanonicalizeShuffle(node, shuffle, &is_swizzle);
3211 uint8_t shuffle32x4[4];
3212 ArchOpcode opcode;
3213 if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
3214 is_swizzle, &opcode)) {
3215 VisitRRR(this, opcode, node);
3216 return;
3217 }
3218 Node* input0 = node->InputAt(0);
3219 Node* input1 = node->InputAt(1);
3220 uint8_t offset;
3221 Mips64OperandGenerator g(this);
3222 if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
3223 Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
3224 g.UseRegister(input0), g.UseImmediate(offset));
3225 return;
3226 }
3227 if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
3228 Emit(kMips64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
3229 g.UseRegister(input1),
3230 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
3231 return;
3232 }
3233 Emit(kMips64I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
3234 g.UseRegister(input1),
3235 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
3236 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
3237 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
3238 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
3239 }
3240 #else
// Without WebAssembly support there are no SIMD shuffle nodes to select.
void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
3242 #endif // V8_ENABLE_WEBASSEMBLY
3243
// Lowers an i8x16 swizzle to kMips64I8x16Swizzle with a scratch SIMD
// register for the intermediate result.
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  // We don't want input 0 or input 1 to be the same as output, since we will
  // modify output before doing the calculation.
  Emit(kMips64I8x16Swizzle, g.DefineAsRegister(node),
       g.UseUniqueRegister(node->InputAt(0)),
       g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
3253
VisitSignExtendWord8ToInt32(Node * node)3254 void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
3255 Mips64OperandGenerator g(this);
3256 Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
3257 }
3258
VisitSignExtendWord16ToInt32(Node * node)3259 void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
3260 Mips64OperandGenerator g(this);
3261 Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
3262 }
3263
VisitSignExtendWord8ToInt64(Node * node)3264 void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
3265 Mips64OperandGenerator g(this);
3266 Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
3267 }
3268
VisitSignExtendWord16ToInt64(Node * node)3269 void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
3270 Mips64OperandGenerator g(this);
3271 Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
3272 }
3273
VisitSignExtendWord32ToInt64(Node * node)3274 void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
3275 Mips64OperandGenerator g(this);
3276 Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
3277 g.TempImmediate(0));
3278 }
3279
// Pseudo-min: unique registers required because the MSA sequence may
// clobber an input that aliases the output.
void InstructionSelector::VisitF32x4Pmin(Node* node) {
  VisitUniqueRRR(this, kMips64F32x4Pmin, node);
}
3283
// Pseudo-max: unique registers required because the MSA sequence may
// clobber an input that aliases the output.
void InstructionSelector::VisitF32x4Pmax(Node* node) {
  VisitUniqueRRR(this, kMips64F32x4Pmax, node);
}
3287
// Pseudo-min (f64x2 lanes); see VisitUniqueRRR for the register constraint.
void InstructionSelector::VisitF64x2Pmin(Node* node) {
  VisitUniqueRRR(this, kMips64F64x2Pmin, node);
}
3291
// Pseudo-max (f64x2 lanes); see VisitUniqueRRR for the register constraint.
void InstructionSelector::VisitF64x2Pmax(Node* node) {
  VisitUniqueRRR(this, kMips64F64x2Pmax, node);
}
3295
// Generates the Visit handlers for the extended-multiply pairs: for each
// (result shape, input shape) combination it emits kMips64ExtMulLow /
// kMips64ExtMulHigh with the input lane type encoded into MiscField.
#define VISIT_EXT_MUL(OPCODE1, OPCODE2, TYPE)                                  \
  void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2(Node* node) {   \
    Mips64OperandGenerator g(this);                                            \
    Emit(kMips64ExtMulLow | MiscField::encode(TYPE), g.DefineAsRegister(node), \
         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));    \
  }                                                                            \
  void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2(Node* node) {  \
    Mips64OperandGenerator g(this);                                            \
    Emit(kMips64ExtMulHigh | MiscField::encode(TYPE),                          \
         g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),            \
         g.UseRegister(node->InputAt(1)));                                     \
  }

// Signed and unsigned variants for each widening step (32->64, 16->32, 8->16).
VISIT_EXT_MUL(I64x2, I32x4S, MSAS32)
VISIT_EXT_MUL(I64x2, I32x4U, MSAU32)
VISIT_EXT_MUL(I32x4, I16x8S, MSAS16)
VISIT_EXT_MUL(I32x4, I16x8U, MSAU16)
VISIT_EXT_MUL(I16x8, I8x16S, MSAS8)
VISIT_EXT_MUL(I16x8, I8x16U, MSAU8)
#undef VISIT_EXT_MUL
3316
// Generates the Visit handlers for pairwise extending additions; the source
// lane type is encoded into MiscField of the kMips64ExtAddPairwise opcode.
#define VISIT_EXTADD_PAIRWISE(OPCODE, TYPE)                          \
  void InstructionSelector::Visit##OPCODE(Node* node) {              \
    Mips64OperandGenerator g(this);                                  \
    Emit(kMips64ExtAddPairwise | MiscField::encode(TYPE),            \
         g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); \
  }
VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16S, MSAS8)
VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16U, MSAU8)
VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S, MSAS16)
VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U, MSAU16)
#undef VISIT_EXTADD_PAIRWISE
3328
// Select continuations are not implemented for this backend; reaching this
// is a bug in instruction selection.
void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
                                                        int first_input_index,
                                                        Node* node) {
  UNREACHABLE();
}
3334
3335 // static
3336 MachineOperatorBuilder::Flags
SupportedMachineOperatorFlags()3337 InstructionSelector::SupportedMachineOperatorFlags() {
3338 MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
3339 return flags | MachineOperatorBuilder::kWord32Ctz |
3340 MachineOperatorBuilder::kWord64Ctz |
3341 MachineOperatorBuilder::kWord32Popcnt |
3342 MachineOperatorBuilder::kWord64Popcnt |
3343 MachineOperatorBuilder::kWord32ShiftIsSafe |
3344 MachineOperatorBuilder::kInt32DivIsSafe |
3345 MachineOperatorBuilder::kUint32DivIsSafe |
3346 MachineOperatorBuilder::kFloat64RoundDown |
3347 MachineOperatorBuilder::kFloat32RoundDown |
3348 MachineOperatorBuilder::kFloat64RoundUp |
3349 MachineOperatorBuilder::kFloat32RoundUp |
3350 MachineOperatorBuilder::kFloat64RoundTruncate |
3351 MachineOperatorBuilder::kFloat32RoundTruncate |
3352 MachineOperatorBuilder::kFloat64RoundTiesEven |
3353 MachineOperatorBuilder::kFloat32RoundTiesEven;
3354 }
3355
3356 // static
3357 MachineOperatorBuilder::AlignmentRequirements
AlignmentRequirements()3358 InstructionSelector::AlignmentRequirements() {
3359 if (kArchVariant == kMips64r6) {
3360 return MachineOperatorBuilder::AlignmentRequirements::
3361 FullUnalignedAccessSupport();
3362 } else {
3363 DCHECK_EQ(kMips64r2, kArchVariant);
3364 return MachineOperatorBuilder::AlignmentRequirements::
3365 NoUnalignedAccessSupport();
3366 }
3367 }
3368
3369 #undef SIMD_BINOP_LIST
3370 #undef SIMD_SHIFT_OP_LIST
3371 #undef SIMD_UNOP_LIST
3372 #undef SIMD_TYPE_LIST
3373 #undef TRACE_UNIMPL
3374 #undef TRACE
3375
3376 } // namespace compiler
3377 } // namespace internal
3378 } // namespace v8
3379