1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/base/bits.h"
6 #include "src/compiler/backend/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8 #include "src/compiler/node-properties.h"
9
10 namespace v8 {
11 namespace internal {
12 namespace compiler {
13
14 #define TRACE_UNIMPL() \
15 PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
16
17 #define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
18
// Adds Mips-specific methods for generating InstructionOperands.
class Mips64OperandGenerator final : public OperandGenerator {
 public:
  explicit Mips64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  // Returns an immediate operand if |node| is a constant that fits the
  // immediate field of |opcode|; otherwise allocates a register operand.
  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the zero register if the node has the immediate value zero, otherwise
  // assign a register.
  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
    // A float constant only counts as zero when its bit pattern is all
    // zeros (+0.0); -0.0 has the sign bit set and must stay in a register.
    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
        (IsFloatConstant(node) &&
         (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // True for 32- and 64-bit integer constant nodes.
  bool IsIntegerConstant(Node* node) {
    return (node->opcode() == IrOpcode::kInt32Constant) ||
           (node->opcode() == IrOpcode::kInt64Constant);
  }

  // Extracts the value of an integer constant node, widened to 64 bits.
  int64_t GetIntegerConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kInt32Constant) {
      return OpParameter<int32_t>(node->op());
    }
    DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
    return OpParameter<int64_t>(node->op());
  }

  // True for float32 and float64 constant nodes.
  bool IsFloatConstant(Node* node) {
    return (node->opcode() == IrOpcode::kFloat32Constant) ||
           (node->opcode() == IrOpcode::kFloat64Constant);
  }

  // Extracts the value of a floating-point constant node as a double.
  double GetFloatConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kFloat32Constant) {
      return OpParameter<float>(node->op());
    }
    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
    return OpParameter<double>(node->op());
  }

  // Returns true if |node| is an integer constant that fits the immediate
  // field of |mode|.
  bool CanBeImmediate(Node* node, InstructionCode mode) {
    return IsIntegerConstant(node) &&
           CanBeImmediate(GetIntegerConstantValue(node), mode);
  }

  // Returns true if |value| fits the immediate field of |opcode|.
  bool CanBeImmediate(int64_t value, InstructionCode opcode) {
    switch (ArchOpcodeField::decode(opcode)) {
      // 32-bit shifts take a 5-bit shift amount.
      case kMips64Shl:
      case kMips64Sar:
      case kMips64Shr:
        return is_uint5(value);
      // 64-bit shifts take a 6-bit shift amount.
      case kMips64Dshl:
      case kMips64Dsar:
      case kMips64Dshr:
        return is_uint6(value);
      // Logical/arithmetic I-type forms with a 16-bit zero-extended
      // immediate.
      case kMips64Add:
      case kMips64And32:
      case kMips64And:
      case kMips64Dadd:
      case kMips64Or32:
      case kMips64Or:
      case kMips64Tst:
      case kMips64Xor:
        return is_uint16(value);
      // Memory accesses accept any int32 offset here; offsets beyond the
      // hardware's 16-bit field are presumably split later by the
      // macro-assembler — TODO(review): confirm.
      case kMips64Lb:
      case kMips64Lbu:
      case kMips64Sb:
      case kMips64Lh:
      case kMips64Lhu:
      case kMips64Sh:
      case kMips64Lw:
      case kMips64Sw:
      case kMips64Ld:
      case kMips64Sd:
      case kMips64Lwc1:
      case kMips64Swc1:
      case kMips64Ldc1:
      case kMips64Sdc1:
        return is_int32(value);
      default:
        // Everything else gets a 16-bit signed immediate.
        return is_int16(value);
    }
  }

 private:
  // Not implemented for MIPS64; conservatively reports "does not fit".
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};
119
VisitRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)120 static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
121 Node* node) {
122 Mips64OperandGenerator g(selector);
123 selector->Emit(opcode, g.DefineAsRegister(node),
124 g.UseRegister(node->InputAt(0)));
125 }
126
VisitRRI(InstructionSelector * selector,ArchOpcode opcode,Node * node)127 static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
128 Node* node) {
129 Mips64OperandGenerator g(selector);
130 int32_t imm = OpParameter<int32_t>(node->op());
131 selector->Emit(opcode, g.DefineAsRegister(node),
132 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
133 }
134
VisitSimdShift(InstructionSelector * selector,ArchOpcode opcode,Node * node)135 static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
136 Node* node) {
137 Mips64OperandGenerator g(selector);
138 if (g.IsIntegerConstant(node->InputAt(1))) {
139 selector->Emit(opcode, g.DefineAsRegister(node),
140 g.UseRegister(node->InputAt(0)),
141 g.UseImmediate(node->InputAt(1)));
142 } else {
143 selector->Emit(opcode, g.DefineAsRegister(node),
144 g.UseRegister(node->InputAt(0)),
145 g.UseRegister(node->InputAt(1)));
146 }
147 }
148
VisitRRIR(InstructionSelector * selector,ArchOpcode opcode,Node * node)149 static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
150 Node* node) {
151 Mips64OperandGenerator g(selector);
152 int32_t imm = OpParameter<int32_t>(node->op());
153 selector->Emit(opcode, g.DefineAsRegister(node),
154 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
155 g.UseRegister(node->InputAt(1)));
156 }
157
VisitRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)158 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
159 Node* node) {
160 Mips64OperandGenerator g(selector);
161 selector->Emit(opcode, g.DefineAsRegister(node),
162 g.UseRegister(node->InputAt(0)),
163 g.UseRegister(node->InputAt(1)));
164 }
165
VisitUniqueRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)166 static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
167 Node* node) {
168 Mips64OperandGenerator g(selector);
169 selector->Emit(opcode, g.DefineAsRegister(node),
170 g.UseUniqueRegister(node->InputAt(0)),
171 g.UseUniqueRegister(node->InputAt(1)));
172 }
173
VisitRRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)174 void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
175 Mips64OperandGenerator g(selector);
176 selector->Emit(
177 opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
178 g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
179 }
180
VisitRRO(InstructionSelector * selector,ArchOpcode opcode,Node * node)181 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
182 Node* node) {
183 Mips64OperandGenerator g(selector);
184 selector->Emit(opcode, g.DefineAsRegister(node),
185 g.UseRegister(node->InputAt(0)),
186 g.UseOperand(node->InputAt(1), opcode));
187 }
188
// Matches Word64Sar(Load(base, offset), 32) so the selector can replace the
// 64-bit load + shift with a single 32-bit load of the high word, which Lw
// sign-extends. Used e.g. when loading and untagging SMIs.
struct ExtendingLoadMatcher {
  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
    Initialize(node);
  }

  // True if the pattern matched; only then are the accessors below valid.
  bool Matches() const { return matches_; }

  Node* base() const {
    DCHECK(Matches());
    return base_;
  }
  int64_t immediate() const {
    DCHECK(Matches());
    return immediate_;
  }
  ArchOpcode opcode() const {
    DCHECK(Matches());
    return opcode_;
  }

 private:
  bool matches_;
  InstructionSelector* selector_;
  Node* base_;       // Base address input of the matched load.
  int64_t immediate_;  // Byte offset of the high word (see endian note).
  ArchOpcode opcode_;  // Always kMips64Lw when matched.

  void Initialize(Node* node) {
    Int64BinopMatcher m(node);
    // When loading a 64-bit value and shifting by 32, we should
    // just load and sign-extend the interesting 4 bytes instead.
    // This happens, for example, when we're loading and untagging SMIs.
    DCHECK(m.IsWord64Sar());
    if (m.left().IsLoad() && m.right().Is(32) &&
        selector_->CanCover(m.node(), m.left().node())) {
      DCHECK_EQ(selector_->GetEffectLevel(node),
                selector_->GetEffectLevel(m.left().node()));
      MachineRepresentation rep =
          LoadRepresentationOf(m.left().node()->op()).representation();
      // Only 8-byte-wide loads can qualify.
      DCHECK_EQ(3, ElementSizeLog2Of(rep));
      if (rep != MachineRepresentation::kTaggedSigned &&
          rep != MachineRepresentation::kTaggedPointer &&
          rep != MachineRepresentation::kTagged &&
          rep != MachineRepresentation::kWord64) {
        return;
      }

      Mips64OperandGenerator g(selector_);
      Node* load = m.left().node();
      Node* offset = load->InputAt(1);
      base_ = load->InputAt(0);
      opcode_ = kMips64Lw;
      if (g.CanBeImmediate(offset, opcode_)) {
        // The high word of the 64-bit value sits 4 bytes past the offset on
        // little-endian targets, and at the offset itself on big-endian.
#if defined(V8_TARGET_LITTLE_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset) + 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset);
#endif
        // The adjusted offset must still fit Lw's immediate field.
        matches_ = g.CanBeImmediate(immediate_, kMips64Lw);
      }
    }
  }
};
253
TryEmitExtendingLoad(InstructionSelector * selector,Node * node,Node * output_node)254 bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
255 Node* output_node) {
256 ExtendingLoadMatcher m(node, selector);
257 Mips64OperandGenerator g(selector);
258 if (m.Matches()) {
259 InstructionOperand inputs[2];
260 inputs[0] = g.UseRegister(m.base());
261 InstructionCode opcode =
262 m.opcode() | AddressingModeField::encode(kMode_MRI);
263 DCHECK(is_int32(m.immediate()));
264 inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
265 InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
266 selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
267 inputs);
268 return true;
269 }
270 return false;
271 }
272
TryMatchImmediate(InstructionSelector * selector,InstructionCode * opcode_return,Node * node,size_t * input_count_return,InstructionOperand * inputs)273 bool TryMatchImmediate(InstructionSelector* selector,
274 InstructionCode* opcode_return, Node* node,
275 size_t* input_count_return, InstructionOperand* inputs) {
276 Mips64OperandGenerator g(selector);
277 if (g.CanBeImmediate(node, *opcode_return)) {
278 *opcode_return |= AddressingModeField::encode(kMode_MRI);
279 inputs[0] = g.UseImmediate(node);
280 *input_count_return = 1;
281 return true;
282 }
283 return false;
284 }
285
// Emits a binary operation with an optional flags continuation. A constant
// right operand that fits the immediate field is folded in; for commutative
// operations (has_reverse_opcode), a constant left operand can be folded by
// swapping the inputs and using |reverse_opcode|.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode,
                       FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[2];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  // Note: TryMatchImmediate fills inputs[1] and sets input_count to 1; the
  // register operand is then stored at inputs[0] and the count bumped to 2.
  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
                        &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (has_reverse_opcode &&
             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
  }

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure that
    // the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }

  DCHECK_NE(0u, input_count);
  DCHECK_EQ(1u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
329
// Convenience overload: binop with reverse opcode but no flags continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
}
336
// Convenience overload: binop with a flags continuation but no reverse
// opcode (non-commutative operations).
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  VisitBinop(selector, node, opcode, false, kArchNop, cont);
}
341
// Convenience overload: plain binop, no reverse opcode, no continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  VisitBinop(selector, node, opcode, false, kArchNop);
}
346
VisitStackSlot(Node * node)347 void InstructionSelector::VisitStackSlot(Node* node) {
348 StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
349 int alignment = rep.alignment();
350 int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
351 OperandGenerator g(this);
352
353 Emit(kArchStackSlot, g.DefineAsRegister(node),
354 sequence()->AddImmediate(Constant(slot)),
355 sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
356 }
357
VisitAbortCSAAssert(Node * node)358 void InstructionSelector::VisitAbortCSAAssert(Node* node) {
359 Mips64OperandGenerator g(this);
360 Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
361 }
362
EmitLoad(InstructionSelector * selector,Node * node,InstructionCode opcode,Node * output=nullptr)363 void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
364 Node* output = nullptr) {
365 Mips64OperandGenerator g(selector);
366 Node* base = node->InputAt(0);
367 Node* index = node->InputAt(1);
368
369 if (g.CanBeImmediate(index, opcode)) {
370 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
371 g.DefineAsRegister(output == nullptr ? node : output),
372 g.UseRegister(base), g.UseImmediate(index));
373 } else {
374 InstructionOperand addr_reg = g.TempRegister();
375 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
376 addr_reg, g.UseRegister(index), g.UseRegister(base));
377 // Emit desired load opcode, using temp addr_reg.
378 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
379 g.DefineAsRegister(output == nullptr ? node : output),
380 addr_reg, g.TempImmediate(0));
381 }
382 }
383
// Lowers a SIMD LoadTransform (load-and-splat, load-and-extend,
// load-and-zero) to the matching MIPS64 MSA load opcode, then emits it as a
// regular load.
void InstructionSelector::VisitLoadTransform(Node* node) {
  LoadTransformParameters params = LoadTransformParametersOf(node->op());

  InstructionCode opcode = kArchNop;
  switch (params.transformation) {
    case LoadTransformation::kS128Load8Splat:
      opcode = kMips64S128Load8Splat;
      break;
    case LoadTransformation::kS128Load16Splat:
      opcode = kMips64S128Load16Splat;
      break;
    case LoadTransformation::kS128Load32Splat:
      opcode = kMips64S128Load32Splat;
      break;
    case LoadTransformation::kS128Load64Splat:
      opcode = kMips64S128Load64Splat;
      break;
    case LoadTransformation::kS128Load8x8S:
      opcode = kMips64S128Load8x8S;
      break;
    case LoadTransformation::kS128Load8x8U:
      opcode = kMips64S128Load8x8U;
      break;
    case LoadTransformation::kS128Load16x4S:
      opcode = kMips64S128Load16x4S;
      break;
    case LoadTransformation::kS128Load16x4U:
      opcode = kMips64S128Load16x4U;
      break;
    case LoadTransformation::kS128Load32x2S:
      opcode = kMips64S128Load32x2S;
      break;
    case LoadTransformation::kS128Load32x2U:
      opcode = kMips64S128Load32x2U;
      break;
    case LoadTransformation::kS128Load32Zero:
      opcode = kMips64S128Load32Zero;
      break;
    case LoadTransformation::kS128Load64Zero:
      opcode = kMips64S128Load64Zero;
      break;
    default:
      // Any other transformation is unsupported on this backend.
      UNIMPLEMENTED();
  }

  EmitLoad(this, node, opcode);
}
431
// Selects the load opcode matching the node's machine representation and
// emits it via EmitLoad. Also handles the poisoned-load variant by tagging
// the instruction so the code generator applies the poison mask.
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());

  InstructionCode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Lwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Ldc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Byte/halfword/word loads pick the zero- or sign-extending form
      // depending on the representation's signedness.
      opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
      break;
    case MachineRepresentation::kWord32:
      opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      // Tagged values are full 64-bit words on this target.
      opcode = kMips64Ld;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMips64MsaLd;
      break;
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }
  if (node->opcode() == IrOpcode::kPoisonedLoad) {
    CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
    opcode |= MiscField::encode(kMemoryAccessPoisoned);
  }

  EmitLoad(this, node, opcode);
}
474
// Poisoned loads select exactly like normal loads: VisitLoad inspects the
// node's opcode and applies the poison flag itself.
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
476
// Protected (trap-on-OOB) loads are not yet implemented for MIPS64.
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
481
// Selects a store instruction. Stores of tagged values that require a write
// barrier are lowered to kArchStoreWithWriteBarrier; everything else picks
// the store opcode matching the representation and mirrors EmitLoad's
// immediate-vs-computed addressing.
void InstructionSelector::VisitStore(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // Testing flag: force the barrier path for all tagged-pointer stores.
  if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
    write_barrier_kind = kFullWriteBarrier;
  }

  // TODO(mips): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier &&
      V8_LIKELY(!FLAG_disable_write_barriers)) {
    DCHECK(CanBeTaggedPointer(rep));
    InstructionOperand inputs[3];
    size_t input_count = 0;
    // NOTE(review): unique registers presumably keep base/index/value live
    // and distinct for the barrier's out-of-line code — confirm against the
    // code generator.
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode =
        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kMips64Swc1;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kMips64Sdc1;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kMips64Sb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kMips64Sh;
        break;
      case MachineRepresentation::kWord32:
        opcode = kMips64Sw;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kMips64Sd;
        break;
      case MachineRepresentation::kSimd128:
        opcode = kMips64MsaSt;
        break;
      case MachineRepresentation::kCompressedPointer:  // Fall through.
      case MachineRepresentation::kCompressed:         // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
    }

    if (g.CanBeImmediate(index, opcode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index),
           g.UseRegisterOrImmediateZero(value));
    } else {
      // Index doesn't fit the immediate field: compute the address into a
      // temp register and store with a zero offset.
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
    }
  }
}
560
// Protected (trap-on-OOB) stores are not yet implemented for MIPS64.
void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
565
// Selects Word32And. Tries two peepholes before the generic And:
//   1. And(Shr(x, imm), contiguous-low-mask)  ->  Ext (bit-field extract)
//   2. And(x, inverted-contiguous-low-mask)   ->  Ins of zeros
void InstructionSelector::VisitWord32And(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasResolvedValue()) {
    uint32_t mask = m.right().ResolvedValue();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasResolvedValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;

        // Ext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Ext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasResolvedValue()) {
    uint32_t mask = m.right().ResolvedValue();
    uint32_t shift = base::bits::CountPopulation(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask.
      Emit(kMips64Ins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  // Generic case; And is commutative so the same opcode serves as reverse.
  VisitBinop(this, node, kMips64And32, true, kMips64And32);
}
614
// Selects Word64And; 64-bit analogue of VisitWord32And using Dext/Dins.
// Also folds a full-width extract (lsb 0, width 64) into a plain move.
void InstructionSelector::VisitWord64And(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasResolvedValue()) {
    uint64_t mask = m.right().ResolvedValue();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasResolvedValue()) {
        // Any shift value can match; int64 shifts use `value % 64`.
        uint32_t lsb =
            static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);

        // Dext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Dext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

        if (lsb == 0 && mask_width == 64) {
          // Extracting the whole register is an identity; emit a move.
          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
        } else {
          Emit(kMips64Dext, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
               g.TempImmediate(static_cast<int32_t>(mask_width)));
        }
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasResolvedValue()) {
    uint64_t mask = m.right().ResolvedValue();
    uint32_t shift = base::bits::CountPopulation(~mask);
    uint32_t msb = base::bits::CountLeadingZeros64(~mask);
    if (shift != 0 && shift < 32 && msb + shift == 64) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask. Dins cannot insert bits
      // past word size, so shifts smaller than 32 are covered.
      Emit(kMips64Dins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  // Generic case; And is commutative so the same opcode serves as reverse.
  VisitBinop(this, node, kMips64And, true, kMips64And);
}
669
// Or is commutative: passing the same opcode as the reverse lets VisitBinop
// fold a constant operand on either side into the immediate field.
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMips64Or32, true, kMips64Or32);
}
673
// 64-bit Or; commutative, see VisitWord32Or.
void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kMips64Or, true, kMips64Or);
}
677
VisitWord32Xor(Node * node)678 void InstructionSelector::VisitWord32Xor(Node* node) {
679 Int32BinopMatcher m(node);
680 if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
681 m.right().Is(-1)) {
682 Int32BinopMatcher mleft(m.left().node());
683 if (!mleft.right().HasResolvedValue()) {
684 Mips64OperandGenerator g(this);
685 Emit(kMips64Nor32, g.DefineAsRegister(node),
686 g.UseRegister(mleft.left().node()),
687 g.UseRegister(mleft.right().node()));
688 return;
689 }
690 }
691 if (m.right().Is(-1)) {
692 // Use Nor for bit negation and eliminate constant loading for xori.
693 Mips64OperandGenerator g(this);
694 Emit(kMips64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
695 g.TempImmediate(0));
696 return;
697 }
698 VisitBinop(this, node, kMips64Xor32, true, kMips64Xor32);
699 }
700
VisitWord64Xor(Node * node)701 void InstructionSelector::VisitWord64Xor(Node* node) {
702 Int64BinopMatcher m(node);
703 if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
704 m.right().Is(-1)) {
705 Int64BinopMatcher mleft(m.left().node());
706 if (!mleft.right().HasResolvedValue()) {
707 Mips64OperandGenerator g(this);
708 Emit(kMips64Nor, g.DefineAsRegister(node),
709 g.UseRegister(mleft.left().node()),
710 g.UseRegister(mleft.right().node()));
711 return;
712 }
713 }
714 if (m.right().Is(-1)) {
715 // Use Nor for bit negation and eliminate constant loading for xori.
716 Mips64OperandGenerator g(this);
717 Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
718 g.TempImmediate(0));
719 return;
720 }
721 VisitBinop(this, node, kMips64Xor, true, kMips64Xor);
722 }
723
// Selects Word32Shl. When the shifted value is masked with a contiguous
// low mask whose bits would anyway be shifted past the top, the And is
// redundant and only the shift is emitted.
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    Mips64OperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
    if (mleft.right().HasResolvedValue()) {
      uint32_t mask = mleft.right().ResolvedValue();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().ResolvedValue();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Shl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Shl, node);
}
753
// Selects Word32Shr. Matches Shr(And(x, mask), imm) where the mask's bits
// land in the least-significant positions after the shift, and emits a
// single Ext (bit-field extract) instead.
void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
    uint32_t lsb = m.right().ResolvedValue() & 0x1F;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasResolvedValue() &&
        mleft.right().ResolvedValue() != 0) {
      // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      // Bits below the shift amount are discarded, so clear them first.
      uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Shr, node);
}
778
// Selects Word32Sar. Matches Sar(Shl(x, K), K) sign-extension idioms:
// K == 16 -> Seh (sign-extend halfword), K == 24 -> Seb (sign-extend byte).
void InstructionSelector::VisitWord32Sar(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
      Mips64OperandGenerator g(this);
      uint32_t sar = m.right().ResolvedValue();
      uint32_t shl = mleft.right().ResolvedValue();
      if ((sar == shl) && (sar == 16)) {
        Emit(kMips64Seh, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        Emit(kMips64Seb, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 32)) {
        // NOTE(review): a 32-bit shift amount of exactly 32 is unusual
        // (int32 shift counts are normally reduced mod 32). This emits a
        // word shift by 0, which on MIPS64 sign-extends the low word —
        // confirm this arm is reachable.
        Emit(kMips64Shl, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Sar, node);
}
804
// Selects Word64Shl. Two peepholes: (1) a 32->64 extension feeding a shift
// by >= 32 is dropped because the extended bits are shifted out anyway;
// (2) a contiguous low mask whose bits are shifted past the top makes the
// And redundant, leaving only the shift.
void InstructionSelector::VisitWord64Shl(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kMips64Dshl, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 63)) {
    // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
    // contiguous, and the shift immediate non-zero.
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasResolvedValue()) {
      uint64_t mask = mleft.right().ResolvedValue();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
        uint64_t shift = m.right().ResolvedValue();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
        DCHECK_NE(0u, shift);

        if ((shift + mask_width) >= 64) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Dshl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Dshl, node);
}
844
void InstructionSelector::VisitWord64Shr(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
    // Shift amounts are taken modulo 64.
    uint32_t lsb = m.right().ResolvedValue() & 0x3F;
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasResolvedValue() &&
        mleft.right().ResolvedValue() != 0) {
      // Select Dext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      // Clear the low `lsb` bits of the mask; those bits are shifted out.
      uint64_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
      // Contiguous mask whose lowest set bit is exactly at `lsb` ==> a
      // bit-field extract of `mask_width` bits starting at `lsb`.
      if ((mask_msb + mask_width + lsb) == 64) {
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kMips64Dext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  // Generic 64-bit logical shift right.
  VisitRRO(this, kMips64Dshr, node);
}
869
void InstructionSelector::VisitWord64Sar(Node* node) {
  // First try to fold the shift into a sign-extending load; otherwise emit a
  // plain 64-bit arithmetic shift right.
  if (TryEmitExtendingLoad(this, node, node)) return;
  VisitRRO(this, kMips64Dsar, node);
}
874
// Rotate-left is not produced for this target; only rotate-right is supported.
void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
876
// Rotate-left is not produced for this target; only rotate-right is supported.
void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
878
// 32-bit rotate right.
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kMips64Ror, node);
}
882
// 32-bit count-leading-zeros.
void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kMips64Clz, node);
}
886
// Bit-reversal is not supported on this target.
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
888
// Bit-reversal is not supported on this target.
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
890
VisitWord64ReverseBytes(Node * node)891 void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
892 Mips64OperandGenerator g(this);
893 Emit(kMips64ByteSwap64, g.DefineAsRegister(node),
894 g.UseRegister(node->InputAt(0)));
895 }
896
VisitWord32ReverseBytes(Node * node)897 void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
898 Mips64OperandGenerator g(this);
899 Emit(kMips64ByteSwap32, g.DefineAsRegister(node),
900 g.UseRegister(node->InputAt(0)));
901 }
902
// SIMD byte-reversal is not produced for this target.
void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
  UNREACHABLE();
}
906
VisitWord32Ctz(Node * node)907 void InstructionSelector::VisitWord32Ctz(Node* node) {
908 Mips64OperandGenerator g(this);
909 Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
910 }
911
VisitWord64Ctz(Node * node)912 void InstructionSelector::VisitWord64Ctz(Node* node) {
913 Mips64OperandGenerator g(this);
914 Emit(kMips64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
915 }
916
VisitWord32Popcnt(Node * node)917 void InstructionSelector::VisitWord32Popcnt(Node* node) {
918 Mips64OperandGenerator g(this);
919 Emit(kMips64Popcnt, g.DefineAsRegister(node),
920 g.UseRegister(node->InputAt(0)));
921 }
922
VisitWord64Popcnt(Node * node)923 void InstructionSelector::VisitWord64Popcnt(Node* node) {
924 Mips64OperandGenerator g(this);
925 Emit(kMips64Dpopcnt, g.DefineAsRegister(node),
926 g.UseRegister(node->InputAt(0)));
927 }
928
// 64-bit rotate right.
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kMips64Dror, node);
}
932
// 64-bit count-leading-zeros.
void InstructionSelector::VisitWord64Clz(Node* node) {
  VisitRR(this, kMips64Dclz, node);
}
936
void InstructionSelector::VisitInt32Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // On r6 the LSA instruction computes (rt << sa) + rs in one step, so try to
  // fold an adjacent left-shift into the add.
  if (kArchVariant == kMips64r6) {
    // Select Lsa for (left + (left_of_right << imm)).
    if (m.right().opcode() == IrOpcode::kWord32Shl &&
        CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
      Int32BinopMatcher mright(m.right().node());
      if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
        int32_t shift_value =
            static_cast<int32_t>(mright.right().ResolvedValue());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMips64Lsa, g.DefineAsRegister(node),
               g.UseRegister(m.left().node()),
               g.UseRegister(mright.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }

    // Select Lsa for ((left_of_left << imm) + right), the mirrored pattern.
    if (m.left().opcode() == IrOpcode::kWord32Shl &&
        CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
        int32_t shift_value =
            static_cast<int32_t>(mleft.right().ResolvedValue());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMips64Lsa, g.DefineAsRegister(node),
               g.UseRegister(m.right().node()),
               g.UseRegister(mleft.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }
  }

  // Fall back to a plain add (immediate operand allowed via VisitBinop).
  VisitBinop(this, node, kMips64Add, true, kMips64Add);
}
979
void InstructionSelector::VisitInt64Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // On r6 the DLSA instruction computes (rt << sa) + rs in one step, so try
  // to fold an adjacent left-shift into the add.
  if (kArchVariant == kMips64r6) {
    // Select Dlsa for (left + (left_of_right << imm)).
    if (m.right().opcode() == IrOpcode::kWord64Shl &&
        CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
      Int64BinopMatcher mright(m.right().node());
      if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
        int32_t shift_value =
            static_cast<int32_t>(mright.right().ResolvedValue());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMips64Dlsa, g.DefineAsRegister(node),
               g.UseRegister(m.left().node()),
               g.UseRegister(mright.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }

    // Select Dlsa for ((left_of_left << imm) + right), the mirrored pattern.
    if (m.left().opcode() == IrOpcode::kWord64Shl &&
        CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
        int32_t shift_value =
            static_cast<int32_t>(mleft.right().ResolvedValue());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMips64Dlsa, g.DefineAsRegister(node),
               g.UseRegister(m.right().node()),
               g.UseRegister(mleft.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }
  }

  // Fall back to a plain 64-bit add (immediate operand allowed via
  // VisitBinop).
  VisitBinop(this, node, kMips64Dadd, true, kMips64Dadd);
}
1022
// 32-bit subtraction.
void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMips64Sub);
}
1026
// 64-bit subtraction.
void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitBinop(this, node, kMips64Dsub);
}
1030
void InstructionSelector::VisitInt32Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Strength-reduce multiplication by constants near a power of two.
  if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
    uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
    if (base::bits::IsPowerOfTwo(value)) {
      // x * 2^k  =>  x << k
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value - 1) && kArchVariant == kMips64r6 &&
        value - 1 > 0 && value - 1 <= 31) {
      // x * (2^k + 1)  =>  lsa(x, x, k) on r6.
      Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      // x * (2^k - 1)  =>  (x << k) - x
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
      Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher leftInput(left), rightInput(right);
      if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
        // Combine untagging shifts with Dmul high.
        Emit(kMips64DMulHigh, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  // Generic 32-bit multiply.
  VisitRRR(this, kMips64Mul, node);
}
1076
// High 32 bits of a signed 32x32 -> 64 multiplication.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHigh, node);
}
1080
// High 32 bits of an unsigned 32x32 -> 64 multiplication.
void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHighU, node);
}
1084
void InstructionSelector::VisitInt64Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // Strength-reduce multiplication by constants near a power of two.
  // TODO(dusmil): Add optimization for shifts larger than 32.
  if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
    uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
    if (base::bits::IsPowerOfTwo(value)) {
      // x * 2^k  =>  x << k
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value - 1) && kArchVariant == kMips64r6 &&
        value - 1 > 0 && value - 1 <= 31) {
      // x * (2^k + 1)  =>  dlsa(x, x, k) on r6.
      // Dlsa macro will handle the shifting value out of bound cases.
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      // x * (2^k - 1)  =>  (x << k) - x
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
      Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  // Generic 64-bit multiply.
  Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
1118
void InstructionSelector::VisitInt32Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Both operands are Smi-untagging shifts (sar 32); skip the shifts
        // and combine both shifted operands with Ddiv.
        Emit(kMips64Ddiv, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  // Generic signed 32-bit division.
  Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
1140
VisitUint32Div(Node * node)1141 void InstructionSelector::VisitUint32Div(Node* node) {
1142 Mips64OperandGenerator g(this);
1143 Int32BinopMatcher m(node);
1144 Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1145 g.UseRegister(m.right().node()));
1146 }
1147
void InstructionSelector::VisitInt32Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Both operands are Smi-untagging shifts (sar 32); skip the shifts
        // and combine both shifted operands with Dmod.
        Emit(kMips64Dmod, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  // Generic signed 32-bit modulus.
  Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
1169
VisitUint32Mod(Node * node)1170 void InstructionSelector::VisitUint32Mod(Node* node) {
1171 Mips64OperandGenerator g(this);
1172 Int32BinopMatcher m(node);
1173 Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1174 g.UseRegister(m.right().node()));
1175 }
1176
VisitInt64Div(Node * node)1177 void InstructionSelector::VisitInt64Div(Node* node) {
1178 Mips64OperandGenerator g(this);
1179 Int64BinopMatcher m(node);
1180 Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1181 g.UseRegister(m.right().node()));
1182 }
1183
VisitUint64Div(Node * node)1184 void InstructionSelector::VisitUint64Div(Node* node) {
1185 Mips64OperandGenerator g(this);
1186 Int64BinopMatcher m(node);
1187 Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1188 g.UseRegister(m.right().node()));
1189 }
1190
VisitInt64Mod(Node * node)1191 void InstructionSelector::VisitInt64Mod(Node* node) {
1192 Mips64OperandGenerator g(this);
1193 Int64BinopMatcher m(node);
1194 Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1195 g.UseRegister(m.right().node()));
1196 }
1197
VisitUint64Mod(Node * node)1198 void InstructionSelector::VisitUint64Mod(Node* node) {
1199 Mips64OperandGenerator g(this);
1200 Int64BinopMatcher m(node);
1201 Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1202 g.UseRegister(m.right().node()));
1203 }
1204
// float32 -> float64 conversion (cvt.d.s).
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDS, node);
}
1208
// int32 -> float32 conversion (cvt.s.w).
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSW, node);
}
1212
// uint32 -> float32 conversion.
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUw, node);
}
1216
// int32 -> float64 conversion (cvt.d.w).
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDW, node);
}
1220
// int64 -> float64 conversion (cvt.d.l).
void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDL, node);
}
1224
// uint32 -> float64 conversion.
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUw, node);
}
1228
// float32 -> int32 truncation (trunc.w.s).
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64TruncWS, node);
}
1232
// float32 -> uint32 truncation.
void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwS, node);
}
1236
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMips64FloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMips64CeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMips64RoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMips64TruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMips64FloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMips64CeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMips64RoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            // No rounding op below the widening; truncate the float32
            // operand directly.
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMips64TruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  // Generic float64 -> int32 truncation.
  VisitRR(this, kMips64TruncWD, node);
}
1299
// float64 -> int64 truncation (trunc.l.d).
void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
  VisitRR(this, kMips64TruncLD, node);
}
1303
// float64 -> uint32 truncation.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}
1307
// float64 -> uint64 truncation.
void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
  VisitRR(this, kMips64TruncUlD, node);
}
1311
// float64 -> uint32 truncation (same lowering as ChangeFloat64ToUint32).
void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}
1315
// float64 -> int64 truncation (same lowering as ChangeFloat64ToInt64).
void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
  VisitRR(this, kMips64TruncLD, node);
}
1319
VisitTryTruncateFloat32ToInt64(Node * node)1320 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
1321 Mips64OperandGenerator g(this);
1322 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1323 InstructionOperand outputs[2];
1324 size_t output_count = 0;
1325 outputs[output_count++] = g.DefineAsRegister(node);
1326
1327 Node* success_output = NodeProperties::FindProjection(node, 1);
1328 if (success_output) {
1329 outputs[output_count++] = g.DefineAsRegister(success_output);
1330 }
1331
1332 this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
1333 }
1334
VisitTryTruncateFloat64ToInt64(Node * node)1335 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
1336 Mips64OperandGenerator g(this);
1337 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1338 InstructionOperand outputs[2];
1339 size_t output_count = 0;
1340 outputs[output_count++] = g.DefineAsRegister(node);
1341
1342 Node* success_output = NodeProperties::FindProjection(node, 1);
1343 if (success_output) {
1344 outputs[output_count++] = g.DefineAsRegister(success_output);
1345 }
1346
1347 Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
1348 }
1349
VisitTryTruncateFloat32ToUint64(Node * node)1350 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
1351 Mips64OperandGenerator g(this);
1352 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1353 InstructionOperand outputs[2];
1354 size_t output_count = 0;
1355 outputs[output_count++] = g.DefineAsRegister(node);
1356
1357 Node* success_output = NodeProperties::FindProjection(node, 1);
1358 if (success_output) {
1359 outputs[output_count++] = g.DefineAsRegister(success_output);
1360 }
1361
1362 Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
1363 }
1364
VisitTryTruncateFloat64ToUint64(Node * node)1365 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
1366 Mips64OperandGenerator g(this);
1367
1368 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1369 InstructionOperand outputs[2];
1370 size_t output_count = 0;
1371 outputs[output_count++] = g.DefineAsRegister(node);
1372
1373 Node* success_output = NodeProperties::FindProjection(node, 1);
1374 if (success_output) {
1375 outputs[output_count++] = g.DefineAsRegister(success_output);
1376 }
1377
1378 Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
1379 }
1380
// Not implemented for this target.
void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
  UNIMPLEMENTED();
}
1384
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Node* value = node->InputAt(0);
  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
    // Generate sign-extending load.
    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
    InstructionCode opcode = kArchNop;
    switch (load_rep.representation()) {
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
        break;
      case MachineRepresentation::kWord16:
        opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
        break;
      case MachineRepresentation::kWord32:
        // Lw sign-extends the loaded word to 64 bits.
        opcode = kMips64Lw;
        break;
      default:
        UNREACHABLE();
    }
    EmitLoad(this, value, opcode, node);
  } else {
    // A 32-bit shift by zero sign-extends the word into the full register.
    Mips64OperandGenerator g(this);
    Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
         g.TempImmediate(0));
  }
}
1412
// Returns true if `node` is known to leave the upper 32 bits of its 64-bit
// result cleared, so an explicit zero-extension can be elided.
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
  DCHECK_NE(node->opcode(), IrOpcode::kPhi);
  switch (node->opcode()) {
    // 32-bit operations will write their result in a 64 bit register,
    // clearing the top 32 bits of the destination register.
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh:
      return true;
    case IrOpcode::kLoad: {
      // Unsigned loads of <= 32 bits zero-fill the upper half.
      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
      if (load_rep.IsUnsigned()) {
        switch (load_rep.representation()) {
          case MachineRepresentation::kWord8:
          case MachineRepresentation::kWord16:
          case MachineRepresentation::kWord32:
            return true;
          default:
            return false;
        }
      }
      return false;
    }
    default:
      // Conservatively assume the upper bits are not zero.
      return false;
  }
}
1440
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (ZeroExtendsWord32ToWord64(value)) {
    // The upper 32 bits are already zero; emit a no-op move.
    Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
    return;
  }
  // Zero-extend by extracting the low 32 bits (dext pos=0, size=32).
  Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0), g.TempImmediate(32));
}
1451
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar: {
        // Prefer folding (load; sar) into a single extending load.
        if (CanCoverTransitively(node, value, value->InputAt(0)) &&
            TryEmitExtendingLoad(this, value, node)) {
          return;
        } else {
          Int64BinopMatcher m(value);
          if (m.right().IsInRange(32, 63)) {
            // After smi untagging no need for truncate. Combine sequence.
            Emit(kMips64Dsar, g.DefineSameAsFirst(node),
                 g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()));
            return;
          }
        }
        break;
      }
      default:
        break;
    }
  }
  // Generic truncation: extract the low 32 bits (ext pos=0, size=32).
  Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0), g.TempImmediate(32));
}
1480
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
  // instruction.
  if (CanCover(node, value) &&
      value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
    // int32 -> float64 -> float32 collapses to a direct cvt.s.w.
    Emit(kMips64CvtSW, g.DefineAsRegister(node),
         g.UseRegister(value->InputAt(0)));
    return;
  }
  // Generic float64 -> float32 conversion.
  VisitRR(this, kMips64CvtSD, node);
}
1494
// JS-semantics float64 -> int32 truncation (handles out-of-range inputs).
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}
1498
// float64 -> int32 round-towards-zero.
void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kMips64TruncWD, node);
}
1502
// int64 -> float32 conversion (cvt.s.l).
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSL, node);
}
1506
// int64 -> float64 conversion (cvt.d.l).
void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDL, node);
}
1510
// uint64 -> float32 conversion.
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUl, node);
}
1514
// uint64 -> float64 conversion.
void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUl, node);
}
1518
// Reinterpret float32 bits as int32 via the FPR low-word extract.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
1522
// Reinterpret float64 bits as int64 (FPR -> GPR move).
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kMips64BitcastDL, node);
}
1526
// Reinterpret int32 bits as float32 by inserting them into the low word of
// an FP register. The inline immediate 0 marks that there is no incoming
// double to merge with.
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE, 0),
       g.UseRegister(node->InputAt(0)));
}
1533
// Reinterpret int64 bits as float64 (GPR -> FPR move).
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64BitcastLD, node);
}
1537
void InstructionSelector::VisitFloat32Add(Node* node) {
  // Optimization with Madd.S(z, x, y) is intentionally removed.
  // See explanation for madd_s in assembler-mips64.cc.
  VisitRRR(this, kMips64AddS, node);
}
1543
void InstructionSelector::VisitFloat64Add(Node* node) {
  // Optimization with Madd.D(z, x, y) is intentionally removed.
  // See explanation for madd_d in assembler-mips64.cc.
  VisitRRR(this, kMips64AddD, node);
}
1549
void InstructionSelector::VisitFloat32Sub(Node* node) {
  // Optimization with Msub.S(z, x, y) is intentionally removed.
  // See explanation for madd_s in assembler-mips64.cc.
  VisitRRR(this, kMips64SubS, node);
}
1555
void InstructionSelector::VisitFloat64Sub(Node* node) {
  // Optimization with Msub.D(z, x, y) is intentionally removed.
  // See explanation for madd_d in assembler-mips64.cc.
  VisitRRR(this, kMips64SubD, node);
}
1561
// float32 multiplication.
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMips64MulS, node);
}
1565
// float64 multiplication.
void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMips64MulD, node);
}
1569
// float32 division.
void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMips64DivS, node);
}
1573
// float64 division.
void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMips64DivD, node);
}
1577
// float64 modulus is lowered to a C runtime call, so the operands are pinned
// to the MIPS N64 FP argument/return registers (f12, f14 -> f0).
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ModD, g.DefineAsFixed(node, f0),
       g.UseFixed(node->InputAt(0), f12), g.UseFixed(node->InputAt(1), f14))
      ->MarkAsCall();
}
1584
VisitFloat32Max(Node * node)1585 void InstructionSelector::VisitFloat32Max(Node* node) {
1586 Mips64OperandGenerator g(this);
1587 Emit(kMips64Float32Max, g.DefineAsRegister(node),
1588 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1589 }
1590
VisitFloat64Max(Node * node)1591 void InstructionSelector::VisitFloat64Max(Node* node) {
1592 Mips64OperandGenerator g(this);
1593 Emit(kMips64Float64Max, g.DefineAsRegister(node),
1594 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1595 }
1596
VisitFloat32Min(Node * node)1597 void InstructionSelector::VisitFloat32Min(Node* node) {
1598 Mips64OperandGenerator g(this);
1599 Emit(kMips64Float32Min, g.DefineAsRegister(node),
1600 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1601 }
1602
VisitFloat64Min(Node * node)1603 void InstructionSelector::VisitFloat64Min(Node* node) {
1604 Mips64OperandGenerator g(this);
1605 Emit(kMips64Float64Min, g.DefineAsRegister(node),
1606 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1607 }
1608
// float32 absolute value.
void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMips64AbsS, node);
}
1612
// float64 absolute value.
void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMips64AbsD, node);
}
1616
// float32 square root.
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtS, node);
}
1620
// float64 square root.
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtD, node);
}
1624
// float32 round toward negative infinity (floor).
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMips64Float32RoundDown, node);
}
1628
// float64 round toward negative infinity (floor).
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMips64Float64RoundDown, node);
}
1632
// float32 round toward positive infinity (ceil).
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMips64Float32RoundUp, node);
}
1636
// float64 round toward positive infinity (ceil).
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMips64Float64RoundUp, node);
}
1640
// float32 round toward zero (truncate).
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float32RoundTruncate, node);
}
1644
// float64 round toward zero (truncate).
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float64RoundTruncate, node);
}
1648
// Round-ties-away is not selected on this port; the operator should never
// reach the instruction selector here, so hitting this is a compiler bug.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}
1652
// Float32 round to nearest, ties to even.
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float32RoundTiesEven, node);
}
1656
// Float64 round to nearest, ties to even.
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float64RoundTiesEven, node);
}
1660
// Float32 negation.
void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kMips64NegS, node);
}
1664
// Float64 negation.
void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kMips64NegD, node);
}
1668
// Emits a call to an IEEE754 binary math routine. Inputs are pinned to the
// FP argument registers f2 and f4 and the result is produced in f0;
// MarkAsCall tells the register allocator this instruction clobbers
// caller-saved registers.
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  Mips64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
       g.UseFixed(node->InputAt(1), f4))
      ->MarkAsCall();
}
1676
// Emits a call to an IEEE754 unary math routine. The input is pinned to FP
// register f12 and the result is produced in f0; MarkAsCall tells the
// register allocator this instruction clobbers caller-saved registers.
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  Mips64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
      ->MarkAsCall();
}
1683
EmitPrepareArguments(ZoneVector<PushParameter> * arguments,const CallDescriptor * call_descriptor,Node * node)1684 void InstructionSelector::EmitPrepareArguments(
1685 ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
1686 Node* node) {
1687 Mips64OperandGenerator g(this);
1688
1689 // Prepare for C function call.
1690 if (call_descriptor->IsCFunctionCall()) {
1691 Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
1692 call_descriptor->ParameterCount())),
1693 0, nullptr, 0, nullptr);
1694
1695 // Poke any stack arguments.
1696 int slot = kCArgSlotCount;
1697 for (PushParameter input : (*arguments)) {
1698 Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
1699 g.TempImmediate(slot << kSystemPointerSizeLog2));
1700 ++slot;
1701 }
1702 } else {
1703 int push_count = static_cast<int>(call_descriptor->StackParameterCount());
1704 if (push_count > 0) {
1705 // Calculate needed space
1706 int stack_size = 0;
1707 for (PushParameter input : (*arguments)) {
1708 if (input.node) {
1709 stack_size += input.location.GetSizeInPointers();
1710 }
1711 }
1712 Emit(kMips64StackClaim, g.NoOutput(),
1713 g.TempImmediate(stack_size << kSystemPointerSizeLog2));
1714 }
1715 for (size_t n = 0; n < arguments->size(); ++n) {
1716 PushParameter input = (*arguments)[n];
1717 if (input.node) {
1718 Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
1719 g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
1720 }
1721 }
1722 }
1723 }
1724
// Emits the instructions that read call results back from the caller's
// stack frame after a call returns.
void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  Mips64OperandGenerator g(this);

  // Slot index counted backwards from the frame end; advanced by each
  // result's size in pointers even when the result node is unused.
  int reverse_slot = 1;
  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node != nullptr) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      // Record the machine representation so the register allocator picks
      // FP/SIMD registers where required.
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      } else if (output.location.GetType() == MachineType::Simd128()) {
        MarkAsSimd128(output.node);
      }
      Emit(kMips64Peek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
    reverse_slot += output.location.GetSizeInPointers();
  }
}
1749
// Tail-call target addresses are never encoded as immediates on this port.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
1751
// Number of temporary registers a tail call from a JS function needs.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
1753
// Selects an unaligned load. Picks the representation-specific unaligned
// load opcode, then emits it either with an immediate index (MRI mode) or
// through a temp register holding base + index.
void InstructionSelector::VisitUnalignedLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Ulwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Uldc1;
      break;
    case MachineRepresentation::kWord8:
      // Byte accesses are always aligned; use the plain load.
      opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
      break;
    case MachineRepresentation::kWord32:
      opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Uld;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMips64MsaLd;
      break;
    case MachineRepresentation::kBit:                // Fall through.
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    // Index does not fit the immediate field: compute base + index first.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
1805
// Selects an unaligned store. Mirrors VisitUnalignedLoad: picks the
// representation-specific unaligned store opcode, then addresses either by
// immediate index or through a temp register holding base + index.
void InstructionSelector::VisitUnalignedStore(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
  ArchOpcode opcode;
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Uswc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Usdc1;
      break;
    case MachineRepresentation::kWord8:
      // Byte accesses are always aligned; use the plain store.
      opcode = kMips64Sb;
      break;
    case MachineRepresentation::kWord16:
      opcode = kMips64Ush;
      break;
    case MachineRepresentation::kWord32:
      opcode = kMips64Usw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Usd;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMips64MsaSt;
      break;
    case MachineRepresentation::kBit:                // Fall through.
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    // Index does not fit the immediate field: compute base + index first.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}
1859
1860 namespace {
1861
1862 // Shared routine for multiple compare operations.
// Shared routine for multiple compare operations. Forwards to
// EmitWithContinuation, which attaches the branch/set/deopt continuation
// to the compare instruction.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}
1868
1869 // Shared routine for multiple float32 compare operations.
VisitFloat32Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1870 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1871 FlagsContinuation* cont) {
1872 Mips64OperandGenerator g(selector);
1873 Float32BinopMatcher m(node);
1874 InstructionOperand lhs, rhs;
1875
1876 lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1877 : g.UseRegister(m.left().node());
1878 rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1879 : g.UseRegister(m.right().node());
1880 VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
1881 }
1882
1883 // Shared routine for multiple float64 compare operations.
VisitFloat64Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1884 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1885 FlagsContinuation* cont) {
1886 Mips64OperandGenerator g(selector);
1887 Float64BinopMatcher m(node);
1888 InstructionOperand lhs, rhs;
1889
1890 lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1891 : g.UseRegister(m.left().node());
1892 rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1893 : g.UseRegister(m.right().node());
1894 VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
1895 }
1896
1897 // Shared routine for multiple word compare operations.
// Shared routine for multiple word compare operations. Tries to use an
// immediate operand when one side is a suitable constant; whether the
// immediate form is actually usable also depends on the continuation's
// condition (see the switches below).
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative) {
  Mips64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, opcode)) {
    if (opcode == kMips64Tst) {
      VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            // Materializing the flag can take an immediate; the branching
            // form forces the operand into a register instead.
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseImmediate(right), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseRegister(right), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseImmediate(right), cont);
          break;
        default:
          // Other conditions require both operands in registers.
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseRegister(right), cont);
      }
    }
  } else if (g.CanBeImmediate(left, opcode)) {
    // The immediate is on the left: swap the condition (unless the
    // operation is commutative) and mirror the logic above.
    if (!commutative) cont->Commute();
    if (opcode == kMips64Tst) {
      VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseImmediate(left), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseRegister(left), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseImmediate(left), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseRegister(left), cont);
      }
    }
  } else {
    // No immediate on either side: register/register compare.
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}
1968
IsNodeUnsigned(Node * n)1969 bool IsNodeUnsigned(Node* n) {
1970 NodeMatcher m(n);
1971
1972 if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
1973 m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
1974 LoadRepresentation load_rep = LoadRepresentationOf(n->op());
1975 return load_rep.IsUnsigned();
1976 } else {
1977 return m.IsUint32Div() || m.IsUint32LessThan() ||
1978 m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
1979 m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() ||
1980 m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
1981 }
1982 }
1983
1984 // Shared routine for multiple word compare operations.
VisitFullWord32Compare(InstructionSelector * selector,Node * node,InstructionCode opcode,FlagsContinuation * cont)1985 void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
1986 InstructionCode opcode, FlagsContinuation* cont) {
1987 Mips64OperandGenerator g(selector);
1988 InstructionOperand leftOp = g.TempRegister();
1989 InstructionOperand rightOp = g.TempRegister();
1990
1991 selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
1992 g.TempImmediate(32));
1993 selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
1994 g.TempImmediate(32));
1995
1996 VisitCompare(selector, opcode, leftOp, rightOp, cont);
1997 }
1998
// Emits the fast Word32 compare (relying on operands being properly
// sign-extended in registers). Under --debug-code it additionally emits the
// full shifted compare and an assertion that both produce the same flag,
// catching cases where the sign-extension assumption is violated.
void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
                                 InstructionCode opcode,
                                 FlagsContinuation* cont) {
  if (FLAG_debug_code) {
    Mips64OperandGenerator g(selector);
    InstructionOperand leftOp = g.TempRegister();
    InstructionOperand rightOp = g.TempRegister();
    InstructionOperand optimizedResult = g.TempRegister();
    InstructionOperand fullResult = g.TempRegister();
    FlagsCondition condition = cont->condition();
    // Build a flag-setting variant of the compare so both results can be
    // materialized and checked against each other.
    InstructionCode testOpcode = opcode |
                                 FlagsConditionField::encode(condition) |
                                 FlagsModeField::encode(kFlags_set);

    // Optimized compare: operands used as-is.
    selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
                   g.UseRegister(node->InputAt(1)));

    // Full compare: both operands shifted left by 32 first.
    selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
                   g.TempImmediate(32));
    selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
                   g.TempImmediate(32));
    selector->Emit(testOpcode, fullResult, leftOp, rightOp);

    // Abort if the two disagree.
    selector->Emit(
        kMips64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
        g.TempImmediate(
            static_cast<int>(AbortReason::kUnsupportedNonPrimitiveCompare)));
  }

  VisitWordCompare(selector, node, opcode, cont, false);
}
2030
void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  // MIPS64 doesn't support Word32 compare instructions. Instead it relies
  // on the values in registers being correctly sign-extended and uses a
  // Word64 comparison instead. This behavior is correct in most cases,
  // but doesn't work when comparing signed with unsigned operands.
  // We could simulate a full Word32 compare in all cases but this would
  // create unnecessary overhead, since unsigned integers are rarely
  // used in JavaScript.
  // The solution proposed here tries to match a comparison of signed
  // with unsigned operand, and performs the full Word32Compare only
  // in those cases. Unfortunately, the solution is not complete because
  // it might skip cases where a full Word32 compare is needed, so
  // basically it is a hack.
  // When calling a host function in the simulator, if the function returns
  // an int32 value, the simulator does not sign-extend it to int64, because
  // the simulator does not know whether the function returns an int32 or
  // an int64. So we need to do a full Word32 compare in this case.
#ifndef USE_SIMULATOR
  if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
#else
  if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) ||
      node->InputAt(0)->opcode() == IrOpcode::kCall ||
      node->InputAt(1)->opcode() == IrOpcode::kCall ) {
#endif
    VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
  } else {
    VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
  }
}
2061
// Word64 compares map directly onto the native 64-bit compare.
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}
2066
// Emits a compare of |value| against the immediate zero, attaching the
// continuation |cont| to the compare.
void EmitWordCompareZero(InstructionSelector* selector, Node* value,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  selector->EmitWithContinuation(kMips64Cmp, g.UseRegister(value),
                                 g.TempImmediate(0), cont);
}
2073
2074 void VisitAtomicLoad(InstructionSelector* selector, Node* node,
2075 ArchOpcode opcode) {
2076 Mips64OperandGenerator g(selector);
2077 Node* base = node->InputAt(0);
2078 Node* index = node->InputAt(1);
2079 if (g.CanBeImmediate(index, opcode)) {
2080 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
2081 g.DefineAsRegister(node), g.UseRegister(base),
2082 g.UseImmediate(index));
2083 } else {
2084 InstructionOperand addr_reg = g.TempRegister();
2085 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
2086 addr_reg, g.UseRegister(index), g.UseRegister(base));
2087 // Emit desired load opcode, using temp addr_reg.
2088 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
2089 g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
2090 }
2091 }
2092
2093 void VisitAtomicStore(InstructionSelector* selector, Node* node,
2094 ArchOpcode opcode) {
2095 Mips64OperandGenerator g(selector);
2096 Node* base = node->InputAt(0);
2097 Node* index = node->InputAt(1);
2098 Node* value = node->InputAt(2);
2099
2100 if (g.CanBeImmediate(index, opcode)) {
2101 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
2102 g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
2103 g.UseRegisterOrImmediateZero(value));
2104 } else {
2105 InstructionOperand addr_reg = g.TempRegister();
2106 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
2107 addr_reg, g.UseRegister(index), g.UseRegister(base));
2108 // Emit desired store opcode, using temp addr_reg.
2109 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
2110 g.NoOutput(), addr_reg, g.TempImmediate(0),
2111 g.UseRegisterOrImmediateZero(value));
2112 }
2113 }
2114
2115 void VisitAtomicExchange(InstructionSelector* selector, Node* node,
2116 ArchOpcode opcode) {
2117 Mips64OperandGenerator g(selector);
2118 Node* base = node->InputAt(0);
2119 Node* index = node->InputAt(1);
2120 Node* value = node->InputAt(2);
2121
2122 AddressingMode addressing_mode = kMode_MRI;
2123 InstructionOperand inputs[3];
2124 size_t input_count = 0;
2125 inputs[input_count++] = g.UseUniqueRegister(base);
2126 inputs[input_count++] = g.UseUniqueRegister(index);
2127 inputs[input_count++] = g.UseUniqueRegister(value);
2128 InstructionOperand outputs[1];
2129 outputs[0] = g.UseUniqueRegister(node);
2130 InstructionOperand temp[3];
2131 temp[0] = g.TempRegister();
2132 temp[1] = g.TempRegister();
2133 temp[2] = g.TempRegister();
2134 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2135 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
2136 }
2137
2138 void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
2139 ArchOpcode opcode) {
2140 Mips64OperandGenerator g(selector);
2141 Node* base = node->InputAt(0);
2142 Node* index = node->InputAt(1);
2143 Node* old_value = node->InputAt(2);
2144 Node* new_value = node->InputAt(3);
2145
2146 AddressingMode addressing_mode = kMode_MRI;
2147 InstructionOperand inputs[4];
2148 size_t input_count = 0;
2149 inputs[input_count++] = g.UseUniqueRegister(base);
2150 inputs[input_count++] = g.UseUniqueRegister(index);
2151 inputs[input_count++] = g.UseUniqueRegister(old_value);
2152 inputs[input_count++] = g.UseUniqueRegister(new_value);
2153 InstructionOperand outputs[1];
2154 outputs[0] = g.UseUniqueRegister(node);
2155 InstructionOperand temp[3];
2156 temp[0] = g.TempRegister();
2157 temp[1] = g.TempRegister();
2158 temp[2] = g.TempRegister();
2159 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2160 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
2161 }
2162
2163 void VisitAtomicBinop(InstructionSelector* selector, Node* node,
2164 ArchOpcode opcode) {
2165 Mips64OperandGenerator g(selector);
2166 Node* base = node->InputAt(0);
2167 Node* index = node->InputAt(1);
2168 Node* value = node->InputAt(2);
2169
2170 AddressingMode addressing_mode = kMode_MRI;
2171 InstructionOperand inputs[3];
2172 size_t input_count = 0;
2173 inputs[input_count++] = g.UseUniqueRegister(base);
2174 inputs[input_count++] = g.UseUniqueRegister(index);
2175 inputs[input_count++] = g.UseUniqueRegister(value);
2176 InstructionOperand outputs[1];
2177 outputs[0] = g.UseUniqueRegister(node);
2178 InstructionOperand temps[4];
2179 temps[0] = g.TempRegister();
2180 temps[1] = g.TempRegister();
2181 temps[2] = g.TempRegister();
2182 temps[3] = g.TempRegister();
2183 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2184 selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
2185 }
2186
2187 } // namespace
2188
// Selects the stack-overflow check comparison. The stack-check kind is
// encoded into the instruction's MiscField so the code generator knows
// whether an offset must be applied.
void InstructionSelector::VisitStackPointerGreaterThan(
    Node* node, FlagsContinuation* cont) {
  StackCheckKind kind = StackCheckKindOf(node->op());
  InstructionCode opcode =
      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));

  Mips64OperandGenerator g(this);

  // No outputs.
  InstructionOperand* const outputs = nullptr;
  const int output_count = 0;

  // Applying an offset to this stack check requires a temp register. Offsets
  // are only applied to the first stack check. If applying an offset, we must
  // ensure the input and temp registers do not alias, thus kUniqueRegister.
  InstructionOperand temps[] = {g.TempRegister()};
  const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0);
  const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
                                 ? OperandGenerator::kUniqueRegister
                                 : OperandGenerator::kRegister;

  Node* const value = node->InputAt(0);
  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
  static constexpr int input_count = arraysize(inputs);

  EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
                       temp_count, temps, cont);
}
2217
2218 // Shared routine for word comparisons against zero.
// Shared routine for word comparisons against zero. Tries to fuse the
// comparison that produced |value| (or an overflow projection, or a
// stack check) into the continuation; falls back to an explicit
// compare-with-zero when no fusion applies.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else {
      break;
    }

    cont->Negate();
  }

  if (CanCover(user, value)) {
    // |value| is only used by this comparison, so its computation can be
    // folded into the flag-producing instruction.
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kWord64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64Dadd, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64Dsub, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64MulOvf, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64DaddOvf, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64DsubOvf, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
      case IrOpcode::kWord64And:
        return VisitWordCompare(this, value, kMips64Tst, cont, true);
      case IrOpcode::kStackPointerGreaterThan:
        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
        return VisitStackPointerGreaterThan(value, cont);
      default:
        break;
    }
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  EmitWordCompareZero(this, value, cont);
}
2338
// Lowers a switch either to a jump table (ArchTableSwitch) or to a binary
// search over the cases (ArchBinarySearchSwitch), based on a simple
// space/time cost heuristic.
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  Mips64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 10 + 2 * sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 2 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        // Rebase the switch value so the table is indexed from zero.
        index_operand = g.TempRegister();
        Emit(kMips64Sub, index_operand, value_operand,
             g.TempImmediate(sw.min_value()));
      }
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(sw, value_operand);
}
2369
VisitWord32Equal(Node * const node)2370 void InstructionSelector::VisitWord32Equal(Node* const node) {
2371 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2372 Int32BinopMatcher m(node);
2373 if (m.right().Is(0)) {
2374 return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2375 }
2376
2377 VisitWord32Compare(this, node, &cont);
2378 }
2379
// Signed 32-bit less-than, materialized as a boolean.
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}
2384
// Signed 32-bit less-than-or-equal, materialized as a boolean.
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
2390
// Unsigned 32-bit less-than, materialized as a boolean.
void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}
2395
// Unsigned 32-bit less-than-or-equal, materialized as a boolean.
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
2401
VisitInt32AddWithOverflow(Node * node)2402 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
2403 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2404 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2405 return VisitBinop(this, node, kMips64Dadd, &cont);
2406 }
2407 FlagsContinuation cont;
2408 VisitBinop(this, node, kMips64Dadd, &cont);
2409 }
2410
VisitInt32SubWithOverflow(Node * node)2411 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
2412 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2413 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2414 return VisitBinop(this, node, kMips64Dsub, &cont);
2415 }
2416 FlagsContinuation cont;
2417 VisitBinop(this, node, kMips64Dsub, &cont);
2418 }
2419
VisitInt32MulWithOverflow(Node * node)2420 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
2421 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2422 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2423 return VisitBinop(this, node, kMips64MulOvf, &cont);
2424 }
2425 FlagsContinuation cont;
2426 VisitBinop(this, node, kMips64MulOvf, &cont);
2427 }
2428
VisitInt64AddWithOverflow(Node * node)2429 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
2430 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2431 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2432 return VisitBinop(this, node, kMips64DaddOvf, &cont);
2433 }
2434 FlagsContinuation cont;
2435 VisitBinop(this, node, kMips64DaddOvf, &cont);
2436 }
2437
VisitInt64SubWithOverflow(Node * node)2438 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
2439 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2440 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2441 return VisitBinop(this, node, kMips64DsubOvf, &cont);
2442 }
2443 FlagsContinuation cont;
2444 VisitBinop(this, node, kMips64DsubOvf, &cont);
2445 }
2446
VisitWord64Equal(Node * const node)2447 void InstructionSelector::VisitWord64Equal(Node* const node) {
2448 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2449 Int64BinopMatcher m(node);
2450 if (m.right().Is(0)) {
2451 return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2452 }
2453
2454 VisitWord64Compare(this, node, &cont);
2455 }
2456
VisitInt64LessThan(Node * node)2457 void InstructionSelector::VisitInt64LessThan(Node* node) {
2458 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2459 VisitWord64Compare(this, node, &cont);
2460 }
2461
VisitInt64LessThanOrEqual(Node * node)2462 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
2463 FlagsContinuation cont =
2464 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2465 VisitWord64Compare(this, node, &cont);
2466 }
2467
VisitUint64LessThan(Node * node)2468 void InstructionSelector::VisitUint64LessThan(Node* node) {
2469 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2470 VisitWord64Compare(this, node, &cont);
2471 }
2472
VisitUint64LessThanOrEqual(Node * node)2473 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
2474 FlagsContinuation cont =
2475 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2476 VisitWord64Compare(this, node, &cont);
2477 }
2478
VisitFloat32Equal(Node * node)2479 void InstructionSelector::VisitFloat32Equal(Node* node) {
2480 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2481 VisitFloat32Compare(this, node, &cont);
2482 }
2483
VisitFloat32LessThan(Node * node)2484 void InstructionSelector::VisitFloat32LessThan(Node* node) {
2485 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2486 VisitFloat32Compare(this, node, &cont);
2487 }
2488
VisitFloat32LessThanOrEqual(Node * node)2489 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
2490 FlagsContinuation cont =
2491 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2492 VisitFloat32Compare(this, node, &cont);
2493 }
2494
VisitFloat64Equal(Node * node)2495 void InstructionSelector::VisitFloat64Equal(Node* node) {
2496 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2497 VisitFloat64Compare(this, node, &cont);
2498 }
2499
VisitFloat64LessThan(Node * node)2500 void InstructionSelector::VisitFloat64LessThan(Node* node) {
2501 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2502 VisitFloat64Compare(this, node, &cont);
2503 }
2504
VisitFloat64LessThanOrEqual(Node * node)2505 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
2506 FlagsContinuation cont =
2507 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2508 VisitFloat64Compare(this, node, &cont);
2509 }
2510
VisitFloat64ExtractLowWord32(Node * node)2511 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
2512 VisitRR(this, kMips64Float64ExtractLowWord32, node);
2513 }
2514
VisitFloat64ExtractHighWord32(Node * node)2515 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
2516 VisitRR(this, kMips64Float64ExtractHighWord32, node);
2517 }
2518
VisitFloat64SilenceNaN(Node * node)2519 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
2520 VisitRR(this, kMips64Float64SilenceNaN, node);
2521 }
2522
VisitFloat64InsertLowWord32(Node * node)2523 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
2524 Mips64OperandGenerator g(this);
2525 Node* left = node->InputAt(0);
2526 Node* right = node->InputAt(1);
2527 Emit(kMips64Float64InsertLowWord32, g.DefineSameAsFirst(node),
2528 g.UseRegister(left), g.UseRegister(right));
2529 }
2530
VisitFloat64InsertHighWord32(Node * node)2531 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
2532 Mips64OperandGenerator g(this);
2533 Node* left = node->InputAt(0);
2534 Node* right = node->InputAt(1);
2535 Emit(kMips64Float64InsertHighWord32, g.DefineSameAsFirst(node),
2536 g.UseRegister(left), g.UseRegister(right));
2537 }
2538
VisitMemoryBarrier(Node * node)2539 void InstructionSelector::VisitMemoryBarrier(Node* node) {
2540 Mips64OperandGenerator g(this);
2541 Emit(kMips64Sync, g.NoOutput());
2542 }
2543
VisitWord32AtomicLoad(Node * node)2544 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
2545 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
2546 ArchOpcode opcode;
2547 switch (load_rep.representation()) {
2548 case MachineRepresentation::kWord8:
2549 opcode =
2550 load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
2551 break;
2552 case MachineRepresentation::kWord16:
2553 opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
2554 : kWord32AtomicLoadUint16;
2555 break;
2556 case MachineRepresentation::kWord32:
2557 opcode = kWord32AtomicLoadWord32;
2558 break;
2559 default:
2560 UNREACHABLE();
2561 }
2562 VisitAtomicLoad(this, node, opcode);
2563 }
2564
VisitWord32AtomicStore(Node * node)2565 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
2566 MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2567 ArchOpcode opcode;
2568 switch (rep) {
2569 case MachineRepresentation::kWord8:
2570 opcode = kWord32AtomicStoreWord8;
2571 break;
2572 case MachineRepresentation::kWord16:
2573 opcode = kWord32AtomicStoreWord16;
2574 break;
2575 case MachineRepresentation::kWord32:
2576 opcode = kWord32AtomicStoreWord32;
2577 break;
2578 default:
2579 UNREACHABLE();
2580 }
2581
2582 VisitAtomicStore(this, node, opcode);
2583 }
2584
VisitWord64AtomicLoad(Node * node)2585 void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
2586 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
2587 ArchOpcode opcode;
2588 switch (load_rep.representation()) {
2589 case MachineRepresentation::kWord8:
2590 opcode = kMips64Word64AtomicLoadUint8;
2591 break;
2592 case MachineRepresentation::kWord16:
2593 opcode = kMips64Word64AtomicLoadUint16;
2594 break;
2595 case MachineRepresentation::kWord32:
2596 opcode = kMips64Word64AtomicLoadUint32;
2597 break;
2598 case MachineRepresentation::kWord64:
2599 opcode = kMips64Word64AtomicLoadUint64;
2600 break;
2601 default:
2602 UNREACHABLE();
2603 }
2604 VisitAtomicLoad(this, node, opcode);
2605 }
2606
VisitWord64AtomicStore(Node * node)2607 void InstructionSelector::VisitWord64AtomicStore(Node* node) {
2608 MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2609 ArchOpcode opcode;
2610 switch (rep) {
2611 case MachineRepresentation::kWord8:
2612 opcode = kMips64Word64AtomicStoreWord8;
2613 break;
2614 case MachineRepresentation::kWord16:
2615 opcode = kMips64Word64AtomicStoreWord16;
2616 break;
2617 case MachineRepresentation::kWord32:
2618 opcode = kMips64Word64AtomicStoreWord32;
2619 break;
2620 case MachineRepresentation::kWord64:
2621 opcode = kMips64Word64AtomicStoreWord64;
2622 break;
2623 default:
2624 UNREACHABLE();
2625 }
2626
2627 VisitAtomicStore(this, node, opcode);
2628 }
2629
VisitWord32AtomicExchange(Node * node)2630 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
2631 ArchOpcode opcode;
2632 MachineType type = AtomicOpType(node->op());
2633 if (type == MachineType::Int8()) {
2634 opcode = kWord32AtomicExchangeInt8;
2635 } else if (type == MachineType::Uint8()) {
2636 opcode = kWord32AtomicExchangeUint8;
2637 } else if (type == MachineType::Int16()) {
2638 opcode = kWord32AtomicExchangeInt16;
2639 } else if (type == MachineType::Uint16()) {
2640 opcode = kWord32AtomicExchangeUint16;
2641 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2642 opcode = kWord32AtomicExchangeWord32;
2643 } else {
2644 UNREACHABLE();
2645 }
2646
2647 VisitAtomicExchange(this, node, opcode);
2648 }
2649
VisitWord64AtomicExchange(Node * node)2650 void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
2651 ArchOpcode opcode;
2652 MachineType type = AtomicOpType(node->op());
2653 if (type == MachineType::Uint8()) {
2654 opcode = kMips64Word64AtomicExchangeUint8;
2655 } else if (type == MachineType::Uint16()) {
2656 opcode = kMips64Word64AtomicExchangeUint16;
2657 } else if (type == MachineType::Uint32()) {
2658 opcode = kMips64Word64AtomicExchangeUint32;
2659 } else if (type == MachineType::Uint64()) {
2660 opcode = kMips64Word64AtomicExchangeUint64;
2661 } else {
2662 UNREACHABLE();
2663 }
2664 VisitAtomicExchange(this, node, opcode);
2665 }
2666
VisitWord32AtomicCompareExchange(Node * node)2667 void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
2668 ArchOpcode opcode;
2669 MachineType type = AtomicOpType(node->op());
2670 if (type == MachineType::Int8()) {
2671 opcode = kWord32AtomicCompareExchangeInt8;
2672 } else if (type == MachineType::Uint8()) {
2673 opcode = kWord32AtomicCompareExchangeUint8;
2674 } else if (type == MachineType::Int16()) {
2675 opcode = kWord32AtomicCompareExchangeInt16;
2676 } else if (type == MachineType::Uint16()) {
2677 opcode = kWord32AtomicCompareExchangeUint16;
2678 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2679 opcode = kWord32AtomicCompareExchangeWord32;
2680 } else {
2681 UNREACHABLE();
2682 }
2683
2684 VisitAtomicCompareExchange(this, node, opcode);
2685 }
2686
VisitWord64AtomicCompareExchange(Node * node)2687 void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
2688 ArchOpcode opcode;
2689 MachineType type = AtomicOpType(node->op());
2690 if (type == MachineType::Uint8()) {
2691 opcode = kMips64Word64AtomicCompareExchangeUint8;
2692 } else if (type == MachineType::Uint16()) {
2693 opcode = kMips64Word64AtomicCompareExchangeUint16;
2694 } else if (type == MachineType::Uint32()) {
2695 opcode = kMips64Word64AtomicCompareExchangeUint32;
2696 } else if (type == MachineType::Uint64()) {
2697 opcode = kMips64Word64AtomicCompareExchangeUint64;
2698 } else {
2699 UNREACHABLE();
2700 }
2701 VisitAtomicCompareExchange(this, node, opcode);
2702 }
VisitWord32AtomicBinaryOperation(Node * node,ArchOpcode int8_op,ArchOpcode uint8_op,ArchOpcode int16_op,ArchOpcode uint16_op,ArchOpcode word32_op)2703 void InstructionSelector::VisitWord32AtomicBinaryOperation(
2704 Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2705 ArchOpcode uint16_op, ArchOpcode word32_op) {
2706 ArchOpcode opcode;
2707 MachineType type = AtomicOpType(node->op());
2708 if (type == MachineType::Int8()) {
2709 opcode = int8_op;
2710 } else if (type == MachineType::Uint8()) {
2711 opcode = uint8_op;
2712 } else if (type == MachineType::Int16()) {
2713 opcode = int16_op;
2714 } else if (type == MachineType::Uint16()) {
2715 opcode = uint16_op;
2716 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2717 opcode = word32_op;
2718 } else {
2719 UNREACHABLE();
2720 }
2721
2722 VisitAtomicBinop(this, node, opcode);
2723 }
2724
// Stamps out VisitWord32Atomic{Add,Sub,And,Or,Xor} by forwarding to
// VisitWord32AtomicBinaryOperation with the width/sign-specific opcodes.
#define VISIT_ATOMIC_BINOP(op)                                   \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {  \
    VisitWord32AtomicBinaryOperation(                            \
        node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
        kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16,     \
        kWord32Atomic##op##Word32);                              \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
2738
2739 void InstructionSelector::VisitWord64AtomicBinaryOperation(
2740 Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
2741 ArchOpcode uint64_op) {
2742 ArchOpcode opcode;
2743 MachineType type = AtomicOpType(node->op());
2744 if (type == MachineType::Uint8()) {
2745 opcode = uint8_op;
2746 } else if (type == MachineType::Uint16()) {
2747 opcode = uint16_op;
2748 } else if (type == MachineType::Uint32()) {
2749 opcode = uint32_op;
2750 } else if (type == MachineType::Uint64()) {
2751 opcode = uint64_op;
2752 } else {
2753 UNREACHABLE();
2754 }
2755 VisitAtomicBinop(this, node, opcode);
2756 }
2757
// Stamps out VisitWord64Atomic{Add,Sub,And,Or,Xor} by forwarding to
// VisitWord64AtomicBinaryOperation with the width-specific opcodes.
#define VISIT_ATOMIC_BINOP(op)                                                 \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) {                \
    VisitWord64AtomicBinaryOperation(                                          \
        node, kMips64Word64Atomic##op##Uint8, kMips64Word64Atomic##op##Uint16, \
        kMips64Word64Atomic##op##Uint32, kMips64Word64Atomic##op##Uint64);     \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
2770
// Abs-with-overflow is not advertised by this backend (it is absent from
// SupportedMachineOperatorFlags), so selection must never reach these.
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  UNREACHABLE();
}
2778
// SIMD lane shapes for which generic splat/replace-lane visitors are
// instantiated below.
#define SIMD_TYPE_LIST(V) \
  V(F64x2)                \
  V(F32x4)                \
  V(I64x2)                \
  V(I32x4)                \
  V(I16x8)                \
  V(I8x16)

// Unary SIMD machine operators paired with their MIPS64 opcodes.
#define SIMD_UNOP_LIST(V)                                  \
  V(F64x2Abs, kMips64F64x2Abs)                             \
  V(F64x2Neg, kMips64F64x2Neg)                             \
  V(F64x2Sqrt, kMips64F64x2Sqrt)                           \
  V(F64x2Ceil, kMips64F64x2Ceil)                           \
  V(F64x2Floor, kMips64F64x2Floor)                         \
  V(F64x2Trunc, kMips64F64x2Trunc)                         \
  V(F64x2NearestInt, kMips64F64x2NearestInt)               \
  V(I64x2Neg, kMips64I64x2Neg)                             \
  V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4)         \
  V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4)         \
  V(F32x4Abs, kMips64F32x4Abs)                             \
  V(F32x4Neg, kMips64F32x4Neg)                             \
  V(F32x4Sqrt, kMips64F32x4Sqrt)                           \
  V(F32x4RecipApprox, kMips64F32x4RecipApprox)             \
  V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox)     \
  V(F32x4Ceil, kMips64F32x4Ceil)                           \
  V(F32x4Floor, kMips64F32x4Floor)                         \
  V(F32x4Trunc, kMips64F32x4Trunc)                         \
  V(F32x4NearestInt, kMips64F32x4NearestInt)               \
  V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4)         \
  V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4)         \
  V(I32x4Neg, kMips64I32x4Neg)                             \
  V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low)   \
  V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
  V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low)   \
  V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
  V(I32x4Abs, kMips64I32x4Abs)                             \
  V(I32x4BitMask, kMips64I32x4BitMask)                     \
  V(I16x8Neg, kMips64I16x8Neg)                             \
  V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low)   \
  V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
  V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low)   \
  V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
  V(I16x8Abs, kMips64I16x8Abs)                             \
  V(I16x8BitMask, kMips64I16x8BitMask)                     \
  V(I8x16Neg, kMips64I8x16Neg)                             \
  V(I8x16Abs, kMips64I8x16Abs)                             \
  V(I8x16BitMask, kMips64I8x16BitMask)                     \
  V(S128Not, kMips64S128Not)                               \
  V(V32x4AnyTrue, kMips64V32x4AnyTrue)                     \
  V(V32x4AllTrue, kMips64V32x4AllTrue)                     \
  V(V16x8AnyTrue, kMips64V16x8AnyTrue)                     \
  V(V16x8AllTrue, kMips64V16x8AllTrue)                     \
  V(V8x16AnyTrue, kMips64V8x16AnyTrue)                     \
  V(V8x16AllTrue, kMips64V8x16AllTrue)

// SIMD shift operators; the opcode name matches the operator name.
#define SIMD_SHIFT_OP_LIST(V) \
  V(I64x2Shl)                 \
  V(I64x2ShrS)                \
  V(I64x2ShrU)                \
  V(I32x4Shl)                 \
  V(I32x4ShrS)                \
  V(I32x4ShrU)                \
  V(I16x8Shl)                 \
  V(I16x8ShrS)                \
  V(I16x8ShrU)                \
  V(I8x16Shl)                 \
  V(I8x16ShrS)                \
  V(I8x16ShrU)

// Binary SIMD machine operators paired with their MIPS64 opcodes.
#define SIMD_BINOP_LIST(V)                               \
  V(F64x2Add, kMips64F64x2Add)                           \
  V(F64x2Sub, kMips64F64x2Sub)                           \
  V(F64x2Mul, kMips64F64x2Mul)                           \
  V(F64x2Div, kMips64F64x2Div)                           \
  V(F64x2Min, kMips64F64x2Min)                           \
  V(F64x2Max, kMips64F64x2Max)                           \
  V(F64x2Eq, kMips64F64x2Eq)                             \
  V(F64x2Ne, kMips64F64x2Ne)                             \
  V(F64x2Lt, kMips64F64x2Lt)                             \
  V(F64x2Le, kMips64F64x2Le)                             \
  V(I64x2Add, kMips64I64x2Add)                           \
  V(I64x2Sub, kMips64I64x2Sub)                           \
  V(I64x2Mul, kMips64I64x2Mul)                           \
  V(F32x4Add, kMips64F32x4Add)                           \
  V(F32x4AddHoriz, kMips64F32x4AddHoriz)                 \
  V(F32x4Sub, kMips64F32x4Sub)                           \
  V(F32x4Mul, kMips64F32x4Mul)                           \
  V(F32x4Div, kMips64F32x4Div)                           \
  V(F32x4Max, kMips64F32x4Max)                           \
  V(F32x4Min, kMips64F32x4Min)                           \
  V(F32x4Eq, kMips64F32x4Eq)                             \
  V(F32x4Ne, kMips64F32x4Ne)                             \
  V(F32x4Lt, kMips64F32x4Lt)                             \
  V(F32x4Le, kMips64F32x4Le)                             \
  V(I32x4Add, kMips64I32x4Add)                           \
  V(I32x4AddHoriz, kMips64I32x4AddHoriz)                 \
  V(I32x4Sub, kMips64I32x4Sub)                           \
  V(I32x4Mul, kMips64I32x4Mul)                           \
  V(I32x4MaxS, kMips64I32x4MaxS)                         \
  V(I32x4MinS, kMips64I32x4MinS)                         \
  V(I32x4MaxU, kMips64I32x4MaxU)                         \
  V(I32x4MinU, kMips64I32x4MinU)                         \
  V(I32x4Eq, kMips64I32x4Eq)                             \
  V(I32x4Ne, kMips64I32x4Ne)                             \
  V(I32x4GtS, kMips64I32x4GtS)                           \
  V(I32x4GeS, kMips64I32x4GeS)                           \
  V(I32x4GtU, kMips64I32x4GtU)                           \
  V(I32x4GeU, kMips64I32x4GeU)                           \
  V(I32x4DotI16x8S, kMips64I32x4DotI16x8S)               \
  V(I16x8Add, kMips64I16x8Add)                           \
  V(I16x8AddSatS, kMips64I16x8AddSatS)                   \
  V(I16x8AddSatU, kMips64I16x8AddSatU)                   \
  V(I16x8AddHoriz, kMips64I16x8AddHoriz)                 \
  V(I16x8Sub, kMips64I16x8Sub)                           \
  V(I16x8SubSatS, kMips64I16x8SubSatS)                   \
  V(I16x8SubSatU, kMips64I16x8SubSatU)                   \
  V(I16x8Mul, kMips64I16x8Mul)                           \
  V(I16x8MaxS, kMips64I16x8MaxS)                         \
  V(I16x8MinS, kMips64I16x8MinS)                         \
  V(I16x8MaxU, kMips64I16x8MaxU)                         \
  V(I16x8MinU, kMips64I16x8MinU)                         \
  V(I16x8Eq, kMips64I16x8Eq)                             \
  V(I16x8Ne, kMips64I16x8Ne)                             \
  V(I16x8GtS, kMips64I16x8GtS)                           \
  V(I16x8GeS, kMips64I16x8GeS)                           \
  V(I16x8GtU, kMips64I16x8GtU)                           \
  V(I16x8GeU, kMips64I16x8GeU)                           \
  V(I16x8RoundingAverageU, kMips64I16x8RoundingAverageU) \
  V(I16x8SConvertI32x4, kMips64I16x8SConvertI32x4)       \
  V(I16x8UConvertI32x4, kMips64I16x8UConvertI32x4)       \
  V(I8x16Add, kMips64I8x16Add)                           \
  V(I8x16AddSatS, kMips64I8x16AddSatS)                   \
  V(I8x16AddSatU, kMips64I8x16AddSatU)                   \
  V(I8x16Sub, kMips64I8x16Sub)                           \
  V(I8x16SubSatS, kMips64I8x16SubSatS)                   \
  V(I8x16SubSatU, kMips64I8x16SubSatU)                   \
  V(I8x16Mul, kMips64I8x16Mul)                           \
  V(I8x16MaxS, kMips64I8x16MaxS)                         \
  V(I8x16MinS, kMips64I8x16MinS)                         \
  V(I8x16MaxU, kMips64I8x16MaxU)                         \
  V(I8x16MinU, kMips64I8x16MinU)                         \
  V(I8x16Eq, kMips64I8x16Eq)                             \
  V(I8x16Ne, kMips64I8x16Ne)                             \
  V(I8x16GtS, kMips64I8x16GtS)                           \
  V(I8x16GeS, kMips64I8x16GeS)                           \
  V(I8x16GtU, kMips64I8x16GtU)                           \
  V(I8x16GeU, kMips64I8x16GeU)                           \
  V(I8x16RoundingAverageU, kMips64I8x16RoundingAverageU) \
  V(I8x16SConvertI16x8, kMips64I8x16SConvertI16x8)       \
  V(I8x16UConvertI16x8, kMips64I8x16UConvertI16x8)       \
  V(S128And, kMips64S128And)                             \
  V(S128Or, kMips64S128Or)                               \
  V(S128Xor, kMips64S128Xor)                             \
  V(S128AndNot, kMips64S128AndNot)
2933
VisitS128Const(Node * node)2934 void InstructionSelector::VisitS128Const(Node* node) {
2935 Mips64OperandGenerator g(this);
2936 static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
2937 uint32_t val[kUint32Immediates];
2938 memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
2939 // If all bytes are zeros or ones, avoid emitting code for generic constants
2940 bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
2941 bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
2942 val[2] == UINT32_MAX && val[3] == UINT32_MAX;
2943 InstructionOperand dst = g.DefineAsRegister(node);
2944 if (all_zeros) {
2945 Emit(kMips64S128Zero, dst);
2946 } else if (all_ones) {
2947 Emit(kMips64S128AllOnes, dst);
2948 } else {
2949 Emit(kMips64S128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
2950 g.UseImmediate(val[2]), g.UseImmediate(val[3]));
2951 }
2952 }
2953
VisitS128Zero(Node * node)2954 void InstructionSelector::VisitS128Zero(Node* node) {
2955 Mips64OperandGenerator g(this);
2956 Emit(kMips64S128Zero, g.DefineAsRegister(node));
2957 }
2958
// Splat: broadcast a scalar into every lane of the given SIMD type.
#define SIMD_VISIT_SPLAT(Type)                               \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    VisitRR(this, kMips64##Type##Splat, node);               \
  }
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT

// Lane extraction; Sign is "S"/"U" for the sub-word integer types and empty
// for the 32/64-bit types.
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)                              \
  void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
    VisitRRI(this, kMips64##Type##ExtractLane##Sign, node);              \
  }
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE

// Lane replacement: vector, lane immediate, and replacement scalar.
#define SIMD_VISIT_REPLACE_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    VisitRRIR(this, kMips64##Type##ReplaceLane, node);             \
  }
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE

// Unary SIMD ops: forwarded to the generic register-register visitor.
#define SIMD_VISIT_UNOP(Name, instruction)            \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, instruction, node);                 \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP

// SIMD shifts: forwarded to the shared shift visitor.
#define SIMD_VISIT_SHIFT_OP(Name)                     \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitSimdShift(this, kMips64##Name, node);        \
  }
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP

// Binary SIMD ops: forwarded to the generic three-register visitor.
#define SIMD_VISIT_BINOP(Name, instruction)           \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, instruction, node);                \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP

// Three-operand bitwise select, forwarded to the four-register visitor.
void InstructionSelector::VisitS128Select(Node* node) {
  VisitRRRR(this, kMips64S128Select, node);
}
3011
3012 namespace {
3013
// Maps a canonical 16-byte shuffle pattern to the architecture-specific
// opcode that implements it directly.
struct ShuffleEntry {
  uint8_t shuffle[kSimd128Size];  // Byte indices into the two concatenated
                                  // 16-byte inputs (0-31).
  ArchOpcode opcode;
};
3018
// Shuffle patterns with a direct MIPS64 lowering; consulted by
// TryMatchArchShuffle below.
static const ShuffleEntry arch_shuffles[] = {
    // 32-bit lane rearrangements.
    {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
     kMips64S32x4InterleaveRight},
    {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
     kMips64S32x4InterleaveLeft},
    {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
     kMips64S32x4PackEven},
    {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
     kMips64S32x4PackOdd},
    {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
     kMips64S32x4InterleaveEven},
    {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
     kMips64S32x4InterleaveOdd},

    // 16-bit lane rearrangements and reversals.
    {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
     kMips64S16x8InterleaveRight},
    {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
     kMips64S16x8InterleaveLeft},
    {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
     kMips64S16x8PackEven},
    {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
     kMips64S16x8PackOdd},
    {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
     kMips64S16x8InterleaveEven},
    {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
     kMips64S16x8InterleaveOdd},
    {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
     kMips64S16x4Reverse},
    {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
     kMips64S16x2Reverse},

    // 8-bit lane rearrangements and reversals.
    {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
     kMips64S8x16InterleaveRight},
    {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
     kMips64S8x16InterleaveLeft},
    {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
     kMips64S8x16PackEven},
    {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
     kMips64S8x16PackOdd},
    {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
     kMips64S8x16InterleaveEven},
    {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
     kMips64S8x16InterleaveOdd},
    {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
     kMips64S8x8Reverse},
    {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
     kMips64S8x4Reverse},
    {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
     kMips64S8x2Reverse}};
3068
TryMatchArchShuffle(const uint8_t * shuffle,const ShuffleEntry * table,size_t num_entries,bool is_swizzle,ArchOpcode * opcode)3069 bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
3070 size_t num_entries, bool is_swizzle,
3071 ArchOpcode* opcode) {
3072 uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
3073 for (size_t i = 0; i < num_entries; ++i) {
3074 const ShuffleEntry& entry = table[i];
3075 int j = 0;
3076 for (; j < kSimd128Size; ++j) {
3077 if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
3078 break;
3079 }
3080 }
3081 if (j == kSimd128Size) {
3082 *opcode = entry.opcode;
3083 return true;
3084 }
3085 }
3086 return false;
3087 }
3088
3089 } // namespace
3090
VisitI8x16Shuffle(Node * node)3091 void InstructionSelector::VisitI8x16Shuffle(Node* node) {
3092 uint8_t shuffle[kSimd128Size];
3093 bool is_swizzle;
3094 CanonicalizeShuffle(node, shuffle, &is_swizzle);
3095 uint8_t shuffle32x4[4];
3096 ArchOpcode opcode;
3097 if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
3098 is_swizzle, &opcode)) {
3099 VisitRRR(this, opcode, node);
3100 return;
3101 }
3102 Node* input0 = node->InputAt(0);
3103 Node* input1 = node->InputAt(1);
3104 uint8_t offset;
3105 Mips64OperandGenerator g(this);
3106 if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
3107 Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
3108 g.UseRegister(input0), g.UseImmediate(offset));
3109 return;
3110 }
3111 if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
3112 Emit(kMips64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
3113 g.UseRegister(input1),
3114 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
3115 return;
3116 }
3117 Emit(kMips64I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
3118 g.UseRegister(input1),
3119 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
3120 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
3121 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
3122 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
3123 }
3124
VisitI8x16Swizzle(Node * node)3125 void InstructionSelector::VisitI8x16Swizzle(Node* node) {
3126 Mips64OperandGenerator g(this);
3127 InstructionOperand temps[] = {g.TempSimd128Register()};
3128 // We don't want input 0 or input 1 to be the same as output, since we will
3129 // modify output before do the calculation.
3130 Emit(kMips64I8x16Swizzle, g.DefineAsRegister(node),
3131 g.UseUniqueRegister(node->InputAt(0)),
3132 g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
3133 }
3134
VisitSignExtendWord8ToInt32(Node * node)3135 void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
3136 Mips64OperandGenerator g(this);
3137 Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
3138 }
3139
VisitSignExtendWord16ToInt32(Node * node)3140 void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
3141 Mips64OperandGenerator g(this);
3142 Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
3143 }
3144
VisitSignExtendWord8ToInt64(Node * node)3145 void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
3146 Mips64OperandGenerator g(this);
3147 Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
3148 }
3149
VisitSignExtendWord16ToInt64(Node * node)3150 void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
3151 Mips64OperandGenerator g(this);
3152 Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
3153 }
3154
VisitSignExtendWord32ToInt64(Node * node)3155 void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
3156 Mips64OperandGenerator g(this);
3157 Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
3158 g.TempImmediate(0));
3159 }
3160
VisitF32x4Pmin(Node * node)3161 void InstructionSelector::VisitF32x4Pmin(Node* node) {
3162 VisitUniqueRRR(this, kMips64F32x4Pmin, node);
3163 }
3164
VisitF32x4Pmax(Node * node)3165 void InstructionSelector::VisitF32x4Pmax(Node* node) {
3166 VisitUniqueRRR(this, kMips64F32x4Pmax, node);
3167 }
3168
VisitF64x2Pmin(Node * node)3169 void InstructionSelector::VisitF64x2Pmin(Node* node) {
3170 VisitUniqueRRR(this, kMips64F64x2Pmin, node);
3171 }
3172
VisitF64x2Pmax(Node * node)3173 void InstructionSelector::VisitF64x2Pmax(Node* node) {
3174 VisitUniqueRRR(this, kMips64F64x2Pmax, node);
3175 }
3176
3177 // static
3178 MachineOperatorBuilder::Flags
SupportedMachineOperatorFlags()3179 InstructionSelector::SupportedMachineOperatorFlags() {
3180 MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
3181 return flags | MachineOperatorBuilder::kWord32Ctz |
3182 MachineOperatorBuilder::kWord64Ctz |
3183 MachineOperatorBuilder::kWord32Popcnt |
3184 MachineOperatorBuilder::kWord64Popcnt |
3185 MachineOperatorBuilder::kWord32ShiftIsSafe |
3186 MachineOperatorBuilder::kInt32DivIsSafe |
3187 MachineOperatorBuilder::kUint32DivIsSafe |
3188 MachineOperatorBuilder::kFloat64RoundDown |
3189 MachineOperatorBuilder::kFloat32RoundDown |
3190 MachineOperatorBuilder::kFloat64RoundUp |
3191 MachineOperatorBuilder::kFloat32RoundUp |
3192 MachineOperatorBuilder::kFloat64RoundTruncate |
3193 MachineOperatorBuilder::kFloat32RoundTruncate |
3194 MachineOperatorBuilder::kFloat64RoundTiesEven |
3195 MachineOperatorBuilder::kFloat32RoundTiesEven;
3196 }
3197
3198 // static
3199 MachineOperatorBuilder::AlignmentRequirements
AlignmentRequirements()3200 InstructionSelector::AlignmentRequirements() {
3201 if (kArchVariant == kMips64r6) {
3202 return MachineOperatorBuilder::AlignmentRequirements::
3203 FullUnalignedAccessSupport();
3204 } else {
3205 DCHECK_EQ(kMips64r2, kArchVariant);
3206 return MachineOperatorBuilder::AlignmentRequirements::
3207 NoUnalignedAccessSupport();
3208 }
3209 }
3210
3211 #undef SIMD_BINOP_LIST
3212 #undef SIMD_SHIFT_OP_LIST
3213 #undef SIMD_UNOP_LIST
3214 #undef SIMD_TYPE_LIST
3215 #undef TRACE_UNIMPL
3216 #undef TRACE
3217
3218 } // namespace compiler
3219 } // namespace internal
3220 } // namespace v8
3221