// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

// Adds MIPS64-specific methods for generating InstructionOperands.
class Mips64OperandGenerator final : public OperandGenerator {
 public:
  explicit Mips64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the zero register if the node has the immediate value zero, otherwise
  // assign a register.
  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
        (IsFloatConstant(node) &&
         (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool IsIntegerConstant(Node* node) {
    return (node->opcode() == IrOpcode::kInt32Constant) ||
           (node->opcode() == IrOpcode::kInt64Constant);
  }

  int64_t GetIntegerConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kInt32Constant) {
      return OpParameter<int32_t>(node->op());
    }
    DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
    return OpParameter<int64_t>(node->op());
  }

  bool IsFloatConstant(Node* node) {
    return (node->opcode() == IrOpcode::kFloat32Constant) ||
           (node->opcode() == IrOpcode::kFloat64Constant);
  }

  double GetFloatConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kFloat32Constant) {
      return OpParameter<float>(node->op());
    }
    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
    return OpParameter<double>(node->op());
  }

  bool CanBeImmediate(Node* node, InstructionCode mode) {
    return IsIntegerConstant(node) &&
           CanBeImmediate(GetIntegerConstantValue(node), mode);
  }

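  // The ranges below mirror the instruction encodings: 32-bit shifts take a
  // 5-bit amount and 64-bit shifts a 6-bit amount, while andi/ori/xori and
  // friends take zero-extended 16-bit immediates. Memory operands accept
  // int32 offsets, which the macro-assembler is assumed to split when they
  // do not fit the 16-bit offset field.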
  bool CanBeImmediate(int64_t value, InstructionCode opcode) {
    switch (ArchOpcodeField::decode(opcode)) {
      case kMips64Shl:
      case kMips64Sar:
      case kMips64Shr:
        return is_uint5(value);
      case kMips64Dshl:
      case kMips64Dsar:
      case kMips64Dshr:
        return is_uint6(value);
      case kMips64Add:
      case kMips64And32:
      case kMips64And:
      case kMips64Dadd:
      case kMips64Or32:
      case kMips64Or:
      case kMips64Tst:
      case kMips64Xor:
        return is_uint16(value);
      case kMips64Lb:
      case kMips64Lbu:
      case kMips64Sb:
      case kMips64Lh:
      case kMips64Lhu:
      case kMips64Sh:
      case kMips64Lw:
      case kMips64Sw:
      case kMips64Ld:
      case kMips64Sd:
      case kMips64Lwc1:
      case kMips64Swc1:
      case kMips64Ldc1:
      case kMips64Sdc1:
        return is_int32(value);
      default:
        return is_int16(value);
    }
  }

 private:
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};


static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
                    Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}

static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Mips64OperandGenerator g(selector);
  int32_t imm = OpParameter<int32_t>(node->op());
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
}

static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
                      Node* node) {
  Mips64OperandGenerator g(selector);
  int32_t imm = OpParameter<int32_t>(node->op());
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
                 g.UseRegister(node->InputAt(1)));
}

static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}

void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(
      opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
      g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
}

static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), opcode));
}

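// Recognizes Word64Sar(Load[64-bit rep](base, offset), 32), for example a
// Smi untagging sequence, so it can be replaced by a single Lw of the upper
// word (offset + 4 on little-endian targets) instead of a 64-bit load
// followed by an arithmetic shift.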
struct ExtendingLoadMatcher {
  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
    Initialize(node);
  }

  bool Matches() const { return matches_; }

  Node* base() const {
    DCHECK(Matches());
    return base_;
  }
  int64_t immediate() const {
    DCHECK(Matches());
    return immediate_;
  }
  ArchOpcode opcode() const {
    DCHECK(Matches());
    return opcode_;
  }

 private:
  bool matches_;
  InstructionSelector* selector_;
  Node* base_;
  int64_t immediate_;
  ArchOpcode opcode_;

  void Initialize(Node* node) {
    Int64BinopMatcher m(node);
    // When loading a 64-bit value and shifting by 32, we should
    // just load and sign-extend the interesting 4 bytes instead.
    // This happens, for example, when we're loading and untagging SMIs.
    DCHECK(m.IsWord64Sar());
    if (m.left().IsLoad() && m.right().Is(32) &&
        selector_->CanCover(m.node(), m.left().node())) {
      MachineRepresentation rep =
          LoadRepresentationOf(m.left().node()->op()).representation();
      DCHECK_EQ(3, ElementSizeLog2Of(rep));
      if (rep != MachineRepresentation::kTaggedSigned &&
          rep != MachineRepresentation::kTaggedPointer &&
          rep != MachineRepresentation::kTagged &&
          rep != MachineRepresentation::kWord64) {
        return;
      }

      Mips64OperandGenerator g(selector_);
      Node* load = m.left().node();
      Node* offset = load->InputAt(1);
      base_ = load->InputAt(0);
      opcode_ = kMips64Lw;
      if (g.CanBeImmediate(offset, opcode_)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset) + 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset);
#endif
        matches_ = g.CanBeImmediate(immediate_, kMips64Lw);
      }
    }
  }
};

bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
                          Node* output_node) {
  ExtendingLoadMatcher m(node, selector);
  Mips64OperandGenerator g(selector);
  if (m.Matches()) {
    InstructionOperand inputs[2];
    inputs[0] = g.UseRegister(m.base());
    InstructionCode opcode =
        m.opcode() | AddressingModeField::encode(kMode_MRI);
    DCHECK(is_int32(m.immediate()));
    inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
    InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
    selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
                   inputs);
    return true;
  }
  return false;
}

bool TryMatchImmediate(InstructionSelector* selector,
                       InstructionCode* opcode_return, Node* node,
                       size_t* input_count_return, InstructionOperand* inputs) {
  Mips64OperandGenerator g(selector);
  if (g.CanBeImmediate(node, *opcode_return)) {
    *opcode_return |= AddressingModeField::encode(kMode_MRI);
    inputs[0] = g.UseImmediate(node);
    *input_count_return = 1;
    return true;
  }
  return false;
}

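// For commutative operations a usable immediate may sit on either side, so
// the right operand is tried first and, when has_reverse_opcode is set, the
// left operand is retried against reverse_opcode with the inputs swapped.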
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode,
                       FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[2];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
                        &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (has_reverse_opcode &&
             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
  }

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure that
    // the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }

  DCHECK_NE(0u, input_count);
  DCHECK_EQ(1u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  VisitBinop(selector, node, opcode, false, kArchNop, cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  VisitBinop(selector, node, opcode, false, kArchNop);
}

void InstructionSelector::VisitStackSlot(Node* node) {
  StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
  int alignment = rep.alignment();
  int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
  OperandGenerator g(this);

  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)),
       sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}

void InstructionSelector::VisitDebugAbort(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}

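// Loads are given as a (base, index) pair. When the index is an encodable
// immediate it becomes the load's offset; otherwise the effective address is
// computed into a temporary with Dadd and the load uses offset 0.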
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
              Node* output = nullptr) {
  Mips64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  if (g.CanBeImmediate(index, opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output == nullptr ? node : output),
                   g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output == nullptr ? node : output),
                   addr_reg, g.TempImmediate(0));
  }
}

void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());

  InstructionCode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Lwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Ldc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
      break;
    case MachineRepresentation::kWord32:
      opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Ld;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMips64MsaLd;
      break;
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  if (node->opcode() == IrOpcode::kPoisonedLoad) {
    CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
    opcode |= MiscField::encode(kMemoryAccessPoisoned);
  }

  EmitLoad(this, node, opcode);
}

void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }

void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelector::VisitStore(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // TODO(mips): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kMips64Swc1;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kMips64Sdc1;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kMips64Sb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kMips64Sh;
        break;
      case MachineRepresentation::kWord32:
        opcode = kMips64Sw;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kMips64Sd;
        break;
      case MachineRepresentation::kSimd128:
        opcode = kMips64MsaSt;
        break;
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    if (g.CanBeImmediate(index, opcode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index),
           g.UseRegisterOrImmediateZero(value));
    } else {
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
    }
  }
}

void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32And(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
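      // For example, And(Shr(x, 8), 0xFFFF) becomes Ext(x, 8, 16).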
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().Value() & 0x1F;

        // Ext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Ext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t shift = base::bits::CountPopulation(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask.
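      // For example, x & 0xFFFF0000 becomes an Ins that zeroes bits [0, 16)
      // in place, without materializing the inverted mask.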
      Emit(kMips64Ins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMips64And32, true, kMips64And32);
}


void InstructionSelector::VisitWord64And(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
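      // For example, And(Shr(x, 16), 0xFFFFFFFF) becomes Dext(x, 16, 32).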
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int64 shifts use `value % 64`.
        uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);

        // Dext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Dext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

        if (lsb == 0 && mask_width == 64) {
          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
        } else {
          Emit(kMips64Dext, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
               g.TempImmediate(static_cast<int32_t>(mask_width)));
        }
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint32_t shift = base::bits::CountPopulation(~mask);
    uint32_t msb = base::bits::CountLeadingZeros64(~mask);
    if (shift != 0 && shift < 32 && msb + shift == 64) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask. Dins cannot insert bits
      // past word size, so shifts smaller than 32 are covered.
      Emit(kMips64Dins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMips64And, true, kMips64And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMips64Or32, true, kMips64Or32);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kMips64Or, true, kMips64Or);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int32BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasValue()) {
      Mips64OperandGenerator g(this);
      Emit(kMips64Nor32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
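    // For example, x ^ -1 becomes Nor(x, zero), computing ~x without
    // loading the all-ones constant.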
    Mips64OperandGenerator g(this);
    Emit(kMips64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kMips64Xor32, true, kMips64Xor32);
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int64BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasValue()) {
      Mips64OperandGenerator g(this);
      Emit(kMips64Nor, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    Mips64OperandGenerator g(this);
    Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kMips64Xor, true, kMips64Xor);
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    Mips64OperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Shl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Shl, node);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x1F;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && mleft.right().Value() != 0) {
      // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Shr, node);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasValue() && mleft.right().HasValue()) {
      Mips64OperandGenerator g(this);
      uint32_t sar = m.right().Value();
      uint32_t shl = mleft.right().Value();
      if ((sar == shl) && (sar == 16)) {
        Emit(kMips64Seh, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        Emit(kMips64Seb, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 32)) {
        Emit(kMips64Shl, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Sar, node);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kMips64Dshl, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 63)) {
    // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
    // contiguous, and the shift immediate non-zero.
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint64_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
        uint64_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
        DCHECK_NE(0u, shift);

        if ((shift + mask_width) >= 64) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Dshl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Dshl, node);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x3F;
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && mleft.right().Value() != 0) {
      // Select Dext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kMips64Dext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Dshr, node);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  if (TryEmitExtendingLoad(this, node, node)) return;
  VisitRRO(this, kMips64Dsar, node);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kMips64Ror, node);
}


void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kMips64Clz, node);
}


void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ByteSwap64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ByteSwap32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord32Ctz(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord64Ctz(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord32Popcnt(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Popcnt, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord64Popcnt(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Dpopcnt, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kMips64Dror, node);
}


void InstructionSelector::VisitWord64Clz(Node* node) {
  VisitRR(this, kMips64Dclz, node);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Lsa for (left + (left_of_right << imm)).
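  // For example, a + (b << 2) is emitted as a single shift-and-add (Lsa)
  // instead of a separate Shl and Add.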
  if (m.right().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().HasValue() && !m.left().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
      Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
      return;
    }
  }

  // Select Lsa for ((left_of_left << imm) + right).
  if (m.left().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && !m.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
      Emit(kMips64Lsa, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.TempImmediate(shift_value));
      return;
    }
  }
  VisitBinop(this, node, kMips64Add, true, kMips64Add);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // Select Dlsa for (left + (left_of_right << imm)).
  if (m.right().opcode() == IrOpcode::kWord64Shl &&
      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    if (mright.right().HasValue() && !m.left().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.TempImmediate(shift_value));
      return;
    }
  }

  // Select Dlsa for ((left_of_left << imm) + right).
  if (m.left().opcode() == IrOpcode::kWord64Shl &&
      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && !m.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.TempImmediate(shift_value));
      return;
    }
  }

  VisitBinop(this, node, kMips64Dadd, true, kMips64Dadd);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMips64Sub);
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitBinop(this, node, kMips64Dsub);
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
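  // Strength-reduce multiplication by a positive constant: x * 8 becomes
  // x << 3, x * 9 becomes Lsa(x, x, 3), and x * 7 becomes (x << 3) - x.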
  if (m.right().HasValue() && m.right().Value() > 0) {
    uint32_t value = static_cast<uint32_t>(m.right().Value());
    if (base::bits::IsPowerOfTwo(value)) {
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value - 1)) {
      Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher leftInput(left), rightInput(right);
      if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
        // Combine untagging shifts with Dmul high.
        Emit(kMips64DMulHigh, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  VisitRRR(this, kMips64Mul, node);
}


void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHigh, node);
}


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHighU, node);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(dusmil): Add optimization for shifts larger than 32.
  if (m.right().HasValue() && m.right().Value() > 0) {
    uint32_t value = static_cast<uint32_t>(m.right().Value());
    if (base::bits::IsPowerOfTwo(value)) {
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value - 1)) {
      // The Dlsa macro handles shift amounts that fall out of range.
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt32Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Ddiv.
        Emit(kMips64Ddiv, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint32Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Dmod.
        Emit(kMips64Dmod, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt64Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint64Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDS, node);
}


void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSW, node);
}


void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUw, node);
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDW, node);
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUw, node);
}


void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64TruncWS, node);
}


void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwS, node);
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
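  // For example, ChangeFloat64ToInt32(Float64RoundDown(x)) is lowered to a
  // single floor-and-convert instruction (kMips64FloorWD).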
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMips64FloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMips64CeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMips64RoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMips64TruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMips64FloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMips64CeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMips64RoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMips64TruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  VisitRR(this, kMips64TruncWD, node);
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}

void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
  VisitRR(this, kMips64TruncUlD, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}

void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  Mips64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Node* value = node->InputAt(0);
  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
    // Generate sign-extending load.
    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
    InstructionCode opcode = kArchNop;
    switch (load_rep.representation()) {
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
        break;
      case MachineRepresentation::kWord16:
        opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
        break;
      case MachineRepresentation::kWord32:
        opcode = kMips64Lw;
        break;
      default:
        UNREACHABLE();
        return;
    }
    EmitLoad(this, value, opcode, node);
  } else {
    Mips64OperandGenerator g(this);
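    // On MIPS64 a 32-bit shift (sll) sign-extends its result into the upper
    // 32 bits, so shifting by zero performs the Int32 -> Int64 extension.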
    Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
         g.TempImmediate(0));
  }
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    // 32-bit operations will write their result in a 64-bit register,
    // clearing the top 32 bits of the destination register.
1356     case IrOpcode::kUint32Div:
1357     case IrOpcode::kUint32Mod:
1358     case IrOpcode::kUint32MulHigh: {
1359       Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
1360       return;
1361     }
1362     case IrOpcode::kLoad: {
1363       LoadRepresentation load_rep = LoadRepresentationOf(value->op());
1364       if (load_rep.IsUnsigned()) {
1365         switch (load_rep.representation()) {
1366           case MachineRepresentation::kWord8:
1367           case MachineRepresentation::kWord16:
1368           case MachineRepresentation::kWord32:
1369             Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
1370             return;
1371           default:
1372             break;
1373         }
1374       }
1375       break;
1376     }
1377     default:
1378       break;
1379   }
1380   Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
1381        g.TempImmediate(0), g.TempImmediate(32));
1382 }
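
// Added note: dext with position 0 and size 32 extracts the low word and
// zero-fills the upper bits, i.e. it computes dst = src & 0xFFFFFFFF. The
// kArchNop cases above skip even that, because those producers are known to
// leave the upper 32 bits of the destination already clear.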


void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar: {
        if (TryEmitExtendingLoad(this, value, node)) {
          return;
        } else {
          Int64BinopMatcher m(value);
          if (m.right().IsInRange(32, 63)) {
            // After smi untagging there is no need for a truncate; combine
            // the shift and the truncation into a single sequence.
            Emit(kMips64Dsar, g.DefineSameAsFirst(node),
                 g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()));
            return;
          }
        }
        break;
      }
      default:
        break;
    }
  }
  Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0), g.TempImmediate(32));
}
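
// Illustrative example (added note): TruncateInt64ToInt32(Word64Sar(x, 32)),
// the usual smi-untagging pattern, becomes a single dsra by 32. An arithmetic
// right shift by 32..63 already leaves a properly sign-extended 32-bit value
// in the register, so the separate ext of the fallback path is unnecessary.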


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to the
  // corresponding instruction.
  if (CanCover(node, value) &&
      value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
    Emit(kMips64CvtSW, g.DefineAsRegister(node),
         g.UseRegister(value->InputAt(0)));
    return;
  }
  VisitRR(this, kMips64CvtSD, node);
}

void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kMips64TruncWD, node);
}

void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSL, node);
}


void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDL, node);
}


void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUl, node);
}


void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUl, node);
}


void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}


void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kMips64BitcastDL, node);
}


void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE, 0),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64BitcastLD, node);
}


void InstructionSelector::VisitFloat32Add(Node* node) {
  // Optimization with Madd.S(z, x, y) is intentionally removed.
  // See explanation for madd_s in assembler-mips64.cc.
  VisitRRR(this, kMips64AddS, node);
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  // Optimization with Madd.D(z, x, y) is intentionally removed.
  // See explanation for madd_d in assembler-mips64.cc.
  VisitRRR(this, kMips64AddD, node);
}


void InstructionSelector::VisitFloat32Sub(Node* node) {
  // Optimization with Msub.S(z, x, y) is intentionally removed.
  // See explanation for madd_s in assembler-mips64.cc.
  VisitRRR(this, kMips64SubS, node);
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  // Optimization with Msub.D(z, x, y) is intentionally removed.
  // See explanation for madd_d in assembler-mips64.cc.
  VisitRRR(this, kMips64SubD, node);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMips64MulS, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMips64MulD, node);
}


void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMips64DivS, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMips64DivD, node);
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ModD, g.DefineAsFixed(node, f0),
       g.UseFixed(node->InputAt(0), f12),
       g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
}
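
// Added note: kMips64ModD is not a single machine instruction; the code
// generator expands it into a call to the C library's fmod, which is why the
// operands are pinned to fixed FP argument registers and the instruction is
// marked as a call.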

void InstructionSelector::VisitFloat32Max(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float32Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Max(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float64Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat32Min(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float32Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Min(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float64Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMips64AbsS, node);
}


void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMips64AbsD, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtS, node);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtD, node);
}


void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMips64Float32RoundDown, node);
}


void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMips64Float64RoundDown, node);
}


void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMips64Float32RoundUp, node);
}


void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMips64Float64RoundUp, node);
}


void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float32RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float64RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}


void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float32RoundTiesEven, node);
}


void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float64RoundTiesEven, node);
}

void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kMips64NegS, node);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kMips64NegD, node);
}

void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  Mips64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
       g.UseFixed(node->InputAt(1), f4))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  Mips64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
      ->MarkAsCall();
}

void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  Mips64OperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    int slot = kCArgSlotCount;
    for (PushParameter input : (*arguments)) {
      Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
           g.TempImmediate(slot << kPointerSizeLog2));
      ++slot;
    }
  } else {
    int push_count = static_cast<int>(call_descriptor->StackParameterCount());
    if (push_count > 0) {
      // Calculate the needed space.
      int stack_size = 0;
      for (PushParameter input : (*arguments)) {
        if (input.node) {
          stack_size += input.location.GetSizeInPointers();
        }
      }
      Emit(kMips64StackClaim, g.NoOutput(),
           g.TempImmediate(stack_size << kPointerSizeLog2));
    }
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node) {
        Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
      }
    }
  }
}
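
// Illustrative example (added note): for a non-C call with three stack
// parameters, the code above emits one kMips64StackClaim covering three
// pointer-sized slots, followed by stores at offsets 0, 8 and 16 from the
// new stack pointer (kPointerSizeLog2 is 3 on this 64-bit target).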

void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  Mips64OperandGenerator g(this);

  int reverse_slot = 0;
  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node != nullptr) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      }
      Emit(kMips64Peek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
    reverse_slot += output.location.GetSizeInPointers();
  }
}

bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }

void InstructionSelector::VisitUnalignedLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Ulwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Uldc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      UNREACHABLE();
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
      break;
    case MachineRepresentation::kWord32:
      opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Uld;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMips64MsaLd;
      break;
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
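
// Added note: the unaligned opcodes (kMips64Ulhu, kMips64Ulw, kMips64Uld and
// friends) are expanded later by the code generator; on MIPS cores without
// hardware support for unaligned accesses this is typically done with paired
// partial-word loads (e.g. lwl/lwr), though the exact expansion is the code
// generator's concern, not the selector's.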

void InstructionSelector::VisitUnalignedStore(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Uswc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Usdc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      UNREACHABLE();
      break;
    case MachineRepresentation::kWord16:
      opcode = kMips64Ush;
      break;
    case MachineRepresentation::kWord32:
      opcode = kMips64Usw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Usd;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMips64MsaSt;
      break;
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}

namespace {

// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}


// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Float32BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
}


// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Float64BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
}


// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative) {
  Mips64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on the left or right side of the comparison.
  if (g.CanBeImmediate(right, opcode)) {
    if (opcode == kMips64Tst) {
      VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseImmediate(right), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseRegister(right), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseImmediate(right), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseRegister(right), cont);
      }
    }
  } else if (g.CanBeImmediate(left, opcode)) {
    if (!commutative) cont->Commute();
    if (opcode == kMips64Tst) {
      VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseImmediate(left), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseRegister(left), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseImmediate(left), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseRegister(left), cont);
      }
    }
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}

bool IsNodeUnsigned(Node* n) {
  NodeMatcher m(n);

  if (m.IsLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(n->op());
    return load_rep.IsUnsigned();
  } else if (m.IsUnalignedLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(n->op());
    return load_rep.IsUnsigned();
  } else {
    return m.IsUint32Div() || m.IsUint32LessThan() ||
           m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
           m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() ||
           m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
  }
}

// Shared routine for full word32 compare operations.
void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
                            InstructionCode opcode, FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  InstructionOperand leftOp = g.TempRegister();
  InstructionOperand rightOp = g.TempRegister();

  // Shift both operands into the upper word so that the comparison is
  // independent of how their low words were extended.
  selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
                 g.TempImmediate(32));
  selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
                 g.TempImmediate(32));

  VisitCompare(selector, opcode, leftOp, rightOp, cont);
}

void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
                                 InstructionCode opcode,
                                 FlagsContinuation* cont) {
  if (FLAG_debug_code) {
    // With --debug-code, compute both the optimized and the full comparison
    // result and assert that they agree.
    Mips64OperandGenerator g(selector);
    InstructionOperand leftOp = g.TempRegister();
    InstructionOperand rightOp = g.TempRegister();
    InstructionOperand optimizedResult = g.TempRegister();
    InstructionOperand fullResult = g.TempRegister();
    FlagsCondition condition = cont->condition();
    InstructionCode testOpcode = opcode |
                                 FlagsConditionField::encode(condition) |
                                 FlagsModeField::encode(kFlags_set);

    selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
                   g.UseRegister(node->InputAt(1)));

    selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
                   g.TempImmediate(32));
    selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
                   g.TempImmediate(32));
    selector->Emit(testOpcode, fullResult, leftOp, rightOp);

    selector->Emit(
        kMips64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
        g.TempImmediate(
            static_cast<int>(AbortReason::kUnsupportedNonPrimitiveCompare)));
  }

  VisitWordCompare(selector, node, opcode, cont, false);
}

void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  // MIPS64 doesn't support Word32 compare instructions. Instead, it relies on
  // the values in registers being correctly sign-extended and uses a Word64
  // comparison. That is correct in most cases, but it breaks down when a
  // signed operand is compared with an unsigned one. We could simulate a full
  // Word32 compare in all cases, but that would add unnecessary overhead,
  // since unsigned integers are rarely used in JavaScript. The approach taken
  // here is to detect comparisons of a signed operand with an unsigned one
  // and perform the full Word32 compare only in those cases. The solution is
  // not complete, however: it may miss cases where a full Word32 compare is
  // needed, so it is essentially a heuristic hack.
  if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
    VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
  } else {
    VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
  }
}
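
// Worked example (added note): consider the 32-bit word 0x80000000. Loaded
// as a signed value it sits in a register as 0xFFFFFFFF80000000; loaded as
// an unsigned value it sits there as 0x0000000080000000. A plain 64-bit
// compare would report the bit-identical words as unequal. The full compare
// above shifts both operands left by 32 first, producing
// 0x8000000000000000 in each case, so the 64-bit comparison again matches
// the intended 32-bit semantics.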


void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}



void EmitWordCompareZero(InstructionSelector* selector, Node* value,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  selector->EmitWithContinuation(kMips64Cmp, g.UseRegister(value),
                                 g.TempImmediate(0), cont);
}

}  // namespace

// Shared routine for word comparisons against zero.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else {
      break;
    }

    cont->Negate();
  }

  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kWord64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of
          // the actual value, or was already defined, which means it is
          // scheduled *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64Dadd, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64Dsub, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64MulOvf, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64DaddOvf, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64DsubOvf, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
      case IrOpcode::kWord64And:
        return VisitWordCompare(this, value, kMips64Tst, cont, true);
      default:
        break;
    }
  }

  // The continuation could not be combined with a compare; emit a compare
  // against 0.
  EmitWordCompareZero(this, value, cont);
}

void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  Mips64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 10 + 2 * sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 2 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        index_operand = g.TempRegister();
        Emit(kMips64Sub, index_operand, value_operand,
             g.TempImmediate(sw.min_value()));
      }
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}
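
// Worked example of the heuristic above (added note): with 10 cases spanning
// a value range of 12, the table cost is (10 + 2 * 12) + 3 * 3 = 43 and the
// lookup cost is (2 + 2 * 10) + 3 * 10 = 52, so a jump table is emitted. For
// 3 sparse cases spanning a range of 100, the table cost is 219 versus a
// lookup cost of 17, so a chain of conditional jumps wins instead.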


void InstructionSelector::VisitWord32Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWordCompareZero(m.node(), m.left().node(), &cont);
  }

  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64Dadd, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64Dadd, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64Dsub, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64Dsub, &cont);
}

void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64MulOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64MulOvf, &cont);
}

void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64DaddOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64DaddOvf, &cont);
}


void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64DsubOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64DsubOvf, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWordCompareZero(m.node(), m.left().node(), &cont);
  }

  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}


void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  VisitRR(this, kMips64Float64ExtractHighWord32, node);
}

void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  VisitRR(this, kMips64Float64SilenceNaN, node);
}

void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kMips64Float64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}


void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kMips64Float64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}

void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode =
          load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
                                   : kWord32AtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kWord32AtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}

void InstructionSelector::VisitWord32AtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kWord32AtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kWord32AtomicStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kWord32AtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}

void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpRepresentationOf(node->op());
  if (type == MachineType::Int8()) {
    opcode = kWord32AtomicExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kWord32AtomicExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kWord32AtomicExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kWord32AtomicExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kWord32AtomicExchangeWord32;
  } else {
    UNREACHABLE();
    return;
  }

  AddressingMode addressing_mode = kMode_MRI;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temp[3];
  temp[0] = g.TempRegister();
  temp[1] = g.TempRegister();
  temp[2] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
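
// Added note: the unique-register constraints and the temporaries give the
// code generator the scratch space it needs to expand this into an atomic
// read-modify-write loop (presumably ll/sc on MIPS); none of the operands
// may alias, since every input stays live across the retry loop.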

void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* old_value = node->InputAt(2);
  Node* new_value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpRepresentationOf(node->op());
  if (type == MachineType::Int8()) {
    opcode = kWord32AtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kWord32AtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kWord32AtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kWord32AtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kWord32AtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
    return;
  }

  AddressingMode addressing_mode = kMode_MRI;
  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(old_value);
  inputs[input_count++] = g.UseUniqueRegister(new_value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temp[3];
  temp[0] = g.TempRegister();
  temp[1] = g.TempRegister();
  temp[2] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 1, outputs, input_count, inputs, 3, temp);
}

void InstructionSelector::VisitWord32AtomicBinaryOperation(
    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpRepresentationOf(node->op());
  if (type == MachineType::Int8()) {
    opcode = int8_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Int16()) {
    opcode = int16_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = word32_op;
  } else {
    UNREACHABLE();
    return;
  }

  AddressingMode addressing_mode = kMode_MRI;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temps[4];
  temps[0] = g.TempRegister();
  temps[1] = g.TempRegister();
  temps[2] = g.TempRegister();
  temps[3] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 1, outputs, input_count, inputs, 4, temps);
}

#define VISIT_ATOMIC_BINOP(op)                                   \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {  \
    VisitWord32AtomicBinaryOperation(                            \
        node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
        kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16,     \
        kWord32Atomic##op##Word32);                              \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
2582 
2583 #define SIMD_TYPE_LIST(V) \
2584   V(F32x4)                \
2585   V(I32x4)                \
2586   V(I16x8)                \
2587   V(I8x16)
2588 
2589 #define SIMD_UNOP_LIST(V)                                  \
2590   V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4)         \
2591   V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4)         \
2592   V(F32x4Abs, kMips64F32x4Abs)                             \
2593   V(F32x4Neg, kMips64F32x4Neg)                             \
2594   V(F32x4RecipApprox, kMips64F32x4RecipApprox)             \
2595   V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox)     \
2596   V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4)         \
2597   V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4)         \
2598   V(I32x4Neg, kMips64I32x4Neg)                             \
2599   V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low)   \
2600   V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
2601   V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low)   \
2602   V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
2603   V(I16x8Neg, kMips64I16x8Neg)                             \
2604   V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low)   \
2605   V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
2606   V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low)   \
2607   V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
2608   V(I8x16Neg, kMips64I8x16Neg)                             \
2609   V(S128Not, kMips64S128Not)                               \
2610   V(S1x4AnyTrue, kMips64S1x4AnyTrue)                       \
2611   V(S1x4AllTrue, kMips64S1x4AllTrue)                       \
2612   V(S1x8AnyTrue, kMips64S1x8AnyTrue)                       \
2613   V(S1x8AllTrue, kMips64S1x8AllTrue)                       \
2614   V(S1x16AnyTrue, kMips64S1x16AnyTrue)                     \
2615   V(S1x16AllTrue, kMips64S1x16AllTrue)
2616 
2617 #define SIMD_SHIFT_OP_LIST(V) \
2618   V(I32x4Shl)                 \
2619   V(I32x4ShrS)                \
2620   V(I32x4ShrU)                \
2621   V(I16x8Shl)                 \
2622   V(I16x8ShrS)                \
2623   V(I16x8ShrU)                \
2624   V(I8x16Shl)                 \
2625   V(I8x16ShrS)                \
2626   V(I8x16ShrU)
2627 
#define SIMD_BINOP_LIST(V)                         \
  V(F32x4Add, kMips64F32x4Add)                     \
  V(F32x4AddHoriz, kMips64F32x4AddHoriz)           \
  V(F32x4Sub, kMips64F32x4Sub)                     \
  V(F32x4Mul, kMips64F32x4Mul)                     \
  V(F32x4Max, kMips64F32x4Max)                     \
  V(F32x4Min, kMips64F32x4Min)                     \
  V(F32x4Eq, kMips64F32x4Eq)                       \
  V(F32x4Ne, kMips64F32x4Ne)                       \
  V(F32x4Lt, kMips64F32x4Lt)                       \
  V(F32x4Le, kMips64F32x4Le)                       \
  V(I32x4Add, kMips64I32x4Add)                     \
  V(I32x4AddHoriz, kMips64I32x4AddHoriz)           \
  V(I32x4Sub, kMips64I32x4Sub)                     \
  V(I32x4Mul, kMips64I32x4Mul)                     \
  V(I32x4MaxS, kMips64I32x4MaxS)                   \
  V(I32x4MinS, kMips64I32x4MinS)                   \
  V(I32x4MaxU, kMips64I32x4MaxU)                   \
  V(I32x4MinU, kMips64I32x4MinU)                   \
  V(I32x4Eq, kMips64I32x4Eq)                       \
  V(I32x4Ne, kMips64I32x4Ne)                       \
  V(I32x4GtS, kMips64I32x4GtS)                     \
  V(I32x4GeS, kMips64I32x4GeS)                     \
  V(I32x4GtU, kMips64I32x4GtU)                     \
  V(I32x4GeU, kMips64I32x4GeU)                     \
  V(I16x8Add, kMips64I16x8Add)                     \
  V(I16x8AddSaturateS, kMips64I16x8AddSaturateS)   \
  V(I16x8AddSaturateU, kMips64I16x8AddSaturateU)   \
  V(I16x8AddHoriz, kMips64I16x8AddHoriz)           \
  V(I16x8Sub, kMips64I16x8Sub)                     \
  V(I16x8SubSaturateS, kMips64I16x8SubSaturateS)   \
  V(I16x8SubSaturateU, kMips64I16x8SubSaturateU)   \
  V(I16x8Mul, kMips64I16x8Mul)                     \
  V(I16x8MaxS, kMips64I16x8MaxS)                   \
  V(I16x8MinS, kMips64I16x8MinS)                   \
  V(I16x8MaxU, kMips64I16x8MaxU)                   \
  V(I16x8MinU, kMips64I16x8MinU)                   \
  V(I16x8Eq, kMips64I16x8Eq)                       \
  V(I16x8Ne, kMips64I16x8Ne)                       \
  V(I16x8GtS, kMips64I16x8GtS)                     \
  V(I16x8GeS, kMips64I16x8GeS)                     \
  V(I16x8GtU, kMips64I16x8GtU)                     \
  V(I16x8GeU, kMips64I16x8GeU)                     \
  V(I16x8SConvertI32x4, kMips64I16x8SConvertI32x4) \
  V(I16x8UConvertI32x4, kMips64I16x8UConvertI32x4) \
  V(I8x16Add, kMips64I8x16Add)                     \
  V(I8x16AddSaturateS, kMips64I8x16AddSaturateS)   \
  V(I8x16AddSaturateU, kMips64I8x16AddSaturateU)   \
  V(I8x16Sub, kMips64I8x16Sub)                     \
  V(I8x16SubSaturateS, kMips64I8x16SubSaturateS)   \
  V(I8x16SubSaturateU, kMips64I8x16SubSaturateU)   \
  V(I8x16Mul, kMips64I8x16Mul)                     \
  V(I8x16MaxS, kMips64I8x16MaxS)                   \
  V(I8x16MinS, kMips64I8x16MinS)                   \
  V(I8x16MaxU, kMips64I8x16MaxU)                   \
  V(I8x16MinU, kMips64I8x16MinU)                   \
  V(I8x16Eq, kMips64I8x16Eq)                       \
  V(I8x16Ne, kMips64I8x16Ne)                       \
  V(I8x16GtS, kMips64I8x16GtS)                     \
  V(I8x16GeS, kMips64I8x16GeS)                     \
  V(I8x16GtU, kMips64I8x16GtU)                     \
  V(I8x16GeU, kMips64I8x16GeU)                     \
  V(I8x16SConvertI16x8, kMips64I8x16SConvertI16x8) \
  V(I8x16UConvertI16x8, kMips64I8x16UConvertI16x8) \
  V(S128And, kMips64S128And)                       \
  V(S128Or, kMips64S128Or)                         \
  V(S128Xor, kMips64S128Xor)

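// S128Zero has no inputs; the code generator can materialize the zero
// directly (e.g. by xoring the destination register with itself).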
void InstructionSelector::VisitS128Zero(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
}

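// Splat: replicate a scalar input into every lane of the result vector.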
#define SIMD_VISIT_SPLAT(Type)                               \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    VisitRR(this, kMips64##Type##Splat, node);               \
  }
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT

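// ExtractLane: read the lane selected by the immediate lane index into a
// scalar register.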
#define SIMD_VISIT_EXTRACT_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
    VisitRRI(this, kMips64##Type##ExtractLane, node);              \
  }
SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
#undef SIMD_VISIT_EXTRACT_LANE

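// ReplaceLane: copy the input vector, overwriting the lane selected by the
// immediate lane index with a scalar value.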
#define SIMD_VISIT_REPLACE_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    VisitRRIR(this, kMips64##Type##ReplaceLane, node);             \
  }
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE

#define SIMD_VISIT_UNOP(Name, instruction)            \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, instruction, node);                 \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP

#define SIMD_VISIT_SHIFT_OP(Name)                     \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRI(this, kMips64##Name, node);              \
  }
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP

#define SIMD_VISIT_BINOP(Name, instruction)           \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, instruction, node);                \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP

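// S128Select(mask, if_true, if_false) performs a bitwise lane select:
// result = (mask & if_true) | (~mask & if_false).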
void InstructionSelector::VisitS128Select(Node* node) {
  VisitRRRR(this, kMips64S128Select, node);
}

namespace {

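// Shuffles that map onto a single MIPS64 MSA instruction. Each entry is a
// list of byte indices into the concatenation of the two input vectors
// (0-15 select from the first input, 16-31 from the second); e.g. the first
// entry interleaves the low two 32-bit words of the inputs.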
struct ShuffleEntry {
  uint8_t shuffle[kSimd128Size];
  ArchOpcode opcode;
};

static const ShuffleEntry arch_shuffles[] = {
    {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
     kMips64S32x4InterleaveRight},
    {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
     kMips64S32x4InterleaveLeft},
    {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
     kMips64S32x4PackEven},
    {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
     kMips64S32x4PackOdd},
    {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
     kMips64S32x4InterleaveEven},
    {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
     kMips64S32x4InterleaveOdd},

    {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
     kMips64S16x8InterleaveRight},
    {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
     kMips64S16x8InterleaveLeft},
    {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
     kMips64S16x8PackEven},
    {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
     kMips64S16x8PackOdd},
    {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
     kMips64S16x8InterleaveEven},
    {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
     kMips64S16x8InterleaveOdd},
    {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
     kMips64S16x4Reverse},
    {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
     kMips64S16x2Reverse},

    {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
     kMips64S8x16InterleaveRight},
    {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
     kMips64S8x16InterleaveLeft},
    {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
     kMips64S8x16PackEven},
    {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
     kMips64S8x16PackOdd},
    {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
     kMips64S8x16InterleaveEven},
    {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
     kMips64S8x16InterleaveOdd},
    {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
     kMips64S8x8Reverse},
    {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
     kMips64S8x4Reverse},
    {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
     kMips64S8x2Reverse}};

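// Returns true and sets |*opcode| if |shuffle| matches one of the entries in
// |table|. Indices are compared under |mask|, which clears their high bit
// when the shuffle is unary (both operands are the same vector), so that
// input selection is ignored in that case.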
bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
                         size_t num_entries, uint8_t mask, ArchOpcode* opcode) {
  for (size_t i = 0; i < num_entries; ++i) {
    const ShuffleEntry& entry = table[i];
    int j = 0;
    for (; j < kSimd128Size; ++j) {
      if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
        break;
      }
    }
    if (j == kSimd128Size) {
      *opcode = entry.opcode;
      return true;
    }
  }
  return false;
}

}  // namespace

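// Shuffle lowering strategy: first try to match a single arch-specific
// shuffle instruction, then an extract-and-concatenate of the two inputs,
// then a word-granular (32x4) shuffle, and finally fall back to the fully
// generic byte shuffle.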
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
  const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
  uint8_t mask = CanonicalizeShuffle(node);
  uint8_t shuffle32x4[4];
  ArchOpcode opcode;
  if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
                          mask, &opcode)) {
    VisitRRR(this, opcode, node);
    return;
  }
  uint8_t offset;
  Mips64OperandGenerator g(this);
  if (TryMatchConcat(shuffle, mask, &offset)) {
    Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)),
         g.UseImmediate(offset));
    return;
  }
  if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
    Emit(kMips64S32x4Shuffle, g.DefineAsRegister(node),
         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
         g.UseImmediate(Pack4Lanes(shuffle32x4, mask)));
    return;
  }
  Emit(kMips64S8x16Shuffle, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
       g.UseImmediate(Pack4Lanes(shuffle, mask)),
       g.UseImmediate(Pack4Lanes(shuffle + 4, mask)),
       g.UseImmediate(Pack4Lanes(shuffle + 8, mask)),
       g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}

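// MIPS64r2 and newer provide seb/seh to sign-extend a byte/halfword in a
// register. Since 32-bit values are kept sign-extended in 64-bit registers
// on MIPS64, the same instructions serve the *ToInt64 variants below.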
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

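// On MIPS64, 32-bit shift instructions sign-extend their result to 64 bits,
// so shifting left by zero is a cheap word-to-doubleword sign extension.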
void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0));
}

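// The flags returned here advertise the machine operators this backend
// implements directly.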
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
  return flags | MachineOperatorBuilder::kWord32Ctz |
         MachineOperatorBuilder::kWord64Ctz |
         MachineOperatorBuilder::kWord32Popcnt |
         MachineOperatorBuilder::kWord64Popcnt |
         MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesEven |
         MachineOperatorBuilder::kFloat32RoundTiesEven |
         MachineOperatorBuilder::kWord32ReverseBytes |
         MachineOperatorBuilder::kWord64ReverseBytes;
}

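// MIPS64r6 supports unaligned memory accesses in hardware; r2 does not, so
// unaligned accesses there must go through the dedicated unaligned
// load/store paths.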
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  if (kArchVariant == kMips64r6) {
    return MachineOperatorBuilder::AlignmentRequirements::
        FullUnalignedAccessSupport();
  } else {
    DCHECK_EQ(kMips64r2, kArchVariant);
    return MachineOperatorBuilder::AlignmentRequirements::
        NoUnalignedAccessSupport();
  }
}

#undef SIMD_BINOP_LIST
#undef SIMD_SHIFT_OP_LIST
#undef SIMD_UNOP_LIST
#undef SIMD_TYPE_LIST
#undef TRACE_UNIMPL
#undef TRACE

}  // namespace compiler
}  // namespace internal
}  // namespace v8