1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/base/bits.h"
6 #include "src/compiler/backend/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8 #include "src/compiler/node-properties.h"
9 
10 namespace v8 {
11 namespace internal {
12 namespace compiler {
13 
// Diagnostic trace emitted when an unimplemented selection path is reached.
#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

// General-purpose instruction-selection trace output.
#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
18 
19 // Adds Mips-specific methods for generating InstructionOperands.
20 class MipsOperandGenerator final : public OperandGenerator {
21  public:
MipsOperandGenerator(InstructionSelector * selector)22   explicit MipsOperandGenerator(InstructionSelector* selector)
23       : OperandGenerator(selector) {}
24 
UseOperand(Node * node,InstructionCode opcode)25   InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
26     if (CanBeImmediate(node, opcode)) {
27       return UseImmediate(node);
28     }
29     return UseRegister(node);
30   }
31 
32   // Use the zero register if the node has the immediate value zero, otherwise
33   // assign a register.
UseRegisterOrImmediateZero(Node * node)34   InstructionOperand UseRegisterOrImmediateZero(Node* node) {
35     if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
36         (IsFloatConstant(node) &&
37          (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
38       return UseImmediate(node);
39     }
40     return UseRegister(node);
41   }
42 
IsIntegerConstant(Node * node)43   bool IsIntegerConstant(Node* node) {
44     return (node->opcode() == IrOpcode::kInt32Constant);
45   }
46 
GetIntegerConstantValue(Node * node)47   int64_t GetIntegerConstantValue(Node* node) {
48     DCHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
49     return OpParameter<int32_t>(node->op());
50   }
51 
IsFloatConstant(Node * node)52   bool IsFloatConstant(Node* node) {
53     return (node->opcode() == IrOpcode::kFloat32Constant) ||
54            (node->opcode() == IrOpcode::kFloat64Constant);
55   }
56 
GetFloatConstantValue(Node * node)57   double GetFloatConstantValue(Node* node) {
58     if (node->opcode() == IrOpcode::kFloat32Constant) {
59       return OpParameter<float>(node->op());
60     }
61     DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
62     return OpParameter<double>(node->op());
63   }
64 
CanBeImmediate(Node * node,InstructionCode opcode)65   bool CanBeImmediate(Node* node, InstructionCode opcode) {
66     Int32Matcher m(node);
67     if (!m.HasValue()) return false;
68     int32_t value = m.Value();
69     switch (ArchOpcodeField::decode(opcode)) {
70       case kMipsShl:
71       case kMipsSar:
72       case kMipsShr:
73         return is_uint5(value);
74       case kMipsAdd:
75       case kMipsAnd:
76       case kMipsOr:
77       case kMipsTst:
78       case kMipsSub:
79       case kMipsXor:
80         return is_uint16(value);
81       case kMipsLb:
82       case kMipsLbu:
83       case kMipsSb:
84       case kMipsLh:
85       case kMipsLhu:
86       case kMipsSh:
87       case kMipsLw:
88       case kMipsSw:
89       case kMipsLwc1:
90       case kMipsSwc1:
91       case kMipsLdc1:
92       case kMipsSdc1:
93         // true even for 32b values, offsets > 16b
94         // are handled in assembler-mips.cc
95         return is_int32(value);
96       default:
97         return is_int16(value);
98     }
99   }
100 
101  private:
ImmediateFitsAddrMode1Instruction(int32_t imm) const102   bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
103     TRACE_UNIMPL();
104     return false;
105   }
106 };
107 
VisitRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)108 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
109                      Node* node) {
110   MipsOperandGenerator g(selector);
111   selector->Emit(opcode, g.DefineAsRegister(node),
112                  g.UseRegister(node->InputAt(0)),
113                  g.UseRegister(node->InputAt(1)));
114 }
115 
VisitRRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)116 void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
117   MipsOperandGenerator g(selector);
118   selector->Emit(
119       opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
120       g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
121 }
122 
VisitRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)123 static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
124                     Node* node) {
125   MipsOperandGenerator g(selector);
126   selector->Emit(opcode, g.DefineAsRegister(node),
127                  g.UseRegister(node->InputAt(0)));
128 }
129 
VisitRRI(InstructionSelector * selector,ArchOpcode opcode,Node * node)130 static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
131                      Node* node) {
132   MipsOperandGenerator g(selector);
133   int32_t imm = OpParameter<int32_t>(node->op());
134   selector->Emit(opcode, g.DefineAsRegister(node),
135                  g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
136 }
137 
VisitRRIR(InstructionSelector * selector,ArchOpcode opcode,Node * node)138 static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
139                       Node* node) {
140   MipsOperandGenerator g(selector);
141   int32_t imm = OpParameter<int32_t>(node->op());
142   selector->Emit(opcode, g.DefineAsRegister(node),
143                  g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
144                  g.UseRegister(node->InputAt(1)));
145 }
146 
VisitRRO(InstructionSelector * selector,ArchOpcode opcode,Node * node)147 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
148                      Node* node) {
149   MipsOperandGenerator g(selector);
150   selector->Emit(opcode, g.DefineAsRegister(node),
151                  g.UseRegister(node->InputAt(0)),
152                  g.UseOperand(node->InputAt(1), opcode));
153 }
154 
TryMatchImmediate(InstructionSelector * selector,InstructionCode * opcode_return,Node * node,size_t * input_count_return,InstructionOperand * inputs)155 bool TryMatchImmediate(InstructionSelector* selector,
156                        InstructionCode* opcode_return, Node* node,
157                        size_t* input_count_return, InstructionOperand* inputs) {
158   MipsOperandGenerator g(selector);
159   if (g.CanBeImmediate(node, *opcode_return)) {
160     *opcode_return |= AddressingModeField::encode(kMode_MRI);
161     inputs[0] = g.UseImmediate(node);
162     *input_count_return = 1;
163     return true;
164   }
165   return false;
166 }
167 
VisitBinop(InstructionSelector * selector,Node * node,InstructionCode opcode,bool has_reverse_opcode,InstructionCode reverse_opcode,FlagsContinuation * cont)168 static void VisitBinop(InstructionSelector* selector, Node* node,
169                        InstructionCode opcode, bool has_reverse_opcode,
170                        InstructionCode reverse_opcode,
171                        FlagsContinuation* cont) {
172   MipsOperandGenerator g(selector);
173   Int32BinopMatcher m(node);
174   InstructionOperand inputs[2];
175   size_t input_count = 0;
176   InstructionOperand outputs[1];
177   size_t output_count = 0;
178 
179   if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
180                         &inputs[1])) {
181     inputs[0] = g.UseRegister(m.left().node());
182     input_count++;
183   } else if (has_reverse_opcode &&
184              TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
185                                &input_count, &inputs[1])) {
186     inputs[0] = g.UseRegister(m.right().node());
187     opcode = reverse_opcode;
188     input_count++;
189   } else {
190     inputs[input_count++] = g.UseRegister(m.left().node());
191     inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
192   }
193 
194   if (cont->IsDeoptimize()) {
195     // If we can deoptimize as a result of the binop, we need to make sure that
196     // the deopt inputs are not overwritten by the binop result. One way
197     // to achieve that is to declare the output register as same-as-first.
198     outputs[output_count++] = g.DefineSameAsFirst(node);
199   } else {
200     outputs[output_count++] = g.DefineAsRegister(node);
201   }
202 
203   DCHECK_NE(0u, input_count);
204   DCHECK_EQ(1u, output_count);
205   DCHECK_GE(arraysize(inputs), input_count);
206   DCHECK_GE(arraysize(outputs), output_count);
207 
208   selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
209                                  inputs, cont);
210 }
211 
VisitBinop(InstructionSelector * selector,Node * node,InstructionCode opcode,bool has_reverse_opcode,InstructionCode reverse_opcode)212 static void VisitBinop(InstructionSelector* selector, Node* node,
213                        InstructionCode opcode, bool has_reverse_opcode,
214                        InstructionCode reverse_opcode) {
215   FlagsContinuation cont;
216   VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
217 }
218 
VisitBinop(InstructionSelector * selector,Node * node,InstructionCode opcode,FlagsContinuation * cont)219 static void VisitBinop(InstructionSelector* selector, Node* node,
220                        InstructionCode opcode, FlagsContinuation* cont) {
221   VisitBinop(selector, node, opcode, false, kArchNop, cont);
222 }
223 
VisitBinop(InstructionSelector * selector,Node * node,InstructionCode opcode)224 static void VisitBinop(InstructionSelector* selector, Node* node,
225                        InstructionCode opcode) {
226   VisitBinop(selector, node, opcode, false, kArchNop);
227 }
228 
VisitPairAtomicBinop(InstructionSelector * selector,Node * node,ArchOpcode opcode)229 static void VisitPairAtomicBinop(InstructionSelector* selector, Node* node,
230                                  ArchOpcode opcode) {
231   MipsOperandGenerator g(selector);
232   Node* base = node->InputAt(0);
233   Node* index = node->InputAt(1);
234   Node* value = node->InputAt(2);
235   Node* value_high = node->InputAt(3);
236   AddressingMode addressing_mode = kMode_None;
237   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
238   InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
239                                  g.UseFixed(value, a1),
240                                  g.UseFixed(value_high, a2)};
241   InstructionOperand outputs[2];
242   size_t output_count = 0;
243   InstructionOperand temps[3];
244   size_t temp_count = 0;
245   temps[temp_count++] = g.TempRegister(a0);
246 
247   Node* projection0 = NodeProperties::FindProjection(node, 0);
248   Node* projection1 = NodeProperties::FindProjection(node, 1);
249   if (projection0) {
250     outputs[output_count++] = g.DefineAsFixed(projection0, v0);
251   } else {
252     temps[temp_count++] = g.TempRegister(v0);
253   }
254   if (projection1) {
255     outputs[output_count++] = g.DefineAsFixed(projection1, v1);
256   } else {
257     temps[temp_count++] = g.TempRegister(v1);
258   }
259   selector->Emit(code, output_count, outputs, arraysize(inputs), inputs,
260                  temp_count, temps);
261 }
262 
VisitStackSlot(Node * node)263 void InstructionSelector::VisitStackSlot(Node* node) {
264   StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
265   int alignment = rep.alignment();
266   int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
267   OperandGenerator g(this);
268 
269   Emit(kArchStackSlot, g.DefineAsRegister(node),
270        sequence()->AddImmediate(Constant(slot)),
271        sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
272 }
273 
VisitAbortCSAAssert(Node * node)274 void InstructionSelector::VisitAbortCSAAssert(Node* node) {
275   MipsOperandGenerator g(this);
276   Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
277 }
278 
VisitLoadTransform(Node * node)279 void InstructionSelector::VisitLoadTransform(Node* node) {
280   LoadTransformParameters params = LoadTransformParametersOf(node->op());
281   MipsOperandGenerator g(this);
282   Node* base = node->InputAt(0);
283   Node* index = node->InputAt(1);
284 
285   InstructionCode opcode = kArchNop;
286   switch (params.transformation) {
287     case LoadTransformation::kS8x16LoadSplat:
288       opcode = kMipsS8x16LoadSplat;
289       break;
290     case LoadTransformation::kS16x8LoadSplat:
291       opcode = kMipsS16x8LoadSplat;
292       break;
293     case LoadTransformation::kS32x4LoadSplat:
294       opcode = kMipsS32x4LoadSplat;
295       break;
296     case LoadTransformation::kS64x2LoadSplat:
297       opcode = kMipsS64x2LoadSplat;
298       break;
299     case LoadTransformation::kI16x8Load8x8S:
300       opcode = kMipsI16x8Load8x8S;
301       break;
302     case LoadTransformation::kI16x8Load8x8U:
303       opcode = kMipsI16x8Load8x8U;
304       break;
305     case LoadTransformation::kI32x4Load16x4S:
306       opcode = kMipsI32x4Load16x4S;
307       break;
308     case LoadTransformation::kI32x4Load16x4U:
309       opcode = kMipsI32x4Load16x4U;
310       break;
311     case LoadTransformation::kI64x2Load32x2S:
312       opcode = kMipsI64x2Load32x2S;
313       break;
314     case LoadTransformation::kI64x2Load32x2U:
315       opcode = kMipsI64x2Load32x2U;
316       break;
317     default:
318       UNIMPLEMENTED();
319   }
320 
321   if (g.CanBeImmediate(index, opcode)) {
322     Emit(opcode | AddressingModeField::encode(kMode_MRI),
323          g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
324   } else {
325     InstructionOperand addr_reg = g.TempRegister();
326     Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
327          g.UseRegister(index), g.UseRegister(base));
328     // Emit desired load opcode, using temp addr_reg.
329     Emit(opcode | AddressingModeField::encode(kMode_MRI),
330          g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
331   }
332 }
333 
VisitLoad(Node * node)334 void InstructionSelector::VisitLoad(Node* node) {
335   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
336   MipsOperandGenerator g(this);
337   Node* base = node->InputAt(0);
338   Node* index = node->InputAt(1);
339 
340   InstructionCode opcode = kArchNop;
341   switch (load_rep.representation()) {
342     case MachineRepresentation::kFloat32:
343       opcode = kMipsLwc1;
344       break;
345     case MachineRepresentation::kFloat64:
346       opcode = kMipsLdc1;
347       break;
348     case MachineRepresentation::kBit:  // Fall through.
349     case MachineRepresentation::kWord8:
350       opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
351       break;
352     case MachineRepresentation::kWord16:
353       opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
354       break;
355     case MachineRepresentation::kTaggedSigned:   // Fall through.
356     case MachineRepresentation::kTaggedPointer:  // Fall through.
357     case MachineRepresentation::kTagged:         // Fall through.
358     case MachineRepresentation::kWord32:
359       opcode = kMipsLw;
360       break;
361     case MachineRepresentation::kSimd128:
362       opcode = kMipsMsaLd;
363       break;
364     case MachineRepresentation::kCompressedPointer:  // Fall through.
365     case MachineRepresentation::kCompressed:         // Fall through.
366     case MachineRepresentation::kWord64:             // Fall through.
367     case MachineRepresentation::kNone:
368       UNREACHABLE();
369   }
370   if (node->opcode() == IrOpcode::kPoisonedLoad) {
371     CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
372     opcode |= MiscField::encode(kMemoryAccessPoisoned);
373   }
374 
375   if (g.CanBeImmediate(index, opcode)) {
376     Emit(opcode | AddressingModeField::encode(kMode_MRI),
377          g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
378   } else {
379     InstructionOperand addr_reg = g.TempRegister();
380     Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
381          g.UseRegister(index), g.UseRegister(base));
382     // Emit desired load opcode, using temp addr_reg.
383     Emit(opcode | AddressingModeField::encode(kMode_MRI),
384          g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
385   }
386 }
387 
VisitPoisonedLoad(Node * node)388 void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
389 
VisitProtectedLoad(Node * node)390 void InstructionSelector::VisitProtectedLoad(Node* node) {
391   // TODO(eholk)
392   UNIMPLEMENTED();
393 }
394 
VisitStore(Node * node)395 void InstructionSelector::VisitStore(Node* node) {
396   MipsOperandGenerator g(this);
397   Node* base = node->InputAt(0);
398   Node* index = node->InputAt(1);
399   Node* value = node->InputAt(2);
400 
401   StoreRepresentation store_rep = StoreRepresentationOf(node->op());
402   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
403   MachineRepresentation rep = store_rep.representation();
404 
405   // TODO(mips): I guess this could be done in a better way.
406   if (write_barrier_kind != kNoWriteBarrier &&
407       V8_LIKELY(!FLAG_disable_write_barriers)) {
408     DCHECK(CanBeTaggedPointer(rep));
409     InstructionOperand inputs[3];
410     size_t input_count = 0;
411     inputs[input_count++] = g.UseUniqueRegister(base);
412     inputs[input_count++] = g.UseUniqueRegister(index);
413     inputs[input_count++] = g.UseUniqueRegister(value);
414     RecordWriteMode record_write_mode =
415         WriteBarrierKindToRecordWriteMode(write_barrier_kind);
416     InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
417     size_t const temp_count = arraysize(temps);
418     InstructionCode code = kArchStoreWithWriteBarrier;
419     code |= MiscField::encode(static_cast<int>(record_write_mode));
420     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
421   } else {
422     ArchOpcode opcode = kArchNop;
423     switch (rep) {
424       case MachineRepresentation::kFloat32:
425         opcode = kMipsSwc1;
426         break;
427       case MachineRepresentation::kFloat64:
428         opcode = kMipsSdc1;
429         break;
430       case MachineRepresentation::kBit:  // Fall through.
431       case MachineRepresentation::kWord8:
432         opcode = kMipsSb;
433         break;
434       case MachineRepresentation::kWord16:
435         opcode = kMipsSh;
436         break;
437       case MachineRepresentation::kTaggedSigned:   // Fall through.
438       case MachineRepresentation::kTaggedPointer:  // Fall through.
439       case MachineRepresentation::kTagged:         // Fall through.
440       case MachineRepresentation::kWord32:
441         opcode = kMipsSw;
442         break;
443       case MachineRepresentation::kSimd128:
444         opcode = kMipsMsaSt;
445         break;
446       case MachineRepresentation::kCompressedPointer:  // Fall through.
447       case MachineRepresentation::kCompressed:         // Fall through.
448       case MachineRepresentation::kWord64:             // Fall through.
449       case MachineRepresentation::kNone:
450         UNREACHABLE();
451         return;
452     }
453 
454     if (g.CanBeImmediate(index, opcode)) {
455       Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
456            g.UseRegister(base), g.UseImmediate(index),
457            g.UseRegisterOrImmediateZero(value));
458     } else {
459       InstructionOperand addr_reg = g.TempRegister();
460       Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
461            g.UseRegister(index), g.UseRegister(base));
462       // Emit desired store opcode, using temp addr_reg.
463       Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
464            addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
465     }
466   }
467 }
468 
VisitProtectedStore(Node * node)469 void InstructionSelector::VisitProtectedStore(Node* node) {
470   // TODO(eholk)
471   UNIMPLEMENTED();
472 }
473 
VisitWord32And(Node * node)474 void InstructionSelector::VisitWord32And(Node* node) {
475   MipsOperandGenerator g(this);
476   Int32BinopMatcher m(node);
477   if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
478       m.right().HasValue()) {
479     uint32_t mask = m.right().Value();
480     uint32_t mask_width = base::bits::CountPopulation(mask);
481     uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
482     if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
483       // The mask must be contiguous, and occupy the least-significant bits.
484       DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
485 
486       // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
487       // significant bits.
488       Int32BinopMatcher mleft(m.left().node());
489       if (mleft.right().HasValue()) {
490         // Any shift value can match; int32 shifts use `value % 32`.
491         uint32_t lsb = mleft.right().Value() & 0x1F;
492 
493         // Ext cannot extract bits past the register size, however since
494         // shifting the original value would have introduced some zeros we can
495         // still use Ext with a smaller mask and the remaining bits will be
496         // zeros.
497         if (lsb + mask_width > 32) mask_width = 32 - lsb;
498 
499         if (lsb == 0 && mask_width == 32) {
500           Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
501         } else {
502           Emit(kMipsExt, g.DefineAsRegister(node),
503                g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
504                g.TempImmediate(mask_width));
505         }
506         return;
507       }
508       // Other cases fall through to the normal And operation.
509     }
510   }
511   if (m.right().HasValue()) {
512     uint32_t mask = m.right().Value();
513     uint32_t shift = base::bits::CountPopulation(~mask);
514     uint32_t msb = base::bits::CountLeadingZeros32(~mask);
515     if (shift != 0 && shift != 32 && msb + shift == 32) {
516       // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
517       // and remove constant loading of invereted mask.
518       Emit(kMipsIns, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
519            g.TempImmediate(0), g.TempImmediate(shift));
520       return;
521     }
522   }
523   VisitBinop(this, node, kMipsAnd, true, kMipsAnd);
524 }
525 
VisitWord32Or(Node * node)526 void InstructionSelector::VisitWord32Or(Node* node) {
527   VisitBinop(this, node, kMipsOr, true, kMipsOr);
528 }
529 
VisitWord32Xor(Node * node)530 void InstructionSelector::VisitWord32Xor(Node* node) {
531   Int32BinopMatcher m(node);
532   if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
533       m.right().Is(-1)) {
534     Int32BinopMatcher mleft(m.left().node());
535     if (!mleft.right().HasValue()) {
536       MipsOperandGenerator g(this);
537       Emit(kMipsNor, g.DefineAsRegister(node),
538            g.UseRegister(mleft.left().node()),
539            g.UseRegister(mleft.right().node()));
540       return;
541     }
542   }
543   if (m.right().Is(-1)) {
544     // Use Nor for bit negation and eliminate constant loading for xori.
545     MipsOperandGenerator g(this);
546     Emit(kMipsNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
547          g.TempImmediate(0));
548     return;
549   }
550   VisitBinop(this, node, kMipsXor, true, kMipsXor);
551 }
552 
VisitWord32Shl(Node * node)553 void InstructionSelector::VisitWord32Shl(Node* node) {
554   Int32BinopMatcher m(node);
555   if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
556       m.right().IsInRange(1, 31)) {
557     MipsOperandGenerator g(this);
558     Int32BinopMatcher mleft(m.left().node());
559     // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
560     // contiguous, and the shift immediate non-zero.
561     if (mleft.right().HasValue()) {
562       uint32_t mask = mleft.right().Value();
563       uint32_t mask_width = base::bits::CountPopulation(mask);
564       uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
565       if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
566         uint32_t shift = m.right().Value();
567         DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
568         DCHECK_NE(0u, shift);
569         if ((shift + mask_width) >= 32) {
570           // If the mask is contiguous and reaches or extends beyond the top
571           // bit, only the shift is needed.
572           Emit(kMipsShl, g.DefineAsRegister(node),
573                g.UseRegister(mleft.left().node()),
574                g.UseImmediate(m.right().node()));
575           return;
576         }
577       }
578     }
579   }
580   VisitRRO(this, kMipsShl, node);
581 }
582 
VisitWord32Shr(Node * node)583 void InstructionSelector::VisitWord32Shr(Node* node) {
584   Int32BinopMatcher m(node);
585   if (m.left().IsWord32And() && m.right().HasValue()) {
586     uint32_t lsb = m.right().Value() & 0x1F;
587     Int32BinopMatcher mleft(m.left().node());
588     if (mleft.right().HasValue() && mleft.right().Value() != 0) {
589       // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
590       // shifted into the least-significant bits.
591       uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
592       unsigned mask_width = base::bits::CountPopulation(mask);
593       unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
594       if ((mask_msb + mask_width + lsb) == 32) {
595         MipsOperandGenerator g(this);
596         DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
597         Emit(kMipsExt, g.DefineAsRegister(node),
598              g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
599              g.TempImmediate(mask_width));
600         return;
601       }
602     }
603   }
604   VisitRRO(this, kMipsShr, node);
605 }
606 
VisitWord32Sar(Node * node)607 void InstructionSelector::VisitWord32Sar(Node* node) {
608   Int32BinopMatcher m(node);
609   if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
610       m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
611     Int32BinopMatcher mleft(m.left().node());
612     if (m.right().HasValue() && mleft.right().HasValue()) {
613       MipsOperandGenerator g(this);
614       uint32_t sar = m.right().Value();
615       uint32_t shl = mleft.right().Value();
616       if ((sar == shl) && (sar == 16)) {
617         Emit(kMipsSeh, g.DefineAsRegister(node),
618              g.UseRegister(mleft.left().node()));
619         return;
620       } else if ((sar == shl) && (sar == 24)) {
621         Emit(kMipsSeb, g.DefineAsRegister(node),
622              g.UseRegister(mleft.left().node()));
623         return;
624       }
625     }
626   }
627   VisitRRO(this, kMipsSar, node);
628 }
629 
VisitInt32PairBinop(InstructionSelector * selector,InstructionCode pair_opcode,InstructionCode single_opcode,Node * node)630 static void VisitInt32PairBinop(InstructionSelector* selector,
631                                 InstructionCode pair_opcode,
632                                 InstructionCode single_opcode, Node* node) {
633   MipsOperandGenerator g(selector);
634 
635   Node* projection1 = NodeProperties::FindProjection(node, 1);
636 
637   if (projection1) {
638     // We use UseUniqueRegister here to avoid register sharing with the output
639     // register.
640     InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
641                                    g.UseUniqueRegister(node->InputAt(1)),
642                                    g.UseUniqueRegister(node->InputAt(2)),
643                                    g.UseUniqueRegister(node->InputAt(3))};
644 
645     InstructionOperand outputs[] = {
646         g.DefineAsRegister(node),
647         g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
648     selector->Emit(pair_opcode, 2, outputs, 4, inputs);
649   } else {
650     // The high word of the result is not used, so we emit the standard 32 bit
651     // instruction.
652     selector->Emit(single_opcode, g.DefineSameAsFirst(node),
653                    g.UseRegister(node->InputAt(0)),
654                    g.UseRegister(node->InputAt(2)));
655   }
656 }
657 
VisitInt32PairAdd(Node * node)658 void InstructionSelector::VisitInt32PairAdd(Node* node) {
659   VisitInt32PairBinop(this, kMipsAddPair, kMipsAdd, node);
660 }
661 
VisitInt32PairSub(Node * node)662 void InstructionSelector::VisitInt32PairSub(Node* node) {
663   VisitInt32PairBinop(this, kMipsSubPair, kMipsSub, node);
664 }
665 
VisitInt32PairMul(Node * node)666 void InstructionSelector::VisitInt32PairMul(Node* node) {
667   VisitInt32PairBinop(this, kMipsMulPair, kMipsMul, node);
668 }
669 
670 // Shared routine for multiple shift operations.
VisitWord32PairShift(InstructionSelector * selector,InstructionCode opcode,Node * node)671 static void VisitWord32PairShift(InstructionSelector* selector,
672                                  InstructionCode opcode, Node* node) {
673   MipsOperandGenerator g(selector);
674   Int32Matcher m(node->InputAt(2));
675   InstructionOperand shift_operand;
676   if (m.HasValue()) {
677     shift_operand = g.UseImmediate(m.node());
678   } else {
679     shift_operand = g.UseUniqueRegister(m.node());
680   }
681 
682   // We use UseUniqueRegister here to avoid register sharing with the output
683   // register.
684   InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
685                                  g.UseUniqueRegister(node->InputAt(1)),
686                                  shift_operand};
687 
688   Node* projection1 = NodeProperties::FindProjection(node, 1);
689 
690   InstructionOperand outputs[2];
691   InstructionOperand temps[1];
692   int32_t output_count = 0;
693   int32_t temp_count = 0;
694 
695   outputs[output_count++] = g.DefineAsRegister(node);
696   if (projection1) {
697     outputs[output_count++] = g.DefineAsRegister(projection1);
698   } else {
699     temps[temp_count++] = g.TempRegister();
700   }
701 
702   selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
703 }
704 
VisitWord32PairShl(Node * node)705 void InstructionSelector::VisitWord32PairShl(Node* node) {
706   VisitWord32PairShift(this, kMipsShlPair, node);
707 }
708 
VisitWord32PairShr(Node * node)709 void InstructionSelector::VisitWord32PairShr(Node* node) {
710   VisitWord32PairShift(this, kMipsShrPair, node);
711 }
712 
VisitWord32PairSar(Node * node)713 void InstructionSelector::VisitWord32PairSar(Node* node) {
714   VisitWord32PairShift(this, kMipsSarPair, node);
715 }
716 
VisitWord32Ror(Node * node)717 void InstructionSelector::VisitWord32Ror(Node* node) {
718   VisitRRO(this, kMipsRor, node);
719 }
720 
VisitWord32Clz(Node * node)721 void InstructionSelector::VisitWord32Clz(Node* node) {
722   VisitRR(this, kMipsClz, node);
723 }
724 
// Selects a 64-bit atomic pair load. The pseudo instruction's register
// protocol fixes the result pair in (v0, v1) and reserves a0 as scratch.
// Any half of the pair that has no consumer (missing projection) is still
// listed as a fixed temp so the register allocator knows the register is
// clobbered.
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kMipsWord32AtomicPairLoad;
  AddressingMode addressing_mode = kMode_MRI;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
  // At most three temps: a0 plus (possibly) v0 and v1.
  InstructionOperand temps[3];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister(a0);
  InstructionOperand outputs[2];
  size_t output_count = 0;

  Node* projection0 = NodeProperties::FindProjection(node, 0);
  Node* projection1 = NodeProperties::FindProjection(node, 1);
  if (projection0) {
    outputs[output_count++] = g.DefineAsFixed(projection0, v0);
  } else {
    temps[temp_count++] = g.TempRegister(v0);
  }
  if (projection1) {
    outputs[output_count++] = g.DefineAsFixed(projection1, v1);
  } else {
    temps[temp_count++] = g.TempRegister(v1);
  }
  Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count,
       temps);
}
754 
// Selects a 64-bit atomic pair store. The value pair is fixed in (a1, a2)
// and a0 is reserved as scratch, matching the register protocol of the
// kMipsWord32AtomicPairStore pseudo instruction; two further unallocated
// temps are provided for the code generator. No outputs are produced.
void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value_low = node->InputAt(2);
  Node* value_high = node->InputAt(3);

  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
                                 g.UseFixed(value_low, a1),
                                 g.UseFixed(value_high, a2)};
  InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(),
                                g.TempRegister()};
  Emit(kMipsWord32AtomicPairStore | AddressingModeField::encode(kMode_MRI), 0,
       nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
}
770 
// Word32 atomic pair read-modify-write operations all share the
// VisitPairAtomicBinop helper; only the opcode differs.
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairAdd);
}

void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairSub);
}

void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairAnd);
}

void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairOr);
}

void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairXor);
}

void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairExchange);
}
794 
// Selects a 64-bit atomic pair compare-and-exchange. Inputs 2..4 are pinned
// to a1/a2/a3 and input 5 must live in its own register (presumably the
// expected/new value pairs required by the codegen stub — confirm against
// the code generator). The result pair is fixed in (v0, v1); unused halves
// are reserved as fixed temps since the registers are clobbered regardless.
void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
  MipsOperandGenerator g(this);
  InstructionOperand inputs[] = {
      g.UseRegister(node->InputAt(0)),  g.UseRegister(node->InputAt(1)),
      g.UseFixed(node->InputAt(2), a1), g.UseFixed(node->InputAt(3), a2),
      g.UseFixed(node->InputAt(4), a3), g.UseUniqueRegister(node->InputAt(5))};

  InstructionCode code = kMipsWord32AtomicPairCompareExchange |
                         AddressingModeField::encode(kMode_MRI);
  Node* projection0 = NodeProperties::FindProjection(node, 0);
  Node* projection1 = NodeProperties::FindProjection(node, 1);
  InstructionOperand outputs[2];
  size_t output_count = 0;
  InstructionOperand temps[3];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister(a0);
  if (projection0) {
    outputs[output_count++] = g.DefineAsFixed(projection0, v0);
  } else {
    temps[temp_count++] = g.TempRegister(v0);
  }
  if (projection1) {
    outputs[output_count++] = g.DefineAsFixed(projection1, v1);
  } else {
    temps[temp_count++] = g.TempRegister(v1);
  }
  Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count,
       temps);
}
824 
// These operations are never requested for this target, so selection is
// unreachable.
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
828 
VisitWord32ReverseBytes(Node * node)829 void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
830   MipsOperandGenerator g(this);
831   Emit(kMipsByteSwap32, g.DefineAsRegister(node),
832        g.UseRegister(node->InputAt(0)));
833 }
834 
// SIMD byte reversal is never requested for this target.
void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
  UNREACHABLE();
}
838 
VisitWord32Ctz(Node * node)839 void InstructionSelector::VisitWord32Ctz(Node* node) {
840   MipsOperandGenerator g(this);
841   Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
842 }
843 
VisitWord32Popcnt(Node * node)844 void InstructionSelector::VisitWord32Popcnt(Node* node) {
845   MipsOperandGenerator g(this);
846   Emit(kMipsPopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
847 }
848 
// Selects an Int32Add. On r6 this tries to fuse an adjacent left-shift into
// a single Lsa (left-shift-add) instruction, matching both operand orders;
// otherwise it falls back to the generic binop selection with kMipsAdd.
void InstructionSelector::VisitInt32Add(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);

  if (IsMipsArchVariant(kMips32r6)) {
    // Select Lsa for (left + (left_of_right << imm)).
    if (m.right().opcode() == IrOpcode::kWord32Shl &&
        CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
      Int32BinopMatcher mright(m.right().node());
      if (mright.right().HasValue() && !m.left().HasValue()) {
        int32_t shift_value = static_cast<int32_t>(mright.right().Value());
        // Lsa only encodes shift amounts in the range 1..31.
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMipsLsa, g.DefineAsRegister(node),
               g.UseRegister(m.left().node()),
               g.UseRegister(mright.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }

    // Select Lsa for ((left_of_left << imm) + right).
    if (m.left().opcode() == IrOpcode::kWord32Shl &&
        CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue() && !m.right().HasValue()) {
        int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMipsLsa, g.DefineAsRegister(node),
               g.UseRegister(m.right().node()),
               g.UseRegister(mleft.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }
  }

  VisitBinop(this, node, kMipsAdd, true, kMipsAdd);
}
889 
// Int32Sub has no fused forms here; use the generic binop selection.
void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMipsSub);
}
893 
// Selects an Int32Mul, strength-reducing multiplications by constants:
//   x * 2^k       -> shift left by k
//   x * (2^k + 1) -> Lsa x, x, k          (r6 only, 1 <= k <= 31)
//   x * (2^k - 1) -> (x << k) - x
// Anything else becomes a plain register-register multiply.
void InstructionSelector::VisitInt32Mul(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    uint32_t value = static_cast<uint32_t>(m.right().Value());
    if (base::bits::IsPowerOfTwo(value)) {
      Emit(kMipsShl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value - 1) && IsMipsArchVariant(kMips32r6) &&
        value - 1 > 0 && value - 1 <= 31) {
      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
      Emit(kMipsSub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitRRR(this, kMipsMul, node);
}
924 
// High 32 bits of a signed 32x32->64 multiply.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMipsMulHigh, node);
}

// High 32 bits of an unsigned 32x32->64 multiply.
void InstructionSelector::VisitUint32MulHigh(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsMulHighU, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
}
934 
VisitInt32Div(Node * node)935 void InstructionSelector::VisitInt32Div(Node* node) {
936   MipsOperandGenerator g(this);
937   Int32BinopMatcher m(node);
938   Emit(kMipsDiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
939        g.UseRegister(m.right().node()));
940 }
941 
VisitUint32Div(Node * node)942 void InstructionSelector::VisitUint32Div(Node* node) {
943   MipsOperandGenerator g(this);
944   Int32BinopMatcher m(node);
945   Emit(kMipsDivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
946        g.UseRegister(m.right().node()));
947 }
948 
VisitInt32Mod(Node * node)949 void InstructionSelector::VisitInt32Mod(Node* node) {
950   MipsOperandGenerator g(this);
951   Int32BinopMatcher m(node);
952   Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
953        g.UseRegister(m.right().node()));
954 }
955 
VisitUint32Mod(Node * node)956 void InstructionSelector::VisitUint32Mod(Node* node) {
957   MipsOperandGenerator g(this);
958   Int32BinopMatcher m(node);
959   Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
960        g.UseRegister(m.right().node()));
961 }
962 
// Scalar numeric conversions that map 1:1 onto a MIPS FPU convert or
// truncate instruction.
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDS, node);
}

void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kMipsCvtSW, node);
}

void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kMipsCvtSUw, node);
}

void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDW, node);
}

void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDUw, node);
}

void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kMipsTruncWS, node);
}

void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwS, node);
}
990 
// Selects ChangeFloat64ToInt32. When the input is a covered rounding node,
// the round and the conversion fuse into one round-to-word instruction; a
// covered float32->float64 widening is similarly folded so the conversion
// happens directly from single precision.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  MipsOperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMipsFloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMipsCeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMipsRoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMipsTruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMipsFloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMipsCeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMipsRoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            // No fusable rounding op: truncate directly from float32.
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMipsTruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  VisitRR(this, kMipsTruncWD, node);
}
1053 
// Both the checked and the truncating float64->uint32 conversion lower to
// the same unsigned truncate instruction.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwD, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwD, node);
}
1061 
// Selects TruncateFloat64ToFloat32, folding a covered int32->float64 input
// into a direct int32->float32 conversion.
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  MipsOperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
  // instruction.
  if (CanCover(node, value) &&
      value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
    Emit(kMipsCvtSW, g.DefineAsRegister(node),
         g.UseRegister(value->InputAt(0)));
    return;
  }
  VisitRR(this, kMipsCvtSD, node);
}
1075 
// JS-semantics double-to-int32 truncation uses the architecture-neutral
// pseudo instruction.
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kMipsTruncWD, node);
}

// Bitcast float32 -> int32 reuses the float64 low-word extract instruction.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMipsFloat64ExtractLowWord32, node);
}
1087 
// Bitcast int32 -> float32 reuses the float64 low-word insert instruction.
// The extra inline-immediate 0 operand takes the place of the instruction's
// first input — presumably a dummy "insert into zero" source; confirm
// against the code generator for kMipsFloat64InsertLowWord32.
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE, 0),
       g.UseRegister(node->InputAt(0)));
}
1094 
// Selects Float32Add. On r2, a covered multiply on either side fuses into
// the multiply-add instruction Madd.S.
void InstructionSelector::VisitFloat32Add(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
    Float32BinopMatcher m(node);
    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
      // For Add.S(Mul.S(x, y), z):
      Float32BinopMatcher mleft(m.left().node());
      Emit(kMipsMaddS, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
    if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
      // For Add.S(x, Mul.S(y, z)):
      Float32BinopMatcher mright(m.right().node());
      Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsAddS, node);
}
1118 
// Selects Float64Add. On r2, a covered multiply on either side fuses into
// the multiply-add instruction Madd.D.
void InstructionSelector::VisitFloat64Add(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(z, x, y).
    Float64BinopMatcher m(node);
    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
      // For Add.D(Mul.D(x, y), z):
      Float64BinopMatcher mleft(m.left().node());
      Emit(kMipsMaddD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
    if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
      // For Add.D(x, Mul.D(y, z)):
      Float64BinopMatcher mright(m.right().node());
      Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsAddD, node);
}
1142 
// Selects Float32Sub. On r2, a covered multiply on the left fuses into the
// multiply-subtract instruction Msub.S.
void InstructionSelector::VisitFloat32Sub(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.S(z, x, y).
    Float32BinopMatcher m(node);
    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
      // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
      Float32BinopMatcher mleft(m.left().node());
      Emit(kMipsMsubS, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsSubS, node);
}
1158 
// Selects Float64Sub. On r2, a covered multiply on the left fuses into the
// multiply-subtract instruction Msub.D.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.D(z, x, y).
    Float64BinopMatcher m(node);
    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
      // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
      Float64BinopMatcher mleft(m.left().node());
      Emit(kMipsMsubD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsSubD, node);
}
1174 
// Float multiply/divide map 1:1 onto single FPU instructions.
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMipsMulS, node);
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMipsMulD, node);
}

void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMipsDivS, node);
}

void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMipsDivD, node);
}
1190 
VisitFloat64Mod(Node * node)1191 void InstructionSelector::VisitFloat64Mod(Node* node) {
1192   MipsOperandGenerator g(this);
1193   Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
1194        g.UseFixed(node->InputAt(1), f14))
1195       ->MarkAsCall();
1196 }
1197 
VisitFloat32Max(Node * node)1198 void InstructionSelector::VisitFloat32Max(Node* node) {
1199   MipsOperandGenerator g(this);
1200   Emit(kMipsFloat32Max, g.DefineAsRegister(node),
1201        g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1202 }
1203 
VisitFloat64Max(Node * node)1204 void InstructionSelector::VisitFloat64Max(Node* node) {
1205   MipsOperandGenerator g(this);
1206   Emit(kMipsFloat64Max, g.DefineAsRegister(node),
1207        g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1208 }
1209 
VisitFloat32Min(Node * node)1210 void InstructionSelector::VisitFloat32Min(Node* node) {
1211   MipsOperandGenerator g(this);
1212   Emit(kMipsFloat32Min, g.DefineAsRegister(node),
1213        g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1214 }
1215 
VisitFloat64Min(Node * node)1216 void InstructionSelector::VisitFloat64Min(Node* node) {
1217   MipsOperandGenerator g(this);
1218   Emit(kMipsFloat64Min, g.DefineAsRegister(node),
1219        g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1220 }
1221 
// Absolute value and square root map 1:1 onto FPU instructions.
void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMipsAbsS, node);
}

void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMipsAbsD, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtS, node);
}

void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtD, node);
}
1237 
// Floating-point rounding operations, one pseudo instruction per rounding
// mode and width. Round-ties-away has no MIPS lowering here and must not be
// requested for this target.
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMipsFloat32RoundDown, node);
}

void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMipsFloat64RoundDown, node);
}

void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMipsFloat32RoundUp, node);
}

void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMipsFloat64RoundUp, node);
}

void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat32RoundTruncate, node);
}

void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat64RoundTruncate, node);
}

void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat32RoundTiesEven, node);
}

void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat64RoundTiesEven, node);
}
1273 
// Floating-point negation maps 1:1 onto FPU instructions.
void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kMipsNegS, node);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kMipsNegD, node);
}
1281 
VisitFloat64Ieee754Binop(Node * node,InstructionCode opcode)1282 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
1283                                                    InstructionCode opcode) {
1284   MipsOperandGenerator g(this);
1285   Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
1286        g.UseFixed(node->InputAt(1), f4))
1287       ->MarkAsCall();
1288 }
1289 
VisitFloat64Ieee754Unop(Node * node,InstructionCode opcode)1290 void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
1291                                                   InstructionCode opcode) {
1292   MipsOperandGenerator g(this);
1293   Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
1294       ->MarkAsCall();
1295 }
1296 
// Emits the instructions that place outgoing call arguments on the stack.
// C calls: emit kArchPrepareCallCFunction, then poke stack arguments above
// the reserved argument slots. Other calls: claim the needed stack space in
// one go, then store each argument at its slot index.
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  MipsOperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments. Slots start after the ABI-reserved argument
    // slot area (kCArgSlotCount).
    int slot = kCArgSlotCount;
    for (PushParameter input : (*arguments)) {
      if (input.node) {
        Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(slot << kSystemPointerSizeLog2));
        ++slot;
      }
    }
  } else {
    // Possibly align stack here for functions.
    int push_count = static_cast<int>(call_descriptor->StackParameterCount());
    if (push_count > 0) {
      // Calculate needed space
      int stack_size = 0;
      for (size_t n = 0; n < arguments->size(); ++n) {
        PushParameter input = (*arguments)[n];
        if (input.node) {
          stack_size += input.location.GetSizeInPointers();
        }
      }
      Emit(kMipsStackClaim, g.NoOutput(),
           g.TempImmediate(stack_size << kSystemPointerSizeLog2));
    }
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node) {
        Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(n << kSystemPointerSizeLog2));
      }
    }
  }
}
1341 
// Emits kMipsPeek instructions that read call results returned on the
// caller's stack frame. Register results are skipped; float results are
// tagged with their representation so the peek loads the right width.
void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  MipsOperandGenerator g(this);

  int reverse_slot = 0;
  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node != nullptr) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      }
      Emit(kMipsPeek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
    // Advance past the slot(s) this result occupies, holes included.
    reverse_slot += output.location.GetSizeInPointers();
  }
}
1364 
// On MIPS the tail-call target address is always materialized in a register.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

// Number of scratch registers reserved for tail calls from JSFunctions.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
1368 
// Selects an unaligned load: picks the unaligned-load opcode for the
// representation, then uses base+immediate addressing when the index fits
// the opcode's immediate field, otherwise materializes base+index into a
// temp register first.
void InstructionSelector::VisitUnalignedLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Byte-sized accesses are never unaligned.
      UNREACHABLE();
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsUlw;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kMipsUlwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsUldc1;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMipsMsaLd;
      break;
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kWord64:             // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
1417 
// Selects an unaligned store, mirroring VisitUnalignedLoad: pick the
// unaligned-store opcode for the representation, then use base+immediate
// addressing when the index fits, else compute base+index into a temp. A
// zero-valued store uses the zero register instead of allocating one.
void InstructionSelector::VisitUnalignedStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());

  // TODO(mips): I guess this could be done in a better way.
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kMipsUswc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsUsdc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Byte-sized accesses are never unaligned.
      UNREACHABLE();
    case MachineRepresentation::kWord16:
      opcode = kMipsUsh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsUsw;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMipsMsaSt;
      break;
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kWord64:             // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}
1470 
1471 namespace {
1472 // Shared routine for multiple compare operations.
// Shared routine for multiple compare operations: emits the compare and
// hands the flags continuation to the shared continuation machinery.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}
1478 
1479 // Shared routine for multiple float32 compare operations.
VisitFloat32Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1480 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1481                          FlagsContinuation* cont) {
1482   MipsOperandGenerator g(selector);
1483   Float32BinopMatcher m(node);
1484   InstructionOperand lhs, rhs;
1485 
1486   lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1487                           : g.UseRegister(m.left().node());
1488   rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1489                            : g.UseRegister(m.right().node());
1490   VisitCompare(selector, kMipsCmpS, lhs, rhs, cont);
1491 }
1492 
1493 // Shared routine for multiple float64 compare operations.
VisitFloat64Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1494 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1495                          FlagsContinuation* cont) {
1496   MipsOperandGenerator g(selector);
1497   Float64BinopMatcher m(node);
1498   InstructionOperand lhs, rhs;
1499 
1500   lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1501                           : g.UseRegister(m.left().node());
1502   rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1503                            : g.UseRegister(m.right().node());
1504   VisitCompare(selector, kMipsCmpD, lhs, rhs, cont);
1505 }
1506 
1507 // Shared routine for multiple word compare operations.
VisitWordCompare(InstructionSelector * selector,Node * node,InstructionCode opcode,FlagsContinuation * cont,bool commutative)1508 void VisitWordCompare(InstructionSelector* selector, Node* node,
1509                       InstructionCode opcode, FlagsContinuation* cont,
1510                       bool commutative) {
1511   MipsOperandGenerator g(selector);
1512   Node* left = node->InputAt(0);
1513   Node* right = node->InputAt(1);
1514 
1515   // Match immediates on left or right side of comparison.
1516   if (g.CanBeImmediate(right, opcode)) {
1517     if (opcode == kMipsTst) {
1518       VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
1519                    cont);
1520     } else {
1521       switch (cont->condition()) {
1522         case kEqual:
1523         case kNotEqual:
1524           if (cont->IsSet()) {
1525             VisitCompare(selector, opcode, g.UseRegister(left),
1526                          g.UseImmediate(right), cont);
1527           } else {
1528             VisitCompare(selector, opcode, g.UseRegister(left),
1529                          g.UseRegister(right), cont);
1530           }
1531           break;
1532         case kSignedLessThan:
1533         case kSignedGreaterThanOrEqual:
1534         case kUnsignedLessThan:
1535         case kUnsignedGreaterThanOrEqual:
1536           VisitCompare(selector, opcode, g.UseRegister(left),
1537                        g.UseImmediate(right), cont);
1538           break;
1539         default:
1540           VisitCompare(selector, opcode, g.UseRegister(left),
1541                        g.UseRegister(right), cont);
1542       }
1543     }
1544   } else if (g.CanBeImmediate(left, opcode)) {
1545     if (!commutative) cont->Commute();
1546     if (opcode == kMipsTst) {
1547       VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
1548                    cont);
1549     } else {
1550       switch (cont->condition()) {
1551         case kEqual:
1552         case kNotEqual:
1553           if (cont->IsSet()) {
1554             VisitCompare(selector, opcode, g.UseRegister(right),
1555                          g.UseImmediate(left), cont);
1556           } else {
1557             VisitCompare(selector, opcode, g.UseRegister(right),
1558                          g.UseRegister(left), cont);
1559           }
1560           break;
1561         case kSignedLessThan:
1562         case kSignedGreaterThanOrEqual:
1563         case kUnsignedLessThan:
1564         case kUnsignedGreaterThanOrEqual:
1565           VisitCompare(selector, opcode, g.UseRegister(right),
1566                        g.UseImmediate(left), cont);
1567           break;
1568         default:
1569           VisitCompare(selector, opcode, g.UseRegister(right),
1570                        g.UseRegister(left), cont);
1571       }
1572     }
1573   } else {
1574     VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
1575                  cont);
1576   }
1577 }
1578 
VisitWordCompare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1579 void VisitWordCompare(InstructionSelector* selector, Node* node,
1580                       FlagsContinuation* cont) {
1581   VisitWordCompare(selector, node, kMipsCmp, cont, false);
1582 }
1583 
1584 }  // namespace
1585 
VisitStackPointerGreaterThan(Node * node,FlagsContinuation * cont)1586 void InstructionSelector::VisitStackPointerGreaterThan(
1587     Node* node, FlagsContinuation* cont) {
1588   StackCheckKind kind = StackCheckKindOf(node->op());
1589   InstructionCode opcode =
1590       kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
1591 
1592   MipsOperandGenerator g(this);
1593 
1594   // No outputs.
1595   InstructionOperand* const outputs = nullptr;
1596   const int output_count = 0;
1597 
1598   // Applying an offset to this stack check requires a temp register. Offsets
1599   // are only applied to the first stack check. If applying an offset, we must
1600   // ensure the input and temp registers do not alias, thus kUniqueRegister.
1601   InstructionOperand temps[] = {g.TempRegister()};
1602   const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0);
1603   const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
1604                                  ? OperandGenerator::kUniqueRegister
1605                                  : OperandGenerator::kRegister;
1606 
1607   Node* const value = node->InputAt(0);
1608   InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
1609   static constexpr int input_count = arraysize(inputs);
1610 
1611   EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
1612                        temp_count, temps, cont);
1613 }
1614 
// Shared routine for word comparisons against zero.
//
// First strips chains of Word32Equal-with-zero (each is a logical negation
// of the comparison below it), negating the continuation per layer. Then,
// if the compared value is only used here, the continuation is fused
// directly into the producing comparison / overflow op. Otherwise an
// explicit compare against zero is emitted.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(this, value, cont);
      // Float comparisons reuse the unsigned condition codes.
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (!result || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMipsAddOvf, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMipsSubOvf, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMipsMulOvf, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
        // (x & mask) compared with zero becomes a test instruction; the
        // AND is commutative, so immediates on either side are fine.
        return VisitWordCompare(this, value, kMipsTst, cont, true);
      case IrOpcode::kStackPointerGreaterThan:
        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
        return VisitStackPointerGreaterThan(value, cont);
      default:
        break;
    }
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  MipsOperandGenerator g(this);
  InstructionOperand const value_operand = g.UseRegister(value);
  EmitWithContinuation(kMipsCmp, value_operand, g.TempImmediate(0), cont);
}
1706 
VisitSwitch(Node * node,const SwitchInfo & sw)1707 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
1708   MipsOperandGenerator g(this);
1709   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
1710 
1711   // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
1712   if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
1713     static const size_t kMaxTableSwitchValueRange = 2 << 16;
1714     size_t table_space_cost = 9 + sw.value_range();
1715     size_t table_time_cost = 3;
1716     size_t lookup_space_cost = 2 + 2 * sw.case_count();
1717     size_t lookup_time_cost = sw.case_count();
1718     if (sw.case_count() > 0 &&
1719         table_space_cost + 3 * table_time_cost <=
1720             lookup_space_cost + 3 * lookup_time_cost &&
1721         sw.min_value() > std::numeric_limits<int32_t>::min() &&
1722         sw.value_range() <= kMaxTableSwitchValueRange) {
1723       InstructionOperand index_operand = value_operand;
1724       if (sw.min_value()) {
1725         index_operand = g.TempRegister();
1726         Emit(kMipsSub, index_operand, value_operand,
1727              g.TempImmediate(sw.min_value()));
1728       }
1729       // Generate a table lookup.
1730       return EmitTableSwitch(sw, index_operand);
1731     }
1732   }
1733 
1734   // Generate a tree of conditional jumps.
1735   return EmitBinarySearchSwitch(std::move(sw), value_operand);
1736 }
1737 
VisitWord32Equal(Node * const node)1738 void InstructionSelector::VisitWord32Equal(Node* const node) {
1739   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1740   Int32BinopMatcher m(node);
1741   if (m.right().Is(0)) {
1742     return VisitWordCompareZero(m.node(), m.left().node(), &cont);
1743   }
1744   VisitWordCompare(this, node, &cont);
1745 }
1746 
VisitInt32LessThan(Node * node)1747 void InstructionSelector::VisitInt32LessThan(Node* node) {
1748   FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
1749   VisitWordCompare(this, node, &cont);
1750 }
1751 
VisitInt32LessThanOrEqual(Node * node)1752 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1753   FlagsContinuation cont =
1754       FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
1755   VisitWordCompare(this, node, &cont);
1756 }
1757 
VisitUint32LessThan(Node * node)1758 void InstructionSelector::VisitUint32LessThan(Node* node) {
1759   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1760   VisitWordCompare(this, node, &cont);
1761 }
1762 
VisitUint32LessThanOrEqual(Node * node)1763 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1764   FlagsContinuation cont =
1765       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1766   VisitWordCompare(this, node, &cont);
1767 }
1768 
VisitInt32AddWithOverflow(Node * node)1769 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1770   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1771     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1772     return VisitBinop(this, node, kMipsAddOvf, &cont);
1773   }
1774   FlagsContinuation cont;
1775   VisitBinop(this, node, kMipsAddOvf, &cont);
1776 }
1777 
VisitInt32SubWithOverflow(Node * node)1778 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1779   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1780     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1781     return VisitBinop(this, node, kMipsSubOvf, &cont);
1782   }
1783   FlagsContinuation cont;
1784   VisitBinop(this, node, kMipsSubOvf, &cont);
1785 }
1786 
VisitInt32MulWithOverflow(Node * node)1787 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
1788   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1789     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1790     return VisitBinop(this, node, kMipsMulOvf, &cont);
1791   }
1792   FlagsContinuation cont;
1793   VisitBinop(this, node, kMipsMulOvf, &cont);
1794 }
1795 
VisitFloat32Equal(Node * node)1796 void InstructionSelector::VisitFloat32Equal(Node* node) {
1797   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1798   VisitFloat32Compare(this, node, &cont);
1799 }
1800 
VisitFloat32LessThan(Node * node)1801 void InstructionSelector::VisitFloat32LessThan(Node* node) {
1802   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1803   VisitFloat32Compare(this, node, &cont);
1804 }
1805 
VisitFloat32LessThanOrEqual(Node * node)1806 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
1807   FlagsContinuation cont =
1808       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1809   VisitFloat32Compare(this, node, &cont);
1810 }
1811 
VisitFloat64Equal(Node * node)1812 void InstructionSelector::VisitFloat64Equal(Node* node) {
1813   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1814   VisitFloat64Compare(this, node, &cont);
1815 }
1816 
VisitFloat64LessThan(Node * node)1817 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1818   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1819   VisitFloat64Compare(this, node, &cont);
1820 }
1821 
VisitFloat64LessThanOrEqual(Node * node)1822 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1823   FlagsContinuation cont =
1824       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1825   VisitFloat64Compare(this, node, &cont);
1826 }
1827 
VisitFloat64ExtractLowWord32(Node * node)1828 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
1829   MipsOperandGenerator g(this);
1830   Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node),
1831        g.UseRegister(node->InputAt(0)));
1832 }
1833 
VisitFloat64ExtractHighWord32(Node * node)1834 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
1835   MipsOperandGenerator g(this);
1836   Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node),
1837        g.UseRegister(node->InputAt(0)));
1838 }
1839 
VisitFloat64InsertLowWord32(Node * node)1840 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
1841   MipsOperandGenerator g(this);
1842   Node* left = node->InputAt(0);
1843   Node* right = node->InputAt(1);
1844   Emit(kMipsFloat64InsertLowWord32, g.DefineSameAsFirst(node),
1845        g.UseRegister(left), g.UseRegister(right));
1846 }
1847 
VisitFloat64InsertHighWord32(Node * node)1848 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
1849   MipsOperandGenerator g(this);
1850   Node* left = node->InputAt(0);
1851   Node* right = node->InputAt(1);
1852   Emit(kMipsFloat64InsertHighWord32, g.DefineSameAsFirst(node),
1853        g.UseRegister(left), g.UseRegister(right));
1854 }
1855 
VisitFloat64SilenceNaN(Node * node)1856 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
1857   MipsOperandGenerator g(this);
1858   Node* left = node->InputAt(0);
1859   InstructionOperand temps[] = {g.TempRegister()};
1860   Emit(kMipsFloat64SilenceNaN, g.DefineSameAsFirst(node), g.UseRegister(left),
1861        arraysize(temps), temps);
1862 }
1863 
VisitMemoryBarrier(Node * node)1864 void InstructionSelector::VisitMemoryBarrier(Node* node) {
1865   MipsOperandGenerator g(this);
1866   Emit(kMipsSync, g.NoOutput());
1867 }
1868 
VisitWord32AtomicLoad(Node * node)1869 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
1870   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
1871   MipsOperandGenerator g(this);
1872   Node* base = node->InputAt(0);
1873   Node* index = node->InputAt(1);
1874   ArchOpcode opcode = kArchNop;
1875   switch (load_rep.representation()) {
1876     case MachineRepresentation::kWord8:
1877       opcode =
1878           load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
1879       break;
1880     case MachineRepresentation::kWord16:
1881       opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
1882                                    : kWord32AtomicLoadUint16;
1883       break;
1884     case MachineRepresentation::kWord32:
1885       opcode = kWord32AtomicLoadWord32;
1886       break;
1887     default:
1888       UNREACHABLE();
1889   }
1890 
1891   if (g.CanBeImmediate(index, opcode)) {
1892     Emit(opcode | AddressingModeField::encode(kMode_MRI),
1893          g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
1894   } else {
1895     InstructionOperand addr_reg = g.TempRegister();
1896     Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
1897          g.UseRegister(index), g.UseRegister(base));
1898     // Emit desired load opcode, using temp addr_reg.
1899     Emit(opcode | AddressingModeField::encode(kMode_MRI),
1900          g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
1901   }
1902 }
1903 
VisitWord32AtomicStore(Node * node)1904 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
1905   MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
1906   MipsOperandGenerator g(this);
1907   Node* base = node->InputAt(0);
1908   Node* index = node->InputAt(1);
1909   Node* value = node->InputAt(2);
1910   ArchOpcode opcode = kArchNop;
1911   switch (rep) {
1912     case MachineRepresentation::kWord8:
1913       opcode = kWord32AtomicStoreWord8;
1914       break;
1915     case MachineRepresentation::kWord16:
1916       opcode = kWord32AtomicStoreWord16;
1917       break;
1918     case MachineRepresentation::kWord32:
1919       opcode = kWord32AtomicStoreWord32;
1920       break;
1921     default:
1922       UNREACHABLE();
1923   }
1924 
1925   if (g.CanBeImmediate(index, opcode)) {
1926     Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1927          g.UseRegister(base), g.UseImmediate(index),
1928          g.UseRegisterOrImmediateZero(value));
1929   } else {
1930     InstructionOperand addr_reg = g.TempRegister();
1931     Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
1932          g.UseRegister(index), g.UseRegister(base));
1933     // Emit desired store opcode, using temp addr_reg.
1934     Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1935          addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
1936   }
1937 }
1938 
VisitWord32AtomicExchange(Node * node)1939 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
1940   MipsOperandGenerator g(this);
1941   Node* base = node->InputAt(0);
1942   Node* index = node->InputAt(1);
1943   Node* value = node->InputAt(2);
1944   ArchOpcode opcode = kArchNop;
1945   MachineType type = AtomicOpType(node->op());
1946   if (type == MachineType::Int8()) {
1947     opcode = kWord32AtomicExchangeInt8;
1948   } else if (type == MachineType::Uint8()) {
1949     opcode = kWord32AtomicExchangeUint8;
1950   } else if (type == MachineType::Int16()) {
1951     opcode = kWord32AtomicExchangeInt16;
1952   } else if (type == MachineType::Uint16()) {
1953     opcode = kWord32AtomicExchangeUint16;
1954   } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
1955     opcode = kWord32AtomicExchangeWord32;
1956   } else {
1957     UNREACHABLE();
1958     return;
1959   }
1960 
1961   AddressingMode addressing_mode = kMode_MRI;
1962   InstructionOperand inputs[3];
1963   size_t input_count = 0;
1964   inputs[input_count++] = g.UseUniqueRegister(base);
1965   inputs[input_count++] = g.UseUniqueRegister(index);
1966   inputs[input_count++] = g.UseUniqueRegister(value);
1967   InstructionOperand outputs[1];
1968   outputs[0] = g.UseUniqueRegister(node);
1969   InstructionOperand temp[3];
1970   temp[0] = g.TempRegister();
1971   temp[1] = g.TempRegister();
1972   temp[2] = g.TempRegister();
1973   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1974   Emit(code, 1, outputs, input_count, inputs, 3, temp);
1975 }
1976 
VisitWord32AtomicCompareExchange(Node * node)1977 void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
1978   MipsOperandGenerator g(this);
1979   Node* base = node->InputAt(0);
1980   Node* index = node->InputAt(1);
1981   Node* old_value = node->InputAt(2);
1982   Node* new_value = node->InputAt(3);
1983   ArchOpcode opcode = kArchNop;
1984   MachineType type = AtomicOpType(node->op());
1985   if (type == MachineType::Int8()) {
1986     opcode = kWord32AtomicCompareExchangeInt8;
1987   } else if (type == MachineType::Uint8()) {
1988     opcode = kWord32AtomicCompareExchangeUint8;
1989   } else if (type == MachineType::Int16()) {
1990     opcode = kWord32AtomicCompareExchangeInt16;
1991   } else if (type == MachineType::Uint16()) {
1992     opcode = kWord32AtomicCompareExchangeUint16;
1993   } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
1994     opcode = kWord32AtomicCompareExchangeWord32;
1995   } else {
1996     UNREACHABLE();
1997     return;
1998   }
1999 
2000   AddressingMode addressing_mode = kMode_MRI;
2001   InstructionOperand inputs[4];
2002   size_t input_count = 0;
2003   inputs[input_count++] = g.UseUniqueRegister(base);
2004   inputs[input_count++] = g.UseUniqueRegister(index);
2005   inputs[input_count++] = g.UseUniqueRegister(old_value);
2006   inputs[input_count++] = g.UseUniqueRegister(new_value);
2007   InstructionOperand outputs[1];
2008   outputs[0] = g.UseUniqueRegister(node);
2009   InstructionOperand temp[3];
2010   temp[0] = g.TempRegister();
2011   temp[1] = g.TempRegister();
2012   temp[2] = g.TempRegister();
2013   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2014   Emit(code, 1, outputs, input_count, inputs, 3, temp);
2015 }
2016 
VisitWord32AtomicBinaryOperation(Node * node,ArchOpcode int8_op,ArchOpcode uint8_op,ArchOpcode int16_op,ArchOpcode uint16_op,ArchOpcode word32_op)2017 void InstructionSelector::VisitWord32AtomicBinaryOperation(
2018     Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2019     ArchOpcode uint16_op, ArchOpcode word32_op) {
2020   MipsOperandGenerator g(this);
2021   Node* base = node->InputAt(0);
2022   Node* index = node->InputAt(1);
2023   Node* value = node->InputAt(2);
2024   ArchOpcode opcode = kArchNop;
2025   MachineType type = AtomicOpType(node->op());
2026   if (type == MachineType::Int8()) {
2027     opcode = int8_op;
2028   } else if (type == MachineType::Uint8()) {
2029     opcode = uint8_op;
2030   } else if (type == MachineType::Int16()) {
2031     opcode = int16_op;
2032   } else if (type == MachineType::Uint16()) {
2033     opcode = uint16_op;
2034   } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2035     opcode = word32_op;
2036   } else {
2037     UNREACHABLE();
2038     return;
2039   }
2040 
2041   AddressingMode addressing_mode = kMode_MRI;
2042   InstructionOperand inputs[3];
2043   size_t input_count = 0;
2044   inputs[input_count++] = g.UseUniqueRegister(base);
2045   inputs[input_count++] = g.UseUniqueRegister(index);
2046   inputs[input_count++] = g.UseUniqueRegister(value);
2047   InstructionOperand outputs[1];
2048   outputs[0] = g.UseUniqueRegister(node);
2049   InstructionOperand temps[4];
2050   temps[0] = g.TempRegister();
2051   temps[1] = g.TempRegister();
2052   temps[2] = g.TempRegister();
2053   temps[3] = g.TempRegister();
2054   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2055   Emit(code, 1, outputs, input_count, inputs, 4, temps);
2056 }
2057 
// Expands to a VisitWord32Atomic<op> method that forwards to
// VisitWord32AtomicBinaryOperation with the five per-width opcodes.
#define VISIT_ATOMIC_BINOP(op)                                   \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {  \
    VisitWord32AtomicBinaryOperation(                            \
        node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
        kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16,     \
        kWord32Atomic##op##Word32);                              \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
2071 
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  // This operation is never expected to be selected for this backend;
  // reaching here is a compiler bug.
  UNREACHABLE();
}
2075 
void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  // 64-bit operations are not expected on this 32-bit backend; reaching
  // here is a compiler bug.
  UNREACHABLE();
}
2079 
// SIMD lane configurations used to stamp out per-type visitors below.
#define SIMD_TYPE_LIST(V) \
  V(F32x4)                \
  V(I32x4)                \
  V(I16x8)                \
  V(I8x16)
2085 
// (SIMD op, architecture opcode) pairs for unary SIMD operations.
#define SIMD_UNOP_LIST(V)                                \
  V(F64x2Abs, kMipsF64x2Abs)                             \
  V(F64x2Neg, kMipsF64x2Neg)                             \
  V(F64x2Sqrt, kMipsF64x2Sqrt)                           \
  V(I64x2Neg, kMipsI64x2Neg)                             \
  V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4)         \
  V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4)         \
  V(F32x4Abs, kMipsF32x4Abs)                             \
  V(F32x4Neg, kMipsF32x4Neg)                             \
  V(F32x4Sqrt, kMipsF32x4Sqrt)                           \
  V(F32x4RecipApprox, kMipsF32x4RecipApprox)             \
  V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox)     \
  V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4)         \
  V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4)         \
  V(I32x4Neg, kMipsI32x4Neg)                             \
  V(I32x4SConvertI16x8Low, kMipsI32x4SConvertI16x8Low)   \
  V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High) \
  V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low)   \
  V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High) \
  V(I16x8Neg, kMipsI16x8Neg)                             \
  V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low)   \
  V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High) \
  V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low)   \
  V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \
  V(I8x16Neg, kMipsI8x16Neg)                             \
  V(S128Not, kMipsS128Not)                               \
  V(S1x4AnyTrue, kMipsS1x4AnyTrue)                       \
  V(S1x4AllTrue, kMipsS1x4AllTrue)                       \
  V(S1x8AnyTrue, kMipsS1x8AnyTrue)                       \
  V(S1x8AllTrue, kMipsS1x8AllTrue)                       \
  V(S1x16AnyTrue, kMipsS1x16AnyTrue)                     \
  V(S1x16AllTrue, kMipsS1x16AllTrue)
2118 
// SIMD shift operations used to stamp out per-op shift visitors.
#define SIMD_SHIFT_OP_LIST(V) \
  V(I64x2Shl)                 \
  V(I64x2ShrS)                \
  V(I64x2ShrU)                \
  V(I32x4Shl)                 \
  V(I32x4ShrS)                \
  V(I32x4ShrU)                \
  V(I16x8Shl)                 \
  V(I16x8ShrS)                \
  V(I16x8ShrU)                \
  V(I8x16Shl)                 \
  V(I8x16ShrS)                \
  V(I8x16ShrU)
2132 
// X-macro mapping each machine-level binary SIMD operation to the MIPS
// instruction code that implements it. Expanded by SIMD_VISIT_BINOP below to
// generate one InstructionSelector visitor per entry.
#define SIMD_BINOP_LIST(V)                             \
  V(F64x2Add, kMipsF64x2Add)                           \
  V(F64x2Sub, kMipsF64x2Sub)                           \
  V(F64x2Mul, kMipsF64x2Mul)                           \
  V(F64x2Div, kMipsF64x2Div)                           \
  V(F64x2Min, kMipsF64x2Min)                           \
  V(F64x2Max, kMipsF64x2Max)                           \
  V(F64x2Eq, kMipsF64x2Eq)                             \
  V(F64x2Ne, kMipsF64x2Ne)                             \
  V(F64x2Lt, kMipsF64x2Lt)                             \
  V(F64x2Le, kMipsF64x2Le)                             \
  V(I64x2Add, kMipsI64x2Add)                           \
  V(I64x2Sub, kMipsI64x2Sub)                           \
  V(I64x2Mul, kMipsI64x2Mul)                           \
  V(F32x4Add, kMipsF32x4Add)                           \
  V(F32x4AddHoriz, kMipsF32x4AddHoriz)                 \
  V(F32x4Sub, kMipsF32x4Sub)                           \
  V(F32x4Mul, kMipsF32x4Mul)                           \
  V(F32x4Div, kMipsF32x4Div)                           \
  V(F32x4Max, kMipsF32x4Max)                           \
  V(F32x4Min, kMipsF32x4Min)                           \
  V(F32x4Eq, kMipsF32x4Eq)                             \
  V(F32x4Ne, kMipsF32x4Ne)                             \
  V(F32x4Lt, kMipsF32x4Lt)                             \
  V(F32x4Le, kMipsF32x4Le)                             \
  V(I32x4Add, kMipsI32x4Add)                           \
  V(I32x4AddHoriz, kMipsI32x4AddHoriz)                 \
  V(I32x4Sub, kMipsI32x4Sub)                           \
  V(I32x4Mul, kMipsI32x4Mul)                           \
  V(I32x4MaxS, kMipsI32x4MaxS)                         \
  V(I32x4MinS, kMipsI32x4MinS)                         \
  V(I32x4MaxU, kMipsI32x4MaxU)                         \
  V(I32x4MinU, kMipsI32x4MinU)                         \
  V(I32x4Eq, kMipsI32x4Eq)                             \
  V(I32x4Ne, kMipsI32x4Ne)                             \
  V(I32x4GtS, kMipsI32x4GtS)                           \
  V(I32x4GeS, kMipsI32x4GeS)                           \
  V(I32x4GtU, kMipsI32x4GtU)                           \
  V(I32x4GeU, kMipsI32x4GeU)                           \
  V(I32x4Abs, kMipsI32x4Abs)                           \
  V(I16x8Add, kMipsI16x8Add)                           \
  V(I16x8AddSaturateS, kMipsI16x8AddSaturateS)         \
  V(I16x8AddSaturateU, kMipsI16x8AddSaturateU)         \
  V(I16x8AddHoriz, kMipsI16x8AddHoriz)                 \
  V(I16x8Sub, kMipsI16x8Sub)                           \
  V(I16x8SubSaturateS, kMipsI16x8SubSaturateS)         \
  V(I16x8SubSaturateU, kMipsI16x8SubSaturateU)         \
  V(I16x8Mul, kMipsI16x8Mul)                           \
  V(I16x8MaxS, kMipsI16x8MaxS)                         \
  V(I16x8MinS, kMipsI16x8MinS)                         \
  V(I16x8MaxU, kMipsI16x8MaxU)                         \
  V(I16x8MinU, kMipsI16x8MinU)                         \
  V(I16x8Eq, kMipsI16x8Eq)                             \
  V(I16x8Ne, kMipsI16x8Ne)                             \
  V(I16x8GtS, kMipsI16x8GtS)                           \
  V(I16x8GeS, kMipsI16x8GeS)                           \
  V(I16x8GtU, kMipsI16x8GtU)                           \
  V(I16x8GeU, kMipsI16x8GeU)                           \
  V(I16x8SConvertI32x4, kMipsI16x8SConvertI32x4)       \
  V(I16x8UConvertI32x4, kMipsI16x8UConvertI32x4)       \
  V(I16x8RoundingAverageU, kMipsI16x8RoundingAverageU) \
  V(I16x8Abs, kMipsI16x8Abs)                           \
  V(I8x16Add, kMipsI8x16Add)                           \
  V(I8x16AddSaturateS, kMipsI8x16AddSaturateS)         \
  V(I8x16AddSaturateU, kMipsI8x16AddSaturateU)         \
  V(I8x16Sub, kMipsI8x16Sub)                           \
  V(I8x16SubSaturateS, kMipsI8x16SubSaturateS)         \
  V(I8x16SubSaturateU, kMipsI8x16SubSaturateU)         \
  V(I8x16Mul, kMipsI8x16Mul)                           \
  V(I8x16MaxS, kMipsI8x16MaxS)                         \
  V(I8x16MinS, kMipsI8x16MinS)                         \
  V(I8x16MaxU, kMipsI8x16MaxU)                         \
  V(I8x16MinU, kMipsI8x16MinU)                         \
  V(I8x16Eq, kMipsI8x16Eq)                             \
  V(I8x16Ne, kMipsI8x16Ne)                             \
  V(I8x16GtS, kMipsI8x16GtS)                           \
  V(I8x16GeS, kMipsI8x16GeS)                           \
  V(I8x16GtU, kMipsI8x16GtU)                           \
  V(I8x16GeU, kMipsI8x16GeU)                           \
  V(I8x16RoundingAverageU, kMipsI8x16RoundingAverageU) \
  V(I8x16SConvertI16x8, kMipsI8x16SConvertI16x8)       \
  V(I8x16UConvertI16x8, kMipsI8x16UConvertI16x8)       \
  V(I8x16Abs, kMipsI8x16Abs)                           \
  V(S128And, kMipsS128And)                             \
  V(S128Or, kMipsS128Or)                               \
  V(S128Xor, kMipsS128Xor)                             \
  V(S128AndNot, kMipsS128AndNot)
2220 
VisitS128Zero(Node * node)2221 void InstructionSelector::VisitS128Zero(Node* node) {
2222   MipsOperandGenerator g(this);
2223   Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
2224 }
2225 
// Generates one visitor per SIMD type that broadcasts a scalar into every
// lane of a 128-bit vector.
#define SIMD_VISIT_SPLAT(Type)                               \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    VisitRR(this, kMips##Type##Splat, node);                 \
  }
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
// F64x2 is not part of SIMD_TYPE_LIST; instantiate its splat visitor
// explicitly.
SIMD_VISIT_SPLAT(F64x2)
#undef SIMD_VISIT_SPLAT
2233 
// Generates per-type visitors that extract one lane (selected by an
// immediate) into a scalar. |Sign| is "S"/"U" for the sign-/zero-extending
// variants of the narrow integer types and empty where no extension applies.
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)                              \
  void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
    VisitRRI(this, kMips##Type##ExtractLane##Sign, node);                \
  }
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE
2246 
// Generates per-type visitors that replace a single lane (selected by an
// immediate) of a vector with a scalar value.
#define SIMD_VISIT_REPLACE_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    VisitRRIR(this, kMips##Type##ReplaceLane, node);               \
  }
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
// F64x2 is not part of SIMD_TYPE_LIST; instantiate it explicitly.
SIMD_VISIT_REPLACE_LANE(F64x2)
#undef SIMD_VISIT_REPLACE_LANE
2254 
// Expands SIMD_UNOP_LIST into one visitor per unary SIMD operation, each
// emitting a simple register -> register instruction.
#define SIMD_VISIT_UNOP(Name, instruction)            \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, instruction, node);                 \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
2261 
// Expands SIMD_SHIFT_OP_LIST into one visitor per SIMD shift operation;
// the shift amount is the third (RRI) operand.
#define SIMD_VISIT_SHIFT_OP(Name)                     \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRI(this, kMips##Name, node);                \
  }
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
2268 
// Expands SIMD_BINOP_LIST into one visitor per binary SIMD operation, each
// emitting a three-register instruction.
#define SIMD_VISIT_BINOP(Name, instruction)           \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, instruction, node);                \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
2275 
// 128-bit select: chooses bits from the two value inputs under control of
// the mask input (four-register form).
void InstructionSelector::VisitS128Select(Node* node) {
  VisitRRRR(this, kMipsS128Select, node);
}
2279 
namespace {

// Associates a canonical 16-byte shuffle pattern with the MIPS instruction
// that implements it directly. Byte indices 0-15 select from the first
// input, 16-31 from the second input.
struct ShuffleEntry {
  uint8_t shuffle[kSimd128Size];
  ArchOpcode opcode;
};

// Shuffle patterns that have a dedicated single-instruction lowering,
// grouped by lane width.
static const ShuffleEntry arch_shuffles[] = {
    // 32-bit lanes: interleave, pack even/odd.
    {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
     kMipsS32x4InterleaveRight},
    {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
     kMipsS32x4InterleaveLeft},
    {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
     kMipsS32x4PackEven},
    {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
     kMipsS32x4PackOdd},
    {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
     kMipsS32x4InterleaveEven},
    {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
     kMipsS32x4InterleaveOdd},

    // 16-bit lanes: interleave, pack, and within-group reversals.
    {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
     kMipsS16x8InterleaveRight},
    {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
     kMipsS16x8InterleaveLeft},
    {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
     kMipsS16x8PackEven},
    {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
     kMipsS16x8PackOdd},
    {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
     kMipsS16x8InterleaveEven},
    {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
     kMipsS16x8InterleaveOdd},
    {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kMipsS16x4Reverse},
    {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kMipsS16x2Reverse},

    // 8-bit lanes: interleave, pack, and within-group reversals.
    {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
     kMipsS8x16InterleaveRight},
    {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
     kMipsS8x16InterleaveLeft},
    {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
     kMipsS8x16PackEven},
    {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
     kMipsS8x16PackOdd},
    {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
     kMipsS8x16InterleaveEven},
    {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
     kMipsS8x16InterleaveOdd},
    {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kMipsS8x8Reverse},
    {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kMipsS8x4Reverse},
    {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kMipsS8x2Reverse}};
TryMatchArchShuffle(const uint8_t * shuffle,const ShuffleEntry * table,size_t num_entries,bool is_swizzle,ArchOpcode * opcode)2332 bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
2333                          size_t num_entries, bool is_swizzle,
2334                          ArchOpcode* opcode) {
2335   uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
2336   for (size_t i = 0; i < num_entries; ++i) {
2337     const ShuffleEntry& entry = table[i];
2338     int j = 0;
2339     for (; j < kSimd128Size; ++j) {
2340       if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
2341         break;
2342       }
2343     }
2344     if (j == kSimd128Size) {
2345       *opcode = entry.opcode;
2346       return true;
2347     }
2348   }
2349   return false;
2350 }
2351 
2352 }  // namespace
2353 
// Lowers a generic 8x16 shuffle, trying progressively more general
// strategies:
//   1. a dedicated single instruction from |arch_shuffles|,
//   2. a byte-wise concatenation of the two inputs at an offset,
//   3. a shuffle of whole 32-bit words encoded in one immediate,
//   4. the fully general byte shuffle with the 16-byte pattern packed into
//      four immediates.
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  CanonicalizeShuffle(node, shuffle, &is_swizzle);
  uint8_t shuffle32x4[4];
  ArchOpcode opcode;
  if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
                          is_swizzle, &opcode)) {
    VisitRRR(this, opcode, node);
    return;
  }
  Node* input0 = node->InputAt(0);
  Node* input1 = node->InputAt(1);
  uint8_t offset;
  MipsOperandGenerator g(this);
  if (TryMatchConcat(shuffle, &offset)) {
    // Note the swapped operand order (input1 first) expected by
    // kMipsS8x16Concat; the result is constrained to input1's register.
    Emit(kMipsS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
         g.UseRegister(input0), g.UseImmediate(offset));
    return;
  }
  if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
    Emit(kMipsS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle32x4)));
    return;
  }
  // General case: four 4-byte groups of the pattern, one immediate each.
  Emit(kMipsS8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
       g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle)),
       g.UseImmediate(Pack4Lanes(shuffle + 4)),
       g.UseImmediate(Pack4Lanes(shuffle + 8)),
       g.UseImmediate(Pack4Lanes(shuffle + 12)));
}
2385 
// Lowers a swizzle: a byte shuffle of input 0 with runtime indices taken
// from input 1. Needs one temporary SIMD register.
void InstructionSelector::VisitS8x16Swizzle(Node* node) {
  MipsOperandGenerator g(this);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  // We don't want input 0 or input 1 to be the same as output, since we will
  // modify output before doing the calculation.
  Emit(kMipsS8x16Swizzle, g.DefineAsRegister(node),
       g.UseUniqueRegister(node->InputAt(0)),
       g.UseUniqueRegister(node->InputAt(1)),
       arraysize(temps), temps);
}
2396 
VisitSignExtendWord8ToInt32(Node * node)2397 void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
2398   MipsOperandGenerator g(this);
2399   Emit(kMipsSeb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
2400 }
2401 
VisitSignExtendWord16ToInt32(Node * node)2402 void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
2403   MipsOperandGenerator g(this);
2404   Emit(kMipsSeh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
2405 }
2406 
2407 // static
2408 MachineOperatorBuilder::Flags
SupportedMachineOperatorFlags()2409 InstructionSelector::SupportedMachineOperatorFlags() {
2410   MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
2411   if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2412       IsFp64Mode()) {
2413     flags |= MachineOperatorBuilder::kFloat64RoundDown |
2414              MachineOperatorBuilder::kFloat64RoundUp |
2415              MachineOperatorBuilder::kFloat64RoundTruncate |
2416              MachineOperatorBuilder::kFloat64RoundTiesEven;
2417   }
2418 
2419   return flags | MachineOperatorBuilder::kWord32Ctz |
2420          MachineOperatorBuilder::kWord32Popcnt |
2421          MachineOperatorBuilder::kInt32DivIsSafe |
2422          MachineOperatorBuilder::kUint32DivIsSafe |
2423          MachineOperatorBuilder::kWord32ShiftIsSafe |
2424          MachineOperatorBuilder::kFloat32RoundDown |
2425          MachineOperatorBuilder::kFloat32RoundUp |
2426          MachineOperatorBuilder::kFloat32RoundTruncate |
2427          MachineOperatorBuilder::kFloat32RoundTiesEven;
2428 }
2429 
2430 // static
2431 MachineOperatorBuilder::AlignmentRequirements
AlignmentRequirements()2432 InstructionSelector::AlignmentRequirements() {
2433   if (IsMipsArchVariant(kMips32r6)) {
2434     return MachineOperatorBuilder::AlignmentRequirements::
2435         FullUnalignedAccessSupport();
2436   } else {
2437     DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2438            IsMipsArchVariant(kMips32r2));
2439     return MachineOperatorBuilder::AlignmentRequirements::
2440         NoUnalignedAccessSupport();
2441   }
2442 }
2443 
2444 #undef SIMD_BINOP_LIST
2445 #undef SIMD_SHIFT_OP_LIST
2446 #undef SIMD_UNOP_LIST
2447 #undef SIMD_TYPE_LIST
2448 #undef TRACE_UNIMPL
2449 #undef TRACE
2450 
2451 }  // namespace compiler
2452 }  // namespace internal
2453 }  // namespace v8
2454