// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_

#include <map>

#include "src/codegen/cpu-features.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction-scheduler.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/zone/zone-containers.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/simd-shuffle.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {

class TickCounter;

namespace compiler {

// Forward declarations.
class BasicBlock;
struct CallBuffer;  // TODO(bmeurer): Remove this.
class Linkage;
class OperandGenerator;
class SwitchInfo;
class StateObjectDeduplicator;

// The flags continuation is a way to combine a branch or a materialization
// of a boolean value with an instruction that sets the flags register.
// The whole instruction is treated as a unit by the register allocator, and
// thus no spills or moves can be introduced between the flags-setting
// instruction and the branch or set it should be combined with.
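//
// A typical use when selecting code for
//   c = Word32Compare(a, b)
//   Branch(c, true_block, false_block)
// is to build a branch continuation and emit the compare with it (a sketch
// only; operand setup is elided and kArchCmp stands in for whatever
// architecture-specific compare opcode the backend uses):
//   FlagsContinuation cont =
//       FlagsContinuation::ForBranch(kSignedLessThan, true_block, false_block);
//   selector->EmitWithContinuation(kArchCmp, left, right, &cont);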
class FlagsContinuation final {
 public:
  FlagsContinuation() : mode_(kFlags_none) {}

  // Creates a new flags continuation from the given condition and true/false
  // blocks.
  static FlagsContinuation ForBranch(FlagsCondition condition,
                                     BasicBlock* true_block,
                                     BasicBlock* false_block) {
    return FlagsContinuation(kFlags_branch, condition, true_block, false_block);
  }

  // Creates a new flags continuation for an eager deoptimization exit.
  static FlagsContinuation ForDeoptimize(
      FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
      NodeId node_id, FeedbackSource const& feedback, Node* frame_state,
      InstructionOperand* extra_args = nullptr, int extra_args_count = 0) {
    return FlagsContinuation(kFlags_deoptimize, condition, kind, reason,
                             node_id, feedback, frame_state, extra_args,
                             extra_args_count);
  }

  // Creates a new flags continuation for a boolean value.
  static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
    return FlagsContinuation(condition, result);
  }

  // Creates a new flags continuation for a wasm trap.
  static FlagsContinuation ForTrap(FlagsCondition condition, TrapId trap_id,
                                   Node* result) {
    return FlagsContinuation(condition, trap_id, result);
  }

  static FlagsContinuation ForSelect(FlagsCondition condition, Node* result,
                                     Node* true_value, Node* false_value) {
    return FlagsContinuation(condition, result, true_value, false_value);
  }

  bool IsNone() const { return mode_ == kFlags_none; }
  bool IsBranch() const { return mode_ == kFlags_branch; }
  bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
  bool IsSet() const { return mode_ == kFlags_set; }
  bool IsTrap() const { return mode_ == kFlags_trap; }
  bool IsSelect() const { return mode_ == kFlags_select; }
  FlagsCondition condition() const {
    DCHECK(!IsNone());
    return condition_;
  }
  DeoptimizeKind kind() const {
    DCHECK(IsDeoptimize());
    return kind_;
  }
  DeoptimizeReason reason() const {
    DCHECK(IsDeoptimize());
    return reason_;
  }
  NodeId node_id() const {
    DCHECK(IsDeoptimize());
    return node_id_;
  }
  FeedbackSource const& feedback() const {
    DCHECK(IsDeoptimize());
    return feedback_;
  }
  Node* frame_state() const {
    DCHECK(IsDeoptimize());
    return frame_state_or_result_;
  }
  bool has_extra_args() const {
    DCHECK(IsDeoptimize());
    return extra_args_ != nullptr;
  }
  const InstructionOperand* extra_args() const {
    DCHECK(has_extra_args());
    return extra_args_;
  }
  int extra_args_count() const {
    DCHECK(has_extra_args());
    return extra_args_count_;
  }
  Node* result() const {
    DCHECK(IsSet() || IsSelect());
    return frame_state_or_result_;
  }
  TrapId trap_id() const {
    DCHECK(IsTrap());
    return trap_id_;
  }
  BasicBlock* true_block() const {
    DCHECK(IsBranch());
    return true_block_;
  }
  BasicBlock* false_block() const {
    DCHECK(IsBranch());
    return false_block_;
  }
  Node* true_value() const {
    DCHECK(IsSelect());
    return true_value_;
  }
  Node* false_value() const {
    DCHECK(IsSelect());
    return false_value_;
  }

  void Negate() {
    DCHECK(!IsNone());
    condition_ = NegateFlagsCondition(condition_);
  }

  void Commute() {
    DCHECK(!IsNone());
    condition_ = CommuteFlagsCondition(condition_);
  }

  void Overwrite(FlagsCondition condition) { condition_ = condition; }

  void OverwriteAndNegateIfEqual(FlagsCondition condition) {
    DCHECK(condition_ == kEqual || condition_ == kNotEqual);
    bool negate = condition_ == kEqual;
    condition_ = condition;
    if (negate) Negate();
  }

  void OverwriteUnsignedIfSigned() {
    switch (condition_) {
      case kSignedLessThan:
        condition_ = kUnsignedLessThan;
        break;
      case kSignedLessThanOrEqual:
        condition_ = kUnsignedLessThanOrEqual;
        break;
      case kSignedGreaterThan:
        condition_ = kUnsignedGreaterThan;
        break;
      case kSignedGreaterThanOrEqual:
        condition_ = kUnsignedGreaterThanOrEqual;
        break;
      default:
        break;
    }
  }

  // Encodes this flags continuation into the given opcode.
  InstructionCode Encode(InstructionCode opcode) {
    opcode |= FlagsModeField::encode(mode_);
    if (mode_ != kFlags_none) {
      opcode |= FlagsConditionField::encode(condition_);
    }
    return opcode;
  }
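  // For example (illustrative only; kArchCmp stands in for a real
  // architecture-specific opcode), encoding a branch continuation
  //   InstructionCode opcode = cont.Encode(kArchCmp);
  // yields an opcode whose FlagsModeField is kFlags_branch and whose
  // FlagsConditionField is cont.condition().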

 private:
  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
                    BasicBlock* true_block, BasicBlock* false_block)
      : mode_(mode),
        condition_(condition),
        true_block_(true_block),
        false_block_(false_block) {
    DCHECK(mode == kFlags_branch);
    DCHECK_NOT_NULL(true_block);
    DCHECK_NOT_NULL(false_block);
  }

  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
                    DeoptimizeKind kind, DeoptimizeReason reason,
                    NodeId node_id, FeedbackSource const& feedback,
                    Node* frame_state, InstructionOperand* extra_args,
                    int extra_args_count)
      : mode_(mode),
        condition_(condition),
        kind_(kind),
        reason_(reason),
        node_id_(node_id),
        feedback_(feedback),
        frame_state_or_result_(frame_state),
        extra_args_(extra_args),
        extra_args_count_(extra_args_count) {
    DCHECK(mode == kFlags_deoptimize);
    DCHECK_NOT_NULL(frame_state);
  }

  FlagsContinuation(FlagsCondition condition, Node* result)
      : mode_(kFlags_set),
        condition_(condition),
        frame_state_or_result_(result) {
    DCHECK_NOT_NULL(result);
  }

  FlagsContinuation(FlagsCondition condition, TrapId trap_id, Node* result)
      : mode_(kFlags_trap),
        condition_(condition),
        frame_state_or_result_(result),
        trap_id_(trap_id) {
    DCHECK_NOT_NULL(result);
  }

  FlagsContinuation(FlagsCondition condition, Node* result, Node* true_value,
                    Node* false_value)
      : mode_(kFlags_select),
        condition_(condition),
        frame_state_or_result_(result),
        true_value_(true_value),
        false_value_(false_value) {
    DCHECK_NOT_NULL(result);
    DCHECK_NOT_NULL(true_value);
    DCHECK_NOT_NULL(false_value);
  }

  FlagsMode const mode_;
  FlagsCondition condition_;
  DeoptimizeKind kind_;             // Only valid if mode_ == kFlags_deoptimize*
  DeoptimizeReason reason_;         // Only valid if mode_ == kFlags_deoptimize*
  NodeId node_id_;                  // Only valid if mode_ == kFlags_deoptimize*
  FeedbackSource feedback_;         // Only valid if mode_ == kFlags_deoptimize*
  Node* frame_state_or_result_;     // Only valid if mode_ == kFlags_deoptimize*
                                    // or mode_ == kFlags_set.
  InstructionOperand* extra_args_;  // Only valid if mode_ == kFlags_deoptimize*
  int extra_args_count_;            // Only valid if mode_ == kFlags_deoptimize*
  BasicBlock* true_block_;          // Only valid if mode_ == kFlags_branch*.
  BasicBlock* false_block_;         // Only valid if mode_ == kFlags_branch*.
  TrapId trap_id_;                  // Only valid if mode_ == kFlags_trap.
  Node* true_value_;                // Only valid if mode_ == kFlags_select.
  Node* false_value_;               // Only valid if mode_ == kFlags_select.
};

// This struct connects the nodes of parameters that are going to be pushed on
// the call stack with their parameter index in the call descriptor of the
// callee.
struct PushParameter {
  PushParameter(Node* n = nullptr,
                LinkageLocation l = LinkageLocation::ForAnyRegister())
      : node(n), location(l) {}

  Node* node;
  LinkageLocation location;
};

enum class FrameStateInputKind { kAny, kStackSlot };

// Instruction selection generates an InstructionSequence for a given Schedule.
class V8_EXPORT_PRIVATE InstructionSelector final {
 public:
  // Forward declarations.
  class Features;

  enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
  enum EnableScheduling { kDisableScheduling, kEnableScheduling };
  enum EnableRootsRelativeAddressing {
    kDisableRootsRelativeAddressing,
    kEnableRootsRelativeAddressing
  };
  enum EnableSwitchJumpTable {
    kDisableSwitchJumpTable,
    kEnableSwitchJumpTable
  };
  enum EnableTraceTurboJson { kDisableTraceTurboJson, kEnableTraceTurboJson };

  InstructionSelector(
      Zone* zone, size_t node_count, Linkage* linkage,
      InstructionSequence* sequence, Schedule* schedule,
      SourcePositionTable* source_positions, Frame* frame,
      EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
      JSHeapBroker* broker, size_t* max_unoptimized_frame_height,
      size_t* max_pushed_argument_count,
      SourcePositionMode source_position_mode = kCallSourcePositions,
      Features features = SupportedFeatures(),
      EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
                                               ? kEnableScheduling
                                               : kDisableScheduling,
      EnableRootsRelativeAddressing enable_roots_relative_addressing =
          kDisableRootsRelativeAddressing,
      EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);

  // Visit code for the entire graph with the included schedule.
  bool SelectInstructions();

  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);
  void AddInstruction(Instruction* instr);
  void AddTerminator(Instruction* instr);

  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, InstructionOperand f,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, size_t output_count,
                    InstructionOperand* outputs, size_t input_count,
                    InstructionOperand* inputs, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(Instruction* instr);

  // Emit [0-3] operand instructions with no output, using the labels of the
  // true and false blocks of the continuation.
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    InstructionOperand c,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode, size_t output_count,
                                    InstructionOperand* outputs,
                                    size_t input_count,
                                    InstructionOperand* inputs,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(
      InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
      size_t input_count, InstructionOperand* inputs, size_t temp_count,
      InstructionOperand* temps, FlagsContinuation* cont);

  void EmitIdentity(Node* node);

  // ===========================================================================
  // ============== Architecture-independent CPU feature methods. ==============
  // ===========================================================================

  class Features final {
   public:
    Features() : bits_(0) {}
    explicit Features(unsigned bits) : bits_(bits) {}
    explicit Features(CpuFeature f) : bits_(1u << f) {}
    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}

    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }

   private:
    unsigned bits_;
  };

  bool IsSupported(CpuFeature feature) const {
    return features_.Contains(feature);
  }

  // Returns the features supported on the target platform.
  static Features SupportedFeatures() {
    return Features(CpuFeatures::SupportedFeatures());
  }
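  // Example (sketch only): architecture backends typically guard optional
  // instruction patterns on a feature check, e.g.
  //   if (selector->IsSupported(AVX)) {
  //     // emit the AVX form of the instruction
  //   }
  // where AVX stands for whatever CpuFeature enumerator the target defines.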

  // TODO(sigurds) This should take a CpuFeatures argument.
  static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();

  static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();

  // ===========================================================================
  // ============ Architecture-independent graph covering methods. =============
  // ===========================================================================

  // Used in pattern matching during code generation.
  // Check if {node} can be covered while generating code for the current
  // instruction. A node can be covered if the {user} of the node has the only
  // edge to it and the two are in the same basic block.
  // Before fusing two instructions a and b, it is useful to check that
  // CanCover(a, b) holds. If this is not the case, code for b must still be
  // generated for other users, and fusing is unlikely to improve performance.
  bool CanCover(Node* user, Node* node) const;
  // CanCover is not transitive. A counterexample: nodes A, B, C such that
  // CanCover(A, B) and CanCover(B, C) hold and B is pure: the effect levels
  // of A and B might differ. CanCoverTransitively does the additional checks.
  bool CanCoverTransitively(Node* user, Node* node, Node* node_input) const;
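  // Example (sketch only; names are illustrative): a load feeding an add can
  // be folded into a memory operand only when the load is covered, e.g.
  //   if (selector->CanCover(add_node, load_node)) {
  //     // safe to select one combined add-from-memory instruction
  //   }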

  // Used in pattern matching during code generation.
  // This function checks that {node} and {user} are in the same basic block,
  // and that {user} is the only user of {node} in this basic block.  This
  // check guarantees that there are no users of {node} scheduled between
  // {node} and {user}, and thus we can select a single instruction for both
  // nodes, if such an instruction exists. This check can be used for example
  // when selecting instructions for:
  //   n = Int32Add(a, b)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // Here we can generate a flag-setting add instruction, even if the add has
  // uses in other basic blocks, since the flag-setting add instruction will
  // still generate the result of the addition and not just set the flags.
  // However, if we had uses of the add in the same basic block, we could have:
  //   n = Int32Add(a, b)
  //   o = OtherOp(n, ...)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // where we cannot select the add and the compare together.  If we were to
  // select a flag-setting add instruction for Word32Compare and Int32Add while
  // visiting Word32Compare, we would then have to select an instruction for
  // OtherOp *afterwards*, which means we would attempt to use the result of
  // the add before we have defined it.
  bool IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const;

  // Checks if {node} was already defined, and therefore code was already
  // generated for it.
  bool IsDefined(Node* node) const;

  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

  // Checks if {node} is currently live.
  bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }

  // Gets the effect level of {node}.
  int GetEffectLevel(Node* node) const;

  // Gets the effect level of {node}, appropriately adjusted based on
  // continuation flags if the node is a branch.
  int GetEffectLevel(Node* node, FlagsContinuation* cont) const;

  int GetVirtualRegister(const Node* node);
  const std::map<NodeId, int> GetVirtualRegistersForTesting() const;

  // Check if we can generate loads and stores of ExternalConstants relative
  // to the roots register.
  bool CanAddressRelativeToRootsRegister(
      const ExternalReference& reference) const;
  // Check if we can use the roots register to access GC roots.
  bool CanUseRootsRegister() const;

  Isolate* isolate() const { return sequence()->isolate(); }

  const ZoneVector<std::pair<int, int>>& instr_origins() const {
    return instr_origins_;
  }

 private:
  friend class OperandGenerator;

  bool UseInstructionScheduling() const {
    return (enable_scheduling_ == kEnableScheduling) &&
           InstructionScheduler::SchedulerSupported();
  }

  void AppendDeoptimizeArguments(InstructionOperandVector* args,
                                 DeoptimizeKind kind, DeoptimizeReason reason,
                                 NodeId node_id, FeedbackSource const& feedback,
                                 FrameState frame_state);

  void EmitTableSwitch(const SwitchInfo& sw,
                       InstructionOperand const& index_operand);
  void EmitBinarySearchSwitch(const SwitchInfo& sw,
                              InstructionOperand const& value_operand);

  void TryRename(InstructionOperand* op);
  int GetRename(int virtual_register);
  void SetRename(const Node* node, const Node* rename);
  void UpdateRenames(Instruction* instruction);
  void UpdateRenamesInPhi(PhiInstruction* phi);

  // Inform the instruction selection that {node} was just defined.
  void MarkAsDefined(Node* node);

  // Inform the instruction selection that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

  // Sets the effect level of {node}.
  void SetEffectLevel(Node* node, int effect_level);

  // Inform the register allocation of the representation of the value produced
  // by {node}.
  void MarkAsRepresentation(MachineRepresentation rep, Node* node);
  void MarkAsWord32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord32, node);
  }
  void MarkAsWord64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord64, node);
  }
  void MarkAsFloat32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat32, node);
  }
  void MarkAsFloat64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat64, node);
  }
  void MarkAsSimd128(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kSimd128, node);
  }
  void MarkAsTagged(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kTagged, node);
  }
  void MarkAsCompressed(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kCompressed, node);
  }

  // Inform the register allocation of the representation of the unallocated
  // operand {op}.
  void MarkAsRepresentation(MachineRepresentation rep,
                            const InstructionOperand& op);

  enum CallBufferFlag {
    kCallCodeImmediate = 1u << 0,
    kCallAddressImmediate = 1u << 1,
    kCallTail = 1u << 2,
    kCallFixedTargetRegister = 1u << 3
  };
  using CallBufferFlags = base::Flags<CallBufferFlag>;

  // Initialize the call buffer with the InstructionOperands, nodes, etc.,
  // corresponding to the inputs and outputs of the call.
  // {call_code_immediate} to generate immediate operands to calls of code.
  // {call_address_immediate} to generate immediate operands to address calls.
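  // Example (sketch only; the flag choice depends on the call kind):
  //   CallBufferFlags flags(kCallCodeImmediate | kCallAddressImmediate);
  //   InitializeCallBuffer(call, &buffer, flags);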
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                            CallBufferFlags flags, int stack_slot_delta = 0);
  bool IsTailCallAddressImmediate();

  void UpdateMaxPushedArgumentCount(size_t count);

  FrameStateDescriptor* GetFrameStateDescriptor(FrameState node);
  size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
                                         FrameState state, OperandGenerator* g,
                                         StateObjectDeduplicator* deduplicator,
                                         InstructionOperandVector* inputs,
                                         FrameStateInputKind kind, Zone* zone);
  size_t AddInputsToFrameStateDescriptor(StateValueList* values,
                                         InstructionOperandVector* inputs,
                                         OperandGenerator* g,
                                         StateObjectDeduplicator* deduplicator,
                                         Node* node, FrameStateInputKind kind,
                                         Zone* zone);
  size_t AddOperandToStateValueDescriptor(StateValueList* values,
                                          InstructionOperandVector* inputs,
                                          OperandGenerator* g,
                                          StateObjectDeduplicator* deduplicator,
                                          Node* input, MachineType type,
                                          FrameStateInputKind kind, Zone* zone);

  // ===========================================================================
  // ============= Architecture-specific graph covering methods. ===============
  // ===========================================================================

  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any.
  void VisitNode(Node* node);

  // Visit the node and generate code for IEEE 754 functions.
  void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
  void VisitFloat64Ieee754Unop(Node*, InstructionCode code);

#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
  MACHINE_OP_LIST(DECLARE_GENERATOR)
  MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR

  // Visit the load node with a value and opcode to replace with.
  void VisitLoad(Node* node, Node* value, InstructionCode opcode);
  void VisitLoadTransform(Node* node, Node* value, InstructionCode opcode);
  void VisitFinishRegion(Node* node);
  void VisitParameter(Node* node);
  void VisitIfException(Node* node);
  void VisitOsrValue(Node* node);
  void VisitPhi(Node* node);
  void VisitProjection(Node* node);
  void VisitConstant(Node* node);
  void VisitCall(Node* call, BasicBlock* handler = nullptr);
  void VisitDeoptimizeIf(Node* node);
  void VisitDeoptimizeUnless(Node* node);
  void VisitDynamicCheckMapsWithDeoptUnless(Node* node);
  void VisitTrapIf(Node* node, TrapId trap_id);
  void VisitTrapUnless(Node* node, TrapId trap_id);
  void VisitTailCall(Node* call);
  void VisitGoto(BasicBlock* target);
  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
  void VisitSwitch(Node* node, const SwitchInfo& sw);
  void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
                       NodeId node_id, FeedbackSource const& feedback,
                       FrameState frame_state);
  void VisitSelect(Node* node);
  void VisitReturn(Node* ret);
  void VisitThrow(Node* node);
  void VisitRetain(Node* node);
  void VisitUnreachable(Node* node);
  void VisitStaticAssert(Node* node);
  void VisitDeadValue(Node* node);

  void VisitStackPointerGreaterThan(Node* node, FlagsContinuation* cont);

  void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);

  void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
                            const CallDescriptor* call_descriptor, Node* node);
  void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
                          const CallDescriptor* call_descriptor, Node* node);

  bool CanProduceSignalingNaN(Node* node);

  void AddOutputToSelectContinuation(OperandGenerator* g, int first_input_index,
                                     Node* node);

  // ===========================================================================
  // ============= Vector instruction (SIMD) helper fns. =======================
  // ===========================================================================

#if V8_ENABLE_WEBASSEMBLY
  // Canonicalize shuffles to make pattern matching simpler. Returns the shuffle
  // indices, and a boolean indicating if the shuffle is a swizzle (one input).
  void CanonicalizeShuffle(Node* node, uint8_t* shuffle, bool* is_swizzle);
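  // Example (sketch only): a caller obtains the canonical lane indices like
  //   uint8_t shuffle[kSimd128Size];
  //   bool is_swizzle;
  //   CanonicalizeShuffle(node, shuffle, &is_swizzle);
  // after which shuffle[] holds the lane indices and is_swizzle reports
  // whether the shuffle reads from a single input.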

  // Swaps the first two input operands of the node, to help match shuffles
  // to specific architectural instructions.
  void SwapShuffleInputs(Node* node);
#endif  // V8_ENABLE_WEBASSEMBLY

  // ===========================================================================

  Schedule* schedule() const { return schedule_; }
  Linkage* linkage() const { return linkage_; }
  InstructionSequence* sequence() const { return sequence_; }
  Zone* instruction_zone() const { return sequence()->zone(); }
  Zone* zone() const { return zone_; }

  void set_instruction_selection_failed() {
    instruction_selection_failed_ = true;
  }
  bool instruction_selection_failed() { return instruction_selection_failed_; }

  void MarkPairProjectionsAsWord32(Node* node);
  bool IsSourcePositionUsed(Node* node);
  void VisitWord32AtomicBinaryOperation(Node* node, ArchOpcode int8_op,
                                        ArchOpcode uint8_op,
                                        ArchOpcode int16_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode word32_op);
  void VisitWord64AtomicBinaryOperation(Node* node, ArchOpcode uint8_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode uint32_op,
                                        ArchOpcode uint64_op);
  void VisitWord64AtomicNarrowBinop(Node* node, ArchOpcode uint8_op,
                                    ArchOpcode uint16_op, ArchOpcode uint32_op);

#if V8_TARGET_ARCH_64_BIT
  bool ZeroExtendsWord32ToWord64(Node* node, int recursion_depth = 0);
  bool ZeroExtendsWord32ToWord64NoPhis(Node* node);

  enum Upper32BitsState : uint8_t {
    kNotYetChecked,
    kUpperBitsGuaranteedZero,
    kNoGuarantee,
  };
#endif  // V8_TARGET_ARCH_64_BIT

  struct FrameStateInput {
    FrameStateInput(Node* node_, FrameStateInputKind kind_)
        : node(node_), kind(kind_) {}

    Node* node;
    FrameStateInputKind kind;

    struct Hash {
      size_t operator()(FrameStateInput const& source) const {
        return base::hash_combine(source.node,
                                  static_cast<size_t>(source.kind));
      }
    };

    struct Equal {
      bool operator()(FrameStateInput const& lhs,
                      FrameStateInput const& rhs) const {
        return lhs.node == rhs.node && lhs.kind == rhs.kind;
      }
    };
  };

  struct CachedStateValues;
  class CachedStateValuesBuilder;

  // ===========================================================================

  Zone* const zone_;
  Linkage* const linkage_;
  InstructionSequence* const sequence_;
  SourcePositionTable* const source_positions_;
  SourcePositionMode const source_position_mode_;
  Features features_;
  Schedule* const schedule_;
  BasicBlock* current_block_;
  ZoneVector<Instruction*> instructions_;
  InstructionOperandVector continuation_inputs_;
  InstructionOperandVector continuation_outputs_;
  InstructionOperandVector continuation_temps_;
  BoolVector defined_;
  BoolVector used_;
  IntVector effect_level_;
  IntVector virtual_registers_;
  IntVector virtual_register_rename_;
  InstructionScheduler* scheduler_;
  EnableScheduling enable_scheduling_;
  EnableRootsRelativeAddressing enable_roots_relative_addressing_;
  EnableSwitchJumpTable enable_switch_jump_table_;
  ZoneUnorderedMap<FrameStateInput, CachedStateValues*, FrameStateInput::Hash,
                   FrameStateInput::Equal>
      state_values_cache_;

  Frame* frame_;
  bool instruction_selection_failed_;
  ZoneVector<std::pair<int, int>> instr_origins_;
  EnableTraceTurboJson trace_turbo_;
  TickCounter* const tick_counter_;
  // The broker is only used for unparking the LocalHeap for diagnostic printing
  // for failed StaticAsserts.
  JSHeapBroker* const broker_;

  // Store the maximal unoptimized frame height and the maximal number of
  // pushed arguments (for calls). Later used to apply an offset to stack
  // checks.
  size_t* max_unoptimized_frame_height_;
  size_t* max_pushed_argument_count_;

#if V8_TARGET_ARCH_64_BIT
  // Holds lazily-computed results for whether phi nodes guarantee their upper
  // 32 bits to be zero. Indexed by node ID; nobody reads or writes the values
  // for non-phi nodes.
  ZoneVector<Upper32BitsState> phi_states_;
#endif
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_