1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/backend/register-allocator.h"
6 
7 #include <iomanip>
8 
9 #include "src/base/iterator.h"
10 #include "src/base/small-vector.h"
11 #include "src/codegen/assembler-inl.h"
12 #include "src/codegen/tick-counter.h"
13 #include "src/compiler/linkage.h"
14 #include "src/strings/string-stream.h"
15 #include "src/utils/vector.h"
16 
17 namespace v8 {
18 namespace internal {
19 namespace compiler {
20 
21 #define TRACE_COND(cond, ...)      \
22   do {                             \
23     if (cond) PrintF(__VA_ARGS__); \
24   } while (false)
25 
26 #define TRACE(...) TRACE_COND(data()->is_trace_alloc(), __VA_ARGS__)
27 
28 namespace {
29 
30 static constexpr int kFloat32Bit =
31     RepresentationBit(MachineRepresentation::kFloat32);
32 static constexpr int kSimd128Bit =
33     RepresentationBit(MachineRepresentation::kSimd128);
34 
35 int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
36   return kind == FP_REGISTERS ? cfg->num_double_registers()
37                               : cfg->num_general_registers();
38 }
39 
40 int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
41                                 RegisterKind kind) {
42   return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
43                               : cfg->num_allocatable_general_registers();
44 }
45 
46 const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
47                                        RegisterKind kind) {
48   return kind == FP_REGISTERS ? cfg->allocatable_double_codes()
49                               : cfg->allocatable_general_codes();
50 }
51 
52 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
53                                           const InstructionBlock* block) {
54   RpoNumber index = block->loop_header();
55   if (!index.IsValid()) return nullptr;
56   return sequence->InstructionBlockAt(index);
57 }
58 
59 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
60                                             LifetimePosition pos) {
61   return code->GetInstructionBlock(pos.ToInstructionIndex());
62 }
63 
64 Instruction* GetLastInstruction(InstructionSequence* code,
65                                 const InstructionBlock* block) {
66   return code->InstructionAt(block->last_instruction_index());
67 }
68 
69 // TODO(dcarney): fix frame to allow frame accesses to half size location.
70 int GetByteWidth(MachineRepresentation rep) {
71   switch (rep) {
72     case MachineRepresentation::kBit:
73     case MachineRepresentation::kWord8:
74     case MachineRepresentation::kWord16:
75     case MachineRepresentation::kWord32:
76     case MachineRepresentation::kFloat32:
77       return kSystemPointerSize;
78     case MachineRepresentation::kTaggedSigned:
79     case MachineRepresentation::kTaggedPointer:
80     case MachineRepresentation::kTagged:
81     case MachineRepresentation::kCompressedPointer:
82     case MachineRepresentation::kCompressed:
83       // TODO(ishell): kTaggedSize once half size locations are supported.
84       return kSystemPointerSize;
85     case MachineRepresentation::kWord64:
86     case MachineRepresentation::kFloat64:
87       return kDoubleSize;
88     case MachineRepresentation::kSimd128:
89       return kSimd128Size;
90     case MachineRepresentation::kNone:
91       break;
92   }
93   UNREACHABLE();
94 }
95 
96 }  // namespace
97 
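// A LiveRangeBound caches the fixed [start, end) span of a single live range
// child; skip_ (set for spilled children) makes FindConnectableSubranges
// ignore that child.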
98 class LiveRangeBound {
99  public:
100   explicit LiveRangeBound(LiveRange* range, bool skip)
101       : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
102     DCHECK(!range->IsEmpty());
103   }
104 
105   bool CanCover(LifetimePosition position) {
106     return start_ <= position && position < end_;
107   }
108 
109   LiveRange* const range_;
110   const LifetimePosition start_;
111   const LifetimePosition end_;
112   const bool skip_;
113 
114  private:
115   DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
116 };
117 
118 struct FindResult {
119   LiveRange* cur_cover_;
120   LiveRange* pred_cover_;
121 };
122 
123 class LiveRangeBoundArray {
124  public:
125   LiveRangeBoundArray() : length_(0), start_(nullptr) {}
126 
127   bool ShouldInitialize() { return start_ == nullptr; }
128 
129   void Initialize(Zone* zone, TopLevelLiveRange* range) {
130     size_t max_child_count = range->GetMaxChildCount();
131 
132     start_ = zone->NewArray<LiveRangeBound>(max_child_count);
133     length_ = 0;
134     LiveRangeBound* curr = start_;
135     // Normally, spilled ranges do not need connecting moves, because the spill
136     // location has been assigned at definition. For ranges spilled in deferred
137     // blocks, that is not the case, so we need to connect the spilled children.
138     for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr, ++length_) {
139       new (curr) LiveRangeBound(i, i->spilled());
140     }
141   }
142 
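  // Binary search for the child whose [start_, end_) interval covers
  // `position`. The caller must guarantee that some child covers it;
  // otherwise the loop would not terminate.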
143   LiveRangeBound* Find(const LifetimePosition position) const {
144     size_t left_index = 0;
145     size_t right_index = length_;
146     while (true) {
147       size_t current_index = left_index + (right_index - left_index) / 2;
148       DCHECK(right_index > current_index);
149       LiveRangeBound* bound = &start_[current_index];
150       if (bound->start_ <= position) {
151         if (position < bound->end_) return bound;
152         DCHECK(left_index < current_index);
153         left_index = current_index;
154       } else {
155         right_index = current_index;
156       }
157     }
158   }
159 
160   LiveRangeBound* FindPred(const InstructionBlock* pred) {
161     LifetimePosition pred_end =
162         LifetimePosition::InstructionFromInstructionIndex(
163             pred->last_instruction_index());
164     return Find(pred_end);
165   }
166 
167   LiveRangeBound* FindSucc(const InstructionBlock* succ) {
168     LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
169         succ->first_instruction_index());
170     return Find(succ_start);
171   }
172 
173   bool FindConnectableSubranges(const InstructionBlock* block,
174                                 const InstructionBlock* pred,
175                                 FindResult* result) const {
176     LifetimePosition pred_end =
177         LifetimePosition::InstructionFromInstructionIndex(
178             pred->last_instruction_index());
179     LiveRangeBound* bound = Find(pred_end);
180     result->pred_cover_ = bound->range_;
181     LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
182         block->first_instruction_index());
183 
184     if (bound->CanCover(cur_start)) {
185       // Both blocks are covered by the same range, so there is nothing to
186       // connect.
187       return false;
188     }
189     bound = Find(cur_start);
190     if (bound->skip_) {
191       return false;
192     }
193     result->cur_cover_ = bound->range_;
194     DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
195     return (result->cur_cover_ != result->pred_cover_);
196   }
197 
198  private:
199   size_t length_;
200   LiveRangeBound* start_;
201 
202   DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
203 };
204 
205 class LiveRangeFinder {
206  public:
207   explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
208       : data_(data),
209         bounds_length_(static_cast<int>(data_->live_ranges().size())),
210         bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
211         zone_(zone) {
212     for (int i = 0; i < bounds_length_; ++i) {
213       new (&bounds_[i]) LiveRangeBoundArray();
214     }
215   }
216 
217   LiveRangeBoundArray* ArrayFor(int operand_index) {
218     DCHECK(operand_index < bounds_length_);
219     TopLevelLiveRange* range = data_->live_ranges()[operand_index];
220     DCHECK(range != nullptr && !range->IsEmpty());
221     LiveRangeBoundArray* array = &bounds_[operand_index];
222     if (array->ShouldInitialize()) {
223       array->Initialize(zone_, range);
224     }
225     return array;
226   }
227 
228  private:
229   const RegisterAllocationData* const data_;
230   const int bounds_length_;
231   LiveRangeBoundArray* const bounds_;
232   Zone* const zone_;
233 
234   DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
235 };
236 
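// Map from (parallel move, destination operand) to source operand, used to
// defer move insertions so that a ParallelMove is not modified while it is
// being iterated; consumed by the live range connecting phase later in this
// file.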
237 using DelayedInsertionMapKey = std::pair<ParallelMove*, InstructionOperand>;
238 
239 struct DelayedInsertionMapCompare {
240   bool operator()(const DelayedInsertionMapKey& a,
241                   const DelayedInsertionMapKey& b) const {
242     if (a.first == b.first) {
243       return a.second.Compare(b.second);
244     }
245     return a.first < b.first;
246   }
247 };
248 
249 using DelayedInsertionMap = ZoneMap<DelayedInsertionMapKey, InstructionOperand,
250                                     DelayedInsertionMapCompare>;
251 
252 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
253                          void* hint, UsePositionHintType hint_type)
254     : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) {
255   DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone);
256   bool register_beneficial = true;
257   UsePositionType type = UsePositionType::kRegisterOrSlot;
258   if (operand_ != nullptr && operand_->IsUnallocated()) {
259     const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
260     if (unalloc->HasRegisterPolicy()) {
261       type = UsePositionType::kRequiresRegister;
262     } else if (unalloc->HasSlotPolicy()) {
263       type = UsePositionType::kRequiresSlot;
264       register_beneficial = false;
265     } else if (unalloc->HasRegisterOrSlotOrConstantPolicy()) {
266       type = UsePositionType::kRegisterOrSlotOrConstant;
267       register_beneficial = false;
268     } else {
269       register_beneficial = !unalloc->HasRegisterOrSlotPolicy();
270     }
271   }
272   flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) |
273            RegisterBeneficialField::encode(register_beneficial) |
274            AssignedRegisterField::encode(kUnassignedRegister);
275   DCHECK(pos_.IsValid());
276 }
277 
278 bool UsePosition::HasHint() const {
279   int hint_register;
280   return HintRegister(&hint_register);
281 }
282 
283 bool UsePosition::HintRegister(int* register_code) const {
284   if (hint_ == nullptr) return false;
285   switch (HintTypeField::decode(flags_)) {
286     case UsePositionHintType::kNone:
287     case UsePositionHintType::kUnresolved:
288       return false;
289     case UsePositionHintType::kUsePos: {
290       UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
291       int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
292       if (assigned_register == kUnassignedRegister) return false;
293       *register_code = assigned_register;
294       return true;
295     }
296     case UsePositionHintType::kOperand: {
297       InstructionOperand* operand =
298           reinterpret_cast<InstructionOperand*>(hint_);
299       *register_code = LocationOperand::cast(operand)->register_code();
300       return true;
301     }
302     case UsePositionHintType::kPhi: {
303       RegisterAllocationData::PhiMapValue* phi =
304           reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
305       int assigned_register = phi->assigned_register();
306       if (assigned_register == kUnassignedRegister) return false;
307       *register_code = assigned_register;
308       return true;
309     }
310   }
311   UNREACHABLE();
312 }
313 
314 UsePositionHintType UsePosition::HintTypeForOperand(
315     const InstructionOperand& op) {
316   switch (op.kind()) {
317     case InstructionOperand::CONSTANT:
318     case InstructionOperand::IMMEDIATE:
319       return UsePositionHintType::kNone;
320     case InstructionOperand::UNALLOCATED:
321       return UsePositionHintType::kUnresolved;
322     case InstructionOperand::ALLOCATED:
323       if (op.IsRegister() || op.IsFPRegister()) {
324         return UsePositionHintType::kOperand;
325       } else {
326         DCHECK(op.IsStackSlot() || op.IsFPStackSlot());
327         return UsePositionHintType::kNone;
328       }
329     case InstructionOperand::INVALID:
330       break;
331   }
332   UNREACHABLE();
333 }
334 
335 void UsePosition::SetHint(UsePosition* use_pos) {
336   DCHECK_NOT_NULL(use_pos);
337   hint_ = use_pos;
338   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
339 }
340 
341 void UsePosition::ResolveHint(UsePosition* use_pos) {
342   DCHECK_NOT_NULL(use_pos);
343   if (HintTypeField::decode(flags_) != UsePositionHintType::kUnresolved) return;
344   hint_ = use_pos;
345   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
346 }
347 
348 void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
349   DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
350   DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_));
351   flags_ = TypeField::encode(type) |
352            RegisterBeneficialField::encode(register_beneficial) |
353            HintTypeField::encode(HintTypeField::decode(flags_)) |
354            AssignedRegisterField::encode(kUnassignedRegister);
355 }
356 
357 UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
358   DCHECK(Contains(pos) && pos != start());
359   UseInterval* after = new (zone) UseInterval(pos, end_);
360   after->next_ = next_;
361   next_ = nullptr;
362   end_ = pos;
363   return after;
364 }
365 
366 void LifetimePosition::Print() const { StdoutStream{} << *this << std::endl; }
367 
368 std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
369   os << '@' << pos.ToInstructionIndex();
370   if (pos.IsGapPosition()) {
371     os << 'g';
372   } else {
373     os << 'i';
374   }
375   if (pos.IsStart()) {
376     os << 's';
377   } else {
378     os << 'e';
379   }
380   return os;
381 }
382 
383 LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
384                      TopLevelLiveRange* top_level)
385     : relative_id_(relative_id),
386       bits_(0),
387       last_interval_(nullptr),
388       first_interval_(nullptr),
389       first_pos_(nullptr),
390       top_level_(top_level),
391       next_(nullptr),
392       current_interval_(nullptr),
393       last_processed_use_(nullptr),
394       current_hint_position_(nullptr),
395       splitting_pointer_(nullptr) {
396   DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
397   bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
398           RepresentationField::encode(rep) |
399           ControlFlowRegisterHint::encode(kUnassignedRegister);
400 }
401 
402 void LiveRange::VerifyPositions() const {
403   // Walk the positions, verifying that each is in an interval.
404   UseInterval* interval = first_interval_;
405   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
406     CHECK(Start() <= pos->pos());
407     CHECK(pos->pos() <= End());
408     CHECK_NOT_NULL(interval);
409     while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
410       interval = interval->next();
411       CHECK_NOT_NULL(interval);
412     }
413   }
414 }
415 
416 void LiveRange::VerifyIntervals() const {
417   DCHECK(first_interval()->start() == Start());
418   LifetimePosition last_end = first_interval()->end();
419   for (UseInterval* interval = first_interval()->next(); interval != nullptr;
420        interval = interval->next()) {
421     DCHECK(last_end <= interval->start());
422     last_end = interval->end();
423   }
424   DCHECK(last_end == End());
425 }
426 
427 void LiveRange::set_assigned_register(int reg) {
428   DCHECK(!HasRegisterAssigned() && !spilled());
429   bits_ = AssignedRegisterField::update(bits_, reg);
430 }
431 
432 void LiveRange::UnsetAssignedRegister() {
433   DCHECK(HasRegisterAssigned() && !spilled());
434   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
435 }
436 
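// Joins this range with its next child: the child's use intervals and use
// positions are appended to this range and the child is unlinked from the
// chain of children.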
437 void LiveRange::AttachToNext() {
438   DCHECK_NOT_NULL(next_);
439   DCHECK_NE(TopLevel()->last_child_covers_, next_);
440   last_interval_->set_next(next_->first_interval());
441   next_->first_interval_ = nullptr;
442   last_interval_ = next_->last_interval_;
443   next_->last_interval_ = nullptr;
444   if (first_pos() == nullptr) {
445     first_pos_ = next_->first_pos();
446   } else {
447     UsePosition* ptr = first_pos_;
448     while (ptr->next() != nullptr) {
449       ptr = ptr->next();
450     }
451     ptr->set_next(next_->first_pos());
452   }
453   next_->first_pos_ = nullptr;
454   LiveRange* old_next = next_;
455   next_ = next_->next_;
456   old_next->next_ = nullptr;
457 }
458 
459 void LiveRange::Unspill() {
460   DCHECK(spilled());
461   set_spilled(false);
462   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
463 }
464 
465 void LiveRange::Spill() {
466   DCHECK(!spilled());
467   DCHECK(!TopLevel()->HasNoSpillType());
468   set_spilled(true);
469   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
470 }
471 
472 RegisterKind LiveRange::kind() const {
473   return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS;
474 }
475 
476 UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
477   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
478     if (pos->HintRegister(register_index)) return pos;
479   }
480   return nullptr;
481 }
482 
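// Returns the first use position at or after `start`. last_processed_use_
// caches the previous result, so repeated queries with non-decreasing `start`
// do not have to rescan the whole list.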
483 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
484   UsePosition* use_pos = last_processed_use_;
485   if (use_pos == nullptr || use_pos->pos() > start) {
486     use_pos = first_pos();
487   }
488   while (use_pos != nullptr && use_pos->pos() < start) {
489     use_pos = use_pos->next();
490   }
491   last_processed_use_ = use_pos;
492   return use_pos;
493 }
494 
495 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
496     LifetimePosition start) const {
497   UsePosition* pos = NextUsePosition(start);
498   while (pos != nullptr && !pos->RegisterIsBeneficial()) {
499     pos = pos->next();
500   }
501   return pos;
502 }
503 
504 LifetimePosition LiveRange::NextLifetimePositionRegisterIsBeneficial(
505     const LifetimePosition& start) const {
506   UsePosition* next_use = NextUsePositionRegisterIsBeneficial(start);
507   if (next_use == nullptr) return End();
508   return next_use->pos();
509 }
510 
511 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
512     LifetimePosition start) const {
513   UsePosition* pos = first_pos();
514   UsePosition* prev = nullptr;
515   while (pos != nullptr && pos->pos() < start) {
516     if (pos->RegisterIsBeneficial()) prev = pos;
517     pos = pos->next();
518   }
519   return prev;
520 }
521 
522 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
523   UsePosition* pos = NextUsePosition(start);
524   while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
525     pos = pos->next();
526   }
527   return pos;
528 }
529 
530 UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
531   for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
532        pos = pos->next()) {
533     if (pos->type() != UsePositionType::kRequiresSlot) continue;
534     return pos;
535   }
536   return nullptr;
537 }
538 
539 bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
540   // We cannot spill a live range that has a use requiring a register
541   // at the current or the immediate next position.
542   UsePosition* use_pos = NextRegisterPosition(pos);
543   if (use_pos == nullptr) return true;
544   return use_pos->pos() > pos.NextStart().End();
545 }
546 
547 bool LiveRange::IsTopLevel() const { return top_level_ == this; }
548 
549 InstructionOperand LiveRange::GetAssignedOperand() const {
550   DCHECK(!IsEmpty());
551   if (HasRegisterAssigned()) {
552     DCHECK(!spilled());
553     return AllocatedOperand(LocationOperand::REGISTER, representation(),
554                             assigned_register());
555   }
556   DCHECK(spilled());
557   DCHECK(!HasRegisterAssigned());
558   if (TopLevel()->HasSpillOperand()) {
559     InstructionOperand* op = TopLevel()->GetSpillOperand();
560     DCHECK(!op->IsUnallocated());
561     return *op;
562   }
563   return TopLevel()->GetSpillRangeOperand();
564 }
565 
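// Returns a conservative starting interval for searches at `position`, using
// current_interval_ as a cache; falls back to the first interval if the
// cached one already starts after `position`.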
566 UseInterval* LiveRange::FirstSearchIntervalForPosition(
567     LifetimePosition position) const {
568   if (current_interval_ == nullptr) return first_interval_;
569   if (current_interval_->start() > position) {
570     current_interval_ = nullptr;
571     return first_interval_;
572   }
573   return current_interval_;
574 }
575 
576 void LiveRange::AdvanceLastProcessedMarker(
577     UseInterval* to_start_of, LifetimePosition but_not_past) const {
578   if (to_start_of == nullptr) return;
579   if (to_start_of->start() > but_not_past) return;
580   LifetimePosition start = current_interval_ == nullptr
581                                ? LifetimePosition::Invalid()
582                                : current_interval_->start();
583   if (to_start_of->start() > start) {
584     current_interval_ = to_start_of;
585   }
586 }
587 
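// Splits this range at `position`: this range keeps [Start(), position) and a
// newly created child, linked in right after this range, takes over
// [position, End()) together with the use positions that follow the split.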
588 LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
589   int new_id = TopLevel()->GetNextChildId();
590   LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
591   child->set_bundle(bundle_);
592   // If we split, we do so because we're about to switch registers or move
593   // to/from a slot, so there's no value in connecting hints.
594   DetachAt(position, child, zone, DoNotConnectHints);
595 
596   child->top_level_ = TopLevel();
597   child->next_ = next_;
598   next_ = child;
599   return child;
600 }
601 
602 UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
603                                  Zone* zone,
604                                  HintConnectionOption connect_hints) {
605   DCHECK(Start() < position);
606   DCHECK(End() > position);
607   DCHECK(result->IsEmpty());
608   // Find the last interval that ends before the position. If the
609   // position is contained in one of the intervals in the chain, we
610   // split that interval and use the first part.
611   UseInterval* current = FirstSearchIntervalForPosition(position);
612 
613   // If the split position coincides with the beginning of a use interval
614   // we need to split use positions in a special way.
615   bool split_at_start = false;
616 
617   if (current->start() == position) {
618     // When splitting at start we need to locate the previous use interval.
619     current = first_interval_;
620   }
621 
622   UseInterval* after = nullptr;
623   while (current != nullptr) {
624     if (current->Contains(position)) {
625       after = current->SplitAt(position, zone);
626       break;
627     }
628     UseInterval* next = current->next();
629     if (next->start() >= position) {
630       split_at_start = (next->start() == position);
631       after = next;
632       current->set_next(nullptr);
633       break;
634     }
635     current = next;
636   }
637   DCHECK_NOT_NULL(after);
638 
639   // Partition original use intervals to the two live ranges.
640   UseInterval* before = current;
641   result->last_interval_ =
642       (last_interval_ == before)
643           ? after            // Only interval in the range after split.
644           : last_interval_;  // Last interval of the original range.
645   result->first_interval_ = after;
646   last_interval_ = before;
647 
648   // Find the last use position before the split and the first use
649   // position after it.
650   UsePosition* use_after =
651       splitting_pointer_ == nullptr || splitting_pointer_->pos() > position
652           ? first_pos()
653           : splitting_pointer_;
654   UsePosition* use_before = nullptr;
655   if (split_at_start) {
656     // The split position coincides with the beginning of a use interval (the
657     // end of a lifetime hole). A use at this position should be attributed to
658     // the split child, because the split child owns the use interval covering it.
659     while (use_after != nullptr && use_after->pos() < position) {
660       use_before = use_after;
661       use_after = use_after->next();
662     }
663   } else {
664     while (use_after != nullptr && use_after->pos() <= position) {
665       use_before = use_after;
666       use_after = use_after->next();
667     }
668   }
669 
670   // Partition original use positions to the two live ranges.
671   if (use_before != nullptr) {
672     use_before->set_next(nullptr);
673   } else {
674     first_pos_ = nullptr;
675   }
676   result->first_pos_ = use_after;
677 
678   // Discard cached iteration state. It might be pointing
679   // to the use that no longer belongs to this live range.
680   last_processed_use_ = nullptr;
681   current_interval_ = nullptr;
682 
683   if (connect_hints == ConnectHints && use_before != nullptr &&
684       use_after != nullptr) {
685     use_after->SetHint(use_before);
686   }
687 #ifdef DEBUG
688   VerifyChildStructure();
689   result->VerifyChildStructure();
690 #endif
691   return use_before;
692 }
693 
694 void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
695   LiveRange* child = this;
696   for (; child != nullptr; child = child->next()) {
697     child->top_level_ = new_top_level;
698   }
699 }
700 
701 void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
702                                      const InstructionOperand& spill_op) {
703   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
704     DCHECK(Start() <= pos->pos() && pos->pos() <= End());
705     if (!pos->HasOperand()) continue;
706     switch (pos->type()) {
707       case UsePositionType::kRequiresSlot:
708         DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot());
709         InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
710         break;
711       case UsePositionType::kRequiresRegister:
712         DCHECK(op.IsRegister() || op.IsFPRegister());
713         V8_FALLTHROUGH;
714       case UsePositionType::kRegisterOrSlot:
715       case UsePositionType::kRegisterOrSlotOrConstant:
716         InstructionOperand::ReplaceWith(pos->operand(), &op);
717         break;
718     }
719   }
720 }
721 
722 // This implements an ordering on live ranges so that they are ordered by their
723 // start positions.  This is needed for the correctness of the register
724 // allocation algorithm.  If two live ranges start at the same offset then there
725 // is a tie breaker based on where the value is first used.  This part of the
726 // ordering is merely a heuristic.
727 bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
728   LifetimePosition start = Start();
729   LifetimePosition other_start = other->Start();
730   if (start == other_start) {
731     // Prefer the range that carries a control flow hint, to make sure it
732     // gets allocated first. This allows the control-flow-aware allocation to
733     // just put ranges back into the queue without other ranges interfering.
734     if (controlflow_hint() < other->controlflow_hint()) {
735       return true;
736     }
737     // The other has a smaller hint.
738     if (controlflow_hint() > other->controlflow_hint()) {
739       return false;
740     }
741     // Both have the same hint or no hint at all. Use first use position.
742     UsePosition* pos = first_pos();
743     UsePosition* other_pos = other->first_pos();
744     // To make the order total, handle the case where both positions are null.
745     if (pos == other_pos) return TopLevel()->vreg() < other->TopLevel()->vreg();
746     if (pos == nullptr) return false;
747     if (other_pos == nullptr) return true;
748     // To make the order total, handle the case where both positions are equal.
749     if (pos->pos() == other_pos->pos())
750       return TopLevel()->vreg() < other->TopLevel()->vreg();
751     return pos->pos() < other_pos->pos();
752   }
753   return start < other_start;
754 }
755 
756 void LiveRange::SetUseHints(int register_index) {
757   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
758     if (!pos->HasOperand()) continue;
759     switch (pos->type()) {
760       case UsePositionType::kRequiresSlot:
761         break;
762       case UsePositionType::kRequiresRegister:
763       case UsePositionType::kRegisterOrSlot:
764       case UsePositionType::kRegisterOrSlotOrConstant:
765         pos->set_assigned_register(register_index);
766         break;
767     }
768   }
769 }
770 
771 bool LiveRange::CanCover(LifetimePosition position) const {
772   if (IsEmpty()) return false;
773   return Start() <= position && position < End();
774 }
775 
776 bool LiveRange::Covers(LifetimePosition position) const {
777   if (!CanCover(position)) return false;
778   UseInterval* start_search = FirstSearchIntervalForPosition(position);
779   for (UseInterval* interval = start_search; interval != nullptr;
780        interval = interval->next()) {
781     DCHECK(interval->next() == nullptr ||
782            interval->next()->start() >= interval->start());
783     AdvanceLastProcessedMarker(interval, position);
784     if (interval->Contains(position)) return true;
785     if (interval->start() > position) return false;
786   }
787   return false;
788 }
789 
790 LifetimePosition LiveRange::NextEndAfter(LifetimePosition position) const {
791   UseInterval* start_search = FirstSearchIntervalForPosition(position);
792   while (start_search->end() < position) {
793     start_search = start_search->next();
794   }
795   return start_search->end();
796 }
797 
798 LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) {
799   UseInterval* start_search = FirstSearchIntervalForPosition(position);
800   while (start_search->start() < position) {
801     start_search = start_search->next();
802   }
803   next_start_ = start_search->start();
804   return next_start_;
805 }
806 
807 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
808   UseInterval* b = other->first_interval();
809   if (b == nullptr) return LifetimePosition::Invalid();
810   LifetimePosition advance_last_processed_up_to = b->start();
811   UseInterval* a = FirstSearchIntervalForPosition(b->start());
812   while (a != nullptr && b != nullptr) {
813     if (a->start() > other->End()) break;
814     if (b->start() > End()) break;
815     LifetimePosition cur_intersection = a->Intersect(b);
816     if (cur_intersection.IsValid()) {
817       return cur_intersection;
818     }
819     if (a->start() < b->start()) {
820       a = a->next();
821       if (a == nullptr || a->start() > other->End()) break;
822       AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
823     } else {
824       b = b->next();
825     }
826   }
827   return LifetimePosition::Invalid();
828 }
829 
830 void LiveRange::Print(const RegisterConfiguration* config,
831                       bool with_children) const {
832   StdoutStream os;
833   PrintableLiveRange wrapper;
834   wrapper.register_configuration_ = config;
835   for (const LiveRange* i = this; i != nullptr; i = i->next()) {
836     wrapper.range_ = i;
837     os << wrapper << std::endl;
838     if (!with_children) break;
839   }
840 }
841 
842 void LiveRange::Print(bool with_children) const {
843   Print(RegisterConfiguration::Default(), with_children);
844 }
845 
846 bool LiveRange::RegisterFromBundle(int* hint) const {
847   if (bundle_ == nullptr || bundle_->reg() == kUnassignedRegister) return false;
848   *hint = bundle_->reg();
849   return true;
850 }
851 
852 void LiveRange::UpdateBundleRegister(int reg) const {
853   if (bundle_ == nullptr || bundle_->reg() != kUnassignedRegister) return;
854   bundle_->set_reg(reg);
855 }
856 
857 struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
858   SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
859                          SpillMoveInsertionList* next)
860       : gap_index(gap_index), operand(operand), next(next) {}
861   const int gap_index;
862   InstructionOperand* const operand;
863   SpillMoveInsertionList* const next;
864 };
865 
866 TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
867     : LiveRange(0, rep, this),
868       vreg_(vreg),
869       last_child_id_(0),
870       splintered_from_(nullptr),
871       spill_operand_(nullptr),
872       spill_move_insertion_locations_(nullptr),
873       spilled_in_deferred_blocks_(false),
874       spill_start_index_(kMaxInt),
875       last_pos_(nullptr),
876       last_child_covers_(this),
877       splinter_(nullptr),
878       has_preassigned_slot_(false) {
879   bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
880 }
881 
882 #ifdef DEBUG
883 int TopLevelLiveRange::debug_virt_reg() const {
884   return IsSplinter() ? splintered_from()->vreg() : vreg();
885 }
886 #endif
887 
888 void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
889                                             InstructionOperand* operand) {
890   DCHECK(HasNoSpillType());
891   spill_move_insertion_locations_ = new (zone) SpillMoveInsertionList(
892       gap_index, operand, spill_move_insertion_locations_);
893 }
894 
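// Emits the spill moves recorded by RecordSpillLocation into the START
// parallel move of each recorded gap position. Moves that already exist
// (e.g. as constraint moves into a preassigned slot) are not duplicated.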
895 void TopLevelLiveRange::CommitSpillMoves(RegisterAllocationData* data,
896                                          const InstructionOperand& op,
897                                          bool might_be_duplicated) {
898   DCHECK_IMPLIES(op.IsConstant(),
899                  GetSpillMoveInsertionLocations(data) == nullptr);
900   InstructionSequence* sequence = data->code();
901   Zone* zone = sequence->zone();
902 
903   for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations(data);
904        to_spill != nullptr; to_spill = to_spill->next) {
905     Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
906     ParallelMove* move =
907         instr->GetOrCreateParallelMove(Instruction::START, zone);
908     // Skip insertion if it's possible that the move exists already as a
909     // constraint move from a fixed output register to a slot.
910     if (might_be_duplicated || has_preassigned_slot()) {
911       bool found = false;
912       for (MoveOperands* move_op : *move) {
913         if (move_op->IsEliminated()) continue;
914         if (move_op->source().Equals(*to_spill->operand) &&
915             move_op->destination().Equals(op)) {
916           found = true;
917           if (has_preassigned_slot()) move_op->Eliminate();
918           break;
919         }
920       }
921       if (found) continue;
922     }
923     if (!has_preassigned_slot()) {
924       move->AddMove(*to_spill->operand, op);
925     }
926   }
927 }
928 
929 void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
930   DCHECK(HasNoSpillType());
931   DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
932   set_spill_type(SpillType::kSpillOperand);
933   spill_operand_ = operand;
934 }
935 
936 void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
937   DCHECK(!HasSpillOperand());
938   DCHECK(spill_range);
939   spill_range_ = spill_range;
940 }
941 
942 AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
943   SpillRange* spill_range = GetSpillRange();
944   int index = spill_range->assigned_slot();
945   return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
946 }
947 
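// Carves the [start, end) section out of this range and appends it to the
// associated splinter range (see splinter()), leaving this range covering the
// remainder; used for portions of a range that lie in deferred code.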
948 void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
949                                  Zone* zone) {
950   DCHECK(start != Start() || end != End());
951   DCHECK(start < end);
952 
953   TopLevelLiveRange splinter_temp(-1, representation());
954   UsePosition* last_in_splinter = nullptr;
955   // Live ranges defined in deferred blocks stay in deferred blocks, so we
956   // don't need to splinter them. That means that start should always be
957   // after the beginning of the range.
958   DCHECK(start > Start());
959 
960   if (end >= End()) {
961     DCHECK(start > Start());
962     DetachAt(start, &splinter_temp, zone, ConnectHints);
963     next_ = nullptr;
964   } else {
965     DCHECK(start < End() && Start() < end);
966 
967     const int kInvalidId = std::numeric_limits<int>::max();
968 
969     UsePosition* last = DetachAt(start, &splinter_temp, zone, ConnectHints);
970 
971     LiveRange end_part(kInvalidId, this->representation(), nullptr);
972     // The last chunk exits the deferred region, and we don't want to connect
973     // hints here, because the non-deferred region shouldn't be affected
974     // by allocation decisions on the deferred path.
975     last_in_splinter =
976         splinter_temp.DetachAt(end, &end_part, zone, DoNotConnectHints);
977 
978     next_ = end_part.next_;
979     last_interval_->set_next(end_part.first_interval_);
980     // The next splinter will happen either at or after the current interval.
981     // We can optimize DetachAt by setting current_interval_ accordingly,
982     // which will then be picked up by FirstSearchIntervalForPosition.
983     current_interval_ = last_interval_;
984     last_interval_ = end_part.last_interval_;
985 
986     if (first_pos_ == nullptr) {
987       first_pos_ = end_part.first_pos_;
988     } else {
989       splitting_pointer_ = last;
990       if (last != nullptr) last->set_next(end_part.first_pos_);
991     }
992   }
993 
994   if (splinter()->IsEmpty()) {
995     splinter()->first_interval_ = splinter_temp.first_interval_;
996     splinter()->last_interval_ = splinter_temp.last_interval_;
997   } else {
998     splinter()->last_interval_->set_next(splinter_temp.first_interval_);
999     splinter()->last_interval_ = splinter_temp.last_interval_;
1000   }
1001   if (splinter()->first_pos() == nullptr) {
1002     splinter()->first_pos_ = splinter_temp.first_pos_;
1003   } else {
1004     splinter()->last_pos_->set_next(splinter_temp.first_pos_);
1005   }
1006   if (last_in_splinter != nullptr) {
1007     splinter()->last_pos_ = last_in_splinter;
1008   } else {
1009     if (splinter()->first_pos() != nullptr &&
1010         splinter()->last_pos_ == nullptr) {
1011       splinter()->last_pos_ = splinter()->first_pos();
1012       for (UsePosition* pos = splinter()->first_pos(); pos != nullptr;
1013            pos = pos->next()) {
1014         splinter()->last_pos_ = pos;
1015       }
1016     }
1017   }
1018 #ifdef DEBUG
1019   Verify();
1020   splinter()->Verify();
1021 #endif
1022 }
1023 
1024 void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
1025   splintered_from_ = splinter_parent;
1026   if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) {
1027     SetSpillRange(splinter_parent->spill_range_);
1028   }
1029 }
1030 
1031 void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) {
1032   DCHECK(merged->TopLevel() == this);
1033 
1034   if (HasNoSpillType() && merged->HasSpillRange()) {
1035     set_spill_type(merged->spill_type());
1036     DCHECK_LT(0, GetSpillRange()->live_ranges().size());
1037     merged->spill_range_ = nullptr;
1038     merged->bits_ =
1039         SpillTypeField::update(merged->bits_, SpillType::kNoSpillType);
1040   }
1041 }
1042 
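// Merges the splinter `other` back into this range by interleaving the two
// position-sorted child chains, splitting children of this range where they
// overlap a child of the splinter.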
1043 void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
1044   DCHECK(Start() < other->Start());
1045   DCHECK(other->splintered_from() == this);
1046 
1047   LiveRange* first = this;
1048   LiveRange* second = other;
1049   DCHECK(first->Start() < second->Start());
1050   while (first != nullptr && second != nullptr) {
1051     DCHECK(first != second);
1052     // Make sure the ranges are in order each time we iterate.
1053     if (second->Start() < first->Start()) {
1054       LiveRange* tmp = second;
1055       second = first;
1056       first = tmp;
1057       continue;
1058     }
1059 
1060     if (first->End() <= second->Start()) {
1061       if (first->next() == nullptr ||
1062           first->next()->Start() > second->Start()) {
1063         // First is in order before second.
1064         LiveRange* temp = first->next();
1065         first->next_ = second;
1066         first = temp;
1067       } else {
1068         // First is in order before its successor (or second), so advance first.
1069         first = first->next();
1070       }
1071       continue;
1072     }
1073 
1074     DCHECK(first->Start() < second->Start());
1075     // If first and second intersect, split first.
1076     if (first->Start() < second->End() && second->Start() < first->End()) {
1077       LiveRange* temp = first->SplitAt(second->Start(), zone);
1078       CHECK(temp != first);
1079       temp->set_spilled(first->spilled());
1080       if (!temp->spilled())
1081         temp->set_assigned_register(first->assigned_register());
1082 
1083       first->next_ = second;
1084       first = temp;
1085       continue;
1086     }
1087     DCHECK(first->End() <= second->Start());
1088   }
1089 
1090   TopLevel()->UpdateParentForAllChildren(TopLevel());
1091   TopLevel()->UpdateSpillRangePostMerge(other);
1092   TopLevel()->register_slot_use(other->slot_use_kind());
1093 
1094 #ifdef DEBUG
1095   Verify();
1096 #endif
1097 }
1098 
1099 void TopLevelLiveRange::VerifyChildrenInOrder() const {
1100   LifetimePosition last_end = End();
1101   for (const LiveRange* child = this->next(); child != nullptr;
1102        child = child->next()) {
1103     DCHECK(last_end <= child->Start());
1104     last_end = child->End();
1105   }
1106 }
1107 
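// Returns the child covering `pos`, or nullptr if no child does. The search
// resumes from the previous result, so callers are expected to query
// positions in non-decreasing order.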
1108 LiveRange* TopLevelLiveRange::GetChildCovers(LifetimePosition pos) {
1109   LiveRange* child = last_child_covers_;
1110   while (child != nullptr && child->End() <= pos) {
1111     child = child->next();
1112   }
1113   last_child_covers_ = child;
1114   return !child || !child->Covers(pos) ? nullptr : child;
1115 }
1116 
1117 void TopLevelLiveRange::Verify() const {
1118   VerifyChildrenInOrder();
1119   for (const LiveRange* child = this; child != nullptr; child = child->next()) {
1120     child->VerifyChildStructure();
1121   }
1122 }
1123 
1124 void TopLevelLiveRange::ShortenTo(LifetimePosition start, bool trace_alloc) {
1125   TRACE_COND(trace_alloc, "Shorten live range %d to [%d\n", vreg(),
1126              start.value());
1127   DCHECK_NOT_NULL(first_interval_);
1128   DCHECK(first_interval_->start() <= start);
1129   DCHECK(start < first_interval_->end());
1130   first_interval_->set_start(start);
1131 }
1132 
1133 void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
1134                                        LifetimePosition end, Zone* zone,
1135                                        bool trace_alloc) {
1136   TRACE_COND(trace_alloc, "Ensure live range %d in interval [%d %d[\n", vreg(),
1137              start.value(), end.value());
1138   LifetimePosition new_end = end;
1139   while (first_interval_ != nullptr && first_interval_->start() <= end) {
1140     if (first_interval_->end() > end) {
1141       new_end = first_interval_->end();
1142     }
1143     first_interval_ = first_interval_->next();
1144   }
1145 
1146   UseInterval* new_interval = new (zone) UseInterval(start, new_end);
1147   new_interval->set_next(first_interval_);
1148   first_interval_ = new_interval;
1149   if (new_interval->next() == nullptr) {
1150     last_interval_ = new_interval;
1151   }
1152 }
1153 
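// Adds a use interval at the front of the interval list. Intervals are added
// while ProcessInstructions walks the code backwards, so each new interval
// starts at or before the current first interval and may simply extend it.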
1154 void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
1155                                        LifetimePosition end, Zone* zone,
1156                                        bool trace_alloc) {
1157   TRACE_COND(trace_alloc, "Add to live range %d interval [%d %d[\n", vreg(),
1158              start.value(), end.value());
1159   if (first_interval_ == nullptr) {
1160     UseInterval* interval = new (zone) UseInterval(start, end);
1161     first_interval_ = interval;
1162     last_interval_ = interval;
1163   } else {
1164     if (end == first_interval_->start()) {
1165       first_interval_->set_start(start);
1166     } else if (end < first_interval_->start()) {
1167       UseInterval* interval = new (zone) UseInterval(start, end);
1168       interval->set_next(first_interval_);
1169       first_interval_ = interval;
1170     } else {
1171       // The order in which instructions are processed (see ProcessInstructions)
1172       // guarantees that each new use interval either precedes, intersects with,
1173       // or touches the last added interval.
1174       DCHECK(start <= first_interval_->end());
1175       first_interval_->set_start(Min(start, first_interval_->start()));
1176       first_interval_->set_end(Max(end, first_interval_->end()));
1177     }
1178   }
1179 }
1180 
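// Inserts `use_pos` into the use-position list, keeping the list sorted by
// position, and updates current_hint_position_ when no earlier use carries a
// register hint.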
1181 void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos, bool trace_alloc) {
1182   LifetimePosition pos = use_pos->pos();
1183   TRACE_COND(trace_alloc, "Add to live range %d use position %d\n", vreg(),
1184              pos.value());
1185   UsePosition* prev_hint = nullptr;
1186   UsePosition* prev = nullptr;
1187   UsePosition* current = first_pos_;
1188   while (current != nullptr && current->pos() < pos) {
1189     prev_hint = current->HasHint() ? current : prev_hint;
1190     prev = current;
1191     current = current->next();
1192   }
1193 
1194   if (prev == nullptr) {
1195     use_pos->set_next(first_pos_);
1196     first_pos_ = use_pos;
1197   } else {
1198     use_pos->set_next(prev->next());
1199     prev->set_next(use_pos);
1200   }
1201 
1202   if (prev_hint == nullptr && use_pos->HasHint()) {
1203     current_hint_position_ = use_pos;
1204   }
1205 }
1206 
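// Returns true if the two interval chains (each sorted by start position)
// overlap anywhere, advancing whichever chain is currently behind.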
1207 static bool AreUseIntervalsIntersecting(UseInterval* interval1,
1208                                         UseInterval* interval2) {
1209   while (interval1 != nullptr && interval2 != nullptr) {
1210     if (interval1->start() < interval2->start()) {
1211       if (interval1->end() > interval2->start()) {
1212         return true;
1213       }
1214       interval1 = interval1->next();
1215     } else {
1216       if (interval2->end() > interval1->start()) {
1217         return true;
1218       }
1219       interval2 = interval2->next();
1220     }
1221   }
1222   return false;
1223 }
1224 
1225 std::ostream& operator<<(std::ostream& os,
1226                          const PrintableLiveRange& printable_range) {
1227   const LiveRange* range = printable_range.range_;
1228   os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id()
1229      << " ";
1230   if (range->TopLevel()->is_phi()) os << "phi ";
1231   if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
1232 
1233   os << "{" << std::endl;
1234   UseInterval* interval = range->first_interval();
1235   UsePosition* use_pos = range->first_pos();
1236   while (use_pos != nullptr) {
1237     if (use_pos->HasOperand()) {
1238       os << *use_pos->operand() << use_pos->pos() << " ";
1239     }
1240     use_pos = use_pos->next();
1241   }
1242   os << std::endl;
1243 
1244   while (interval != nullptr) {
1245     os << '[' << interval->start() << ", " << interval->end() << ')'
1246        << std::endl;
1247     interval = interval->next();
1248   }
1249   os << "}";
1250   return os;
1251 }
1252 
1253 namespace {
1254 void PrintBlockRow(std::ostream& os, const InstructionBlocks& blocks) {
1255   os << "     ";
1256   for (auto block : blocks) {
1257     LifetimePosition start_pos = LifetimePosition::GapFromInstructionIndex(
1258         block->first_instruction_index());
1259     LifetimePosition end_pos = LifetimePosition::GapFromInstructionIndex(
1260                                    block->last_instruction_index())
1261                                    .NextFullStart();
1262     int length = end_pos.value() - start_pos.value();
1263     constexpr int kMaxPrefixLength = 32;
1264     char buffer[kMaxPrefixLength];
1265     int rpo_number = block->rpo_number().ToInt();
1266     const char* deferred_marker = block->IsDeferred() ? "(deferred)" : "";
1267     int max_prefix_length = std::min(length, kMaxPrefixLength);
1268     int prefix = snprintf(buffer, max_prefix_length, "[-B%d-%s", rpo_number,
1269                           deferred_marker);
1270     os << buffer;
1271     int remaining = length - std::min(prefix, max_prefix_length) - 1;
1272     for (int i = 0; i < remaining; ++i) os << '-';
1273     os << ']';
1274   }
1275   os << '\n';
1276 }
1277 }  // namespace
1278 
1279 void LinearScanAllocator::PrintRangeRow(std::ostream& os,
1280                                         const TopLevelLiveRange* toplevel) {
1281   int position = 0;
1282   os << std::setw(3) << toplevel->vreg()
1283      << (toplevel->IsSplinter() ? "s:" : ": ");
1284 
1285   const char* kind_string;
1286   switch (toplevel->spill_type()) {
1287     case TopLevelLiveRange::SpillType::kSpillRange:
1288       kind_string = "ss";
1289       break;
1290     case TopLevelLiveRange::SpillType::kDeferredSpillRange:
1291       kind_string = "sd";
1292       break;
1293     case TopLevelLiveRange::SpillType::kSpillOperand:
1294       kind_string = "so";
1295       break;
1296     default:
1297       kind_string = "s?";
1298   }
1299 
1300   for (const LiveRange* range = toplevel; range != nullptr;
1301        range = range->next()) {
1302     for (UseInterval* interval = range->first_interval(); interval != nullptr;
1303          interval = interval->next()) {
1304       LifetimePosition start = interval->start();
1305       LifetimePosition end = interval->end();
1306       CHECK_GE(start.value(), position);
1307       for (; start.value() > position; position++) {
1308         os << ' ';
1309       }
1310       int length = end.value() - start.value();
1311       constexpr int kMaxPrefixLength = 32;
1312       char buffer[kMaxPrefixLength];
1313       int max_prefix_length = std::min(length + 1, kMaxPrefixLength);
1314       int prefix;
1315       if (range->spilled()) {
1316         prefix = snprintf(buffer, max_prefix_length, "|%s", kind_string);
1317       } else {
1318         prefix = snprintf(buffer, max_prefix_length, "|%s",
1319                           RegisterName(range->assigned_register()));
1320       }
1321       os << buffer;
1322       position += std::min(prefix, max_prefix_length - 1);
1323       CHECK_GE(end.value(), position);
1324       const char line_style = range->spilled() ? '-' : '=';
1325       for (; end.value() > position; position++) {
1326         os << line_style;
1327       }
1328     }
1329   }
1330   os << '\n';
1331 }
1332 
1333 void LinearScanAllocator::PrintRangeOverview(std::ostream& os) {
1334   PrintBlockRow(os, code()->instruction_blocks());
1335   for (auto const toplevel : data()->fixed_live_ranges()) {
1336     if (toplevel == nullptr) continue;
1337     PrintRangeRow(os, toplevel);
1338   }
1339   int rowcount = 0;
1340   for (auto toplevel : data()->live_ranges()) {
1341     if (!CanProcessRange(toplevel)) continue;
1342     if (rowcount++ % 10 == 0) PrintBlockRow(os, code()->instruction_blocks());
1343     PrintRangeRow(os, toplevel);
1344   }
1345 }
1346 
1347 SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
1348     : live_ranges_(zone),
1349       assigned_slot_(kUnassignedSlot),
1350       byte_width_(GetByteWidth(parent->representation())) {
1351   // Spill ranges are created for top level, non-splintered ranges. This is so
1352   // that, when merging decisions are made, we consider the full extent of the
1353   // virtual register, and avoid clobbering it.
1354   DCHECK(!parent->IsSplinter());
1355   UseInterval* result = nullptr;
1356   UseInterval* node = nullptr;
1357   // Copy the intervals for all ranges.
1358   for (LiveRange* range = parent; range != nullptr; range = range->next()) {
1359     UseInterval* src = range->first_interval();
1360     while (src != nullptr) {
1361       UseInterval* new_node = new (zone) UseInterval(src->start(), src->end());
1362       if (result == nullptr) {
1363         result = new_node;
1364       } else {
1365         node->set_next(new_node);
1366       }
1367       node = new_node;
1368       src = src->next();
1369     }
1370   }
1371   use_interval_ = result;
1372   live_ranges().push_back(parent);
1373   end_position_ = node->end();
1374   parent->SetSpillRange(this);
1375 }
1376 
1377 bool SpillRange::IsIntersectingWith(SpillRange* other) const {
1378   if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
1379       this->End() <= other->use_interval_->start() ||
1380       other->End() <= this->use_interval_->start()) {
1381     return false;
1382   }
1383   return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
1384 }
1385 
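// Two spill ranges may share a stack slot only if neither has a slot assigned
// yet, their byte widths match, and their use intervals do not intersect. On
// success, the other range's intervals and live ranges are absorbed into this
// one and the other range is left empty.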
1386 bool SpillRange::TryMerge(SpillRange* other) {
1387   if (HasSlot() || other->HasSlot()) return false;
1388   if (byte_width() != other->byte_width() || IsIntersectingWith(other))
1389     return false;
1390 
1391   LifetimePosition max = LifetimePosition::MaxPosition();
1392   if (End() < other->End() && other->End() != max) {
1393     end_position_ = other->End();
1394   }
1395   other->end_position_ = max;
1396 
1397   MergeDisjointIntervals(other->use_interval_);
1398   other->use_interval_ = nullptr;
1399 
1400   for (TopLevelLiveRange* range : other->live_ranges()) {
1401     DCHECK(range->GetSpillRange() == other);
1402     range->SetSpillRange(this);
1403   }
1404 
1405   live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
1406                        other->live_ranges().end());
1407   other->live_ranges().clear();
1408 
1409   return true;
1410 }
1411 
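// Merges the sorted, disjoint interval list 'other' into this range's own
// sorted list. For example (illustrative values), merging [0,2) -> [6,8)
// with [3,5) produces [0,2) -> [3,5) -> [6,8); the DCHECK below enforces
// that the two lists never overlap.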
1412 void SpillRange::MergeDisjointIntervals(UseInterval* other) {
1413   UseInterval* tail = nullptr;
1414   UseInterval* current = use_interval_;
1415   while (other != nullptr) {
1416     // Make sure the 'current' list starts first
1417     if (current == nullptr || current->start() > other->start()) {
1418       std::swap(current, other);
1419     }
1420     // Check disjointness
1421     DCHECK(other == nullptr || current->end() <= other->start());
1422     // Append the 'current' node to the result accumulator and move forward
1423     if (tail == nullptr) {
1424       use_interval_ = current;
1425     } else {
1426       tail->set_next(current);
1427     }
1428     tail = current;
1429     current = current->next();
1430   }
1431   // Other list is empty => we are done
1432 }
1433 
1434 void SpillRange::Print() const {
1435   StdoutStream os;
1436   os << "{" << std::endl;
1437   for (TopLevelLiveRange* range : live_ranges()) {
1438     os << range->vreg() << " ";
1439   }
1440   os << std::endl;
1441 
1442   for (UseInterval* i = interval(); i != nullptr; i = i->next()) {
1443     os << '[' << i->start() << ", " << i->end() << ')' << std::endl;
1444   }
1445   os << "}" << std::endl;
1446 }
1447 
1448 RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
1449                                                  const InstructionBlock* block,
1450                                                  Zone* zone)
1451     : phi_(phi),
1452       block_(block),
1453       incoming_operands_(zone),
1454       assigned_register_(kUnassignedRegister) {
1455   incoming_operands_.reserve(phi->operands().size());
1456 }
1457 
1458 void RegisterAllocationData::PhiMapValue::AddOperand(
1459     InstructionOperand* operand) {
1460   incoming_operands_.push_back(operand);
1461 }
1462 
1463 void RegisterAllocationData::PhiMapValue::CommitAssignment(
1464     const InstructionOperand& assigned) {
1465   for (InstructionOperand* operand : incoming_operands_) {
1466     InstructionOperand::ReplaceWith(operand, &assigned);
1467   }
1468 }
1469 
1470 RegisterAllocationData::RegisterAllocationData(
1471     const RegisterConfiguration* config, Zone* zone, Frame* frame,
1472     InstructionSequence* code, RegisterAllocationFlags flags,
1473     TickCounter* tick_counter, const char* debug_name)
1474     : allocation_zone_(zone),
1475       frame_(frame),
1476       code_(code),
1477       debug_name_(debug_name),
1478       config_(config),
1479       phi_map_(allocation_zone()),
1480       live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1481       live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1482       live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
1483                    allocation_zone()),
1484       fixed_live_ranges_(kNumberOfFixedRangesPerRegister *
1485                              this->config()->num_general_registers(),
1486                          nullptr, allocation_zone()),
1487       fixed_float_live_ranges_(allocation_zone()),
1488       fixed_double_live_ranges_(kNumberOfFixedRangesPerRegister *
1489                                     this->config()->num_double_registers(),
1490                                 nullptr, allocation_zone()),
1491       fixed_simd128_live_ranges_(allocation_zone()),
1492       spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
1493       delayed_references_(allocation_zone()),
1494       assigned_registers_(nullptr),
1495       assigned_double_registers_(nullptr),
1496       virtual_register_count_(code->VirtualRegisterCount()),
1497       preassigned_slot_ranges_(zone),
1498       spill_state_(code->InstructionBlockCount(), ZoneVector<LiveRange*>(zone),
1499                    zone),
1500       flags_(flags),
1501       tick_counter_(tick_counter) {
1502   if (!kSimpleFPAliasing) {
1503     fixed_float_live_ranges_.resize(
1504         kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(),
1505         nullptr);
1506     fixed_simd128_live_ranges_.resize(
1507         kNumberOfFixedRangesPerRegister *
1508             this->config()->num_simd128_registers(),
1509         nullptr);
1510   }
1511 
1512   assigned_registers_ = new (code_zone())
1513       BitVector(this->config()->num_general_registers(), code_zone());
1514   assigned_double_registers_ = new (code_zone())
1515       BitVector(this->config()->num_double_registers(), code_zone());
1516   fixed_register_use_ = new (code_zone())
1517       BitVector(this->config()->num_general_registers(), code_zone());
1518   fixed_fp_register_use_ = new (code_zone())
1519       BitVector(this->config()->num_double_registers(), code_zone());
1520 
1521   this->frame()->SetAllocatedRegisters(assigned_registers_);
1522   this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
1523 }
1524 
1525 MoveOperands* RegisterAllocationData::AddGapMove(
1526     int index, Instruction::GapPosition position,
1527     const InstructionOperand& from, const InstructionOperand& to) {
1528   Instruction* instr = code()->InstructionAt(index);
1529   ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
1530   return moves->AddMove(from, to);
1531 }
1532 
1533 MachineRepresentation RegisterAllocationData::RepresentationFor(
1534     int virtual_register) {
1535   DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
1536   return code()->GetRepresentation(virtual_register);
1537 }
1538 
1539 TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
1540   if (index >= static_cast<int>(live_ranges().size())) {
1541     live_ranges().resize(index + 1, nullptr);
1542   }
1543   TopLevelLiveRange* result = live_ranges()[index];
1544   if (result == nullptr) {
1545     result = NewLiveRange(index, RepresentationFor(index));
1546     live_ranges()[index] = result;
1547   }
1548   return result;
1549 }
1550 
1551 TopLevelLiveRange* RegisterAllocationData::NewLiveRange(
1552     int index, MachineRepresentation rep) {
1553   return new (allocation_zone()) TopLevelLiveRange(index, rep);
1554 }
1555 
1556 int RegisterAllocationData::GetNextLiveRangeId() {
1557   int vreg = virtual_register_count_++;
1558   if (vreg >= static_cast<int>(live_ranges().size())) {
1559     live_ranges().resize(vreg + 1, nullptr);
1560   }
1561   return vreg;
1562 }
1563 
1564 TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
1565     MachineRepresentation rep) {
1566   int vreg = GetNextLiveRangeId();
1567   TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
1568   return ret;
1569 }
1570 
1571 RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
1572     const InstructionBlock* block, PhiInstruction* phi) {
1573   RegisterAllocationData::PhiMapValue* map_value = new (allocation_zone())
1574       RegisterAllocationData::PhiMapValue(phi, block, allocation_zone());
1575   auto res =
1576       phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
1577   DCHECK(res.second);
1578   USE(res);
1579   return map_value;
1580 }
1581 
1582 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1583     int virtual_register) {
1584   auto it = phi_map_.find(virtual_register);
1585   DCHECK(it != phi_map_.end());
1586   return it->second;
1587 }
1588 
1589 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1590     TopLevelLiveRange* top_range) {
1591   return GetPhiMapValueFor(top_range->vreg());
1592 }
1593 
1594 bool RegisterAllocationData::ExistsUseWithoutDefinition() {
1595   bool found = false;
1596   BitVector::Iterator iterator(live_in_sets()[0]);
1597   while (!iterator.Done()) {
1598     found = true;
1599     int operand_index = iterator.Current();
1600     PrintF("Register allocator error: live v%d reached first block.\n",
1601            operand_index);
1602     LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
1603     PrintF("  (first use is at %d)\n", range->first_pos()->pos().value());
1604     if (debug_name() == nullptr) {
1605       PrintF("\n");
1606     } else {
1607       PrintF("  (function: %s)\n", debug_name());
1608     }
1609     iterator.Advance();
1610   }
1611   return found;
1612 }
1613 
1614 // If a range is defined in a deferred block, we expect the entire range to
1615 // cover only positions in deferred blocks. Otherwise, a block on the hot
1616 // path would be dominated by a deferred block, meaning it would be
1617 // unreachable without passing through the deferred block, which is a
1618 // contradiction. In particular, when such a range contributes a result back
1619 // to the hot path, it does so as one of the inputs of a phi. In that case,
1620 // the value is transferred via a move in the END gap position of the last
1621 // instruction of a deferred block.
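// For example (illustrative), a value defined by a call that only happens on
// a deferred (slow) path should have all of its use intervals inside deferred
// blocks; the check below walks each interval block by block and returns
// false as soon as a non-deferred block is covered.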
1622 bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
1623   const size_t live_ranges_size = live_ranges().size();
1624   for (const TopLevelLiveRange* range : live_ranges()) {
1625     CHECK_EQ(live_ranges_size,
1626              live_ranges().size());  // TODO(neis): crbug.com/831822
1627     if (range == nullptr || range->IsEmpty() ||
1628         !code()
1629              ->GetInstructionBlock(range->Start().ToInstructionIndex())
1630              ->IsDeferred()) {
1631       continue;
1632     }
1633     for (const UseInterval* i = range->first_interval(); i != nullptr;
1634          i = i->next()) {
1635       int first = i->FirstGapIndex();
1636       int last = i->LastGapIndex();
1637       for (int instr = first; instr <= last;) {
1638         const InstructionBlock* block = code()->GetInstructionBlock(instr);
1639         if (!block->IsDeferred()) return false;
1640         instr = block->last_instruction_index() + 1;
1641       }
1642     }
1643   }
1644   return true;
1645 }
1646 
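// Creates (or reuses) the SpillRange for 'range' and records the requested
// spill type. A range that already spills at definition keeps
// SpillType::kSpillRange even if a deferred spill is requested later; only
// ranges not yet marked kSpillRange get SpillType::kDeferredSpillRange.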
1647 SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
1648     TopLevelLiveRange* range, SpillMode spill_mode) {
1649   using SpillType = TopLevelLiveRange::SpillType;
1650   DCHECK(!range->HasSpillOperand());
1651 
1652   SpillRange* spill_range = range->GetAllocatedSpillRange();
1653   if (spill_range == nullptr) {
1654     DCHECK(!range->IsSplinter());
1655     spill_range = new (allocation_zone()) SpillRange(range, allocation_zone());
1656   }
1657   if (spill_mode == SpillMode::kSpillDeferred &&
1658       (range->spill_type() != SpillType::kSpillRange)) {
1659     DCHECK(is_turbo_control_flow_aware_allocation());
1660     range->set_spill_type(SpillType::kDeferredSpillRange);
1661   } else {
1662     range->set_spill_type(SpillType::kSpillRange);
1663   }
1664 
1665   int spill_range_index =
1666       range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg();
1667 
1668   spill_ranges()[spill_range_index] = spill_range;
1669 
1670   return spill_range;
1671 }
1672 
1673 SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
1674     TopLevelLiveRange* range) {
1675   DCHECK(is_turbo_preprocess_ranges());
1676   DCHECK(!range->HasSpillOperand());
1677   DCHECK(!range->IsSplinter());
1678   SpillRange* spill_range =
1679       new (allocation_zone()) SpillRange(range, allocation_zone());
1680   return spill_range;
1681 }
1682 
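// Records that a fixed register is used as an instruction input. On targets
// without simple FP aliasing (e.g. 32-bit ARM, where float32 and simd128
// registers overlap the float64 registers), the use is recorded for every
// aliased float64 register so that later queries see the full conflict set.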
1683 void RegisterAllocationData::MarkFixedUse(MachineRepresentation rep,
1684                                           int index) {
1685   switch (rep) {
1686     case MachineRepresentation::kFloat32:
1687     case MachineRepresentation::kSimd128:
1688       if (kSimpleFPAliasing) {
1689         fixed_fp_register_use_->Add(index);
1690       } else {
1691         int alias_base_index = -1;
1692         int aliases = config()->GetAliases(
1693             rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1694         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1695         while (aliases--) {
1696           int aliased_reg = alias_base_index + aliases;
1697           fixed_fp_register_use_->Add(aliased_reg);
1698         }
1699       }
1700       break;
1701     case MachineRepresentation::kFloat64:
1702       fixed_fp_register_use_->Add(index);
1703       break;
1704     default:
1705       DCHECK(!IsFloatingPoint(rep));
1706       fixed_register_use_->Add(index);
1707       break;
1708   }
1709 }
1710 
1711 bool RegisterAllocationData::HasFixedUse(MachineRepresentation rep, int index) {
1712   switch (rep) {
1713     case MachineRepresentation::kFloat32:
1714     case MachineRepresentation::kSimd128:
1715       if (kSimpleFPAliasing) {
1716         return fixed_fp_register_use_->Contains(index);
1717       } else {
1718         int alias_base_index = -1;
1719         int aliases = config()->GetAliases(
1720             rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1721         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1722         bool result = false;
1723         while (aliases-- && !result) {
1724           int aliased_reg = alias_base_index + aliases;
1725           result |= fixed_fp_register_use_->Contains(aliased_reg);
1726         }
1727         return result;
1728       }
1729       break;
1730     case MachineRepresentation::kFloat64:
1731       return fixed_fp_register_use_->Contains(index);
1732       break;
1733     default:
1734       DCHECK(!IsFloatingPoint(rep));
1735       return fixed_register_use_->Contains(index);
1736       break;
1737   }
1738 }
1739 
1740 void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
1741                                            int index) {
1742   switch (rep) {
1743     case MachineRepresentation::kFloat32:
1744     case MachineRepresentation::kSimd128:
1745       if (kSimpleFPAliasing) {
1746         assigned_double_registers_->Add(index);
1747       } else {
1748         int alias_base_index = -1;
1749         int aliases = config()->GetAliases(
1750             rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1751         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1752         while (aliases--) {
1753           int aliased_reg = alias_base_index + aliases;
1754           assigned_double_registers_->Add(aliased_reg);
1755         }
1756       }
1757       break;
1758     case MachineRepresentation::kFloat64:
1759       assigned_double_registers_->Add(index);
1760       break;
1761     default:
1762       DCHECK(!IsFloatingPoint(rep));
1763       assigned_registers_->Add(index);
1764       break;
1765   }
1766 }
1767 
1768 bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
1769   return pos.IsFullStart() &&
1770          code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
1771              pos.ToInstructionIndex();
1772 }
1773 
1774 ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
1775     : data_(data) {}
1776 
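// Rewrites an operand carrying a fixed policy (fixed stack slot, fixed
// general register, or fixed FP register) into the concrete AllocatedOperand
// in place. Fixed register inputs are additionally recorded via MarkFixedUse,
// and tagged values are added to the instruction's reference map so the GC
// can find them.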
1777 InstructionOperand* ConstraintBuilder::AllocateFixed(
1778     UnallocatedOperand* operand, int pos, bool is_tagged, bool is_input) {
1779   TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
1780   DCHECK(operand->HasFixedPolicy());
1781   InstructionOperand allocated;
1782   MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1783   int virtual_register = operand->virtual_register();
1784   if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1785     rep = data()->RepresentationFor(virtual_register);
1786   }
1787   if (operand->HasFixedSlotPolicy()) {
1788     allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep,
1789                                  operand->fixed_slot_index());
1790   } else if (operand->HasFixedRegisterPolicy()) {
1791     DCHECK(!IsFloatingPoint(rep));
1792     DCHECK(data()->config()->IsAllocatableGeneralCode(
1793         operand->fixed_register_index()));
1794     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1795                                  operand->fixed_register_index());
1796   } else if (operand->HasFixedFPRegisterPolicy()) {
1797     DCHECK(IsFloatingPoint(rep));
1798     DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
1799     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1800                                  operand->fixed_register_index());
1801   } else {
1802     UNREACHABLE();
1803   }
1804   if (is_input && allocated.IsAnyRegister()) {
1805     data()->MarkFixedUse(rep, operand->fixed_register_index());
1806   }
1807   InstructionOperand::ReplaceWith(operand, &allocated);
1808   if (is_tagged) {
1809     TRACE("Fixed reg is tagged at %d\n", pos);
1810     Instruction* instr = code()->InstructionAt(pos);
1811     if (instr->HasReferenceMap()) {
1812       instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
1813     }
1814   }
1815   return operand;
1816 }
1817 
1818 void ConstraintBuilder::MeetRegisterConstraints() {
1819   for (InstructionBlock* block : code()->instruction_blocks()) {
1820     data_->tick_counter()->DoTick();
1821     MeetRegisterConstraints(block);
1822   }
1823 }
1824 
1825 void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
1826   int start = block->first_instruction_index();
1827   int end = block->last_instruction_index();
1828   DCHECK_NE(-1, start);
1829   for (int i = start; i <= end; ++i) {
1830     MeetConstraintsBefore(i);
1831     if (i != end) MeetConstraintsAfter(i);
1832   }
1833   // Meet register constraints for the last instruction in the block.
1834   MeetRegisterConstraintsForLastInstructionInBlock(block);
1835 }
1836 
1837 void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
1838     const InstructionBlock* block) {
1839   int end = block->last_instruction_index();
1840   Instruction* last_instruction = code()->InstructionAt(end);
1841   for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
1842     InstructionOperand* output_operand = last_instruction->OutputAt(i);
1843     DCHECK(!output_operand->IsConstant());
1844     UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
1845     int output_vreg = output->virtual_register();
1846     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1847     bool assigned = false;
1848     if (output->HasFixedPolicy()) {
1849       AllocateFixed(output, -1, false, false);
1850       // This value is produced on the stack; we never need to spill it.
1851       if (output->IsStackSlot()) {
1852         DCHECK(LocationOperand::cast(output)->index() <
1853                data()->frame()->GetSpillSlotCount());
1854         range->SetSpillOperand(LocationOperand::cast(output));
1855         range->SetSpillStartIndex(end);
1856         assigned = true;
1857       }
1858 
1859       for (const RpoNumber& succ : block->successors()) {
1860         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1861         DCHECK_EQ(1, successor->PredecessorCount());
1862         int gap_index = successor->first_instruction_index();
1863         // Create an unconstrained operand for the same virtual register
1864         // and insert a gap move from the fixed output to the operand.
1865         UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1866                                        output_vreg);
1867         data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
1868       }
1869     }
1870 
1871     if (!assigned) {
1872       for (const RpoNumber& succ : block->successors()) {
1873         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1874         DCHECK_EQ(1, successor->PredecessorCount());
1875         int gap_index = successor->first_instruction_index();
1876         range->RecordSpillLocation(allocation_zone(), gap_index, output);
1877         range->SetSpillStartIndex(gap_index);
1878       }
1879     }
1880   }
1881 }
1882 
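// Handles constraints that take effect after 'instr_index': fixed temps,
// constant outputs (the constant becomes the spill operand), and fixed
// outputs. A fixed output is pinned in place, and a gap move in the START
// position of the following instruction copies it back into an unconstrained
// operand of the same virtual register, e.g. (illustrative) v7 fixed to rax
// yields the gap move "v7 = rax" at instr_index + 1.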
1883 void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
1884   Instruction* first = code()->InstructionAt(instr_index);
1885   // Handle fixed temporaries.
1886   for (size_t i = 0; i < first->TempCount(); i++) {
1887     UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
1888     if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false, false);
1889   }
1890   // Handle constant/fixed output operands.
1891   for (size_t i = 0; i < first->OutputCount(); i++) {
1892     InstructionOperand* output = first->OutputAt(i);
1893     if (output->IsConstant()) {
1894       int output_vreg = ConstantOperand::cast(output)->virtual_register();
1895       TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1896       range->SetSpillStartIndex(instr_index + 1);
1897       range->SetSpillOperand(output);
1898       continue;
1899     }
1900     UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
1901     TopLevelLiveRange* range =
1902         data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
1903     bool assigned = false;
1904     if (first_output->HasFixedPolicy()) {
1905       int output_vreg = first_output->virtual_register();
1906       UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1907                                      output_vreg);
1908       bool is_tagged = code()->IsReference(output_vreg);
1909       if (first_output->HasSecondaryStorage()) {
1910         range->MarkHasPreassignedSlot();
1911         data()->preassigned_slot_ranges().push_back(
1912             std::make_pair(range, first_output->GetSecondaryStorage()));
1913       }
1914       AllocateFixed(first_output, instr_index, is_tagged, false);
1915 
1916       // This value is produced on the stack; we never need to spill it.
1917       if (first_output->IsStackSlot()) {
1918         DCHECK(LocationOperand::cast(first_output)->index() <
1919                data()->frame()->GetTotalFrameSlotCount());
1920         range->SetSpillOperand(LocationOperand::cast(first_output));
1921         range->SetSpillStartIndex(instr_index + 1);
1922         assigned = true;
1923       }
1924       data()->AddGapMove(instr_index + 1, Instruction::START, *first_output,
1925                          output_copy);
1926     }
1927     // Make sure we add a gap move for spilling (if we have not done
1928     // so already).
1929     if (!assigned) {
1930       range->RecordSpillLocation(allocation_zone(), instr_index + 1,
1931                                  first_output);
1932       range->SetSpillStartIndex(instr_index + 1);
1933     }
1934   }
1935 }
1936 
1937 void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
1938   Instruction* second = code()->InstructionAt(instr_index);
1939   // Handle fixed input operands of second instruction.
1940   for (size_t i = 0; i < second->InputCount(); i++) {
1941     InstructionOperand* input = second->InputAt(i);
1942     if (input->IsImmediate()) {
1943       continue;  // Ignore immediates.
1944     }
1945     UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
1946     if (cur_input->HasFixedPolicy()) {
1947       int input_vreg = cur_input->virtual_register();
1948       UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1949                                     input_vreg);
1950       bool is_tagged = code()->IsReference(input_vreg);
1951       AllocateFixed(cur_input, instr_index, is_tagged, true);
1952       data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
1953     }
1954   }
1955   // Handle "output same as input" for second instruction.
1956   for (size_t i = 0; i < second->OutputCount(); i++) {
1957     InstructionOperand* output = second->OutputAt(i);
1958     if (!output->IsUnallocated()) continue;
1959     UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
1960     if (!second_output->HasSameAsInputPolicy()) continue;
1961     DCHECK_EQ(0, i);  // Only valid for first output.
1962     UnallocatedOperand* cur_input =
1963         UnallocatedOperand::cast(second->InputAt(0));
1964     int output_vreg = second_output->virtual_register();
1965     int input_vreg = cur_input->virtual_register();
1966     UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1967                                   input_vreg);
1968     *cur_input =
1969         UnallocatedOperand(*cur_input, second_output->virtual_register());
1970     MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
1971                                                 input_copy, *cur_input);
1972     DCHECK_NOT_NULL(gap_move);
1973     if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
1974       if (second->HasReferenceMap()) {
1975         RegisterAllocationData::DelayedReference delayed_reference = {
1976             second->reference_map(), &gap_move->source()};
1977         data()->delayed_references().push_back(delayed_reference);
1978       }
1979     }
1980   }
1981 }
1982 
1983 void ConstraintBuilder::ResolvePhis() {
1984   // Process the blocks in reverse order.
1985   for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
1986     data_->tick_counter()->DoTick();
1987     ResolvePhis(block);
1988   }
1989 }
1990 
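// Lowers each phi into explicit data flow: for every predecessor, a gap move
// from the corresponding phi input to the phi's output operand is inserted in
// the END position of that predecessor's last instruction. The move
// destinations are remembered in the phi map so committed register
// assignments can be propagated to them later.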
1991 void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
1992   for (PhiInstruction* phi : block->phis()) {
1993     int phi_vreg = phi->virtual_register();
1994     RegisterAllocationData::PhiMapValue* map_value =
1995         data()->InitializePhiMap(block, phi);
1996     InstructionOperand& output = phi->output();
1997     // Map the destination operands, so the commitment phase can find them.
1998     for (size_t i = 0; i < phi->operands().size(); ++i) {
1999       InstructionBlock* cur_block =
2000           code()->InstructionBlockAt(block->predecessors()[i]);
2001       UnallocatedOperand input(UnallocatedOperand::REGISTER_OR_SLOT,
2002                                phi->operands()[i]);
2003       MoveOperands* move = data()->AddGapMove(
2004           cur_block->last_instruction_index(), Instruction::END, input, output);
2005       map_value->AddOperand(&move->destination());
2006       DCHECK(!code()
2007                   ->InstructionAt(cur_block->last_instruction_index())
2008                   ->HasReferenceMap());
2009     }
2010     TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
2011     int gap_index = block->first_instruction_index();
2012     live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
2013     live_range->SetSpillStartIndex(gap_index);
2014     // We use the phi-ness of some nodes in some later heuristics.
2015     live_range->set_is_phi(true);
2016     live_range->set_is_non_loop_phi(!block->IsLoopHeader());
2017   }
2018 }
2019 
2020 LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data,
2021                                    Zone* local_zone)
2022     : data_(data), phi_hints_(local_zone) {}
2023 
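// Standard backward liveness, ignoring loop back edges: the live-out set of a
// block is the union of the live-in sets of its forward successors plus the
// phi inputs contributed along each successor edge. Back edges are handled
// separately by ProcessLoopHeader.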
2024 BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
2025                                             RegisterAllocationData* data) {
2026   size_t block_index = block->rpo_number().ToSize();
2027   BitVector* live_out = data->live_out_sets()[block_index];
2028   if (live_out == nullptr) {
2029     // Compute live out for the given block, not taking backward successor
2030     // edges (loop back edges) into account.
2031     Zone* zone = data->allocation_zone();
2032     const InstructionSequence* code = data->code();
2033 
2034     live_out = new (zone) BitVector(code->VirtualRegisterCount(), zone);
2035 
2036     // Process all successor blocks.
2037     for (const RpoNumber& succ : block->successors()) {
2038       // Add values live on entry to the successor.
2039       if (succ <= block->rpo_number()) continue;
2040       BitVector* live_in = data->live_in_sets()[succ.ToSize()];
2041       if (live_in != nullptr) live_out->Union(*live_in);
2042 
2043       // All phi input operands corresponding to this successor edge are live
2044       // out from this block.
2045       const InstructionBlock* successor = code->InstructionBlockAt(succ);
2046       size_t index = successor->PredecessorIndexOf(block->rpo_number());
2047       DCHECK(index < successor->PredecessorCount());
2048       for (PhiInstruction* phi : successor->phis()) {
2049         live_out->Add(phi->operands()[index]);
2050       }
2051     }
2052     data->live_out_sets()[block_index] = live_out;
2053   }
2054   return live_out;
2055 }
2056 
2057 void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
2058                                            BitVector* live_out) {
2059   // Add an interval that includes the entire block to the live range for
2060   // each live_out value.
2061   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
2062       block->first_instruction_index());
2063   LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
2064                              block->last_instruction_index())
2065                              .NextStart();
2066   BitVector::Iterator iterator(live_out);
2067   while (!iterator.Done()) {
2068     int operand_index = iterator.Current();
2069     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
2070     range->AddUseInterval(start, end, allocation_zone(),
2071                           data()->is_trace_alloc());
2072     iterator.Advance();
2073   }
2074 }
2075 
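// Fixed live ranges use negative ids so they never collide with real virtual
// registers. The ids for general, float64, float32 and simd128 registers
// occupy consecutive negative blocks (in that order, matching the
// fall-through below), each sized kNumberOfFixedRangesPerRegister times the
// register count for that representation.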
2076 int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
2077   int result = -index - 1;
2078   switch (rep) {
2079     case MachineRepresentation::kSimd128:
2080       result -=
2081           kNumberOfFixedRangesPerRegister * config()->num_float_registers();
2082       V8_FALLTHROUGH;
2083     case MachineRepresentation::kFloat32:
2084       result -=
2085           kNumberOfFixedRangesPerRegister * config()->num_double_registers();
2086       V8_FALLTHROUGH;
2087     case MachineRepresentation::kFloat64:
2088       result -=
2089           kNumberOfFixedRangesPerRegister * config()->num_general_registers();
2090       break;
2091     default:
2092       UNREACHABLE();
2093   }
2094   return result;
2095 }
2096 
2097 TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index,
2098                                                        SpillMode spill_mode) {
2099   int offset = spill_mode == SpillMode::kSpillAtDefinition
2100                    ? 0
2101                    : config()->num_general_registers();
2102   DCHECK(index < config()->num_general_registers());
2103   TopLevelLiveRange* result = data()->fixed_live_ranges()[offset + index];
2104   if (result == nullptr) {
2105     MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
2106     result = data()->NewLiveRange(FixedLiveRangeID(offset + index), rep);
2107     DCHECK(result->IsFixed());
2108     result->set_assigned_register(index);
2109     data()->MarkAllocated(rep, index);
2110     if (spill_mode == SpillMode::kSpillDeferred) {
2111       result->set_deferred_fixed();
2112     }
2113     data()->fixed_live_ranges()[offset + index] = result;
2114   }
2115   return result;
2116 }
2117 
2118 TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
2119     int index, MachineRepresentation rep, SpillMode spill_mode) {
2120   int num_regs = config()->num_double_registers();
2121   ZoneVector<TopLevelLiveRange*>* live_ranges =
2122       &data()->fixed_double_live_ranges();
2123   if (!kSimpleFPAliasing) {
2124     switch (rep) {
2125       case MachineRepresentation::kFloat32:
2126         num_regs = config()->num_float_registers();
2127         live_ranges = &data()->fixed_float_live_ranges();
2128         break;
2129       case MachineRepresentation::kSimd128:
2130         num_regs = config()->num_simd128_registers();
2131         live_ranges = &data()->fixed_simd128_live_ranges();
2132         break;
2133       default:
2134         break;
2135     }
2136   }
2137 
2138   int offset = spill_mode == SpillMode::kSpillAtDefinition ? 0 : num_regs;
2139 
2140   DCHECK(index < num_regs);
2141   USE(num_regs);
2142   TopLevelLiveRange* result = (*live_ranges)[offset + index];
2143   if (result == nullptr) {
2144     result = data()->NewLiveRange(FixedFPLiveRangeID(offset + index, rep), rep);
2145     DCHECK(result->IsFixed());
2146     result->set_assigned_register(index);
2147     data()->MarkAllocated(rep, index);
2148     if (spill_mode == SpillMode::kSpillDeferred) {
2149       result->set_deferred_fixed();
2150     }
2151     (*live_ranges)[offset + index] = result;
2152   }
2153   return result;
2154 }
2155 
2156 TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand,
2157                                                   SpillMode spill_mode) {
2158   if (operand->IsUnallocated()) {
2159     return data()->GetOrCreateLiveRangeFor(
2160         UnallocatedOperand::cast(operand)->virtual_register());
2161   } else if (operand->IsConstant()) {
2162     return data()->GetOrCreateLiveRangeFor(
2163         ConstantOperand::cast(operand)->virtual_register());
2164   } else if (operand->IsRegister()) {
2165     return FixedLiveRangeFor(
2166         LocationOperand::cast(operand)->GetRegister().code(), spill_mode);
2167   } else if (operand->IsFPRegister()) {
2168     LocationOperand* op = LocationOperand::cast(operand);
2169     return FixedFPLiveRangeFor(op->register_code(), op->representation(),
2170                                spill_mode);
2171   } else {
2172     return nullptr;
2173   }
2174 }
2175 
2176 UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
2177                                               InstructionOperand* operand,
2178                                               void* hint,
2179                                               UsePositionHintType hint_type) {
2180   return new (allocation_zone()) UsePosition(pos, operand, hint, hint_type);
2181 }
2182 
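// Records a definition at 'position'. Blocks are processed backwards, so the
// range (if it exists) already starts at the block boundary; a definition
// shortens it to start at the defining position instead. A definition with no
// later use creates a minimal interval covering just the definition.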
2183 UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
2184                                       InstructionOperand* operand, void* hint,
2185                                       UsePositionHintType hint_type,
2186                                       SpillMode spill_mode) {
2187   TopLevelLiveRange* range = LiveRangeFor(operand, spill_mode);
2188   if (range == nullptr) return nullptr;
2189 
2190   if (range->IsEmpty() || range->Start() > position) {
2191     // Can happen if there is a definition without use.
2192     range->AddUseInterval(position, position.NextStart(), allocation_zone(),
2193                           data()->is_trace_alloc());
2194     range->AddUsePosition(NewUsePosition(position.NextStart()),
2195                           data()->is_trace_alloc());
2196   } else {
2197     range->ShortenTo(position, data()->is_trace_alloc());
2198   }
2199   if (!operand->IsUnallocated()) return nullptr;
2200   UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2201   UsePosition* use_pos =
2202       NewUsePosition(position, unalloc_operand, hint, hint_type);
2203   range->AddUsePosition(use_pos, data()->is_trace_alloc());
2204   return use_pos;
2205 }
2206 
2207 UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
2208                                    LifetimePosition position,
2209                                    InstructionOperand* operand, void* hint,
2210                                    UsePositionHintType hint_type,
2211                                    SpillMode spill_mode) {
2212   TopLevelLiveRange* range = LiveRangeFor(operand, spill_mode);
2213   if (range == nullptr) return nullptr;
2214   UsePosition* use_pos = nullptr;
2215   if (operand->IsUnallocated()) {
2216     UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2217     use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
2218     range->AddUsePosition(use_pos, data()->is_trace_alloc());
2219   }
2220   range->AddUseInterval(block_start, position, allocation_zone(),
2221                         data()->is_trace_alloc());
2222   return use_pos;
2223 }
2224 
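// Walks the block's instructions in reverse order, shortening ranges at
// definitions (outputs, temps, gap-move destinations) and extending them at
// uses (inputs, gap-move sources). Instructions that clobber registers add
// one-instruction intervals to the corresponding fixed live ranges so that
// no value is allocated across them.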
2225 void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
2226                                            BitVector* live) {
2227   int block_start = block->first_instruction_index();
2228   LifetimePosition block_start_position =
2229       LifetimePosition::GapFromInstructionIndex(block_start);
2230   bool fixed_float_live_ranges = false;
2231   bool fixed_simd128_live_ranges = false;
2232   if (!kSimpleFPAliasing) {
2233     int mask = data()->code()->representation_mask();
2234     fixed_float_live_ranges = (mask & kFloat32Bit) != 0;
2235     fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0;
2236   }
2237   SpillMode spill_mode = SpillModeForBlock(block);
2238 
2239   for (int index = block->last_instruction_index(); index >= block_start;
2240        index--) {
2241     LifetimePosition curr_position =
2242         LifetimePosition::InstructionFromInstructionIndex(index);
2243     Instruction* instr = code()->InstructionAt(index);
2244     DCHECK_NOT_NULL(instr);
2245     DCHECK(curr_position.IsInstructionPosition());
2246     // Process output, inputs, and temps of this instruction.
2247     for (size_t i = 0; i < instr->OutputCount(); i++) {
2248       InstructionOperand* output = instr->OutputAt(i);
2249       if (output->IsUnallocated()) {
2250         // Unsupported.
2251         DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
2252         int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
2253         live->Remove(out_vreg);
2254       } else if (output->IsConstant()) {
2255         int out_vreg = ConstantOperand::cast(output)->virtual_register();
2256         live->Remove(out_vreg);
2257       }
2258       if (block->IsHandler() && index == block_start && output->IsAllocated() &&
2259           output->IsRegister() &&
2260           AllocatedOperand::cast(output)->GetRegister() ==
2261               v8::internal::kReturnRegister0) {
2262         // The register defined here is blocked from gap start - it is the
2263         // exception value.
2264         // TODO(mtrofin): should we explore an explicit opcode for
2265         // the first instruction in the handler?
2266         Define(LifetimePosition::GapFromInstructionIndex(index), output,
2267                spill_mode);
2268       } else {
2269         Define(curr_position, output, spill_mode);
2270       }
2271     }
2272 
2273     if (instr->ClobbersRegisters()) {
2274       for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
2275         // Create a UseInterval at this instruction for all fixed registers,
2276         // (including the instruction outputs). Adding another UseInterval here
2277         // is OK because AddUseInterval will just merge it with the existing
2278         // one at the end of the range.
2279         int code = config()->GetAllocatableGeneralCode(i);
2280         TopLevelLiveRange* range = FixedLiveRangeFor(code, spill_mode);
2281         range->AddUseInterval(curr_position, curr_position.End(),
2282                               allocation_zone(), data()->is_trace_alloc());
2283       }
2284     }
2285 
2286     if (instr->ClobbersDoubleRegisters()) {
2287       for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
2288         // Add a UseInterval for all DoubleRegisters. See comment above for
2289         // general registers.
2290         int code = config()->GetAllocatableDoubleCode(i);
2291         TopLevelLiveRange* range = FixedFPLiveRangeFor(
2292             code, MachineRepresentation::kFloat64, spill_mode);
2293         range->AddUseInterval(curr_position, curr_position.End(),
2294                               allocation_zone(), data()->is_trace_alloc());
2295       }
2296       // Clobber fixed float registers on archs with non-simple aliasing.
2297       if (!kSimpleFPAliasing) {
2298         if (fixed_float_live_ranges) {
2299           for (int i = 0; i < config()->num_allocatable_float_registers();
2300                ++i) {
2301             // Add a UseInterval for all FloatRegisters. See comment above for
2302             // general registers.
2303             int code = config()->GetAllocatableFloatCode(i);
2304             TopLevelLiveRange* range = FixedFPLiveRangeFor(
2305                 code, MachineRepresentation::kFloat32, spill_mode);
2306             range->AddUseInterval(curr_position, curr_position.End(),
2307                                   allocation_zone(), data()->is_trace_alloc());
2308           }
2309         }
2310         if (fixed_simd128_live_ranges) {
2311           for (int i = 0; i < config()->num_allocatable_simd128_registers();
2312                ++i) {
2313             int code = config()->GetAllocatableSimd128Code(i);
2314             TopLevelLiveRange* range = FixedFPLiveRangeFor(
2315                 code, MachineRepresentation::kSimd128, spill_mode);
2316             range->AddUseInterval(curr_position, curr_position.End(),
2317                                   allocation_zone(), data()->is_trace_alloc());
2318           }
2319         }
2320       }
2321     }
2322 
2323     for (size_t i = 0; i < instr->InputCount(); i++) {
2324       InstructionOperand* input = instr->InputAt(i);
2325       if (input->IsImmediate()) {
2326         continue;  // Ignore immediates.
2327       }
2328       LifetimePosition use_pos;
2329       if (input->IsUnallocated() &&
2330           UnallocatedOperand::cast(input)->IsUsedAtStart()) {
2331         use_pos = curr_position;
2332       } else {
2333         use_pos = curr_position.End();
2334       }
2335 
2336       if (input->IsUnallocated()) {
2337         UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
2338         int vreg = unalloc->virtual_register();
2339         live->Add(vreg);
2340         if (unalloc->HasSlotPolicy()) {
2341           if (data()->is_turbo_control_flow_aware_allocation()) {
2342             data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use(
2343                 block->IsDeferred()
2344                     ? TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
2345                     : TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
2346           } else {
2347             data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use(
2348                 TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
2349           }
2350         }
2351       }
2352       Use(block_start_position, use_pos, input, spill_mode);
2353     }
2354 
2355     for (size_t i = 0; i < instr->TempCount(); i++) {
2356       InstructionOperand* temp = instr->TempAt(i);
2357       // Unsupported.
2358       DCHECK_IMPLIES(temp->IsUnallocated(),
2359                      !UnallocatedOperand::cast(temp)->HasSlotPolicy());
2360       if (instr->ClobbersTemps()) {
2361         if (temp->IsRegister()) continue;
2362         if (temp->IsUnallocated()) {
2363           UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
2364           if (temp_unalloc->HasFixedPolicy()) {
2365             continue;
2366           }
2367         }
2368       }
2369       Use(block_start_position, curr_position.End(), temp, spill_mode);
2370       Define(curr_position, temp, spill_mode);
2371     }
2372 
2373     // Process the moves of the instruction's gaps, making their sources live.
2374     const Instruction::GapPosition kPositions[] = {Instruction::END,
2375                                                    Instruction::START};
2376     curr_position = curr_position.PrevStart();
2377     DCHECK(curr_position.IsGapPosition());
2378     for (const Instruction::GapPosition& position : kPositions) {
2379       ParallelMove* move = instr->GetParallelMove(position);
2380       if (move == nullptr) continue;
2381       if (position == Instruction::END) {
2382         curr_position = curr_position.End();
2383       } else {
2384         curr_position = curr_position.Start();
2385       }
2386       for (MoveOperands* cur : *move) {
2387         InstructionOperand& from = cur->source();
2388         InstructionOperand& to = cur->destination();
2389         void* hint = &to;
2390         UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
2391         UsePosition* to_use = nullptr;
2392         int phi_vreg = -1;
2393         if (to.IsUnallocated()) {
2394           int to_vreg = UnallocatedOperand::cast(to).virtual_register();
2395           TopLevelLiveRange* to_range =
2396               data()->GetOrCreateLiveRangeFor(to_vreg);
2397           if (to_range->is_phi()) {
2398             phi_vreg = to_vreg;
2399             if (to_range->is_non_loop_phi()) {
2400               hint = to_range->current_hint_position();
2401               hint_type = hint == nullptr ? UsePositionHintType::kNone
2402                                           : UsePositionHintType::kUsePos;
2403             } else {
2404               hint_type = UsePositionHintType::kPhi;
2405               hint = data()->GetPhiMapValueFor(to_vreg);
2406             }
2407           } else {
2408             if (live->Contains(to_vreg)) {
2409               to_use =
2410                   Define(curr_position, &to, &from,
2411                          UsePosition::HintTypeForOperand(from), spill_mode);
2412               live->Remove(to_vreg);
2413             } else {
2414               cur->Eliminate();
2415               continue;
2416             }
2417           }
2418         } else {
2419           Define(curr_position, &to, spill_mode);
2420         }
2421         UsePosition* from_use = Use(block_start_position, curr_position, &from,
2422                                     hint, hint_type, spill_mode);
2423         // Mark range live.
2424         if (from.IsUnallocated()) {
2425           live->Add(UnallocatedOperand::cast(from).virtual_register());
2426         }
2427         // Resolve use position hints just created.
2428         if (to_use != nullptr && from_use != nullptr) {
2429           to_use->ResolveHint(from_use);
2430           from_use->ResolveHint(to_use);
2431         }
2432         DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved());
2433         DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved());
2434         // Potentially resolve phi hint.
2435         if (phi_vreg != -1) ResolvePhiHint(&from, from_use);
2436       }
2437     }
2438   }
2439 }
2440 
2441 void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
2442                                    BitVector* live) {
2443   for (PhiInstruction* phi : block->phis()) {
2444     // The live range interval already ends at the first instruction of the
2445     // block.
2446     int phi_vreg = phi->virtual_register();
2447     live->Remove(phi_vreg);
2448     // Select a hint from a predecessor block that precedes this block in the
2449     // rpo order. In order of priority:
2450     // - Avoid hints from deferred blocks.
2451     // - Prefer hints from allocated (or explicit) operands.
2452     // - Prefer hints from empty blocks (containing just parallel moves and a
2453     //   jump). In these cases, if we can elide the moves, the jump threader
2454     //   is likely to be able to elide the jump.
2455     // The enforcement of hinting in rpo order is required because hint
2456     // resolution that happens later in the compiler pipeline visits
2457     // instructions in reverse rpo order, relying on the fact that phis are
2458     // encountered before their hints.
2459     InstructionOperand* hint = nullptr;
2460     int hint_preference = 0;
2461 
2462     // The cost of hinting increases with the number of predecessors. At the
2463     // same time, the typical benefit decreases, since this hinting only
2464     // optimises the execution path through one predecessor. A limit of 2 is
2465     // sufficient to hit the common if/else pattern.
2466     int predecessor_limit = 2;
2467 
2468     for (RpoNumber predecessor : block->predecessors()) {
2469       const InstructionBlock* predecessor_block =
2470           code()->InstructionBlockAt(predecessor);
2471       DCHECK_EQ(predecessor_block->rpo_number(), predecessor);
2472 
2473       // Only take hints from earlier rpo numbers.
2474       if (predecessor >= block->rpo_number()) continue;
2475 
2476       // Look up the predecessor instruction.
2477       const Instruction* predecessor_instr =
2478           GetLastInstruction(code(), predecessor_block);
2479       InstructionOperand* predecessor_hint = nullptr;
2480       // Phis are assigned in the END position of the last instruction in each
2481       // predecessor block.
2482       for (MoveOperands* move :
2483            *predecessor_instr->GetParallelMove(Instruction::END)) {
2484         InstructionOperand& to = move->destination();
2485         if (to.IsUnallocated() &&
2486             UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
2487           predecessor_hint = &move->source();
2488           break;
2489         }
2490       }
2491       DCHECK_NOT_NULL(predecessor_hint);
2492 
2493       // For each predecessor, generate a score according to the priorities
2494       // described above, and pick the best one. Flags in higher-order bits have
2495       // a higher priority than those in lower-order bits.
2496       int predecessor_hint_preference = 0;
2497       const int kNotDeferredBlockPreference = (1 << 2);
2498       const int kMoveIsAllocatedPreference = (1 << 1);
2499       const int kBlockIsEmptyPreference = (1 << 0);
2500 
2501       // - Avoid hints from deferred blocks.
2502       if (!predecessor_block->IsDeferred()) {
2503         predecessor_hint_preference |= kNotDeferredBlockPreference;
2504       }
2505 
2506       // - Prefer hints from allocated operands.
2507       //
2508       // Already-allocated operands are typically assigned using the parallel
2509       // moves on the last instruction. For example:
2510       //
2511       //      gap (v101 = [x0|R|w32]) (v100 = v101)
2512       //      ArchJmp
2513       //    ...
2514       //    phi: v100 = v101 v102
2515       //
2516       // We have already found the END move, so look for a matching START move
2517       // from an allocated operand.
2518       //
2519       // Note that we cannot simply look up data()->live_ranges()[vreg] here
2520       // because the live ranges are still being built when this function is
2521       // called.
2522       // TODO(v8): Find a way to separate hinting from live range analysis in
2523       // BuildLiveRanges so that we can use the O(1) live-range look-up.
2524       auto moves = predecessor_instr->GetParallelMove(Instruction::START);
2525       if (moves != nullptr) {
2526         for (MoveOperands* move : *moves) {
2527           InstructionOperand& to = move->destination();
2528           if (predecessor_hint->Equals(to)) {
2529             if (move->source().IsAllocated()) {
2530               predecessor_hint_preference |= kMoveIsAllocatedPreference;
2531             }
2532             break;
2533           }
2534         }
2535       }
2536 
2537       // - Prefer hints from empty blocks.
2538       if (predecessor_block->last_instruction_index() ==
2539           predecessor_block->first_instruction_index()) {
2540         predecessor_hint_preference |= kBlockIsEmptyPreference;
2541       }
2542 
2543       if ((hint == nullptr) ||
2544           (predecessor_hint_preference > hint_preference)) {
2545         // Take the hint from this predecessor.
2546         hint = predecessor_hint;
2547         hint_preference = predecessor_hint_preference;
2548       }
2549 
2550       if (--predecessor_limit <= 0) break;
2551     }
2552     DCHECK_NOT_NULL(hint);
2553 
2554     LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
2555         block->first_instruction_index());
2556     UsePosition* use_pos = Define(block_start, &phi->output(), hint,
2557                                   UsePosition::HintTypeForOperand(*hint),
2558                                   SpillModeForBlock(block));
2559     MapPhiHint(hint, use_pos);
2560   }
2561 }
2562 
2563 void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
2564                                          BitVector* live) {
2565   DCHECK(block->IsLoopHeader());
2566   // Add a live range stretching from the first loop instruction to the last
2567   // for each value live on entry to the header.
2568   BitVector::Iterator iterator(live);
2569   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
2570       block->first_instruction_index());
2571   LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
2572                              code()->LastLoopInstructionIndex(block))
2573                              .NextFullStart();
2574   while (!iterator.Done()) {
2575     int operand_index = iterator.Current();
2576     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
2577     range->EnsureInterval(start, end, allocation_zone(),
2578                           data()->is_trace_alloc());
2579     iterator.Advance();
2580   }
2581   // Insert all values into the live-in sets of all blocks in the loop.
2582   for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt();
2583        ++i) {
2584     live_in_sets()[i]->Union(*live);
2585   }
2586 }
2587 
2588 void LiveRangeBuilder::BuildLiveRanges() {
2589   // Process the blocks in reverse order.
2590   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2591        --block_id) {
2592     data_->tick_counter()->DoTick();
2593     InstructionBlock* block =
2594         code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2595     BitVector* live = ComputeLiveOut(block, data());
2596     // Initially consider all live_out values live for the entire block. We
2597     // will shorten these intervals if necessary.
2598     AddInitialIntervals(block, live);
2599     // Process the instructions in reverse order, generating and killing
2600     // live values.
2601     ProcessInstructions(block, live);
2602     // All phi output operands are killed by this block.
2603     ProcessPhis(block, live);
2604     // Now live is the live_in set for this block, excluding values that are
2605     // live out on backward successor edges.
2606     if (block->IsLoopHeader()) ProcessLoopHeader(block, live);
2607     live_in_sets()[block_id] = live;
2608   }
2609   // Postprocess the ranges.
2610   const size_t live_ranges_size = data()->live_ranges().size();
2611   for (TopLevelLiveRange* range : data()->live_ranges()) {
2612     data_->tick_counter()->DoTick();
2613     CHECK_EQ(live_ranges_size,
2614              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
2615     if (range == nullptr) continue;
2616     // Give slots to all ranges with a non-fixed slot use.
2617     if (range->has_slot_use() && range->HasNoSpillType()) {
2618       SpillMode spill_mode =
2619           range->slot_use_kind() ==
2620                   TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
2621               ? SpillMode::kSpillDeferred
2622               : SpillMode::kSpillAtDefinition;
2623       data()->AssignSpillRangeToLiveRange(range, spill_mode);
2624     }
2625     // TODO(bmeurer): This is a horrible hack to make sure that for constant
2626     // live ranges, every use requires the constant to be in a register.
2627     // Without this hack, all uses with "any" policy would get the constant
2628     // operand assigned.
2629     if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
2630       for (UsePosition* pos = range->first_pos(); pos != nullptr;
2631            pos = pos->next()) {
2632         if (pos->type() == UsePositionType::kRequiresSlot ||
2633             pos->type() == UsePositionType::kRegisterOrSlotOrConstant) {
2634           continue;
2635         }
2636         UsePositionType new_type = UsePositionType::kRegisterOrSlot;
2637         // Can't mark phis as needing a register.
2638         if (!pos->pos().IsGapPosition()) {
2639           new_type = UsePositionType::kRequiresRegister;
2640         }
2641         pos->set_type(new_type, true);
2642       }
2643     }
2644   }
2645   for (auto preassigned : data()->preassigned_slot_ranges()) {
2646     TopLevelLiveRange* range = preassigned.first;
2647     int slot_id = preassigned.second;
2648     SpillRange* spill = range->HasSpillRange()
2649                             ? range->GetSpillRange()
2650                             : data()->AssignSpillRangeToLiveRange(
2651                                   range, SpillMode::kSpillAtDefinition);
2652     spill->set_assigned_slot(slot_id);
2653   }
2654 #ifdef DEBUG
2655   Verify();
2656 #endif
2657 }
2658 
2659 void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
2660                                   UsePosition* use_pos) {
2661   DCHECK(!use_pos->IsResolved());
2662   auto res = phi_hints_.insert(std::make_pair(operand, use_pos));
2663   DCHECK(res.second);
2664   USE(res);
2665 }
2666 
2667 void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
2668                                       UsePosition* use_pos) {
2669   auto it = phi_hints_.find(operand);
2670   if (it == phi_hints_.end()) return;
2671   DCHECK(!it->second->IsResolved());
2672   it->second->ResolveHint(use_pos);
2673 }
2674 
2675 void LiveRangeBuilder::Verify() const {
2676   for (auto& hint : phi_hints_) {
2677     CHECK(hint.second->IsResolved());
2678   }
2679   for (const TopLevelLiveRange* current : data()->live_ranges()) {
2680     if (current != nullptr && !current->IsEmpty()) {
2681       // New LiveRanges should not be split.
2682       CHECK_NULL(current->next());
2683       // General integrity check.
2684       current->Verify();
2685       const UseInterval* first = current->first_interval();
2686       if (first->next() == nullptr) continue;
2687 
2688       // Consecutive intervals should not end and start in the same block,
2689       // otherwise the intervals should have been joined, because the
2690       // variable is live throughout that block.
2691       CHECK(NextIntervalStartsInDifferentBlocks(first));
2692 
2693       for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) {
2694         // Except for the first interval, the other intervals must start at
2695         // a block boundary, otherwise data wouldn't flow to them.
2696         CHECK(IntervalStartsAtBlockBoundary(i));
2697         // The last instruction of the predecessors of the block the interval
2698         // starts must be covered by the range.
2699         CHECK(IntervalPredecessorsCoveredByRange(i, current));
2700         if (i->next() != nullptr) {
2701           // Check the consecutive intervals property, except for the last
2702           // interval, where it doesn't apply.
2703           CHECK(NextIntervalStartsInDifferentBlocks(i));
2704         }
2705       }
2706     }
2707   }
2708 }
2709 
2710 bool LiveRangeBuilder::IntervalStartsAtBlockBoundary(
2711     const UseInterval* interval) const {
2712   LifetimePosition start = interval->start();
2713   if (!start.IsFullStart()) return false;
2714   int instruction_index = start.ToInstructionIndex();
2715   const InstructionBlock* block =
2716       data()->code()->GetInstructionBlock(instruction_index);
2717   return block->first_instruction_index() == instruction_index;
2718 }
2719 
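// Checks that the value is live at the end of every predecessor of the block
// in which {interval} starts, i.e. that {range} covers the last instruction of
// each predecessor block.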
2720 bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange(
2721     const UseInterval* interval, const TopLevelLiveRange* range) const {
2722   LifetimePosition start = interval->start();
2723   int instruction_index = start.ToInstructionIndex();
2724   const InstructionBlock* block =
2725       data()->code()->GetInstructionBlock(instruction_index);
2726   for (RpoNumber pred_index : block->predecessors()) {
2727     const InstructionBlock* predecessor =
2728         data()->code()->InstructionBlockAt(pred_index);
2729     LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex(
2730         predecessor->last_instruction_index());
2731     last_pos = last_pos.NextStart().End();
2732     if (!range->Covers(last_pos)) return false;
2733   }
2734   return true;
2735 }
2736 
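// Checks that {interval} ends in a block that strictly precedes (in RPO order)
// the block in which the next interval starts.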
2737 bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
2738     const UseInterval* interval) const {
2739   DCHECK_NOT_NULL(interval->next());
2740   LifetimePosition end = interval->end();
2741   LifetimePosition next_start = interval->next()->start();
2742   // Since end is not covered, but the previous position is, move back a
2743   // position.
2744   end = end.IsStart() ? end.PrevStart().End() : end.Start();
2745   int last_covered_index = end.ToInstructionIndex();
2746   const InstructionBlock* block =
2747       data()->code()->GetInstructionBlock(last_covered_index);
2748   const InstructionBlock* next_block =
2749       data()->code()->GetInstructionBlock(next_start.ToInstructionIndex());
2750   return block->rpo_number() < next_block->rpo_number();
2751 }
2752 
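// Group each phi's output range together with its input ranges into a bundle,
// as long as their use intervals do not overlap. Later phases can then treat
// bundled ranges together, e.g. merge their spill ranges (see
// LiveRangeBundle::MergeSpillRanges).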
2753 void BundleBuilder::BuildBundles() {
2754   TRACE("Build bundles\n");
2755   // Process the blocks in reverse order.
2756   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2757        --block_id) {
2758     InstructionBlock* block =
2759         code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2760     TRACE("Block B%d\n", block_id);
2761     for (auto phi : block->phis()) {
2762       LiveRange* out_range =
2763           data()->GetOrCreateLiveRangeFor(phi->virtual_register());
2764       LiveRangeBundle* out = out_range->get_bundle();
2765       if (out == nullptr) {
2766         out = new (data()->allocation_zone())
2767             LiveRangeBundle(data()->allocation_zone(), next_bundle_id_++);
2768         out->TryAddRange(out_range);
2769       }
2770       TRACE("Processing phi for v%d with %d:%d\n", phi->virtual_register(),
2771             out_range->TopLevel()->vreg(), out_range->relative_id());
2772       for (auto input : phi->operands()) {
2773         LiveRange* input_range = data()->GetOrCreateLiveRangeFor(input);
2774         TRACE("Input value v%d with range %d:%d\n", input,
2775               input_range->TopLevel()->vreg(), input_range->relative_id());
2776         LiveRangeBundle* input_bundle = input_range->get_bundle();
2777         if (input_bundle != nullptr) {
2778           TRACE("Merge\n");
2779           if (out->TryMerge(input_bundle, data()->is_trace_alloc()))
2780             TRACE("Merged %d and %d to %d\n", phi->virtual_register(), input,
2781                   out->id());
2782         } else {
2783           TRACE("Add\n");
2784           if (out->TryAddRange(input_range))
2785             TRACE("Added %d and %d to %d\n", phi->virtual_register(), input,
2786                   out->id());
2787         }
2788       }
2789     }
2790     TRACE("Done block B%d\n", block_id);
2791   }
2792 }
2793 
2794 bool LiveRangeBundle::TryAddRange(LiveRange* range) {
2795   DCHECK_NULL(range->get_bundle());
2796   // We may only add a new live range if its use intervals do not
2797   // overlap with existing intervals in the bundle.
2798   if (UsesOverlap(range->first_interval())) return false;
2799   ranges_.insert(range);
2800   range->set_bundle(this);
2801   InsertUses(range->first_interval());
2802   return true;
2803 }
2804 bool LiveRangeBundle::TryMerge(LiveRangeBundle* other, bool trace_alloc) {
2805   if (other == this) return true;
2806 
2807   auto iter1 = uses_.begin();
2808   auto iter2 = other->uses_.begin();
2809 
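  // Walk both sorted use lists in parallel; if any pair of uses overlaps, the
  // bundles cannot be merged.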
2810   while (iter1 != uses_.end() && iter2 != other->uses_.end()) {
2811     if (iter1->start >= iter2->end) {
2812       ++iter2;
2813     } else if (iter2->start >= iter1->end) {
2814       ++iter1;
2815     } else {
2816       TRACE_COND(trace_alloc, "No merge %d:%d %d:%d\n", iter1->start,
2817                  iter1->end, iter2->start, iter2->end);
2818       return false;
2819     }
2820   }
2821   // Uses are disjoint, merging is possible.
2822   for (auto it = other->ranges_.begin(); it != other->ranges_.end(); ++it) {
2823     (*it)->set_bundle(this);
2824     InsertUses((*it)->first_interval());
2825   }
2826   ranges_.insert(other->ranges_.begin(), other->ranges_.end());
2827   other->ranges_.clear();
2828 
2829   return true;
2830 }
2831 
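// Merge the spill ranges of all ranges in this bundle into a single spill
// range (where possible), so that bundled values can share a stack slot.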
2832 void LiveRangeBundle::MergeSpillRanges() {
2833   SpillRange* target = nullptr;
2834   for (auto range : ranges_) {
2835     if (range->TopLevel()->HasSpillRange()) {
2836       SpillRange* current = range->TopLevel()->GetSpillRange();
2837       if (target == nullptr) {
2838         target = current;
2839       } else if (target != current) {
2840         target->TryMerge(current);
2841       }
2842     }
2843   }
2844 }
2845 
2846 RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
2847                                      RegisterKind kind)
2848     : data_(data),
2849       mode_(kind),
2850       num_registers_(GetRegisterCount(data->config(), kind)),
2851       num_allocatable_registers_(
2852           GetAllocatableRegisterCount(data->config(), kind)),
2853       allocatable_register_codes_(
2854           GetAllocatableRegisterCodes(data->config(), kind)),
2855       check_fp_aliasing_(false) {
2856   if (!kSimpleFPAliasing && kind == FP_REGISTERS) {
2857     check_fp_aliasing_ = (data->code()->representation_mask() &
2858                           (kFloat32Bit | kSimd128Bit)) != 0;
2859   }
2860 }
2861 
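// Returns the gap position before {instruction_index} if it lies strictly
// inside {range}, otherwise an invalid position.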
2862 LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
2863     const LiveRange* range, int instruction_index) {
2864   LifetimePosition ret = LifetimePosition::Invalid();
2865 
2866   ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
2867   if (range->Start() >= ret || ret >= range->End()) {
2868     return LifetimePosition::Invalid();
2869   }
2870   return ret;
2871 }
2872 
2873 void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
2874   size_t initial_range_count = data()->live_ranges().size();
2875   for (size_t i = 0; i < initial_range_count; ++i) {
2876     CHECK_EQ(initial_range_count,
2877              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
2878     TopLevelLiveRange* range = data()->live_ranges()[i];
2879     if (!CanProcessRange(range)) continue;
2880     // Only assume the range is defined by a memory operand if we are
2881     // guaranteed to spill it or it has a spill operand.
2882     if (range->HasNoSpillType() ||
2883         (range->HasSpillRange() && !range->has_non_deferred_slot_use())) {
2884       continue;
2885     }
2886     LifetimePosition start = range->Start();
2887     TRACE("Live range %d:%d is defined by a spill operand.\n",
2888           range->TopLevel()->vreg(), range->relative_id());
2889     LifetimePosition next_pos = start;
2890     if (next_pos.IsGapPosition()) {
2891       next_pos = next_pos.NextStart();
2892     }
2893 
2894     // With splinters, we can be more strict and skip over positions
2895     // not strictly needing registers.
2896     UsePosition* pos =
2897         range->IsSplinter()
2898             ? range->NextRegisterPosition(next_pos)
2899             : range->NextUsePositionRegisterIsBeneficial(next_pos);
2900     // If the range already has a spill operand and it doesn't need a
2901     // register immediately, split it and spill the first part of the range.
2902     if (pos == nullptr) {
2903       Spill(range, SpillMode::kSpillAtDefinition);
2904     } else if (pos->pos() > range->Start().NextStart()) {
2905       // Do not spill the live range eagerly if the use position that can
2906       // benefit from the register is too close to the start of the live range.
2907       LifetimePosition split_pos = GetSplitPositionForInstruction(
2908           range, pos->pos().ToInstructionIndex());
2909       // There is no place to split, so we can't split and spill.
2910       if (!split_pos.IsValid()) continue;
2911 
2912       split_pos =
2913           FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
2914 
2915       SplitRangeAt(range, split_pos);
2916       Spill(range, SpillMode::kSpillAtDefinition);
2917     }
2918   }
2919 }
2920 
2921 LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
2922                                            LifetimePosition pos) {
2923   DCHECK(!range->TopLevel()->IsFixed());
2924   TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(),
2925         range->relative_id(), pos.value());
2926 
2927   if (pos <= range->Start()) return range;
2928 
2929   // We can't properly connect live ranges if splitting occurred at the end
2930   // of a block.
2931   DCHECK(pos.IsStart() || pos.IsGapPosition() ||
2932          (GetInstructionBlock(code(), pos)->last_instruction_index() !=
2933           pos.ToInstructionIndex()));
2934 
2935   LiveRange* result = range->SplitAt(pos, allocation_zone());
2936   return result;
2937 }
2938 
2939 LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
2940                                            LifetimePosition start,
2941                                            LifetimePosition end) {
2942   DCHECK(!range->TopLevel()->IsFixed());
2943   TRACE("Splitting live range %d:%d in position between [%d, %d]\n",
2944         range->TopLevel()->vreg(), range->relative_id(), start.value(),
2945         end.value());
2946 
2947   LifetimePosition split_pos = FindOptimalSplitPos(start, end);
2948   DCHECK(split_pos >= start);
2949   return SplitRangeAt(range, split_pos);
2950 }
2951 
2952 LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
2953                                                         LifetimePosition end) {
2954   int start_instr = start.ToInstructionIndex();
2955   int end_instr = end.ToInstructionIndex();
2956   DCHECK_LE(start_instr, end_instr);
2957 
2958   // We have no choice.
2959   if (start_instr == end_instr) return end;
2960 
2961   const InstructionBlock* start_block = GetInstructionBlock(code(), start);
2962   const InstructionBlock* end_block = GetInstructionBlock(code(), end);
2963 
2964   if (end_block == start_block) {
2965     // The interval is split in the same basic block. Split at the latest
2966     // possible position.
2967     return end;
2968   }
2969 
2970   const InstructionBlock* block = end_block;
2971   // Find header of outermost loop.
2972   do {
2973     const InstructionBlock* loop = GetContainingLoop(code(), block);
2974     if (loop == nullptr ||
2975         loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) {
2976       // No more loops or loop starts before the lifetime start.
2977       break;
2978     }
2979     block = loop;
2980   } while (true);
2981 
2982   // We did not find any suitable outer loop. Split at the latest possible
2983   // position unless end_block is a loop header itself.
2984   if (block == end_block && !end_block->IsLoopHeader()) return end;
2985 
2986   return LifetimePosition::GapFromInstructionIndex(
2987       block->first_instruction_index());
2988 }
2989 
2990 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
2991     LiveRange* range, LifetimePosition pos, SpillMode spill_mode,
2992     LiveRange** begin_spill_out) {
2993   *begin_spill_out = range;
2994   // TODO(herhut): Be more clever here as long as we do not move pos out of
2995   // deferred code.
2996   if (spill_mode == SpillMode::kSpillDeferred) return pos;
2997   const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
2998   const InstructionBlock* loop_header =
2999       block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
3000   if (loop_header == nullptr) return pos;
3001 
3002   if (data()->is_turbo_control_flow_aware_allocation()) {
3003     while (loop_header != nullptr) {
3004       // We are going to spill the live range inside the loop.
3005       // If possible, try to move the spilling position backwards to the loop
3006       // header. This will reduce the number of memory moves on the back edge.
3007       LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
3008           loop_header->first_instruction_index());
3009       auto& loop_header_state =
3010           data()->GetSpillState(loop_header->rpo_number());
3011       for (LiveRange* live_at_header : loop_header_state) {
3012         if (live_at_header->TopLevel() != range->TopLevel() ||
3013             !live_at_header->Covers(loop_start) || live_at_header->spilled()) {
3014           continue;
3015         }
3016         LiveRange* check_use = live_at_header;
3017         for (; check_use != nullptr && check_use->Start() < pos;
3018              check_use = check_use->next()) {
3019           UsePosition* next_use = check_use->NextRegisterPosition(loop_start);
3020           // A UsePosition at the end of a UseInterval may
3021           // have the same value as the start of the next range.
3022           if (next_use != nullptr && next_use->pos() <= pos) {
3023             return pos;
3024           }
3025         }
3026         // No register use inside the loop before the pos.
3027         *begin_spill_out = live_at_header;
3028         pos = loop_start;
3029         break;
3030       }
3031 
3032       // Try hoisting out to an outer loop.
3033       loop_header = GetContainingLoop(code(), loop_header);
3034     }
3035   } else {
3036     const UsePosition* prev_use =
3037         range->PreviousUsePositionRegisterIsBeneficial(pos);
3038 
3039     while (loop_header != nullptr) {
3040       // We are going to spill the live range inside the loop.
3041       // If possible, try to move the spilling position backwards to a loop
3042       // header inside the current range. This will reduce the number of memory
3043       // moves on the back edge.
3044       LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
3045           loop_header->first_instruction_index());
3046 
3047       if (range->Covers(loop_start)) {
3048         if (prev_use == nullptr || prev_use->pos() < loop_start) {
3049           // No register beneficial use inside the loop before the pos.
3050           pos = loop_start;
3051         }
3052       }
3053 
3054       // Try hoisting out to an outer loop.
3055       loop_header = GetContainingLoop(code(), loop_header);
3056     }
3057   }
3058   return pos;
3059 }
3060 
3061 void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) {
3062   DCHECK(!range->spilled());
3063   DCHECK(spill_mode == SpillMode::kSpillAtDefinition ||
3064          GetInstructionBlock(code(), range->Start())->IsDeferred());
3065   TopLevelLiveRange* first = range->TopLevel();
3066   TRACE("Spilling live range %d:%d mode %d\n", first->vreg(),
3067         range->relative_id(), spill_mode);
3068 
3069   TRACE("Starting spill type is %d\n", static_cast<int>(first->spill_type()));
3070   if (first->HasNoSpillType()) {
3071     TRACE("New spill range needed");
3072     data()->AssignSpillRangeToLiveRange(first, spill_mode);
3073   }
3074   // Upgrade the spillmode, in case this was only spilled in deferred code so
3075   // far.
3076   if ((spill_mode == SpillMode::kSpillAtDefinition) &&
3077       (first->spill_type() ==
3078        TopLevelLiveRange::SpillType::kDeferredSpillRange)) {
3079     TRACE("Upgrading\n");
3080     first->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
3081   }
3082   TRACE("Final spill type is %d\n", static_cast<int>(first->spill_type()));
3083   range->Spill();
3084 }
3085 
3086 const char* RegisterAllocator::RegisterName(int register_code) const {
3087   if (register_code == kUnassignedRegister) return "unassigned";
3088   return mode() == GENERAL_REGISTERS
3089              ? i::RegisterName(Register::from_code(register_code))
3090              : i::RegisterName(DoubleRegister::from_code(register_code));
3091 }
3092 
3093 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
3094                                          RegisterKind kind, Zone* local_zone)
3095     : RegisterAllocator(data, kind),
3096       unhandled_live_ranges_(local_zone),
3097       active_live_ranges_(local_zone),
3098       inactive_live_ranges_(num_registers(), InactiveLiveRangeQueue(local_zone),
3099                             local_zone),
3100       next_active_ranges_change_(LifetimePosition::Invalid()),
3101       next_inactive_ranges_change_(LifetimePosition::Invalid()) {
3102   active_live_ranges().reserve(8);
3103 }
3104 
3105 void LinearScanAllocator::MaybeSpillPreviousRanges(LiveRange* begin_range,
3106                                                    LifetimePosition begin_pos,
3107                                                    LiveRange* end_range) {
3108   // Spill begin_range after begin_pos, then spill every live range of this
3109   // virtual register until but excluding end_range.
3110   DCHECK(begin_range->Covers(begin_pos));
3111   DCHECK_EQ(begin_range->TopLevel(), end_range->TopLevel());
3112 
3113   if (begin_range != end_range) {
3114     DCHECK_LE(begin_range->End(), end_range->Start());
3115     if (!begin_range->spilled()) {
3116       SpillAfter(begin_range, begin_pos, SpillMode::kSpillAtDefinition);
3117     }
3118     for (LiveRange* range = begin_range->next(); range != end_range;
3119          range = range->next()) {
3120       if (!range->spilled()) {
3121         range->Spill();
3122       }
3123     }
3124   }
3125 }
3126 
3127 void LinearScanAllocator::MaybeUndoPreviousSplit(LiveRange* range) {
3128   if (range->next() != nullptr && range->next()->ShouldRecombine()) {
3129     LiveRange* to_remove = range->next();
3130     TRACE("Recombining %d:%d with %d\n", range->TopLevel()->vreg(),
3131           range->relative_id(), to_remove->relative_id());
3132 
3133     // Remove the range from unhandled, as attaching it will change its
3134     // state and hence ordering in the unhandled set.
3135     auto removed_cnt = unhandled_live_ranges().erase(to_remove);
3136     DCHECK_EQ(removed_cnt, 1);
3137     USE(removed_cnt);
3138 
3139     range->AttachToNext();
3140   } else if (range->next() != nullptr) {
3141     TRACE("No recombine for %d:%d to %d\n", range->TopLevel()->vreg(),
3142           range->relative_id(), range->next()->relative_id());
3143   }
3144 }
3145 
3146 void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet* to_be_live,
3147                                              LifetimePosition position,
3148                                              SpillMode spill_mode) {
3149   for (auto it = active_live_ranges().begin();
3150        it != active_live_ranges().end();) {
3151     LiveRange* active_range = *it;
3152     TopLevelLiveRange* toplevel = (*it)->TopLevel();
3153     auto found = to_be_live->find({toplevel, kUnassignedRegister});
3154     if (found == to_be_live->end()) {
3155       // Is not contained in {to_be_live}, spill it.
3156       // Fixed registers are exempt from this. They might have been
3157       // added from inactive at the block boundary but we know that
3158       // they cannot conflict as they are built before register
3159       // allocation starts. It would be algorithmically fine to split
3160       // them and reschedule but the code does not allow us to do this.
3161       if (toplevel->IsFixed()) {
3162         TRACE("Keeping reactivated fixed range for %s\n",
3163               RegisterName(toplevel->assigned_register()));
3164         ++it;
3165       } else {
3166         // When spilling a previously spilled/reloaded range, we add back the
3167         // tail that we might have split off when we reloaded/spilled it
3168         // previously. Otherwise we might keep generating small split-offs.
3169         MaybeUndoPreviousSplit(active_range);
3170         TRACE("Putting back %d:%d\n", toplevel->vreg(),
3171               active_range->relative_id());
3172         LiveRange* split = SplitRangeAt(active_range, position);
3173         DCHECK_NE(split, active_range);
3174 
3175         // Make sure we revisit this range once it has a use that requires
3176         // a register.
3177         UsePosition* next_use = split->NextRegisterPosition(position);
3178         if (next_use != nullptr) {
3179           // Move to the start of the gap before use so that we have a space
3180           // to perform the potential reload. Otherwise, do not spill but add
3181           // to unhandled for reallocation.
3182           LifetimePosition revisit_at = next_use->pos().FullStart();
3183           TRACE("Next use at %d\n", revisit_at.value());
3184           if (!data()->IsBlockBoundary(revisit_at)) {
3185             // Leave some space so we have enough gap room.
3186             revisit_at = revisit_at.PrevStart().FullStart();
3187           }
3188           // If this range became live right at the block boundary that we are
3189           // currently processing, we do not need to split it. Instead move it
3190           // to unhandled right away.
3191           if (position < revisit_at) {
3192             LiveRange* third_part = SplitRangeAt(split, revisit_at);
3193             DCHECK_NE(split, third_part);
3194             Spill(split, spill_mode);
3195             TRACE("Marking %d:%d to recombine\n", toplevel->vreg(),
3196                   third_part->relative_id());
3197             third_part->SetRecombine();
3198             AddToUnhandled(third_part);
3199           } else {
3200             AddToUnhandled(split);
3201           }
3202         } else {
3203           Spill(split, spill_mode);
3204         }
3205         it = ActiveToHandled(it);
3206       }
3207     } else {
3208       // This range is contained in {to_be_live}, so we can keep it.
3209       int expected_register = (*found).expected_register;
3210       to_be_live->erase(found);
3211       if (expected_register == active_range->assigned_register()) {
3212         // Was live and in the correct register, simply pass through.
3213         TRACE("Keeping %d:%d in %s\n", toplevel->vreg(),
3214               active_range->relative_id(),
3215               RegisterName(active_range->assigned_register()));
3216         ++it;
3217       } else {
3218         // Was live but in the wrong register. Split and schedule for
3219         // allocation.
3220         TRACE("Scheduling %d:%d\n", toplevel->vreg(),
3221               active_range->relative_id());
3222         LiveRange* split = SplitRangeAt(active_range, position);
3223         split->set_controlflow_hint(expected_register);
3224         AddToUnhandled(split);
3225         it = ActiveToHandled(it);
3226       }
3227     }
3228   }
3229 }
3230 
3231 LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range,
3232                                                        int reg) {
3233   // We know the register is currently free but it might be in
3234   // use by a currently inactive range. So we might not be able
3235   // to reload for the full distance. In such case, split here.
3236   // TODO(herhut):
3237   // It might be better if we could use the normal unhandled queue and
3238   // give reloading registers precedence. That way we would compute the
3239   // intersection for the entire future.
3240   LifetimePosition new_end = range->End();
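  // Scan all inactive ranges that use this register (or an aliasing one) and
  // truncate the reload at the first position where one of them becomes live
  // again.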
3241   for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
3242     if ((kSimpleFPAliasing || !check_fp_aliasing()) && cur_reg != reg) {
3243       continue;
3244     }
3245     for (const auto cur_inactive : inactive_live_ranges(cur_reg)) {
3246       if (!kSimpleFPAliasing && check_fp_aliasing() &&
3247           !data()->config()->AreAliases(cur_inactive->representation(), cur_reg,
3248                                         range->representation(), reg)) {
3249         continue;
3250       }
3251       for (auto interval = cur_inactive->first_interval(); interval != nullptr;
3252            interval = interval->next()) {
3253         if (interval->start() > new_end) break;
3254         if (interval->end() <= range->Start()) continue;
3255         if (new_end > interval->start()) new_end = interval->start();
3256       }
3257     }
3258   }
3259   if (new_end != range->End()) {
3260     TRACE("Found new end for %d:%d at %d\n", range->TopLevel()->vreg(),
3261           range->relative_id(), new_end.value());
3262     LiveRange* tail = SplitRangeAt(range, new_end);
3263     AddToUnhandled(tail);
3264   }
3265   SetLiveRangeAssignedRegister(range, reg);
3266   return range;
3267 }
3268 
3269 void LinearScanAllocator::ReloadLiveRanges(
3270     RangeWithRegisterSet const& to_be_live, LifetimePosition position) {
3271   // Assumption: All ranges in {to_be_live} are currently spilled and there are
3272   // no conflicting registers in the active ranges.
3273   // The former is ensured by SpillNotLiveRanges, the latter is by construction
3274   // of the to_be_live set.
3275   for (RangeWithRegister range_with_register : to_be_live) {
3276     TopLevelLiveRange* range = range_with_register.range;
3277     int reg = range_with_register.expected_register;
3278     LiveRange* to_resurrect = range->GetChildCovers(position);
3279     if (to_resurrect == nullptr) {
3280       // While the range was live until the end of the predecessor block, it is
3281       // not live in this block. Either there is a lifetime gap or the range
3282       // died.
3283       TRACE("No candidate for %d at %d\n", range->vreg(), position.value());
3284     } else {
3285       // We might be resurrecting a range that we spilled until its next use
3286       // before. In such cases, we have to unsplit it before processing as
3287       // otherwise we might get register changes from one range to the other
3288       // in the middle of blocks.
3289       // If there is a gap between this range and the next, we can just keep
3290       // it as a register change won't hurt.
3291       MaybeUndoPreviousSplit(to_resurrect);
3292       if (to_resurrect->Start() == position) {
3293         // This range already starts at this block. It might have been spilled,
3294         // so we have to unspill it. Otherwise, it is already in the unhandled
3295         // queue waiting for processing.
3296         DCHECK(!to_resurrect->HasRegisterAssigned());
3297         TRACE("Reload %d:%d starting at %d itself\n", range->vreg(),
3298               to_resurrect->relative_id(), position.value());
3299         if (to_resurrect->spilled()) {
3300           to_resurrect->Unspill();
3301           to_resurrect->set_controlflow_hint(reg);
3302           AddToUnhandled(to_resurrect);
3303         } else {
3304           // Assign the preassigned register if we know it. Otherwise, nothing
3305           // to do as the range is already in unhandled.
3306           if (reg != kUnassignedRegister) {
3307             auto erased_cnt = unhandled_live_ranges().erase(to_resurrect);
3308             DCHECK_EQ(erased_cnt, 1);
3309             USE(erased_cnt);
3310             // We know that there is no conflict with active ranges, so just
3311             // assign the register to the range.
3312             to_resurrect = AssignRegisterOnReload(to_resurrect, reg);
3313             AddToActive(to_resurrect);
3314           }
3315         }
3316       } else {
3317         // This range was spilled before. We have to split it and schedule the
3318         // second part for allocation (or assign the register if we know).
3319         DCHECK(to_resurrect->spilled());
3320         LiveRange* split = SplitRangeAt(to_resurrect, position);
3321         TRACE("Reload %d:%d starting at %d as %d\n", range->vreg(),
3322               to_resurrect->relative_id(), split->Start().value(),
3323               split->relative_id());
3324         DCHECK_NE(split, to_resurrect);
3325         if (reg != kUnassignedRegister) {
3326           // We know that there is no conflict with active ranges, so just
3327           // assign the register to the range.
3328           split = AssignRegisterOnReload(split, reg);
3329           AddToActive(split);
3330         } else {
3331           // Let normal register assignment find a suitable register.
3332           split->set_controlflow_hint(reg);
3333           AddToUnhandled(split);
3334         }
3335       }
3336     }
3337   }
3338 }
3339 
3340 RpoNumber LinearScanAllocator::ChooseOneOfTwoPredecessorStates(
3341     InstructionBlock* current_block, LifetimePosition boundary) {
3342   using SmallRangeVector =
3343       base::SmallVector<TopLevelLiveRange*,
3344                         RegisterConfiguration::kMaxRegisters>;
3345   // Pick the state that would generate the least spill/reloads.
3346   // Compute vectors of ranges with imminent use for both sides.
3347   // As GetChildCovers is cached, it is cheaper to repeatedly
3348   // call it rather than compute a shared set first.
3349   auto& left = data()->GetSpillState(current_block->predecessors()[0]);
3350   auto& right = data()->GetSpillState(current_block->predecessors()[1]);
3351   SmallRangeVector left_used;
3352   for (const auto item : left) {
3353     LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
3354     if (at_next_block != nullptr &&
3355         at_next_block->NextUsePositionRegisterIsBeneficial(boundary) !=
3356             nullptr) {
3357       left_used.emplace_back(item->TopLevel());
3358     }
3359   }
3360   SmallRangeVector right_used;
3361   for (const auto item : right) {
3362     LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
3363     if (at_next_block != nullptr &&
3364         at_next_block->NextUsePositionRegisterIsBeneficial(boundary) !=
3365             nullptr) {
3366       right_used.emplace_back(item->TopLevel());
3367     }
3368   }
3369   if (left_used.empty() && right_used.empty()) {
3370     // There are no beneficial register uses. Look at any use at
3371     // all. We do not account for all uses, like flowing into a phi.
3372     // So we just look at ranges still being live.
3373     TRACE("Looking at only uses\n");
3374     for (const auto item : left) {
3375       LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
3376       if (at_next_block != nullptr &&
3377           at_next_block->NextUsePosition(boundary) != nullptr) {
3378         left_used.emplace_back(item->TopLevel());
3379       }
3380     }
3381     for (const auto item : right) {
3382       LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
3383       if (at_next_block != nullptr &&
3384           at_next_block->NextUsePosition(boundary) != nullptr) {
3385         right_used.emplace_back(item->TopLevel());
3386       }
3387     }
3388   }
3389   // Now left_used and right_used contains those ranges that matter.
3390   // Count which side matches this most.
3391   TRACE("Vote went %zu vs %zu\n", left_used.size(), right_used.size());
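  // Ties are broken in favor of the second predecessor.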
3392   return left_used.size() > right_used.size()
3393              ? current_block->predecessors()[0]
3394              : current_block->predecessors()[1];
3395 }
3396 
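// Returns whether assigning register {reg} with representation {rep} would
// alias a register already reserved for one of the ranges in {to_be_live}.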
3397 bool LinearScanAllocator::CheckConflict(MachineRepresentation rep, int reg,
3398                                         RangeWithRegisterSet* to_be_live) {
3399   for (RangeWithRegister range_with_reg : *to_be_live) {
3400     if (data()->config()->AreAliases(range_with_reg.range->representation(),
3401                                      range_with_reg.expected_register, rep,
3402                                      reg)) {
3403       return true;
3404     }
3405   }
3406   return false;
3407 }
3408 
3409 void LinearScanAllocator::ComputeStateFromManyPredecessors(
3410     InstructionBlock* current_block, RangeWithRegisterSet* to_be_live) {
3411   struct Vote {
3412     size_t count;
3413     int used_registers[RegisterConfiguration::kMaxRegisters];
3414   };
3415   struct TopLevelLiveRangeComparator {
3416     bool operator()(const TopLevelLiveRange* lhs,
3417                     const TopLevelLiveRange* rhs) const {
3418       return lhs->vreg() < rhs->vreg();
3419     }
3420   };
3421   ZoneMap<TopLevelLiveRange*, Vote, TopLevelLiveRangeComparator> counts(
3422       data()->allocation_zone());
3423   int deferred_blocks = 0;
3424   for (RpoNumber pred : current_block->predecessors()) {
3425     if (!ConsiderBlockForControlFlow(current_block, pred)) {
3426       // Back edges of a loop count as deferred here too.
3427       deferred_blocks++;
3428       continue;
3429     }
3430     const auto& pred_state = data()->GetSpillState(pred);
3431     for (LiveRange* range : pred_state) {
3432       // We might have spilled the register backwards, so the range we
3433       // stored might have lost its register. Ignore those.
3434       if (!range->HasRegisterAssigned()) continue;
3435       TopLevelLiveRange* toplevel = range->TopLevel();
3436       auto previous = counts.find(toplevel);
3437       if (previous == counts.end()) {
3438         auto result = counts.emplace(std::make_pair(toplevel, Vote{1, {0}}));
3439         CHECK(result.second);
3440         result.first->second.used_registers[range->assigned_register()]++;
3441       } else {
3442         previous->second.count++;
3443         previous->second.used_registers[range->assigned_register()]++;
3444       }
3445     }
3446   }
3447 
3448   // Choose the live ranges from the majority.
3449   const size_t majority =
3450       (current_block->PredecessorCount() + 2 - deferred_blocks) / 2;
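  // A range is chosen if it was live (in a register) out of strictly more than
  // half of the non-deferred predecessors, i.e. majority ==
  // floor((PredecessorCount - deferred_blocks) / 2) + 1.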
3451   bool taken_registers[RegisterConfiguration::kMaxRegisters] = {0};
3452   auto assign_to_live = [this, counts, majority](
3453                             std::function<bool(TopLevelLiveRange*)> filter,
3454                             RangeWithRegisterSet* to_be_live,
3455                             bool* taken_registers) {
3456     bool check_aliasing = !kSimpleFPAliasing && check_fp_aliasing();
3457     for (const auto& val : counts) {
3458       if (!filter(val.first)) continue;
3459       if (val.second.count >= majority) {
3460         int register_max = 0;
3461         int reg = kUnassignedRegister;
3462         bool conflict = false;
3463         int num_regs = num_registers();
3464         int num_codes = num_allocatable_registers();
3465         const int* codes = allocatable_register_codes();
3466         MachineRepresentation rep = val.first->representation();
3467         if (check_aliasing && (rep == MachineRepresentation::kFloat32 ||
3468                                rep == MachineRepresentation::kSimd128))
3469           GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
3470         for (int idx = 0; idx < num_regs; idx++) {
3471           int uses = val.second.used_registers[idx];
3472           if (uses == 0) continue;
3473           if (uses > register_max || (conflict && uses == register_max)) {
3474             reg = idx;
3475             register_max = uses;
3476             conflict = check_aliasing ? CheckConflict(rep, reg, to_be_live)
3477                                       : taken_registers[reg];
3478           }
3479         }
3480         if (conflict) {
3481           reg = kUnassignedRegister;
3482         } else if (!check_aliasing) {
3483           taken_registers[reg] = true;
3484         }
3485         to_be_live->emplace(val.first, reg);
3486         TRACE("Reset %d as live due vote %zu in %s\n",
3487               val.first->TopLevel()->vreg(), val.second.count,
3488               RegisterName(reg));
3489       }
3490     }
3491   };
3492   // First round, process fixed registers, as these have precedence.
3493   // There is only one fixed range per register, so we cannot have
3494   // conflicts.
3495   assign_to_live([](TopLevelLiveRange* r) { return r->IsFixed(); }, to_be_live,
3496                  taken_registers);
3497   // Second round, process the rest.
3498   assign_to_live([](TopLevelLiveRange* r) { return !r->IsFixed(); }, to_be_live,
3499                  taken_registers);
3500 }
3501 
3502 bool LinearScanAllocator::ConsiderBlockForControlFlow(
3503     InstructionBlock* current_block, RpoNumber predecessor) {
3504   // We ignore predecessors on back edges when looking for control flow effects,
3505   // as those lie in the future of allocation and we have no data yet. Also,
3506   // deferred blocks are ignored on deferred to non-deferred boundaries, as we
3507   // do not want them to influence allocation of non-deferred code.
3508   return (predecessor < current_block->rpo_number()) &&
3509          (current_block->IsDeferred() ||
3510           !code()->InstructionBlockAt(predecessor)->IsDeferred());
3511 }
3512 
3513 void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
3514                                                     InstructionBlock* block) {
3515   if (spill_mode == SpillMode::kSpillDeferred) {
3516     LifetimePosition max = LifetimePosition::InstructionFromInstructionIndex(
3517         LastDeferredInstructionIndex(block));
3518     // Adds range back to inactive, resolving resulting conflicts.
3519     auto add_to_inactive = [this, max](LiveRange* range) {
3520       AddToInactive(range);
3521       // Splits other if it conflicts with range. Other is placed in unhandled
3522       // for later reallocation.
3523       auto split_conflicting = [this, max](LiveRange* range, LiveRange* other,
3524                                            std::function<void(LiveRange*)>
3525                                                update_caches) {
3526         if (other->TopLevel()->IsFixed()) return;
3527         int reg = range->assigned_register();
3528         if (kSimpleFPAliasing || !check_fp_aliasing()) {
3529           if (other->assigned_register() != reg) {
3530             return;
3531           }
3532         } else {
3533           if (!data()->config()->AreAliases(range->representation(), reg,
3534                                             other->representation(),
3535                                             other->assigned_register())) {
3536             return;
3537           }
3538         }
3539         // The inactive range might conflict, so check whether we need to
3540         // split and spill. We can look for the first intersection, as there
3541         // cannot be any intersections in the past, as those would have been a
3542         // conflict then.
3543         LifetimePosition next_start = range->FirstIntersection(other);
3544         if (!next_start.IsValid() || (next_start > max)) {
3545           // There is no conflict or the conflict is outside of the current
3546           // stretch of deferred code. In either case we can ignore the
3547           // inactive range.
3548           return;
3549         }
3550         // They overlap. So we need to split active and reschedule it
3551         // for allocation.
3552         TRACE("Resolving conflict of %d with deferred fixed for register %s\n",
3553               other->TopLevel()->vreg(),
3554               RegisterName(other->assigned_register()));
3555         LiveRange* split_off =
3556             other->SplitAt(next_start, data()->allocation_zone());
3557         // Try to get the same register after the deferred block.
3558         split_off->set_controlflow_hint(other->assigned_register());
3559         DCHECK_NE(split_off, other);
3560         AddToUnhandled(split_off);
3561         update_caches(other);
3562       };
3563       // Now check for conflicts in active and inactive ranges. We might have
3564       // conflicts in inactive, as we do not do this check on every block
3565       // boundary but only on deferred/non-deferred changes, while inactive
3566       // live ranges might become live on any block boundary.
3567       for (auto active : active_live_ranges()) {
3568         split_conflicting(range, active, [this](LiveRange* updated) {
3569           next_active_ranges_change_ =
3570               Min(updated->End(), next_active_ranges_change_);
3571         });
3572       }
3573       for (int reg = 0; reg < num_registers(); ++reg) {
3574         if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
3575             reg != range->assigned_register()) {
3576           continue;
3577         }
3578         for (auto inactive : inactive_live_ranges(reg)) {
3579           split_conflicting(range, inactive, [this](LiveRange* updated) {
3580             next_inactive_ranges_change_ =
3581                 Min(updated->End(), next_inactive_ranges_change_);
3582           });
3583         }
3584       }
3585     };
3586     if (mode() == GENERAL_REGISTERS) {
3587       for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
3588         if (current != nullptr) {
3589           if (current->IsDeferredFixed()) {
3590             add_to_inactive(current);
3591           }
3592         }
3593       }
3594     } else {
3595       for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
3596         if (current != nullptr) {
3597           if (current->IsDeferredFixed()) {
3598             add_to_inactive(current);
3599           }
3600         }
3601       }
3602       if (!kSimpleFPAliasing && check_fp_aliasing()) {
3603         for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
3604           if (current != nullptr) {
3605             if (current->IsDeferredFixed()) {
3606               add_to_inactive(current);
3607             }
3608           }
3609         }
3610         for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
3611           if (current != nullptr) {
3612             if (current->IsDeferredFixed()) {
3613               add_to_inactive(current);
3614             }
3615           }
3616         }
3617       }
3618     }
3619   } else {
3620     // Remove all ranges.
3621     for (int reg = 0; reg < num_registers(); ++reg) {
3622       for (auto it = inactive_live_ranges(reg).begin();
3623            it != inactive_live_ranges(reg).end();) {
3624         if ((*it)->TopLevel()->IsDeferredFixed()) {
3625           it = inactive_live_ranges(reg).erase(it);
3626         } else {
3627           ++it;
3628         }
3629       }
3630     }
3631   }
3632 }
3633 
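// Returns true if {block} is deferred, has no predecessors, or its
// fall-through predecessor (the one whose RPO number immediately precedes
// {block}) is not deferred.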
3634 bool LinearScanAllocator::BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
3635     const InstructionBlock* block) {
3636   if (block->IsDeferred()) return true;
3637   if (block->PredecessorCount() == 0) return true;
3638   bool pred_is_deferred = false;
3639   for (auto pred : block->predecessors()) {
3640     if (pred.IsNext(block->rpo_number())) {
3641       pred_is_deferred = code()->InstructionBlockAt(pred)->IsDeferred();
3642       break;
3643     }
3644   }
3645   return !pred_is_deferred;
3646 }
3647 
3648 bool LinearScanAllocator::HasNonDeferredPredecessor(InstructionBlock* block) {
3649   for (auto pred : block->predecessors()) {
3650     InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
3651     if (!pred_block->IsDeferred()) return true;
3652   }
3653   return false;
3654 }
3655 
3656 void LinearScanAllocator::AllocateRegisters() {
3657   DCHECK(unhandled_live_ranges().empty());
3658   DCHECK(active_live_ranges().empty());
3659   for (int reg = 0; reg < num_registers(); ++reg) {
3660     DCHECK(inactive_live_ranges(reg).empty());
3661   }
3662 
3663   SplitAndSpillRangesDefinedByMemoryOperand();
3664   data()->ResetSpillState();
3665 
3666   if (data()->is_trace_alloc()) {
3667     PrintRangeOverview(std::cout);
3668   }
3669 
3670   const size_t live_ranges_size = data()->live_ranges().size();
3671   for (TopLevelLiveRange* range : data()->live_ranges()) {
3672     CHECK_EQ(live_ranges_size,
3673              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
3674     if (!CanProcessRange(range)) continue;
3675     for (LiveRange* to_add = range; to_add != nullptr;
3676          to_add = to_add->next()) {
3677       if (!to_add->spilled()) {
3678         AddToUnhandled(to_add);
3679       }
3680     }
3681   }
3682 
3683   if (mode() == GENERAL_REGISTERS) {
3684     for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
3685       if (current != nullptr) {
3686         if (current->IsDeferredFixed()) continue;
3687         AddToInactive(current);
3688       }
3689     }
3690   } else {
3691     for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
3692       if (current != nullptr) {
3693         if (current->IsDeferredFixed()) continue;
3694         AddToInactive(current);
3695       }
3696     }
3697     if (!kSimpleFPAliasing && check_fp_aliasing()) {
3698       for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
3699         if (current != nullptr) {
3700           if (current->IsDeferredFixed()) continue;
3701           AddToInactive(current);
3702         }
3703       }
3704       for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
3705         if (current != nullptr) {
3706           if (current->IsDeferredFixed()) continue;
3707           AddToInactive(current);
3708         }
3709       }
3710     }
3711   }
3712 
3713   RpoNumber last_block = RpoNumber::FromInt(0);
3714   RpoNumber max_blocks =
3715       RpoNumber::FromInt(code()->InstructionBlockCount() - 1);
3716   LifetimePosition next_block_boundary =
3717       LifetimePosition::InstructionFromInstructionIndex(
3718           data()
3719               ->code()
3720               ->InstructionBlockAt(last_block)
3721               ->last_instruction_index())
3722           .NextFullStart();
3723   SpillMode spill_mode = SpillMode::kSpillAtDefinition;
3724 
3725   // Process all ranges. We also need to ensure that we have seen all block
3726   // boundaries. Linear scan might have assigned and spilled ranges before
3727   // reaching the last block and hence we would ignore control flow effects for
3728   // those. Not only does this produce a potentially bad assignment, it also
3729   // breaks with the invariant that we undo spills that happen in deferred code
3730   // when crossing a deferred/non-deferred boundary.
3731   while (!unhandled_live_ranges().empty() ||
3732          (data()->is_turbo_control_flow_aware_allocation() &&
3733           last_block < max_blocks)) {
3734     data()->tick_counter()->DoTick();
3735     LiveRange* current = unhandled_live_ranges().empty()
3736                              ? nullptr
3737                              : *unhandled_live_ranges().begin();
3738     LifetimePosition position =
3739         current ? current->Start() : next_block_boundary;
3740 #ifdef DEBUG
3741     allocation_finger_ = position;
3742 #endif
3743     if (data()->is_turbo_control_flow_aware_allocation()) {
3744       // Splintering is not supported.
3745       CHECK(!data()->is_turbo_preprocess_ranges());
3746       // Check whether we just moved across a block boundary. This will trigger
3747       // for the first range that is past the current boundary.
3748       if (position >= next_block_boundary) {
3749         TRACE("Processing boundary at %d leaving %d\n",
3750               next_block_boundary.value(), last_block.ToInt());
3751 
3752         // Forward state to before block boundary
3753         LifetimePosition end_of_block = next_block_boundary.PrevStart().End();
3754         ForwardStateTo(end_of_block);
3755 
3756         // Remember this state.
3757         InstructionBlock* current_block = data()->code()->GetInstructionBlock(
3758             next_block_boundary.ToInstructionIndex());
3759 
3760         // Store current spill state (as the state at end of block). For
3761         // simplicity, we store the active ranges, i.e., the live ranges that
3762         // are not spilled.
3763         data()->RememberSpillState(last_block, active_live_ranges());
3764 
3765         // Only reset the state if this was not a direct fallthrough. Otherwise
3766         // control flow resolution will get confused (it does not expect changes
3767         // across fallthrough edges).
3768         bool fallthrough = (current_block->PredecessorCount() == 1) &&
3769                            current_block->predecessors()[0].IsNext(
3770                                current_block->rpo_number());
3771 
3772         // When crossing a deferred/non-deferred boundary, we have to load or
3773         // remove the deferred fixed ranges from inactive.
3774         if ((spill_mode == SpillMode::kSpillDeferred) !=
3775             current_block->IsDeferred()) {
3776           // Update spill mode.
3777           spill_mode = current_block->IsDeferred()
3778                            ? SpillMode::kSpillDeferred
3779                            : SpillMode::kSpillAtDefinition;
3780 
3781           ForwardStateTo(next_block_boundary);
3782 
3783 #ifdef DEBUG
3784           // Allow allocation at current position.
3785           allocation_finger_ = next_block_boundary;
3786 #endif
3787           UpdateDeferredFixedRanges(spill_mode, current_block);
3788         }
3789 
3790         // Allocation relies on the fact that each non-deferred block has at
3791         // least one non-deferred predecessor. Check this invariant here.
3792         DCHECK_IMPLIES(!current_block->IsDeferred(),
3793                        HasNonDeferredPredecessor(current_block));
3794 
3795         if (!fallthrough) {
3796 #ifdef DEBUG
3797           // Allow allocation at current position.
3798           allocation_finger_ = next_block_boundary;
3799 #endif
3800 
3801           // We are currently at next_block_boundary - 1. Move the state to the
3802           // actual block boundary position. In particular, we have to
3803           // reactivate inactive ranges so that they get rescheduled for
3804           // allocation if they were not live at the predecessors.
3805           ForwardStateTo(next_block_boundary);
3806 
3807           RangeWithRegisterSet to_be_live(data()->allocation_zone());
3808 
3809           // If we end up deciding to use the state of the immediate
3810           // predecessor, it is better not to perform a change. It would lead to
3811           // the same outcome anyway.
3812           // This may never happen on boundaries between deferred and
3813           // non-deferred code, as we rely on explicit respill to ensure we
3814           // spill at definition.
3815           bool no_change_required = false;
3816 
3817           auto pick_state_from = [this, current_block](
3818                                      RpoNumber pred,
3819                                      RangeWithRegisterSet* to_be_live) -> bool {
3820             TRACE("Using information from B%d\n", pred.ToInt());
3821             // If this is a fall-through that is not across a deferred
3822             // boundary, there is nothing to do.
3823             bool is_noop = pred.IsNext(current_block->rpo_number());
3824             if (!is_noop) {
3825               auto& spill_state = data()->GetSpillState(pred);
3826               TRACE("Not a fallthrough. Adding %zu elements...\n",
3827                     spill_state.size());
3828               for (const auto range : spill_state) {
3829                 // Filter out ranges that had their register stolen by backwards
3830                 // working spill heuristics. These have been spilled after the
3831                 // fact, so ignore them.
3832                 if (!range->HasRegisterAssigned()) continue;
3833                 to_be_live->emplace(range);
3834               }
3835             }
3836             return is_noop;
3837           };
3838 
3839           // Multiple cases here:
3840           // 1) We have a single predecessor => this is a control flow split, so
3841           //     just restore the predecessor state.
3842           // 2) We have two predecessors => this is a conditional, so break
3843           //     ties based on forward uses, trying to benefit the same branch
3844           //     if in doubt (make one path fast).
3845           // 3) We have many predecessors => this is a switch. Compute union
3846           //     based on majority, break ties by looking forward.
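          // Illustrative sketch (not part of the original source): one way to
          // implement the "majority" merge for case 3, assuming hypothetical
          // containers `predecessor_states` (one set of (vreg, reg) pairs per
          // contributing predecessor) and `merged`:
          //
          //   std::map<std::pair<int, int>, size_t> votes;
          //   for (const auto& state : predecessor_states) {
          //     for (const auto& assignment : state) ++votes[assignment];
          //   }
          //   for (const auto& kv : votes) {
          //     // Keep an assignment only if more than half of the
          //     // predecessors agree on it.
          //     if (2 * kv.second > predecessor_states.size()) {
          //       merged.insert(kv.first);
          //     }
          //   }
          //
          // ComputeStateFromManyPredecessors is the real implementation and,
          // as noted above, additionally breaks ties by looking forward.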
3847           if (current_block->PredecessorCount() == 1) {
3848             TRACE("Single predecessor for B%d\n",
3849                   current_block->rpo_number().ToInt());
3850             no_change_required =
3851                 pick_state_from(current_block->predecessors()[0], &to_be_live);
3852           } else if (current_block->PredecessorCount() == 2) {
3853             TRACE("Two predecessors for B%d\n",
3854                   current_block->rpo_number().ToInt());
3855             // If one of the branches does not contribute any information,
3856             // e.g. because it is deferred or a back edge, we can shortcut
3857             // here right away.
3858             RpoNumber chosen_predecessor = RpoNumber::Invalid();
3859             if (!ConsiderBlockForControlFlow(
3860                     current_block, current_block->predecessors()[0])) {
3861               chosen_predecessor = current_block->predecessors()[1];
3862             } else if (!ConsiderBlockForControlFlow(
3863                            current_block, current_block->predecessors()[1])) {
3864               chosen_predecessor = current_block->predecessors()[0];
3865             } else {
3866               chosen_predecessor = ChooseOneOfTwoPredecessorStates(
3867                   current_block, next_block_boundary);
3868             }
3869             no_change_required =
3870                 pick_state_from(chosen_predecessor, &to_be_live);
3871 
3872           } else {
3873             // Merge at the end of, e.g., a switch.
3874             ComputeStateFromManyPredecessors(current_block, &to_be_live);
3875           }
3876 
3877           if (!no_change_required) {
3878             SpillNotLiveRanges(&to_be_live, next_block_boundary, spill_mode);
3879             ReloadLiveRanges(to_be_live, next_block_boundary);
3880           }
3881         }
3882         // Update block information
3883         last_block = current_block->rpo_number();
3884         next_block_boundary = LifetimePosition::InstructionFromInstructionIndex(
3885                                   current_block->last_instruction_index())
3886                                   .NextFullStart();
3887 
3888         // We might have created new unhandled live ranges, so cycle around the
3889         // loop to make sure we pick the top most range in unhandled for
3890         // processing.
3891         continue;
3892       }
3893     }
3894 
3895     DCHECK_NOT_NULL(current);
3896 
3897     TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
3898           current->relative_id(), position.value());
3899 
3900     // Now we can erase current, as we are sure to process it.
3901     unhandled_live_ranges().erase(unhandled_live_ranges().begin());
3902 
3903     if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
3904       continue;
3905 
3906     ForwardStateTo(position);
3907 
3908     DCHECK(!current->HasRegisterAssigned() && !current->spilled());
3909 
3910     ProcessCurrentRange(current, spill_mode);
3911   }
3912 
3913   if (data()->is_trace_alloc()) {
3914     PrintRangeOverview(std::cout);
3915   }
3916 }
3917 
3918 bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
3919   DCHECK(!data()->is_turbo_control_flow_aware_allocation());
3920   DCHECK(range->TopLevel()->IsSplinter());
3921   // If we can spill the whole range, great. Otherwise, split above the
3922   // first use needing a register and spill the top part.
3923   const UsePosition* next_reg = range->NextRegisterPosition(range->Start());
3924   if (next_reg == nullptr) {
3925     Spill(range, SpillMode::kSpillAtDefinition);
3926     return true;
3927   } else if (range->FirstHintPosition() == nullptr) {
3928     // If there was no hint, but we have a use position requiring a
3929     // register, apply the hot path heuristics.
3930     return false;
3931   } else if (next_reg->pos().PrevStart() > range->Start()) {
3932     LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart());
3933     AddToUnhandled(tail);
3934     Spill(range, SpillMode::kSpillAtDefinition);
3935     return true;
3936   }
3937   return false;
3938 }
3939 
3940 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
3941                                                        int reg) {
3942   data()->MarkAllocated(range->representation(), reg);
3943   range->set_assigned_register(reg);
3944   range->SetUseHints(reg);
3945   range->UpdateBundleRegister(reg);
3946   if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
3947     data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
3948   }
3949 }
3950 
3951 void LinearScanAllocator::AddToActive(LiveRange* range) {
3952   TRACE("Add live range %d:%d in %s to active\n", range->TopLevel()->vreg(),
3953         range->relative_id(), RegisterName(range->assigned_register()));
3954   active_live_ranges().push_back(range);
3955   next_active_ranges_change_ =
3956       std::min(next_active_ranges_change_, range->NextEndAfter(range->Start()));
3957 }
3958 
3959 void LinearScanAllocator::AddToInactive(LiveRange* range) {
3960   TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
3961         range->relative_id());
3962   next_inactive_ranges_change_ = std::min(
3963       next_inactive_ranges_change_, range->NextStartAfter(range->Start()));
3964   DCHECK(range->HasRegisterAssigned());
3965   inactive_live_ranges(range->assigned_register()).insert(range);
3966 }
3967 
3968 void LinearScanAllocator::AddToUnhandled(LiveRange* range) {
3969   if (range == nullptr || range->IsEmpty()) return;
3970   DCHECK(!range->HasRegisterAssigned() && !range->spilled());
3971   DCHECK(allocation_finger_ <= range->Start());
3972 
3973   TRACE("Add live range %d:%d to unhandled\n", range->TopLevel()->vreg(),
3974         range->relative_id());
3975   unhandled_live_ranges().insert(range);
3976 }
3977 
3978 ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToHandled(
3979     const ZoneVector<LiveRange*>::iterator it) {
3980   TRACE("Moving live range %d:%d from active to handled\n",
3981         (*it)->TopLevel()->vreg(), (*it)->relative_id());
3982   return active_live_ranges().erase(it);
3983 }
3984 
3985 ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToInactive(
3986     const ZoneVector<LiveRange*>::iterator it, LifetimePosition position) {
3987   LiveRange* range = *it;
3988   TRACE("Moving live range %d:%d from active to inactive\n",
3989         (range)->TopLevel()->vreg(), range->relative_id());
3990   LifetimePosition next_active = range->NextStartAfter(position);
3991   next_inactive_ranges_change_ =
3992       std::min(next_inactive_ranges_change_, next_active);
3993   DCHECK(range->HasRegisterAssigned());
3994   inactive_live_ranges(range->assigned_register()).insert(range);
3995   return active_live_ranges().erase(it);
3996 }
3997 
3998 LinearScanAllocator::InactiveLiveRangeQueue::iterator
3999 LinearScanAllocator::InactiveToHandled(InactiveLiveRangeQueue::iterator it) {
4000   LiveRange* range = *it;
4001   TRACE("Moving live range %d:%d from inactive to handled\n",
4002         range->TopLevel()->vreg(), range->relative_id());
4003   int reg = range->assigned_register();
4004   return inactive_live_ranges(reg).erase(it);
4005 }
4006 
4007 LinearScanAllocator::InactiveLiveRangeQueue::iterator
4008 LinearScanAllocator::InactiveToActive(InactiveLiveRangeQueue::iterator it,
4009                                       LifetimePosition position) {
4010   LiveRange* range = *it;
4011   active_live_ranges().push_back(range);
4012   TRACE("Moving live range %d:%d from inactive to active\n",
4013         range->TopLevel()->vreg(), range->relative_id());
4014   next_active_ranges_change_ =
4015       std::min(next_active_ranges_change_, range->NextEndAfter(position));
4016   int reg = range->assigned_register();
4017   return inactive_live_ranges(reg).erase(it);
4018 }
4019 
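// Summary of the transitions performed below (descriptive only, derived from
// the code that follows): relative to `position`,
//   active ranges:   End() <= position  -> handled
//                    !Covers(position)  -> inactive
//                    otherwise          -> stay active
//   inactive ranges: End() <= position  -> handled
//                    Covers(position)   -> active
//                    otherwise          -> stay inactive (re-inserted to keep
//                                          the per-register queue ordered)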
4020 void LinearScanAllocator::ForwardStateTo(LifetimePosition position) {
4021   if (position >= next_active_ranges_change_) {
4022     next_active_ranges_change_ = LifetimePosition::MaxPosition();
4023     for (auto it = active_live_ranges().begin();
4024          it != active_live_ranges().end();) {
4025       LiveRange* cur_active = *it;
4026       if (cur_active->End() <= position) {
4027         it = ActiveToHandled(it);
4028       } else if (!cur_active->Covers(position)) {
4029         it = ActiveToInactive(it, position);
4030       } else {
4031         next_active_ranges_change_ = std::min(
4032             next_active_ranges_change_, cur_active->NextEndAfter(position));
4033         ++it;
4034       }
4035     }
4036   }
4037 
4038   if (position >= next_inactive_ranges_change_) {
4039     next_inactive_ranges_change_ = LifetimePosition::MaxPosition();
4040     for (int reg = 0; reg < num_registers(); ++reg) {
4041       ZoneVector<LiveRange*> reorder(data()->allocation_zone());
4042       for (auto it = inactive_live_ranges(reg).begin();
4043            it != inactive_live_ranges(reg).end();) {
4044         LiveRange* cur_inactive = *it;
4045         if (cur_inactive->End() <= position) {
4046           it = InactiveToHandled(it);
4047         } else if (cur_inactive->Covers(position)) {
4048           it = InactiveToActive(it, position);
4049         } else {
4050           next_inactive_ranges_change_ =
4051               std::min(next_inactive_ranges_change_,
4052                        cur_inactive->NextStartAfter(position));
4053           it = inactive_live_ranges(reg).erase(it);
4054           reorder.push_back(cur_inactive);
4055         }
4056       }
4057       for (LiveRange* range : reorder) {
4058         inactive_live_ranges(reg).insert(range);
4059       }
4060     }
4061   }
4062 }
4063 
4064 int LinearScanAllocator::LastDeferredInstructionIndex(InstructionBlock* start) {
4065   DCHECK(start->IsDeferred());
4066   RpoNumber last_block =
4067       RpoNumber::FromInt(code()->InstructionBlockCount() - 1);
4068   while ((start->rpo_number() < last_block)) {
4069     InstructionBlock* next =
4070         code()->InstructionBlockAt(start->rpo_number().Next());
4071     if (!next->IsDeferred()) break;
4072     start = next;
4073   }
4074   return start->last_instruction_index();
4075 }
4076 
4077 void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
4078                                            int* num_regs, int* num_codes,
4079                                            const int** codes) const {
4080   DCHECK(!kSimpleFPAliasing);
4081   if (rep == MachineRepresentation::kFloat32) {
4082     *num_regs = data()->config()->num_float_registers();
4083     *num_codes = data()->config()->num_allocatable_float_registers();
4084     *codes = data()->config()->allocatable_float_codes();
4085   } else if (rep == MachineRepresentation::kSimd128) {
4086     *num_regs = data()->config()->num_simd128_registers();
4087     *num_codes = data()->config()->num_allocatable_simd128_registers();
4088     *codes = data()->config()->allocatable_simd128_codes();
4089   } else {
4090     UNREACHABLE();
4091   }
4092 }
4093 
4094 void LinearScanAllocator::FindFreeRegistersForRange(
4095     LiveRange* range, Vector<LifetimePosition> positions) {
4096   int num_regs = num_registers();
4097   int num_codes = num_allocatable_registers();
4098   const int* codes = allocatable_register_codes();
4099   MachineRepresentation rep = range->representation();
4100   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
4101                              rep == MachineRepresentation::kSimd128))
4102     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
4103   DCHECK_GE(positions.length(), num_regs);
4104 
4105   for (int i = 0; i < num_regs; ++i) {
4106     positions[i] = LifetimePosition::MaxPosition();
4107   }
4108 
4109   for (LiveRange* cur_active : active_live_ranges()) {
4110     int cur_reg = cur_active->assigned_register();
4111     if (kSimpleFPAliasing || !check_fp_aliasing()) {
4112       positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
4113       TRACE("Register %s is free until pos %d (1) due to %d\n",
4114             RegisterName(cur_reg),
4115             LifetimePosition::GapFromInstructionIndex(0).value(),
4116             cur_active->TopLevel()->vreg());
4117     } else {
4118       int alias_base_index = -1;
4119       int aliases = data()->config()->GetAliases(
4120           cur_active->representation(), cur_reg, rep, &alias_base_index);
4121       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4122       while (aliases--) {
4123         int aliased_reg = alias_base_index + aliases;
4124         positions[aliased_reg] = LifetimePosition::GapFromInstructionIndex(0);
4125       }
4126     }
4127   }
4128 
4129   for (int cur_reg = 0; cur_reg < num_regs; ++cur_reg) {
4130     for (LiveRange* cur_inactive : inactive_live_ranges(cur_reg)) {
4131       DCHECK_GT(cur_inactive->End(), range->Start());
4132       CHECK_EQ(cur_inactive->assigned_register(), cur_reg);
4133       // No need to carry out intersections when this register won't be
4134       // interesting to this range anyway.
4135       // TODO(mtrofin): extend to aliased ranges, too.
4136       if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
4137           positions[cur_reg] <= cur_inactive->NextStart()) {
4138         break;
4139       }
4140       LifetimePosition next_intersection =
4141           cur_inactive->FirstIntersection(range);
4142       if (!next_intersection.IsValid()) continue;
4143       if (kSimpleFPAliasing || !check_fp_aliasing()) {
4144         positions[cur_reg] = std::min(positions[cur_reg], next_intersection);
4145         TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
4146               positions[cur_reg].value());
4147       } else {
4148         int alias_base_index = -1;
4149         int aliases = data()->config()->GetAliases(
4150             cur_inactive->representation(), cur_reg, rep, &alias_base_index);
4151         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4152         while (aliases--) {
4153           int aliased_reg = alias_base_index + aliases;
4154           positions[aliased_reg] =
4155               std::min(positions[aliased_reg], next_intersection);
4156         }
4157       }
4158     }
4159   }
4160 }
4161 
4162 // High-level register allocation summary:
4163 //
4164 // For regular, or hot (i.e. not splinter) ranges, we attempt to first
4165 // allocate the preferred (hint) register. If that is not possible,
4166 // we find a register that's free, and allocate that. If that's not possible,
4167 // we search for a register to steal from a range that was allocated. The
4168 // goal is to optimize for throughput by avoiding register-to-memory
4169 // moves, which are expensive.
4170 //
4171 // For splinters, the goal is to minimize the number of moves. First we try
4172 // to allocate the preferred register (more discussion follows). Failing that,
4173 // we bail out and spill as far as we can, unless the first use is at start,
4174 // in which case we apply the same behavior as we do for regular ranges.
4175 // If there is no hint, we apply the hot-path behavior.
4176 //
4177 // For the splinter, the hint register may come from:
4178 //
4179 // - the hot path (we set it at splintering time with SetHint). In this case, if
4180 // we cannot offer the hint register, spilling is better because it's at most
4181 // 1 move, while trying to find and offer another register is at least 1 move.
4182 //
4183 // - a constraint. If we cannot offer that register, it's because there is some
4184 // interference. So offering the hint register up to the interference would
4185 // result in a move at the interference, plus a move to satisfy the constraint.
4186 // This is also the number of moves if we spill, with the potential of the range
4187 // being already spilled and thus saving a move (the spill).
4188 // Note that this can only be an input constraint; if it were an output one,
4189 // the range wouldn't be a splinter, because that would mean it is defined in a
4190 // deferred block, and we don't mark those as splinters (they live in deferred
4191 // blocks only).
4194 //
4195 // - a phi. The same analysis as in the case of the input constraint applies.
4196 //
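// As a rough sketch, the decision order implemented by ProcessCurrentRange
// below is (simplified; see the actual code for the precise control flow):
//
//   FindFreeRegistersForRange(current, free_until_pos);
//   if (TryAllocatePreferredReg(current, free_until_pos)) { /* done */ }
//   else if (splinter && TrySplitAndSpillSplinter(current)) { /* done */ }
//   else if (TryAllocateFreeReg(current, free_until_pos)) { /* done */ }
//   else AllocateBlockedReg(current, spill_mode);  // steal a register or spill
//   if (current->HasRegisterAssigned()) AddToActive(current);
//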
4197 void LinearScanAllocator::ProcessCurrentRange(LiveRange* current,
4198                                               SpillMode spill_mode) {
4199   EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
4200       free_until_pos;
4201   FindFreeRegistersForRange(current, free_until_pos);
4202   if (!TryAllocatePreferredReg(current, free_until_pos)) {
4203     if (current->TopLevel()->IsSplinter()) {
4204       DCHECK(!data()->is_turbo_control_flow_aware_allocation());
4205       if (TrySplitAndSpillSplinter(current)) return;
4206     }
4207     if (!TryAllocateFreeReg(current, free_until_pos)) {
4208       AllocateBlockedReg(current, spill_mode);
4209     }
4210   }
4211   if (current->HasRegisterAssigned()) {
4212     AddToActive(current);
4213   }
4214 }
4215 
4216 bool LinearScanAllocator::TryAllocatePreferredReg(
4217     LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
4218   int hint_register;
4219   if (current->RegisterFromControlFlow(&hint_register) ||
4220       current->FirstHintPosition(&hint_register) != nullptr ||
4221       current->RegisterFromBundle(&hint_register)) {
4222     TRACE(
4223         "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
4224         RegisterName(hint_register), free_until_pos[hint_register].value(),
4225         current->TopLevel()->vreg(), current->relative_id(),
4226         current->End().value());
4227 
4228     // The desired register is free until the end of the current live range.
4229     if (free_until_pos[hint_register] >= current->End()) {
4230       TRACE("Assigning preferred reg %s to live range %d:%d\n",
4231             RegisterName(hint_register), current->TopLevel()->vreg(),
4232             current->relative_id());
4233       SetLiveRangeAssignedRegister(current, hint_register);
4234       return true;
4235     }
4236   }
4237   return false;
4238 }
4239 
4240 int LinearScanAllocator::PickRegisterThatIsAvailableLongest(
4241     LiveRange* current, int hint_reg,
4242     const Vector<LifetimePosition>& free_until_pos) {
4243   int num_regs = 0;  // used only for the call to GetFPRegisterSet.
4244   int num_codes = num_allocatable_registers();
4245   const int* codes = allocatable_register_codes();
4246   MachineRepresentation rep = current->representation();
4247   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
4248                              rep == MachineRepresentation::kSimd128)) {
4249     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
4250   }
4251 
4252   DCHECK_GE(free_until_pos.length(), num_codes);
4253 
4254   // Find the register which stays free for the longest time. Check for
4255   // the hinted register first, as we might want to use that one. Only
4256   // count full instructions for free ranges, as an instruction's internal
4257   // positions do not help but might shadow a hinted register. This is
4258   // typically the case for function calls, where all registers are
4259   // clobbered after the call except for the argument registers, which are
4260   // set before the call. Hence, the argument registers always get ignored,
4261   // as their available time is shorter.
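  // For example (illustrative instruction numbers only): if a call at
  // instruction 7 clobbers every register after the call, but a hinted
  // argument register was already set up in the gap just before it, the
  // argument register's free-until position is slightly smaller than that of
  // the other registers. Comparing ToInstructionIndex() values makes both
  // read as 7, so the tie is resolved in favor of the hinted register below.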
4262   int reg = (hint_reg == kUnassignedRegister) ? codes[0] : hint_reg;
4263   int current_free = free_until_pos[reg].ToInstructionIndex();
4264   for (int i = 0; i < num_codes; ++i) {
4265     int code = codes[i];
4266     // Prefer registers that have no fixed uses to avoid blocking later hints.
4267     // We use the first register that has no fixed uses to ensure we use
4268     // byte-addressable registers on ia32 first.
4269     int candidate_free = free_until_pos[code].ToInstructionIndex();
4270     TRACE("Register %s is free until %d\n", RegisterName(code), candidate_free);
4271     if ((candidate_free > current_free) ||
4272         (candidate_free == current_free && reg != hint_reg &&
4273          (data()->HasFixedUse(current->representation(), reg) &&
4274           !data()->HasFixedUse(current->representation(), code)))) {
4275       reg = code;
4276       current_free = candidate_free;
4277     }
4278   }
4279 
4280   return reg;
4281 }
4282 
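// Descriptive note (not part of the original source): TryAllocateFreeReg below
// handles the classic linear-scan case. With free_until_pos computed over
// active and inactive ranges, a register reg can hold `current` outright when
// free_until_pos[reg] >= current->End(); if it is free only for a prefix, the
// range is split at free_until_pos[reg] and the tail is re-queued. For
// example, with illustrative positions, a range covering [10, 40) and
// free_until_pos[reg] == 24 gets reg for [10, 24) while [24, 40) goes back to
// unhandled.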
4283 bool LinearScanAllocator::TryAllocateFreeReg(
4284     LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
4285   // Compute register hint, if such exists.
4286   int hint_reg = kUnassignedRegister;
4287   current->RegisterFromControlFlow(&hint_reg) ||
4288       current->FirstHintPosition(&hint_reg) != nullptr ||
4289       current->RegisterFromBundle(&hint_reg);
4290 
4291   int reg =
4292       PickRegisterThatIsAvailableLongest(current, hint_reg, free_until_pos);
4293 
4294   LifetimePosition pos = free_until_pos[reg];
4295 
4296   if (pos <= current->Start()) {
4297     // All registers are blocked.
4298     return false;
4299   }
4300 
4301   if (pos < current->End()) {
4302     // Register reg is available at the range start but becomes blocked before
4303     // the range end. Split current at position where it becomes blocked.
4304     LiveRange* tail = SplitRangeAt(current, pos);
4305     AddToUnhandled(tail);
4306 
4307     // Try to allocate preferred register once more.
4308     if (TryAllocatePreferredReg(current, free_until_pos)) return true;
4309   }
4310 
4311   // Register reg is available at the range start and is free until the range
4312   // end.
4313   DCHECK(pos >= current->End());
4314   TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
4315         current->TopLevel()->vreg(), current->relative_id());
4316   SetLiveRangeAssignedRegister(current, reg);
4317 
4318   return true;
4319 }
4320 
4321 void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
4322                                              SpillMode spill_mode) {
4323   UsePosition* register_use = current->NextRegisterPosition(current->Start());
4324   if (register_use == nullptr) {
4325     // There is no use in the current live range that requires a register.
4326     // We can just spill it.
4327     LiveRange* begin_spill = nullptr;
4328     LifetimePosition spill_pos = FindOptimalSpillingPos(
4329         current, current->Start(), spill_mode, &begin_spill);
4330     MaybeSpillPreviousRanges(begin_spill, spill_pos, current);
4331     Spill(current, spill_mode);
4332     return;
4333   }
4334 
4335   MachineRepresentation rep = current->representation();
4336 
4337   // use_pos keeps track of positions a register/alias is used at.
4338   // block_pos keeps track of positions where a register/alias is blocked
4339   // from.
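  // Roughly (as used further down): use_pos[r] bounds how long `current` could
  // keep r if the conflicting ranges were spilled up to their next beneficial
  // use, while block_pos[r] bounds how long `current` may keep r at all, since
  // fixed or unspillable ranges cannot be evicted.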
4340   EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
4341       use_pos(LifetimePosition::MaxPosition());
4342   EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
4343       block_pos(LifetimePosition::MaxPosition());
4344 
4345   for (LiveRange* range : active_live_ranges()) {
4346     int cur_reg = range->assigned_register();
4347     bool is_fixed_or_cant_spill =
4348         range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
4349     if (kSimpleFPAliasing || !check_fp_aliasing()) {
4350       if (is_fixed_or_cant_spill) {
4351         block_pos[cur_reg] = use_pos[cur_reg] =
4352             LifetimePosition::GapFromInstructionIndex(0);
4353       } else {
4354         DCHECK_NE(LifetimePosition::GapFromInstructionIndex(0),
4355                   block_pos[cur_reg]);
4356         use_pos[cur_reg] =
4357             range->NextLifetimePositionRegisterIsBeneficial(current->Start());
4358       }
4359     } else {
4360       int alias_base_index = -1;
4361       int aliases = data()->config()->GetAliases(
4362           range->representation(), cur_reg, rep, &alias_base_index);
4363       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4364       while (aliases--) {
4365         int aliased_reg = alias_base_index + aliases;
4366         if (is_fixed_or_cant_spill) {
4367           block_pos[aliased_reg] = use_pos[aliased_reg] =
4368               LifetimePosition::GapFromInstructionIndex(0);
4369         } else {
4370           use_pos[aliased_reg] =
4371               Min(block_pos[aliased_reg],
4372                   range->NextLifetimePositionRegisterIsBeneficial(
4373                       current->Start()));
4374         }
4375       }
4376     }
4377   }
4378 
4379   for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
4380     for (LiveRange* range : inactive_live_ranges(cur_reg)) {
4381       DCHECK(range->End() > current->Start());
4382       DCHECK_EQ(range->assigned_register(), cur_reg);
4383       bool is_fixed = range->TopLevel()->IsFixed();
4384 
4385       // Don't perform costly intersections if they are guaranteed to not update
4386       // block_pos or use_pos.
4387       // TODO(mtrofin): extend to aliased ranges, too.
4388       if ((kSimpleFPAliasing || !check_fp_aliasing())) {
4389         DCHECK_LE(use_pos[cur_reg], block_pos[cur_reg]);
4390         if (block_pos[cur_reg] <= range->NextStart()) break;
4391         if (!is_fixed && use_pos[cur_reg] <= range->NextStart()) continue;
4392       }
4393 
4394       LifetimePosition next_intersection = range->FirstIntersection(current);
4395       if (!next_intersection.IsValid()) continue;
4396 
4397       if (kSimpleFPAliasing || !check_fp_aliasing()) {
4398         if (is_fixed) {
4399           block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
4400           use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
4401         } else {
4402           use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
4403         }
4404       } else {
4405         int alias_base_index = -1;
4406         int aliases = data()->config()->GetAliases(
4407             range->representation(), cur_reg, rep, &alias_base_index);
4408         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4409         while (aliases--) {
4410           int aliased_reg = alias_base_index + aliases;
4411           if (is_fixed) {
4412             block_pos[aliased_reg] =
4413                 Min(block_pos[aliased_reg], next_intersection);
4414             use_pos[aliased_reg] =
4415                 Min(block_pos[aliased_reg], use_pos[aliased_reg]);
4416           } else {
4417             use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
4418           }
4419         }
4420       }
4421     }
4422   }
4423 
4424   // Compute register hint if it exists.
4425   int hint_reg = kUnassignedRegister;
4426   current->RegisterFromControlFlow(&hint_reg) ||
4427       register_use->HintRegister(&hint_reg) ||
4428       current->RegisterFromBundle(&hint_reg);
4429   int reg = PickRegisterThatIsAvailableLongest(current, hint_reg, use_pos);
4430 
4431   if (use_pos[reg] < register_use->pos()) {
4432     // If there is a gap position before the next register use, we can
4433     // spill until there. The gap position will then fit the fill move.
4434     if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
4435                                                    register_use->pos())) {
4436       SpillBetween(current, current->Start(), register_use->pos(), spill_mode);
4437       return;
4438     }
4439   }
4440 
4441   // When in deferred spilling mode avoid stealing registers beyond the current
4442   // deferred region. This is required as we otherwise might spill an inactive
4443   // range with a start outside of deferred code and that would not be reloaded.
4444   LifetimePosition new_end = current->End();
4445   if (spill_mode == SpillMode::kSpillDeferred) {
4446     InstructionBlock* deferred_block =
4447         code()->GetInstructionBlock(current->Start().ToInstructionIndex());
4448     new_end = Min(new_end, LifetimePosition::GapFromInstructionIndex(
4449                                LastDeferredInstructionIndex(deferred_block)));
4450   }
4451 
4452   // We couldn't spill until the next register use. Split before the register
4453   // is blocked, if applicable.
4454   if (block_pos[reg] < new_end) {
4455     // Register becomes blocked before the current range end. Split before that
4456     // position.
4457     new_end = block_pos[reg].Start();
4458   }
4459 
4460   // If there is no register available at all, we can only spill this range.
4461   // Happens for instance on entry to deferred code where registers might
4462   // become blocked yet we aim to reload ranges.
4463   if (new_end == current->Start()) {
4464     SpillBetween(current, new_end, register_use->pos(), spill_mode);
4465     return;
4466   }
4467 
4468   // Split at the new end if we found one.
4469   if (new_end != current->End()) {
4470     LiveRange* tail = SplitBetween(current, current->Start(), new_end);
4471     AddToUnhandled(tail);
4472   }
4473 
4474   // Register reg is not blocked for the whole range.
4475   DCHECK(block_pos[reg] >= current->End());
4476   TRACE("Assigning blocked reg %s to live range %d:%d\n", RegisterName(reg),
4477         current->TopLevel()->vreg(), current->relative_id());
4478   SetLiveRangeAssignedRegister(current, reg);
4479 
4480   // This register was not free. Thus we need to find and spill
4481   // parts of active and inactive live regions that use the same register
4482   // at the same lifetime positions as current.
4483   SplitAndSpillIntersecting(current, spill_mode);
4484 }
4485 
4486 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current,
4487                                                     SpillMode spill_mode) {
4488   DCHECK(current->HasRegisterAssigned());
4489   int reg = current->assigned_register();
4490   LifetimePosition split_pos = current->Start();
4491   for (auto it = active_live_ranges().begin();
4492        it != active_live_ranges().end();) {
4493     LiveRange* range = *it;
4494     if (kSimpleFPAliasing || !check_fp_aliasing()) {
4495       if (range->assigned_register() != reg) {
4496         ++it;
4497         continue;
4498       }
4499     } else {
4500       if (!data()->config()->AreAliases(current->representation(), reg,
4501                                         range->representation(),
4502                                         range->assigned_register())) {
4503         ++it;
4504         continue;
4505       }
4506     }
4507 
4508     UsePosition* next_pos = range->NextRegisterPosition(current->Start());
4509     LiveRange* begin_spill = nullptr;
4510     LifetimePosition spill_pos =
4511         FindOptimalSpillingPos(range, split_pos, spill_mode, &begin_spill);
4512     MaybeSpillPreviousRanges(begin_spill, spill_pos, range);
4513     if (next_pos == nullptr) {
4514       SpillAfter(range, spill_pos, spill_mode);
4515     } else {
4516       // When spilling between spill_pos and next_pos ensure that the range
4517       // remains spilled at least until the start of the current live range.
4518       // This guarantees that we will not introduce new unhandled ranges that
4519       // start before the current range as this violates allocation invariants
4520       // and will lead to an inconsistent state of active and inactive
4521       // live-ranges: ranges are allocated in order of their start positions,
4522       // ranges are retired from active/inactive when the start of the
4523       // current live-range is larger than their end.
4524       DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
4525                                                         next_pos->pos()));
4526       SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos(),
4527                         spill_mode);
4528     }
4529     it = ActiveToHandled(it);
4530   }
4531 
4532   for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
4533     if (kSimpleFPAliasing || !check_fp_aliasing()) {
4534       if (cur_reg != reg) continue;
4535     }
4536     for (auto it = inactive_live_ranges(cur_reg).begin();
4537          it != inactive_live_ranges(cur_reg).end();) {
4538       LiveRange* range = *it;
4539       if (!kSimpleFPAliasing && check_fp_aliasing() &&
4540           !data()->config()->AreAliases(current->representation(), reg,
4541                                         range->representation(), cur_reg)) {
4542         ++it;
4543         continue;
4544       }
4545       DCHECK(range->End() > current->Start());
4546       if (range->TopLevel()->IsFixed()) {
4547         ++it;
4548         continue;
4549       }
4550 
4551       LifetimePosition next_intersection = range->FirstIntersection(current);
4552       if (next_intersection.IsValid()) {
4553         UsePosition* next_pos = range->NextRegisterPosition(current->Start());
4554         if (next_pos == nullptr) {
4555           SpillAfter(range, split_pos, spill_mode);
4556         } else {
4557           next_intersection = Min(next_intersection, next_pos->pos());
4558           SpillBetween(range, split_pos, next_intersection, spill_mode);
4559         }
4560         it = InactiveToHandled(it);
4561       } else {
4562         ++it;
4563       }
4564     }
4565   }
4566 }
4567 
4568 bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
4569   if (!range->is_phi()) return false;
4570 
4571   DCHECK(!range->HasSpillOperand());
4572   // Check how many operands belong to the same bundle as the output.
4573   LiveRangeBundle* out_bundle = range->get_bundle();
4574   RegisterAllocationData::PhiMapValue* phi_map_value =
4575       data()->GetPhiMapValueFor(range);
4576   const PhiInstruction* phi = phi_map_value->phi();
4577   const InstructionBlock* block = phi_map_value->block();
4578   // Count the number of spilled operands.
4579   size_t spilled_count = 0;
4580   for (size_t i = 0; i < phi->operands().size(); i++) {
4581     int op = phi->operands()[i];
4582     LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
4583     if (!op_range->TopLevel()->HasSpillRange()) continue;
4584     const InstructionBlock* pred =
4585         code()->InstructionBlockAt(block->predecessors()[i]);
4586     LifetimePosition pred_end =
4587         LifetimePosition::InstructionFromInstructionIndex(
4588             pred->last_instruction_index());
4589     while (op_range != nullptr && !op_range->CanCover(pred_end)) {
4590       op_range = op_range->next();
4591     }
4592     if (op_range != nullptr && op_range->spilled() &&
4593         op_range->get_bundle() == out_bundle) {
4594       spilled_count++;
4595     }
4596   }
4597 
4598   // Only continue if more than half of the operands are spilled to the same
4599   // slot (because they are part of the same bundle).
4600   if (spilled_count * 2 <= phi->operands().size()) {
4601     return false;
4602   }
4603 
4604   // If the range does not need register soon, spill it to the merged
4605   // spill range.
4606   LifetimePosition next_pos = range->Start();
4607   if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
4608   UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
4609   if (pos == nullptr) {
4610     Spill(range, SpillMode::kSpillAtDefinition);
4611     return true;
4612   } else if (pos->pos() > range->Start().NextStart()) {
4613     SpillBetween(range, range->Start(), pos->pos(),
4614                  SpillMode::kSpillAtDefinition);
4615     return true;
4616   }
4617   return false;
4618 }
4619 
4620 void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos,
4621                                      SpillMode spill_mode) {
4622   LiveRange* second_part = SplitRangeAt(range, pos);
4623   Spill(second_part, spill_mode);
4624 }
4625 
4626 void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
4627                                        LifetimePosition end,
4628                                        SpillMode spill_mode) {
4629   SpillBetweenUntil(range, start, start, end, spill_mode);
4630 }
4631 
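// Descriptive summary (not part of the original source): SpillBetweenUntil
// splits `range` at `start`; if the resulting part still reaches into
// [start, end[, it is split once more, no earlier than `until` and normally
// just before `end` (or on a block boundary), so that the middle piece gets
// spilled while the tail is re-queued in unhandled, leaving room for a gap
// move to reload the value in time for the use at `end`.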
4632 void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
4633                                             LifetimePosition start,
4634                                             LifetimePosition until,
4635                                             LifetimePosition end,
4636                                             SpillMode spill_mode) {
4637   CHECK(start < end);
4638   LiveRange* second_part = SplitRangeAt(range, start);
4639 
4640   if (second_part->Start() < end) {
4641     // The split result intersects with [start, end[.
4642     // Split it at position between ]start+1, end[, spill the middle part
4643     // and put the rest to unhandled.
4644 
4645     // Make sure that the third part always starts after the start of the
4646     // second part, as that likely is the current position of the register
4647     // allocator and we cannot add ranges to unhandled that start before
4648     // the current position.
4649     LifetimePosition split_start = Max(second_part->Start().End(), until);
4650 
4651     // If end is an actual use (which it typically is) we have to split
4652     // so that there is a gap before so that we have space for moving the
4653     // value into its position.
4654     // However, if we have no choice, split right where asked.
4655     LifetimePosition third_part_end = Max(split_start, end.PrevStart().End());
4656     // Instead of splitting right after or even before the block boundary,
4657     // split on the boundary to avoid extra moves.
4658     if (data()->IsBlockBoundary(end.Start())) {
4659       third_part_end = Max(split_start, end.Start());
4660     }
4661 
4662     LiveRange* third_part =
4663         SplitBetween(second_part, split_start, third_part_end);
4664     if (GetInstructionBlock(data()->code(), second_part->Start())
4665             ->IsDeferred()) {
4666       // Try to use the same register as before.
4667       TRACE("Setting control flow hint for %d:%d to %s\n",
4668             third_part->TopLevel()->vreg(), third_part->relative_id(),
4669             RegisterName(range->controlflow_hint()));
4670       third_part->set_controlflow_hint(range->controlflow_hint());
4671     }
4672 
4673     AddToUnhandled(third_part);
4674     // This can happen, even if we checked for start < end above, as we fiddle
4675     // with the end location. However, we are guaranteed to be after or at
4676     // until, so this is fine.
4677     if (third_part != second_part) {
4678       Spill(second_part, spill_mode);
4679     }
4680   } else {
4681     // The split result does not intersect with [start, end[.
4682     // Nothing to spill. Just put it to unhandled as a whole.
4683     AddToUnhandled(second_part);
4684   }
4685 }
4686 
4687 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
4688     : data_(data) {}
4689 
4690 void SpillSlotLocator::LocateSpillSlots() {
4691   const InstructionSequence* code = data()->code();
4692   const size_t live_ranges_size = data()->live_ranges().size();
4693   for (TopLevelLiveRange* range : data()->live_ranges()) {
4694     CHECK_EQ(live_ranges_size,
4695              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
4696     if (range == nullptr || range->IsEmpty()) continue;
4697     // We care only about ranges which spill in the frame.
4698     if (!range->HasSpillRange() ||
4699         range->IsSpilledOnlyInDeferredBlocks(data())) {
4700       continue;
4701     }
4702     TopLevelLiveRange::SpillMoveInsertionList* spills =
4703         range->GetSpillMoveInsertionLocations(data());
4704     DCHECK_NOT_NULL(spills);
4705     for (; spills != nullptr; spills = spills->next) {
4706       code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
4707     }
4708   }
4709 }
4710 
4711 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
4712 
4713 void OperandAssigner::DecideSpillingMode() {
4714   if (data()->is_turbo_control_flow_aware_allocation()) {
4715     for (auto range : data()->live_ranges()) {
4716       data()->tick_counter()->DoTick();
4717       int max_blocks = data()->code()->InstructionBlockCount();
4718       if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks(data())) {
4719         // If the range is spilled only in deferred blocks and starts in
4720         // a non-deferred block, we transition its representation here so
4721         // that the LiveRangeConnector processes it correctly. If,
4722         // however, it starts in a deferred block, we upgrade it to
4723         // spill at definition, as that definition is in a deferred block
4724         // anyway. While this is an optimization, the code in LiveRangeConnector
4725         // relies on it!
4726         if (GetInstructionBlock(data()->code(), range->Start())->IsDeferred()) {
4727           TRACE("Live range %d is spilled and alive in deferred code only\n",
4728                 range->vreg());
4729           range->TransitionRangeToSpillAtDefinition();
4730         } else {
4731           TRACE(
4732               "Live range %d is spilled deferred code only but alive outside\n",
4733               range->vreg());
4734           DCHECK(data()->is_turbo_control_flow_aware_allocation());
4735           range->TransitionRangeToDeferredSpill(data()->allocation_zone(),
4736                                                 max_blocks);
4737         }
4738       }
4739     }
4740   }
4741 }
4742 
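// Illustrative sketch (not V8 code): AssignSpillSlots below lets spill ranges
// whose lifetimes never overlap share a single stack slot, then allocates one
// frame slot per surviving range. A minimal standalone version of the
// "merge if disjoint" test over [start, end) intervals could look like:
//
//   bool TryMergeSketch(std::vector<std::pair<int, int>>* dst,
//                       const std::vector<std::pair<int, int>>& src) {
//     for (const auto& a : *dst) {
//       for (const auto& b : src) {
//         if (a.first < b.second && b.first < a.second) return false;  // overlap
//       }
//     }
//     dst->insert(dst->end(), src.begin(), src.end());
//     return true;
//   }
//
// The real SpillRange::TryMerge works on live-range use intervals and keeps
// additional bookkeeping that this sketch ignores.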
4743 void OperandAssigner::AssignSpillSlots() {
4744   for (auto range : data()->live_ranges()) {
4745     data()->tick_counter()->DoTick();
4746     if (range != nullptr && range->get_bundle() != nullptr) {
4747       range->get_bundle()->MergeSpillRanges();
4748     }
4749   }
4750   ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
4751   // Merge disjoint spill ranges
4752   for (size_t i = 0; i < spill_ranges.size(); ++i) {
4753     data()->tick_counter()->DoTick();
4754     SpillRange* range = spill_ranges[i];
4755     if (range == nullptr) continue;
4756     if (range->IsEmpty()) continue;
4757     for (size_t j = i + 1; j < spill_ranges.size(); ++j) {
4758       SpillRange* other = spill_ranges[j];
4759       if (other != nullptr && !other->IsEmpty()) {
4760         range->TryMerge(other);
4761       }
4762     }
4763   }
4764   // Allocate slots for the merged spill ranges.
4765   for (SpillRange* range : spill_ranges) {
4766     data()->tick_counter()->DoTick();
4767     if (range == nullptr || range->IsEmpty()) continue;
4768     // Allocate a new operand referring to the spill slot.
4769     if (!range->HasSlot()) {
4770       int index = data()->frame()->AllocateSpillSlot(range->byte_width());
4771       range->set_assigned_slot(index);
4772     }
4773   }
4774 }
4775 
4776 void OperandAssigner::CommitAssignment() {
4777   const size_t live_ranges_size = data()->live_ranges().size();
4778   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
4779     data()->tick_counter()->DoTick();
4780     CHECK_EQ(live_ranges_size,
4781              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
4782     if (top_range == nullptr || top_range->IsEmpty()) continue;
4783     InstructionOperand spill_operand;
4784     if (top_range->HasSpillOperand()) {
4785       spill_operand = *top_range->TopLevel()->GetSpillOperand();
4786     } else if (top_range->TopLevel()->HasSpillRange()) {
4787       spill_operand = top_range->TopLevel()->GetSpillRangeOperand();
4788     }
4789     if (top_range->is_phi()) {
4790       data()->GetPhiMapValueFor(top_range)->CommitAssignment(
4791           top_range->GetAssignedOperand());
4792     }
4793     for (LiveRange* range = top_range; range != nullptr;
4794          range = range->next()) {
4795       InstructionOperand assigned = range->GetAssignedOperand();
4796       DCHECK(!assigned.IsUnallocated());
4797       range->ConvertUsesToOperand(assigned, spill_operand);
4798     }
4799 
4800     if (!spill_operand.IsInvalid()) {
4801       // If this top level range has a child spilled in a deferred block, we use
4802       // the range and control flow connection mechanism instead of spilling at
4803       // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
4804       // phases. Normally, when we spill at definition, we do not insert a
4805       // connecting move when a successor child range is spilled - because the
4806       // spilled range picks up its value from the slot which was assigned at
4807       // definition. For ranges that are determined to spill only in deferred
4808       // blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks
4809       // where a spill operand is expected, and then finalize by inserting the
4810       // spills in the deferred blocks' dominators.
4811       if (!top_range->IsSpilledOnlyInDeferredBlocks(data())) {
4812         // Spill at definition if the range isn't spilled only in deferred
4813         // blocks.
4814         top_range->CommitSpillMoves(
4815             data(), spill_operand,
4816             top_range->has_slot_use() || top_range->spilled());
4817       }
4818     }
4819   }
4820 }
4821 
4822 ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data)
4823     : data_(data) {}
4824 
4825 bool ReferenceMapPopulator::SafePointsAreInOrder() const {
4826   int safe_point = 0;
4827   for (ReferenceMap* map : *data()->code()->reference_maps()) {
4828     if (safe_point > map->instruction_position()) return false;
4829     safe_point = map->instruction_position();
4830   }
4831   return true;
4832 }
4833 
4834 void ReferenceMapPopulator::PopulateReferenceMaps() {
4835   DCHECK(SafePointsAreInOrder());
4836   // Map all delayed references.
4837   for (RegisterAllocationData::DelayedReference& delayed_reference :
4838        data()->delayed_references()) {
4839     delayed_reference.map->RecordReference(
4840         AllocatedOperand::cast(*delayed_reference.operand));
4841   }
4842   // Iterate over all safe point positions and record a pointer
4843   // for all spilled live ranges at this point.
4844   int last_range_start = 0;
4845   const ReferenceMapDeque* reference_maps = data()->code()->reference_maps();
4846   ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
4847   const size_t live_ranges_size = data()->live_ranges().size();
4848   for (TopLevelLiveRange* range : data()->live_ranges()) {
4849     CHECK_EQ(live_ranges_size,
4850              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
4851     if (range == nullptr) continue;
4852     // Skip non-reference values.
4853     if (!data()->code()->IsReference(range->vreg())) continue;
4854     // Skip empty live ranges.
4855     if (range->IsEmpty()) continue;
4856     if (range->has_preassigned_slot()) continue;
4857 
4858     // Find the extent of the range and its children.
4859     int start = range->Start().ToInstructionIndex();
4860     int end = 0;
4861     for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) {
4862       LifetimePosition this_end = cur->End();
4863       if (this_end.ToInstructionIndex() > end)
4864         end = this_end.ToInstructionIndex();
4865       DCHECK(cur->Start().ToInstructionIndex() >= start);
4866     }
4867 
4868     // Most of the ranges are in order, but not all.  Keep an eye on when they
4869     // step backwards and reset the first_it so we don't miss any safe points.
4870     if (start < last_range_start) first_it = reference_maps->begin();
4871     last_range_start = start;
4872 
4873     // Step across all the safe points that are before the start of this range,
4874     // recording how far we step in order to save doing this for the next range.
4875     for (; first_it != reference_maps->end(); ++first_it) {
4876       ReferenceMap* map = *first_it;
4877       if (map->instruction_position() >= start) break;
4878     }
4879 
4880     InstructionOperand spill_operand;
4881     if (((range->HasSpillOperand() &&
4882           !range->GetSpillOperand()->IsConstant()) ||
4883          range->HasSpillRange())) {
4884       if (range->HasSpillOperand()) {
4885         spill_operand = *range->GetSpillOperand();
4886       } else {
4887         spill_operand = range->GetSpillRangeOperand();
4888       }
4889       DCHECK(spill_operand.IsStackSlot());
4890       DCHECK(CanBeTaggedOrCompressedPointer(
4891           AllocatedOperand::cast(spill_operand).representation()));
4892     }
4893 
4894     LiveRange* cur = range;
4895     // Step through the safe points to see whether they are in the range.
4896     for (auto it = first_it; it != reference_maps->end(); ++it) {
4897       ReferenceMap* map = *it;
4898       int safe_point = map->instruction_position();
4899 
4900       // The safe points are sorted so we can stop searching here.
4901       if (safe_point - 1 > end) break;
4902 
4903       // Advance to the next active range that covers the current
4904       // safe point position.
4905       LifetimePosition safe_point_pos =
4906           LifetimePosition::InstructionFromInstructionIndex(safe_point);
4907 
4908       // Search for the child range (cur) that covers safe_point_pos. If we
4909       // don't find it before the children pass safe_point_pos, keep cur at
4910       // the last child, because the next safe_point_pos may be covered by cur.
4911       // This may happen if cur has more than one interval, and the current
4912       // safe_point_pos is in between intervals.
4913       // For that reason, cur may be at most the last child.
4914       DCHECK_NOT_NULL(cur);
4915       DCHECK(safe_point_pos >= cur->Start() || range == cur);
4916       bool found = false;
4917       while (!found) {
4918         if (cur->Covers(safe_point_pos)) {
4919           found = true;
4920         } else {
4921           LiveRange* next = cur->next();
4922           if (next == nullptr || next->Start() > safe_point_pos) {
4923             break;
4924           }
4925           cur = next;
4926         }
4927       }
4928 
4929       if (!found) {
4930         continue;
4931       }
4932 
4933       // Check if the live range is spilled and the safe point is after
4934       // the spill position.
4935       int spill_index = range->IsSpilledOnlyInDeferredBlocks(data())
4936                             ? cur->Start().ToInstructionIndex()
4937                             : range->spill_start_index();
4938 
4939       if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
4940         TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
4941               range->vreg(), spill_index, safe_point);
4942         map->RecordReference(AllocatedOperand::cast(spill_operand));
4943       }
4944 
4945       if (!cur->spilled()) {
4946         TRACE(
4947             "Pointer in register for range %d:%d (start at %d) "
4948             "at safe point %d\n",
4949             range->vreg(), cur->relative_id(), cur->Start().value(),
4950             safe_point);
4951         InstructionOperand operand = cur->GetAssignedOperand();
4952         DCHECK(!operand.IsStackSlot());
4953         DCHECK(CanBeTaggedOrCompressedPointer(
4954             AllocatedOperand::cast(operand).representation()));
4955         map->RecordReference(AllocatedOperand::cast(operand));
4956       }
4957     }
4958   }
4959 }
4960 
4961 LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
4962     : data_(data) {}
4963 
4964 bool LiveRangeConnector::CanEagerlyResolveControlFlow(
4965     const InstructionBlock* block) const {
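  // Control flow into a block needs no explicit resolution when its single
  // predecessor immediately precedes it in linear order: the live ranges
  // simply continue across the boundary without connecting moves.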
4966   if (block->PredecessorCount() != 1) return false;
4967   return block->predecessors()[0].IsNext(block->rpo_number());
4968 }
4969 
4970 void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
4971   // Lazily linearize live ranges in memory for fast lookup.
4972   LiveRangeFinder finder(data(), local_zone);
4973   ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets();
4974   for (const InstructionBlock* block : code()->instruction_blocks()) {
4975     if (CanEagerlyResolveControlFlow(block)) continue;
4976     BitVector* live = live_in_sets[block->rpo_number().ToInt()];
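    // For every value live on entry to this block, connect the operand it has
    // at the end of each predecessor with the operand it has at the start of
    // this block.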
4977     BitVector::Iterator iterator(live);
4978     while (!iterator.Done()) {
4979       data()->tick_counter()->DoTick();
4980       int vreg = iterator.Current();
4981       LiveRangeBoundArray* array = finder.ArrayFor(vreg);
4982       for (const RpoNumber& pred : block->predecessors()) {
4983         FindResult result;
4984         const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
4985         if (!array->FindConnectableSubranges(block, pred_block, &result)) {
4986           continue;
4987         }
4988         InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
4989         InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
4990         if (pred_op.Equals(cur_op)) continue;
4991         if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) {
4992           // We're doing a reload.
4993           // We don't need to, if:
4994           // 1) there's no register use in this block, and
4995           // 2) the range ends before the block does, and
4996           // 3) we don't have a successor, or the successor is spilled.
4997           LifetimePosition block_start =
4998               LifetimePosition::GapFromInstructionIndex(block->code_start());
4999           LifetimePosition block_end =
5000               LifetimePosition::GapFromInstructionIndex(block->code_end());
5001           const LiveRange* current = result.cur_cover_;
5002           // TODO(herhut): This is not the successor if we have control flow!
5003           const LiveRange* successor = current->next();
5004           if (current->End() < block_end &&
5005               (successor == nullptr || successor->spilled())) {
5006             // Verify point 1: no register use in this block. We can scan to
5007             // the end of the range, since it lies entirely within the block.
5008 
5009             bool uses_reg = false;
5010             for (const UsePosition* use = current->NextUsePosition(block_start);
5011                  use != nullptr; use = use->next()) {
5012               if (use->operand()->IsAnyRegister()) {
5013                 uses_reg = true;
5014                 break;
5015               }
5016             }
5017             if (!uses_reg) continue;
5018           }
5019           if (current->TopLevel()->IsSpilledOnlyInDeferredBlocks(data()) &&
5020               pred_block->IsDeferred()) {
5021             // The spill location should be defined in pred_block, so add
5022             // pred_block to the list of blocks requiring a spill operand.
5023             TRACE("Adding B%d to list of spill blocks for %d\n",
5024                   pred_block->rpo_number().ToInt(),
5025                   current->TopLevel()->vreg());
5026             current->TopLevel()
5027                 ->GetListOfBlocksRequiringSpillOperands(data())
5028                 ->Add(pred_block->rpo_number().ToInt());
5029           }
5030         }
5031         int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
5032         USE(move_loc);
5033         DCHECK_IMPLIES(
5034             result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks(
5035                 data()) &&
5036                 !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()),
5037             code()->GetInstructionBlock(move_loc)->IsDeferred());
5038       }
5039       iterator.Advance();
5040     }
5041   }
5042 
5043   // At this stage, we collected blocks needing a spill operand from
5044   // ConnectRanges and from ResolveControlFlow. Time to commit the spills for
5045   // deferred blocks.
5046   const size_t live_ranges_size = data()->live_ranges().size();
5047   for (TopLevelLiveRange* top : data()->live_ranges()) {
5048     CHECK_EQ(live_ranges_size,
5049              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
5050     if (top == nullptr || top->IsEmpty() ||
5051         !top->IsSpilledOnlyInDeferredBlocks(data()))
5052       continue;
5053     CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()), local_zone);
5054   }
5055 }
5056 
5057 int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
5058                                            const InstructionOperand& cur_op,
5059                                            const InstructionBlock* pred,
5060                                            const InstructionOperand& pred_op) {
5061   DCHECK(!pred_op.Equals(cur_op));
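  // With a single predecessor the connecting move goes at the start of this
  // block; otherwise the edge must have been split, so the predecessor has a
  // single successor and the move can go at the end of that predecessor.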
5062   int gap_index;
5063   Instruction::GapPosition position;
5064   if (block->PredecessorCount() == 1) {
5065     gap_index = block->first_instruction_index();
5066     position = Instruction::START;
5067   } else {
5068     DCHECK_EQ(1, pred->SuccessorCount());
5069     DCHECK(!code()
5070                 ->InstructionAt(pred->last_instruction_index())
5071                 ->HasReferenceMap());
5072     gap_index = pred->last_instruction_index();
5073     position = Instruction::END;
5074   }
5075   data()->AddGapMove(gap_index, position, pred_op, cur_op);
5076   return gap_index;
5077 }
5078 
5079 void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
5080   DelayedInsertionMap delayed_insertion_map(local_zone);
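  // Walk each top-level range's children in order and insert a gap move
  // wherever two consecutive children touch with different assigned operands
  // (moves across block boundaries are handled by ResolveControlFlow).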
5081   const size_t live_ranges_size = data()->live_ranges().size();
5082   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
5083     CHECK_EQ(live_ranges_size,
5084              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
5085     if (top_range == nullptr) continue;
5086     bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks(data());
5087     LiveRange* first_range = top_range;
5088     for (LiveRange *second_range = first_range->next(); second_range != nullptr;
5089          first_range = second_range, second_range = second_range->next()) {
5090       LifetimePosition pos = second_range->Start();
5091       // Add gap move if the two live ranges touch and there is no block
5092       // boundary.
5093       if (second_range->spilled()) continue;
5094       if (first_range->End() != pos) continue;
5095       if (data()->IsBlockBoundary(pos) &&
5096           !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
5097         continue;
5098       }
5099       InstructionOperand prev_operand = first_range->GetAssignedOperand();
5100       InstructionOperand cur_operand = second_range->GetAssignedOperand();
5101       if (prev_operand.Equals(cur_operand)) continue;
5102       bool delay_insertion = false;
5103       Instruction::GapPosition gap_pos;
5104       int gap_index = pos.ToInstructionIndex();
5105       if (connect_spilled && !prev_operand.IsAnyRegister() &&
5106           cur_operand.IsAnyRegister()) {
5107         const InstructionBlock* block = code()->GetInstructionBlock(gap_index);
5108         DCHECK(block->IsDeferred());
5109         // We are performing a reload in this block, so the spill operand
5110         // must be defined in this block as well.
5111         top_range->GetListOfBlocksRequiringSpillOperands(data())->Add(
5112             block->rpo_number().ToInt());
5113       }
5114 
5115       if (pos.IsGapPosition()) {
5116         gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
5117       } else {
5118         if (pos.IsStart()) {
5119           delay_insertion = true;
5120         } else {
5121           gap_index++;
5122         }
5123         gap_pos = delay_insertion ? Instruction::END : Instruction::START;
5124       }
5125       // Reloads or spills for ranges that are spilled only in deferred
5126       // blocks must themselves happen in deferred blocks.
5127       DCHECK_IMPLIES(connect_spilled && !(prev_operand.IsAnyRegister() &&
5128                                           cur_operand.IsAnyRegister()),
5129                      code()->GetInstructionBlock(gap_index)->IsDeferred());
5130 
5131       ParallelMove* move =
5132           code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
5133               gap_pos, code_zone());
5134       if (!delay_insertion) {
5135         move->AddMove(prev_operand, cur_operand);
5136       } else {
5137         delayed_insertion_map.insert(
5138             std::make_pair(std::make_pair(move, prev_operand), cur_operand));
5139       }
5140     }
5141   }
5142   if (delayed_insertion_map.empty()) return;
5143   // Insert all the delayed moves, which must occur after the moves already stored in each ParallelMove.
5144   ZoneVector<MoveOperands*> to_insert(local_zone);
5145   ZoneVector<MoveOperands*> to_eliminate(local_zone);
5146   to_insert.reserve(4);
5147   to_eliminate.reserve(4);
5148   ParallelMove* moves = delayed_insertion_map.begin()->first.first;
5149   for (auto it = delayed_insertion_map.begin();; ++it) {
5150     bool done = it == delayed_insertion_map.end();
5151     if (done || it->first.first != moves) {
5152       // Commit the MoveOperands for current ParallelMove.
5153       for (MoveOperands* move : to_eliminate) {
5154         move->Eliminate();
5155       }
5156       for (MoveOperands* move : to_insert) {
5157         moves->push_back(move);
5158       }
5159       if (done) break;
5160       // Reset state.
5161       to_eliminate.clear();
5162       to_insert.clear();
5163       moves = it->first.first;
5164     }
5165     // Gather all MoveOperands for a single ParallelMove.
5166     MoveOperands* move =
5167         new (code_zone()) MoveOperands(it->first.second, it->second);
5168     moves->PrepareInsertAfter(move, &to_eliminate);
5169     to_insert.push_back(move);
5170   }
5171 }
5172 
5173 void LiveRangeConnector::CommitSpillsInDeferredBlocks(
5174     TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) {
5175   DCHECK(range->IsSpilledOnlyInDeferredBlocks(data()));
5176   DCHECK(!range->spilled());
5177 
5178   InstructionSequence* code = data()->code();
5179   InstructionOperand spill_operand = range->GetSpillRangeOperand();
5180 
5181   TRACE("Live Range %d will be spilled only in deferred blocks.\n",
5182         range->vreg());
5183   // If we have ranges that aren't spilled but require the operand on the stack,
5184   // make sure we insert the spill.
5185   for (const LiveRange* child = range; child != nullptr;
5186        child = child->next()) {
5187     for (const UsePosition* pos = child->first_pos(); pos != nullptr;
5188          pos = pos->next()) {
5189       if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled())
5190         continue;
5191       range->AddBlockRequiringSpillOperand(
5192           code->GetInstructionBlock(pos->pos().ToInstructionIndex())
5193               ->rpo_number(),
5194           data());
5195     }
5196   }
5197 
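  // Work list of deferred blocks in which the value must be available in its
  // spill slot, seeded with the blocks recorded above and during
  // ConnectRanges/ResolveControlFlow.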
5198   ZoneQueue<int> worklist(temp_zone);
5199 
5200   for (BitVector::Iterator iterator(
5201            range->GetListOfBlocksRequiringSpillOperands(data()));
5202        !iterator.Done(); iterator.Advance()) {
5203     worklist.push(iterator.Current());
5204   }
5205 
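  // done_moves prevents emitting the same spill move twice for a block, and
  // done_blocks keeps the backwards walk over deferred predecessors from
  // revisiting blocks, since deferred code may contain loops.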
5206   ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
5207   // Seek the deferred blocks that dominate locations requiring spill operands,
5208   // and spill there. We only need to spill at the start of such blocks.
5209   BitVector done_blocks(
5210       range->GetListOfBlocksRequiringSpillOperands(data())->length(),
5211       temp_zone);
5212   while (!worklist.empty()) {
5213     int block_id = worklist.front();
5214     worklist.pop();
5215     if (done_blocks.Contains(block_id)) continue;
5216     done_blocks.Add(block_id);
5217     InstructionBlock* spill_block =
5218         code->InstructionBlockAt(RpoNumber::FromInt(block_id));
5219 
5220     for (const RpoNumber& pred : spill_block->predecessors()) {
5221       const InstructionBlock* pred_block = code->InstructionBlockAt(pred);
5222 
5223       if (pred_block->IsDeferred()) {
5224         worklist.push(pred_block->rpo_number().ToInt());
5225       } else {
5226         LifetimePosition pred_end =
5227             LifetimePosition::InstructionFromInstructionIndex(
5228                 pred_block->last_instruction_index());
5229 
5230         LiveRangeBound* bound = array->Find(pred_end);
5231 
5232         InstructionOperand pred_op = bound->range_->GetAssignedOperand();
5233 
5234         RpoNumber spill_block_number = spill_block->rpo_number();
5235         if (done_moves.find(std::make_pair(
5236                 spill_block_number, range->vreg())) == done_moves.end()) {
5237           TRACE("Spilling deferred spill for range %d at B%d\n", range->vreg(),
5238                 spill_block_number.ToInt());
5239           data()->AddGapMove(spill_block->first_instruction_index(),
5240                              Instruction::GapPosition::START, pred_op,
5241                              spill_operand);
5242           done_moves.insert(std::make_pair(spill_block_number, range->vreg()));
5243           spill_block->mark_needs_frame();
5244         }
5245       }
5246     }
5247   }
5248 }
5249 
5250 #undef TRACE
5251 #undef TRACE_COND
5252 
5253 }  // namespace compiler
5254 }  // namespace internal
5255 }  // namespace v8
5256