// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/backend/spill-placer.h"

#include "src/base/bits-iterator.h"
#include "src/compiler/backend/register-allocator.h"

namespace v8 {
namespace internal {
namespace compiler {

SpillPlacer::SpillPlacer(LiveRangeFinder* finder,
                         TopTierRegisterAllocationData* data, Zone* zone)
    : finder_(finder), data_(data), zone_(zone) {}

SpillPlacer::~SpillPlacer() {
  if (assigned_indices_ > 0) {
    CommitSpills();
  }
}

void SpillPlacer::Add(TopLevelLiveRange* range) {
  DCHECK(range->HasGeneralSpillRange());
  InstructionOperand spill_operand = range->GetSpillRangeOperand();
  range->FilterSpillMoves(data(), spill_operand);

  InstructionSequence* code = data_->code();
  InstructionBlock* top_start_block =
      code->GetInstructionBlock(range->Start().ToInstructionIndex());
  RpoNumber top_start_block_number = top_start_block->rpo_number();

  // Check for several cases where spilling at the definition is best.
  // - The value is already moved on-stack somehow so the list of insertion
  //   locations for spilling at the definition is empty.
  // - If the first LiveRange is spilled, then there's no sense in doing
  //   anything other than spilling at the definition.
  // - If the value is defined in a deferred block, then the logic to select
  //   the earliest deferred block as the insertion point would cause
  //   incorrect behavior, so the value must be spilled at the definition.
  // - We haven't seen any indication of performance improvements from seeking
  //   optimal spilling positions except on loop-top phi values, so spill
  //   any value that isn't a loop-top phi at the definition to avoid
  //   increasing the code size for no benefit.
  if (range->GetSpillMoveInsertionLocations(data()) == nullptr ||
      range->spilled() || top_start_block->IsDeferred() ||
      (!FLAG_stress_turbo_late_spilling && !range->is_loop_phi())) {
    range->CommitSpillMoves(data(), spill_operand);
    return;
  }

  // Iterate through the range and mark every block that needs the value to be
  // spilled.
  for (const LiveRange* child = range; child != nullptr;
       child = child->next()) {
    if (child->spilled()) {
      // Add every block that contains part of this live range.
      for (UseInterval* interval = child->first_interval(); interval != nullptr;
           interval = interval->next()) {
        RpoNumber start_block =
            code->GetInstructionBlock(interval->start().ToInstructionIndex())
                ->rpo_number();
        if (start_block == top_start_block_number) {
          // Can't do late spilling if the first spill is within the
          // definition block.
          range->CommitSpillMoves(data(), spill_operand);
          // Verify that we never added any data for this range to the table.
          DCHECK(!IsLatestVreg(range->vreg()));
          return;
        }
        LifetimePosition end = interval->end();
        int end_instruction = end.ToInstructionIndex();
        // The end position is exclusive, so an end position exactly on a block
        // boundary indicates that the range applies only to the prior block.
        if (data()->IsBlockBoundary(end)) {
          --end_instruction;
        }
        RpoNumber end_block =
            code->GetInstructionBlock(end_instruction)->rpo_number();
        while (start_block <= end_block) {
          SetSpillRequired(code->InstructionBlockAt(start_block), range->vreg(),
                           top_start_block_number);
          start_block = start_block.Next();
        }
      }
    } else {
      // Add every block that contains a use which requires the on-stack value.
      for (const UsePosition* pos = child->first_pos(); pos != nullptr;
           pos = pos->next()) {
        if (pos->type() != UsePositionType::kRequiresSlot) continue;
        InstructionBlock* block =
            code->GetInstructionBlock(pos->pos().ToInstructionIndex());
        RpoNumber block_number = block->rpo_number();
        if (block_number == top_start_block_number) {
          // Can't do late spilling if the first spill is within the
          // definition block.
          range->CommitSpillMoves(data(), spill_operand);
          // Verify that we never added any data for this range to the table.
          DCHECK(!IsLatestVreg(range->vreg()));
          return;
        }
        SetSpillRequired(block, range->vreg(), top_start_block_number);
      }
    }
  }

  // If we haven't yet marked anything for this range, then it never needs to
  // spill at all.
  if (!IsLatestVreg(range->vreg())) {
    range->SetLateSpillingSelected(true);
    return;
  }

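  // Remember the definition block so that the dataflow passes can choose
  // between spilling there and spilling later.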
  SetDefinition(top_start_block_number, range->vreg());
}

class SpillPlacer::Entry {
 public:
  // Functions operating on single values (during setup):

  void SetSpillRequiredSingleValue(int value_index) {
    DCHECK_LT(value_index, kValueIndicesPerEntry);
    uint64_t bit = uint64_t{1} << value_index;
    SetSpillRequired(bit);
  }
  void SetDefinitionSingleValue(int value_index) {
    DCHECK_LT(value_index, kValueIndicesPerEntry);
    uint64_t bit = uint64_t{1} << value_index;
    SetDefinition(bit);
  }

  // Functions operating on all values simultaneously, as bitfields:

  uint64_t SpillRequired() const { return GetValuesInState<kSpillRequired>(); }
  void SetSpillRequired(uint64_t mask) {
    UpdateValuesToState<kSpillRequired>(mask);
  }
  uint64_t SpillRequiredInNonDeferredSuccessor() const {
    return GetValuesInState<kSpillRequiredInNonDeferredSuccessor>();
  }
  void SetSpillRequiredInNonDeferredSuccessor(uint64_t mask) {
    UpdateValuesToState<kSpillRequiredInNonDeferredSuccessor>(mask);
  }
  uint64_t SpillRequiredInDeferredSuccessor() const {
    return GetValuesInState<kSpillRequiredInDeferredSuccessor>();
  }
  void SetSpillRequiredInDeferredSuccessor(uint64_t mask) {
    UpdateValuesToState<kSpillRequiredInDeferredSuccessor>(mask);
  }
  uint64_t Definition() const { return GetValuesInState<kDefinition>(); }
  void SetDefinition(uint64_t mask) { UpdateValuesToState<kDefinition>(mask); }

 private:
  // Possible states for every value, at every block.
  enum State {
    // This block is not (yet) known to require the on-stack value.
    kUnmarked,

    // The value must be on the stack in this block.
    kSpillRequired,

    // The value doesn't need to be on-stack in this block, but some
    // non-deferred successor needs it.
    kSpillRequiredInNonDeferredSuccessor,

    // The value doesn't need to be on-stack in this block, but some
    // deferred successor needs it.
    kSpillRequiredInDeferredSuccessor,

    // The value is defined in this block.
    kDefinition,
  };

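  // The three-bit State of each value is stored "vertically": bit i of
  // first_bit_, second_bit_, and third_bit_ (declared below) together encode
  // the state of value i, so a single Entry can track up to 64 values.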
  template <State state>
  uint64_t GetValuesInState() const {
    STATIC_ASSERT(state < 8);
    return ((state & 1) ? first_bit_ : ~first_bit_) &
           ((state & 2) ? second_bit_ : ~second_bit_) &
           ((state & 4) ? third_bit_ : ~third_bit_);
  }

  template <State state>
  void UpdateValuesToState(uint64_t mask) {
    STATIC_ASSERT(state < 8);
    first_bit_ =
        Entry::UpdateBitDataWithMask<(state & 1) != 0>(first_bit_, mask);
    second_bit_ =
        Entry::UpdateBitDataWithMask<(state & 2) != 0>(second_bit_, mask);
    third_bit_ =
        Entry::UpdateBitDataWithMask<(state & 4) != 0>(third_bit_, mask);
  }

  template <bool set_ones>
  static uint64_t UpdateBitDataWithMask(uint64_t data, uint64_t mask) {
    return set_ones ? data | mask : data & ~mask;
  }

  // Storage for the states of up to 64 live ranges.
  uint64_t first_bit_ = 0;
  uint64_t second_bit_ = 0;
  uint64_t third_bit_ = 0;
};

int SpillPlacer::GetOrCreateIndexForLatestVreg(int vreg) {
  DCHECK_LE(assigned_indices_, kValueIndicesPerEntry);
  // If this vreg isn't yet the last one in the list, then add it.
  if (!IsLatestVreg(vreg)) {
    if (vreg_numbers_ == nullptr) {
      DCHECK_EQ(assigned_indices_, 0);
      DCHECK_EQ(entries_, nullptr);
      // We lazily allocate these arrays because many functions don't have any
      // values that use SpillPlacer.
      entries_ =
          zone_->NewArray<Entry>(data()->code()->instruction_blocks().size());
      for (size_t i = 0; i < data()->code()->instruction_blocks().size(); ++i) {
        new (&entries_[i]) Entry();
      }
      vreg_numbers_ = zone_->NewArray<int>(kValueIndicesPerEntry);
    }

    if (assigned_indices_ == kValueIndicesPerEntry) {
      // The table is full; commit the current set of values and clear it.
      CommitSpills();
      ClearData();
    }

    vreg_numbers_[assigned_indices_] = vreg;
    ++assigned_indices_;
  }
  return assigned_indices_ - 1;
}

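// Spill placement runs as three dataflow passes over the blocks marked via
// Add(): a backward pass that propagates spill requirements up from
// successors, a forward pass that resolves merge points using predecessor
// information, and a second backward pass that commits the chosen spill moves
// at definitions or on block boundaries.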
void SpillPlacer::CommitSpills() {
  FirstBackwardPass();
  ForwardPass();
  SecondBackwardPass();
}

void SpillPlacer::ClearData() {
  assigned_indices_ = 0;
  for (int i = 0; i < data()->code()->InstructionBlockCount(); ++i) {
    new (&entries_[i]) Entry();
  }
  first_block_ = RpoNumber::Invalid();
  last_block_ = RpoNumber::Invalid();
}

void SpillPlacer::ExpandBoundsToInclude(RpoNumber block) {
  if (!first_block_.IsValid()) {
    DCHECK(!last_block_.IsValid());
    first_block_ = block;
    last_block_ = block;
  } else {
    if (first_block_ > block) {
      first_block_ = block;
    }
    if (last_block_ < block) {
      last_block_ = block;
    }
  }
}

void SpillPlacer::SetSpillRequired(InstructionBlock* block, int vreg,
                                   RpoNumber top_start_block) {
  // Spilling in loops is bad, so if the block is non-deferred and nested
  // within a loop, and the definition is before that loop, then mark the loop
  // top instead. Of course we must find the outermost such loop.
  if (!block->IsDeferred()) {
    while (block->loop_header().IsValid() &&
           block->loop_header() > top_start_block) {
      block = data()->code()->InstructionBlockAt(block->loop_header());
    }
  }

  int value_index = GetOrCreateIndexForLatestVreg(vreg);
  entries_[block->rpo_number().ToSize()].SetSpillRequiredSingleValue(
      value_index);
  ExpandBoundsToInclude(block->rpo_number());
}

void SpillPlacer::SetDefinition(RpoNumber block, int vreg) {
  int value_index = GetOrCreateIndexForLatestVreg(vreg);
  entries_[block.ToSize()].SetDefinitionSingleValue(value_index);
  ExpandBoundsToInclude(block);
}

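// Backward pass: for each block, record which values must be on the stack in
// some deferred successor and which in some non-deferred successor, without
// overriding the block's own definition or spill-required marks.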
void SpillPlacer::FirstBackwardPass() {
  InstructionSequence* code = data()->code();

  for (int i = last_block_.ToInt(); i >= first_block_.ToInt(); --i) {
    RpoNumber block_id = RpoNumber::FromInt(i);
    InstructionBlock* block = code->instruction_blocks()[i];

    Entry& entry = entries_[i];

    // State that will be accumulated from successors.
    uint64_t spill_required_in_non_deferred_successor = 0;
    uint64_t spill_required_in_deferred_successor = 0;

    for (RpoNumber successor_id : block->successors()) {
      // Ignore loop back-edges.
      if (successor_id <= block_id) continue;

      InstructionBlock* successor = code->InstructionBlockAt(successor_id);
      const Entry& successor_entry = entries_[successor_id.ToSize()];
      if (successor->IsDeferred()) {
        spill_required_in_deferred_successor |= successor_entry.SpillRequired();
      } else {
        spill_required_in_non_deferred_successor |=
            successor_entry.SpillRequired();
      }
      spill_required_in_deferred_successor |=
          successor_entry.SpillRequiredInDeferredSuccessor();
      spill_required_in_non_deferred_successor |=
          successor_entry.SpillRequiredInNonDeferredSuccessor();
    }

    // Starting state of the current block.
    uint64_t defs = entry.Definition();
    uint64_t needs_spill = entry.SpillRequired();

    // Info about successors doesn't get to override existing info about
    // definitions and spills required by this block itself.
    spill_required_in_deferred_successor &= ~(defs | needs_spill);
    spill_required_in_non_deferred_successor &= ~(defs | needs_spill);

    entry.SetSpillRequiredInDeferredSuccessor(
        spill_required_in_deferred_successor);
    entry.SetSpillRequiredInNonDeferredSuccessor(
        spill_required_in_non_deferred_successor);
  }
}

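// Forward pass: at each non-deferred block, combine the spill requirements of
// its non-deferred predecessors to decide whether the block itself needs the
// value on the stack, so that no non-deferred control-flow path spills twice.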
void SpillPlacer::ForwardPass() {
  InstructionSequence* code = data()->code();
  for (int i = first_block_.ToInt(); i <= last_block_.ToInt(); ++i) {
    RpoNumber block_id = RpoNumber::FromInt(i);
    InstructionBlock* block = code->instruction_blocks()[i];

    // Deferred blocks don't need to participate in the forward pass, because
    // their spills all get pulled forward to the earliest possible deferred
    // block (where a non-deferred block jumps to a deferred block), and
    // decisions about spill requirements for non-deferred blocks don't take
    // deferred blocks into account.
    if (block->IsDeferred()) continue;

    Entry& entry = entries_[i];

    // State that will be accumulated from predecessors.
    uint64_t spill_required_in_non_deferred_predecessor = 0;
    uint64_t spill_required_in_all_non_deferred_predecessors =
        static_cast<uint64_t>(int64_t{-1});

    for (RpoNumber predecessor_id : block->predecessors()) {
      // Ignore loop back-edges.
      if (predecessor_id >= block_id) continue;

      InstructionBlock* predecessor = code->InstructionBlockAt(predecessor_id);
      if (predecessor->IsDeferred()) continue;
      const Entry& predecessor_entry = entries_[predecessor_id.ToSize()];
      spill_required_in_non_deferred_predecessor |=
          predecessor_entry.SpillRequired();
      spill_required_in_all_non_deferred_predecessors &=
          predecessor_entry.SpillRequired();
    }

    // Starting state of the current block.
    uint64_t spill_required_in_non_deferred_successor =
        entry.SpillRequiredInNonDeferredSuccessor();
    uint64_t spill_required_in_any_successor =
        spill_required_in_non_deferred_successor |
        entry.SpillRequiredInDeferredSuccessor();

    // If all of the predecessors agree that a spill is required, then a
    // spill is required. Note that we don't set anything for values that
    // currently have no markings in this block, to avoid pushing data too
    // far down the graph and confusing the next backward pass.
    entry.SetSpillRequired(spill_required_in_any_successor &
                           spill_required_in_non_deferred_predecessor &
                           spill_required_in_all_non_deferred_predecessors);

    // If only some of the predecessors require a spill, but some successor
    // of this block also requires a spill, then this merge point requires a
    // spill. This ensures that no control-flow path through non-deferred
    // blocks ever has to spill twice.
    entry.SetSpillRequired(spill_required_in_non_deferred_successor &
                           spill_required_in_non_deferred_predecessor);
  }
}

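// Second backward pass: spill at the definition when every non-deferred
// successor needs the value on the stack, propagate remaining requirements
// upward, and insert spill moves on edges into blocks whose requirement is
// not already covered by this block.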
void SpillPlacer::SecondBackwardPass() {
  InstructionSequence* code = data()->code();
  for (int i = last_block_.ToInt(); i >= first_block_.ToInt(); --i) {
    RpoNumber block_id = RpoNumber::FromInt(i);
    InstructionBlock* block = code->instruction_blocks()[i];

    Entry& entry = entries_[i];

    // State that will be accumulated from successors.
    uint64_t spill_required_in_non_deferred_successor = 0;
    uint64_t spill_required_in_deferred_successor = 0;
    uint64_t spill_required_in_all_non_deferred_successors =
        static_cast<uint64_t>(int64_t{-1});

    for (RpoNumber successor_id : block->successors()) {
      // Ignore loop back-edges.
      if (successor_id <= block_id) continue;

      InstructionBlock* successor = code->InstructionBlockAt(successor_id);
      const Entry& successor_entry = entries_[successor_id.ToSize()];
      if (successor->IsDeferred()) {
        spill_required_in_deferred_successor |= successor_entry.SpillRequired();
      } else {
        spill_required_in_non_deferred_successor |=
            successor_entry.SpillRequired();
        spill_required_in_all_non_deferred_successors &=
            successor_entry.SpillRequired();
      }
    }

    // Starting state of the current block.
    uint64_t defs = entry.Definition();

    // If all of the successors of a definition need the value to be
    // spilled, then the value should be spilled at the definition.
    uint64_t spill_at_def = defs & spill_required_in_non_deferred_successor &
                            spill_required_in_all_non_deferred_successors;
    for (int index_to_spill : base::bits::IterateBits(spill_at_def)) {
      int vreg_to_spill = vreg_numbers_[index_to_spill];
      TopLevelLiveRange* top = data()->live_ranges()[vreg_to_spill];
      top->CommitSpillMoves(data(), top->GetSpillRangeOperand());
    }

    if (block->IsDeferred()) {
      DCHECK_EQ(defs, 0);
      // Any deferred successor needing a spill is sufficient to make the
      // current block need a spill.
      entry.SetSpillRequired(spill_required_in_deferred_successor);
    }

    // Propagate data upward if there are non-deferred successors and they
    // all need a spill, regardless of whether the current block is
    // deferred.
    entry.SetSpillRequired(~defs & spill_required_in_non_deferred_successor &
                           spill_required_in_all_non_deferred_successors);

    // Iterate the successors again to find out which ones require spills at
    // their beginnings, and insert those spills.
    for (RpoNumber successor_id : block->successors()) {
      // Ignore loop back-edges.
      if (successor_id <= block_id) continue;

      InstructionBlock* successor = code->InstructionBlockAt(successor_id);
      const Entry& successor_entry = entries_[successor_id.ToSize()];
      for (int index_to_spill :
           base::bits::IterateBits(successor_entry.SpillRequired() &
                                   ~entry.SpillRequired() & ~spill_at_def)) {
        CommitSpill(vreg_numbers_[index_to_spill], block, successor);
      }
    }
  }
}

void SpillPlacer::CommitSpill(int vreg, InstructionBlock* predecessor,
                              InstructionBlock* successor) {
  TopLevelLiveRange* top = data()->live_ranges()[vreg];
  LiveRangeBoundArray* array = finder_->ArrayFor(vreg);
  LifetimePosition pred_end = LifetimePosition::InstructionFromInstructionIndex(
      predecessor->last_instruction_index());
  LiveRangeBound* bound = array->Find(pred_end);
  InstructionOperand pred_op = bound->range_->GetAssignedOperand();
  DCHECK(pred_op.IsAnyRegister());
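  // The successor has exactly one predecessor, so a gap move at its start
  // applies only along this control-flow edge.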
  DCHECK_EQ(successor->PredecessorCount(), 1);
  data()->AddGapMove(successor->first_instruction_index(),
                     Instruction::GapPosition::START, pred_op,
                     top->GetSpillRangeOperand());
  successor->mark_needs_frame();
  top->SetLateSpillingSelected(true);
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8